diff --git a/.dockerignore b/.dockerignore
index 42e8a818a418..d51b5556178f 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -46,11 +46,11 @@ packages/beacon-node/mainnet_pubkeys.csv
# Autogenerated docs
packages/**/docs
packages/**/typedocs
-docs/packages
-docs/contributing.md
-docs/assets
-docs/reference/cli.md
-/site
+docs/pages/**/*-cli.md
+docs/pages/assets
+docs/pages/api/api-reference.md
+docs/pages/contribution/getting-started.md
+docs/site
# Lodestar artifacts
.lodestar
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index a19def8e72de..6e27a89c3044 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -4,21 +4,41 @@ on:
push:
branches:
- stable
+ workflow_dispatch:
+ inputs:
+ ref:
+ description: 'Ref to deploy, defaults to `unstable`'
+ required: false
+ default: 'unstable'
+ type: string
jobs:
docs:
runs-on: buildjet-4vcpu-ubuntu-2204
+ env:
+ DEPLOY_REF: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.ref || 'stable' }}
steps:
- # - Uses YAML anchors in the future
+ # Log out the ref being deployed
+ - name: Log Deployment Ref
+ if: github.event_name == 'workflow_dispatch'
+ run: |
+ echo "Deploying ref: $DEPLOY_REF"
+
+ # Checkout the correct ref being deployed
- uses: actions/checkout@v3
+ with:
+ ref: ${{ env.DEPLOY_REF }}
+
- uses: actions/setup-node@v3
with:
node-version: 20
check-latest: true
cache: yarn
+
- name: Node.js version
id: node
run: echo "v8CppApiVersion=$(node --print "process.versions.modules")" >> $GITHUB_OUTPUT
+
- name: Restore dependencies
uses: actions/cache@master
id: cache-deps
@@ -27,13 +47,14 @@ jobs:
node_modules
packages/*/node_modules
key: ${{ runner.os }}-${{ steps.node.outputs.v8CppApiVersion }}-${{ hashFiles('**/yarn.lock', '**/package.json') }}
+
- name: Install & build
if: steps.cache-deps.outputs.cache-hit != 'true'
run: yarn install --frozen-lockfile && yarn build
+
- name: Build
run: yarn build
if: steps.cache-deps.outputs.cache-hit == 'true'
- #
- name: Build and collect docs
run: yarn build:docs
@@ -45,15 +66,17 @@ jobs:
uses: actions/setup-python@v1
- name: Install dependencies
+ working-directory: docs
run: |
python -m pip install --upgrade pip
- pip install -r docs/requirements.txt
+ pip install -r requirements.txt
- name: Build docs
- run: mkdocs build --site-dir site -v --clean
+ working-directory: docs
+ run: mkdocs build --verbose --clean --site-dir site
- name: Deploy
uses: peaceiris/actions-gh-pages@v3
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
- publish_dir: ./site
+ publish_dir: ./docs/site
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index b1b305ca49ff..43ceee898d85 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -270,7 +270,10 @@ jobs:
packages/*/.git-data.json
key: ${{ runner.os }}-node-${{ matrix.node }}-${{ github.sha }}
fail-on-cache-miss: true
-
- name: Install Chromedriver
+ run: npx @puppeteer/browsers install chromedriver@latest --path /tmp
+ - name: Install Firefox browser
+ run: npx @puppeteer/browsers install firefox@latest --path /tmp
- name: Browser tests
run: |
export DISPLAY=':99.0'
diff --git a/.gitignore b/.gitignore
index ce1ec6074979..a0deed473c4a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -40,11 +40,15 @@ packages/api/oapi-schemas
# Autogenerated docs
packages/**/docs
packages/**/typedocs
-docs/assets
-docs/packages
-docs/reference
-docs/contributing.md
-/site
+docs/pages/**/*-cli.md
+docs/pages/assets
+docs/pages/images
+docs/pages/security.md
+docs/pages/lightclient-prover/lightclient.md
+docs/pages/lightclient-prover/prover.md
+docs/pages/api/api-reference.md
+docs/pages/contribution/getting-started.md
+docs/site
# Testnet artifacts
.lodestar
diff --git a/.wordlist.txt b/.wordlist.txt
index b7cff203f57c..42510b175a07 100644
--- a/.wordlist.txt
+++ b/.wordlist.txt
@@ -1,14 +1,19 @@
APIs
+Andreas
+Antonopoulos
AssemblyScript
BLS
BeaconNode
Besu
+Buterin
CLA
CLI
CTRL
+Casper
Chai
ChainSafe
Customizations
+DPoS
Discv
DockerHub
Dockerized
@@ -19,22 +24,33 @@ ENR
ENRs
ESLint
ETH
+Edgington
Erigon
EthStaker
+EtherScan
Ethereum
+EthereumJS
+FINDNODE
FX
Flamegraph
Flamegraphs
+Geth
Github
Gossipsub
Grafana
HackMD
+Homebrew
+IPFS
IPv
Infura
JSON
+JSObjects
JWT
+KDE
LGPL
LGPLv
+LMD
+LPoS
LTS
Lerna
MEV
@@ -45,10 +61,12 @@ NVM
Nethermind
NodeJS
NodeSource
+OSI
PR
PRs
Plaintext
PoS
+Prysm
Quickstart
RPC
SHA
@@ -57,64 +75,102 @@ SSZ
Stakehouse
TOC
TTD
+Teku
TypeScript
UI
UID
+UPnP
UTF
VM
Vitalik
Wagyu
api
async
+backfill
beaconcha
+blockchain
bootnode
bootnodes
chainConfig
chainsafe
+chiado
cli
cmd
+codebase
config
configs
const
constantish
coreutils
cors
+cryptocurrency
cryptographic
dApp
dApps
+ddos
decrypt
deserialization
+dev
devnet
devnets
+devtools
+eg
+enodes
enum
+env
envs
+ephemery
flamegraph
flamegraphs
+gnosis
goerli
+heapdump
+heaptrack
+holesky
interop
+js
keypair
keystore
keystores
+libp
lightclient
linter
+lldb
+llnode
lockfile
mainnet
+malloc
mdns
merkle
merkleization
monorepo
+multiaddr
+multifork
namespace
namespaced
namespaces
nodemodule
+orchestrator
+osx
overriden
params
+pid
plaintext
+pre
+premined
produceBlockV
+protolambda
prover
+repo
+repos
req
reqresp
+responder
+ropsten
runtime
+scalability
+secp
+sepolia
sharding
ssz
stakers
@@ -131,4 +187,6 @@ utils
validator
validators
wip
+xcode
yaml
+yamux
diff --git a/dashboards/lodestar_block_processor.json b/dashboards/lodestar_block_processor.json
index d1a856f2f71d..8e68d611cc0d 100644
--- a/dashboards/lodestar_block_processor.json
+++ b/dashboards/lodestar_block_processor.json
@@ -110,6 +110,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -192,6 +193,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -276,6 +278,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 2,
"pointSize": 5,
@@ -359,6 +362,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 2,
"pointSize": 5,
@@ -442,6 +446,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 2,
"pointSize": 5,
@@ -525,6 +530,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -607,6 +613,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -758,7 +765,7 @@
"reverse": false
}
},
- "pluginVersion": "9.3.2",
+ "pluginVersion": "10.1.1",
"targets": [
{
"datasource": {
@@ -862,7 +869,7 @@
"reverse": false
}
},
- "pluginVersion": "9.3.2",
+ "pluginVersion": "10.1.1",
"targets": [
{
"datasource": {
@@ -942,7 +949,7 @@
"reverse": false
}
},
- "pluginVersion": "9.3.2",
+ "pluginVersion": "10.1.1",
"targets": [
{
"datasource": {
@@ -987,6 +994,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 4,
@@ -1072,6 +1080,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -1148,22 +1157,22 @@
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
- "fillOpacity": 22,
- "gradientMode": "opacity",
+ "fillOpacity": 0,
+ "gradientMode": "none",
"hideFrom": {
- "graph": false,
"legend": false,
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
- "pointSize": 4,
+ "pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
- "showPoints": "never",
- "spanNulls": true,
+ "showPoints": "auto",
+ "spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
@@ -1183,21 +1192,19 @@
"x": 0,
"y": 50
},
- "id": 524,
+ "id": 534,
"options": {
- "graph": {},
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
- "showLegend": false
+ "showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "none"
}
},
- "pluginVersion": "7.4.5",
"targets": [
{
"datasource": {
@@ -1205,15 +1212,14 @@
"uid": "${DS_PROMETHEUS}"
},
"editorMode": "code",
- "exemplar": false,
- "expr": "rate(lodestar_stfn_epoch_transition_commit_seconds_sum[$rate_interval])\n/\nrate(lodestar_stfn_epoch_transition_commit_seconds_count[$rate_interval])",
- "interval": "",
- "legendFormat": "epoch transition",
+ "expr": "rate(lodestar_stfn_epoch_transition_step_seconds_sum[$rate_interval])\n/\nrate(lodestar_stfn_epoch_transition_step_seconds_count[$rate_interval])",
+ "instant": false,
+ "legendFormat": "{{step}}",
"range": true,
"refId": "A"
}
],
- "title": "Epoch transition commit step avg time",
+ "title": "Epoch Transition By Steps",
"type": "timeseries"
},
{
@@ -1241,6 +1247,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -1325,9 +1332,10 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
- "pointSize": 5,
+ "pointSize": 4,
"scaleDistribution": {
"type": "linear"
},
@@ -1342,25 +1350,9 @@
}
},
"mappings": [],
- "unit": "percentunit"
+ "unit": "s"
},
- "overrides": [
- {
- "matcher": {
- "id": "byName",
- "options": "process block time"
- },
- "properties": [
- {
- "id": "color",
- "value": {
- "fixedColor": "orange",
- "mode": "fixed"
- }
- }
- ]
- }
- ]
+ "overrides": []
},
"gridPos": {
"h": 8,
@@ -1368,7 +1360,7 @@
"x": 0,
"y": 58
},
- "id": 122,
+ "id": 524,
"options": {
"graph": {},
"legend": {
@@ -1389,14 +1381,16 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
+ "editorMode": "code",
"exemplar": false,
- "expr": "rate(lodestar_stfn_epoch_transition_seconds_sum[13m])",
+ "expr": "rate(lodestar_stfn_epoch_transition_commit_seconds_sum[$rate_interval])\n/\nrate(lodestar_stfn_epoch_transition_commit_seconds_count[$rate_interval])",
"interval": "",
- "legendFormat": "process block time",
+ "legendFormat": "epoch transition",
+ "range": true,
"refId": "A"
}
],
- "title": "Epoch transition utilization rate",
+ "title": "Epoch transition commit step avg time",
"type": "timeseries"
},
{
@@ -1424,6 +1418,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -1523,6 +1518,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -1540,20 +1536,19 @@
}
},
"mappings": [],
- "min": 0,
- "unit": "none"
+ "unit": "percentunit"
},
"overrides": [
{
"matcher": {
"id": "byName",
- "options": "number of epoch transition"
+ "options": "process block time"
},
"properties": [
{
"id": "color",
"value": {
- "fixedColor": "yellow",
+ "fixedColor": "orange",
"mode": "fixed"
}
}
@@ -1567,7 +1562,7 @@
"x": 0,
"y": 66
},
- "id": 124,
+ "id": 122,
"options": {
"graph": {},
"legend": {
@@ -1589,13 +1584,13 @@
"uid": "${DS_PROMETHEUS}"
},
"exemplar": false,
- "expr": "384 * rate(lodestar_stfn_epoch_transition_seconds_count[13m])",
+ "expr": "rate(lodestar_stfn_epoch_transition_seconds_sum[13m])",
"interval": "",
- "legendFormat": "number of epoch transition",
+ "legendFormat": "process block time",
"refId": "A"
}
],
- "title": "Epoch transitions / epoch",
+ "title": "Epoch transition utilization rate",
"type": "timeseries"
},
{
@@ -1623,6 +1618,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -1722,6 +1718,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -1739,9 +1736,26 @@
}
},
"mappings": [],
- "unit": "s"
+ "min": 0,
+ "unit": "none"
},
- "overrides": []
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "number of epoch transition"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
},
"gridPos": {
"h": 8,
@@ -1749,7 +1763,7 @@
"x": 0,
"y": 74
},
- "id": 526,
+ "id": 124,
"options": {
"graph": {},
"legend": {
@@ -1770,15 +1784,14 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
- "editorMode": "code",
- "expr": "rate(lodestar_stfn_hash_tree_root_seconds_sum[$rate_interval])\n/ on(source)\nrate(lodestar_stfn_hash_tree_root_seconds_count[$rate_interval])",
+ "exemplar": false,
+ "expr": "384 * rate(lodestar_stfn_epoch_transition_seconds_count[13m])",
"interval": "",
- "legendFormat": "__auto",
- "range": true,
+ "legendFormat": "number of epoch transition",
"refId": "A"
}
],
- "title": "State hash_tree_root avg time",
+ "title": "Epoch transitions / epoch",
"type": "timeseries"
},
{
@@ -1806,6 +1819,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -1901,6 +1915,91 @@
"title": "State SSZ cache miss rate on preState",
"type": "timeseries"
},
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 22,
+ "gradientMode": "opacity",
+ "hideFrom": {
+ "graph": false,
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 82
+ },
+ "id": 526,
+ "options": {
+ "graph": {},
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": false
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "7.4.5",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "rate(lodestar_stfn_hash_tree_root_seconds_sum[$rate_interval])\n/ on(source)\nrate(lodestar_stfn_hash_tree_root_seconds_count[$rate_interval])",
+ "interval": "",
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "State hash_tree_root avg time",
+ "type": "timeseries"
+ },
{
"collapsed": false,
"datasource": {
@@ -1911,7 +2010,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 82
+ "y": 90
},
"id": 92,
"panels": [],
@@ -1936,7 +2035,7 @@
"h": 3,
"w": 24,
"x": 0,
- "y": 83
+ "y": 91
},
"id": 154,
"options": {
@@ -1948,7 +2047,7 @@
"content": "Verifies signature sets in a thread pool of workers. Must ensure that signatures are verified fast and efficiently.",
"mode": "markdown"
},
- "pluginVersion": "9.3.2",
+ "pluginVersion": "10.1.1",
"targets": [
{
"datasource": {
@@ -1989,6 +2088,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -2014,7 +2114,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 86
+ "y": 94
},
"id": 94,
"options": {
@@ -2069,6 +2169,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -2093,7 +2194,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 86
+ "y": 94
},
"id": 519,
"options": {
@@ -2150,6 +2251,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -2175,7 +2277,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 94
+ "y": 102
},
"id": 151,
"options": {
@@ -2236,6 +2338,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -2261,7 +2364,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 94
+ "y": 102
},
"id": 96,
"options": {
@@ -2322,6 +2425,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -2347,7 +2451,7 @@
"h": 5,
"w": 12,
"x": 0,
- "y": 102
+ "y": 110
},
"id": 150,
"options": {
@@ -2408,6 +2512,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -2433,7 +2538,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 102
+ "y": 110
},
"id": 95,
"options": {
@@ -2494,6 +2599,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -2520,7 +2626,7 @@
"h": 6,
"w": 12,
"x": 0,
- "y": 107
+ "y": 115
},
"id": 148,
"options": {
@@ -2591,6 +2697,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -2616,7 +2723,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 110
+ "y": 118
},
"id": 147,
"options": {
@@ -2677,6 +2784,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -2702,7 +2810,7 @@
"h": 5,
"w": 12,
"x": 0,
- "y": 113
+ "y": 121
},
"id": 98,
"options": {
@@ -2759,6 +2867,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -2800,7 +2909,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 117
+ "y": 125
},
"id": 153,
"options": {
@@ -2870,6 +2979,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -2895,7 +3005,7 @@
"h": 6,
"w": 12,
"x": 0,
- "y": 118
+ "y": 126
},
"id": 97,
"options": {
@@ -2937,7 +3047,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 124
+ "y": 132
},
"id": 309,
"panels": [],
@@ -2977,6 +3087,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -3032,7 +3143,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 125
+ "y": 133
},
"id": 305,
"options": {
@@ -3088,6 +3199,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -3128,7 +3240,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 125
+ "y": 133
},
"id": 307,
"options": {
@@ -3195,6 +3307,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -3219,7 +3332,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 133
+ "y": 141
},
"id": 335,
"options": {
@@ -3286,6 +3399,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -3310,7 +3424,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 133
+ "y": 141
},
"id": 334,
"options": {
@@ -3351,7 +3465,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 141
+ "y": 149
},
"id": 136,
"panels": [],
@@ -3393,6 +3507,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -3418,7 +3533,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 142
+ "y": 150
},
"id": 130,
"options": {
@@ -3477,6 +3592,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -3517,7 +3633,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 142
+ "y": 150
},
"id": 140,
"options": {
@@ -3577,6 +3693,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -3618,7 +3735,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 150
+ "y": 158
},
"id": 132,
"options": {
@@ -3701,6 +3818,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineStyle": {
"fill": "solid"
@@ -3745,7 +3863,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 150
+ "y": 158
},
"id": 138,
"options": {
@@ -3817,6 +3935,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -3866,7 +3985,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 158
+ "y": 166
},
"id": 531,
"options": {
@@ -3957,6 +4076,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -3981,7 +4101,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 158
+ "y": 166
},
"id": 533,
"options": {
@@ -4026,7 +4146,7 @@
}
],
"refresh": "10s",
- "schemaVersion": 37,
+ "schemaVersion": 38,
"style": "dark",
"tags": [
"lodestar"
diff --git a/dashboards/lodestar_bls_thread_pool.json b/dashboards/lodestar_bls_thread_pool.json
index a8021ace1102..160312a92d57 100644
--- a/dashboards/lodestar_bls_thread_pool.json
+++ b/dashboards/lodestar_bls_thread_pool.json
@@ -13,7 +13,10 @@
"list": [
{
"builtIn": 1,
- "datasource": "-- Grafana --",
+ "datasource": {
+ "type": "datasource",
+ "uid": "grafana"
+ },
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
@@ -32,7 +35,6 @@
"fiscalYearStartMonth": 0,
"graphTooltip": 1,
"id": null,
- "iteration": 1661342107287,
"links": [
{
"asDropdown": true,
@@ -53,6 +55,10 @@
"panels": [
{
"collapsed": false,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"gridPos": {
"h": 1,
"w": 24,
@@ -61,10 +67,23 @@
},
"id": 92,
"panels": [],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "refId": "A"
+ }
+ ],
"title": "BLS worker pool",
"type": "row"
},
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"gridPos": {
"h": 3,
"w": 24,
@@ -73,12 +92,21 @@
},
"id": 154,
"options": {
+ "code": {
+ "language": "plaintext",
+ "showLineNumbers": false,
+ "showMiniMap": false
+ },
"content": "Verifies signature sets in a thread pool of workers. Must ensure that signatures are verified fast and efficiently.",
"mode": "markdown"
},
- "pluginVersion": "8.4.2",
+ "pluginVersion": "10.1.1",
"targets": [
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"expr": "rate(lodestar_bls_thread_pool_time_seconds_sum[$rate_interval])",
"interval": "",
"legendFormat": "{{workerId}}",
@@ -89,6 +117,10 @@
"type": "text"
},
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "Utilization rate = total CPU time per worker per second. Graph is stacked. This ratios should be high since BLS verification is the limiting factor in the node's throughput.",
"fieldConfig": {
"defaults": {
@@ -96,6 +128,8 @@
"mode": "palette-classic"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
@@ -107,6 +141,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -139,7 +174,8 @@
"legend": {
"calcs": [],
"displayMode": "list",
- "placement": "bottom"
+ "placement": "bottom",
+ "showLegend": true
},
"tooltip": {
"mode": "multi",
@@ -149,6 +185,10 @@
"pluginVersion": "8.4.0-beta1",
"targets": [
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"expr": "rate(lodestar_bls_thread_pool_time_seconds_sum[$rate_interval])",
"interval": "",
"legendFormat": "{{workerId}}",
@@ -159,12 +199,18 @@
"type": "timeseries"
},
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
@@ -176,6 +222,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -207,7 +254,8 @@
"legend": {
"calcs": [],
"displayMode": "list",
- "placement": "bottom"
+ "placement": "bottom",
+ "showLegend": true
},
"tooltip": {
"mode": "single",
@@ -231,6 +279,10 @@
"type": "timeseries"
},
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "Average sync time to validate a single signature set. Note that the set may have been verified in batch. In most normal hardware this value should be ~1-2ms",
"fieldConfig": {
"defaults": {
@@ -238,6 +290,8 @@
"mode": "palette-classic"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
@@ -250,6 +304,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -282,8 +337,9 @@
"graph": {},
"legend": {
"calcs": [],
- "displayMode": "hidden",
- "placement": "bottom"
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": false
},
"tooltip": {
"mode": "single",
@@ -296,6 +352,10 @@
"pluginVersion": "7.4.5",
"targets": [
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"expr": "sum(rate(lodestar_bls_thread_pool_time_seconds_sum[$rate_interval]))/sum(rate(lodestar_bls_thread_pool_success_jobs_signature_sets_count[$rate_interval]))",
"interval": "",
"legendFormat": "pool",
@@ -306,6 +366,10 @@
"type": "timeseries"
},
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "Raw throughput of the thread pool. How many individual signature sets are successfully validated per second",
"fieldConfig": {
"defaults": {
@@ -313,6 +377,8 @@
"mode": "palette-classic"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
@@ -325,6 +391,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -357,8 +424,9 @@
"graph": {},
"legend": {
"calcs": [],
- "displayMode": "hidden",
- "placement": "bottom"
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": false
},
"tooltip": {
"mode": "single",
@@ -371,6 +439,10 @@
"pluginVersion": "7.4.5",
"targets": [
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"expr": "rate(lodestar_bls_thread_pool_success_jobs_signature_sets_count[$rate_interval])",
"interval": "",
"legendFormat": "pool",
@@ -381,6 +453,10 @@
"type": "timeseries"
},
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "Total length of the job queue. Note: this queue is not bounded",
"fieldConfig": {
"defaults": {
@@ -388,6 +464,8 @@
"mode": "palette-classic"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
@@ -400,6 +478,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -432,8 +511,9 @@
"graph": {},
"legend": {
"calcs": [],
- "displayMode": "hidden",
- "placement": "bottom"
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": false
},
"tooltip": {
"mode": "single",
@@ -446,6 +526,10 @@
"pluginVersion": "7.4.5",
"targets": [
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"expr": "lodestar_bls_thread_pool_queue_length",
"interval": "",
"legendFormat": "pool",
@@ -456,6 +540,10 @@
"type": "timeseries"
},
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "How much async time job spent waiting in the job queue before being picked up. This number should be really low <100ms to ensure signatures are validated fast.",
"fieldConfig": {
"defaults": {
@@ -463,6 +551,8 @@
"mode": "palette-classic"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
@@ -475,6 +565,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -507,8 +598,9 @@
"graph": {},
"legend": {
"calcs": [],
- "displayMode": "hidden",
- "placement": "bottom"
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": false
},
"tooltip": {
"mode": "single",
@@ -521,6 +613,10 @@
"pluginVersion": "7.4.5",
"targets": [
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"expr": "rate(lodestar_bls_thread_pool_queue_job_wait_time_seconds_sum[$rate_interval])/rate(lodestar_bls_thread_pool_queue_job_wait_time_seconds_count[$rate_interval])",
"interval": "",
"legendFormat": "pool",
@@ -531,6 +627,10 @@
"type": "timeseries"
},
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "Async time from sending a message to the worker and the worker receiving it.",
"fieldConfig": {
"defaults": {
@@ -538,6 +638,8 @@
"mode": "palette-classic"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
@@ -550,6 +652,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -584,7 +687,8 @@
"legend": {
"calcs": [],
"displayMode": "list",
- "placement": "bottom"
+ "placement": "bottom",
+ "showLegend": true
},
"tooltip": {
"mode": "multi",
@@ -621,6 +725,10 @@
"type": "timeseries"
},
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "What percentage of total signature sets were verified in batch, which is an optimization to reduce verification costs by x2. For a synced node this should be ~100%",
"fieldConfig": {
"defaults": {
@@ -628,6 +736,8 @@
"mode": "palette-classic"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
@@ -640,6 +750,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -672,8 +783,9 @@
"graph": {},
"legend": {
"calcs": [],
- "displayMode": "hidden",
- "placement": "bottom"
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": false
},
"tooltip": {
"mode": "single",
@@ -686,6 +798,10 @@
"pluginVersion": "7.4.5",
"targets": [
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"expr": "rate(lodestar_bls_thread_pool_batch_sigs_success_total[$rate_interval])/rate(lodestar_bls_thread_pool_success_jobs_signature_sets_count[$rate_interval])",
"interval": "",
"legendFormat": "pool",
@@ -696,6 +812,10 @@
"type": "timeseries"
},
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "Average signatures per set. This number is decided by the time of object submitted to the pool:\n- Sync blocks: 128\n- Aggregates: 3\n- Attestations: 1",
"fieldConfig": {
"defaults": {
@@ -703,6 +823,8 @@
"mode": "palette-classic"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
@@ -715,6 +837,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -747,8 +870,9 @@
"graph": {},
"legend": {
"calcs": [],
- "displayMode": "hidden",
- "placement": "bottom"
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": false
},
"tooltip": {
"mode": "multi",
@@ -758,6 +882,10 @@
"pluginVersion": "7.4.5",
"targets": [
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"expr": "rate(lodestar_bls_thread_pool_sig_sets_started_total[$rate_interval])/(rate(lodestar_bls_thread_pool_jobs_started_total[$rate_interval])>0)",
"interval": "",
"legendFormat": "pool",
@@ -768,6 +896,10 @@
"type": "timeseries"
},
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "How many individual signature sets are invalid vs (valid + invalid). We don't control this number since peers may send us invalid signatures. This number should be very low since we should ban bad peers. If it's too high the batch optimization may not be worth it.",
"fieldConfig": {
"defaults": {
@@ -775,6 +907,8 @@
"mode": "palette-classic"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
@@ -786,6 +920,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -834,7 +969,8 @@
"legend": {
"calcs": [],
"displayMode": "list",
- "placement": "bottom"
+ "placement": "bottom",
+ "showLegend": true
},
"tooltip": {
"mode": "multi",
@@ -871,6 +1007,10 @@
"type": "timeseries"
},
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "Average sets per job. A set may contain +1 signatures. This number should be higher than 1 to reduce communication costs",
"fieldConfig": {
"defaults": {
@@ -878,6 +1018,8 @@
"mode": "palette-classic"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
@@ -890,6 +1032,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -922,8 +1065,9 @@
"graph": {},
"legend": {
"calcs": [],
- "displayMode": "hidden",
- "placement": "bottom"
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": false
},
"tooltip": {
"mode": "multi",
@@ -933,6 +1077,10 @@
"pluginVersion": "7.4.5",
"targets": [
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"expr": "rate(lodestar_bls_thread_pool_jobs_started_total[$rate_interval])/rate(lodestar_bls_thread_pool_job_groups_started_total[$rate_interval])",
"interval": "",
"legendFormat": "pool",
@@ -941,10 +1089,105 @@
],
"title": "BLS worker pool - sets per job",
"type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 42
+ },
+ "id": 520,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "rate(lodestar_bls_thread_pool_signature_deserialization_main_thread_time_seconds_sum[$rate_interval]) * 384",
+ "instant": false,
+ "legendFormat": "signature_deserialization",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "rate(lodestar_bls_thread_pool_pubkeys_aggregation_main_thread_time_seconds_sum[$rate_interval]) * 384",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "pubkey_aggregation",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "BLS jobItemWorkReq cpu time per epoch",
+ "type": "timeseries"
}
],
"refresh": "10s",
- "schemaVersion": 35,
+ "schemaVersion": 38,
"style": "dark",
"tags": [
"lodestar"
diff --git a/docs/images/heap-dumps/devtools.png b/docs/images/heap-dumps/devtools.png
new file mode 100644
index 000000000000..9bdef24f7e20
Binary files /dev/null and b/docs/images/heap-dumps/devtools.png differ
diff --git a/docs/images/heap-dumps/load-profile.png b/docs/images/heap-dumps/load-profile.png
new file mode 100644
index 000000000000..c6e04d0922f4
Binary files /dev/null and b/docs/images/heap-dumps/load-profile.png differ
diff --git a/docs/images/heap-dumps/memory-tab.png b/docs/images/heap-dumps/memory-tab.png
new file mode 100644
index 000000000000..857309571971
Binary files /dev/null and b/docs/images/heap-dumps/memory-tab.png differ
diff --git a/docs/install/docker.md b/docs/install/docker.md
deleted file mode 100644
index 40468e7ad7aa..000000000000
--- a/docs/install/docker.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# Install with Docker
-
-The [`chainsafe/lodestar`](https://hub.docker.com/r/chainsafe/lodestar) Docker Hub repository is maintained actively. It contains the `lodestar` CLI preinstalled.
-
-
-!!! info
- The Docker Hub image tagged as `chainsafe/lodestar:next` is run on CI every commit on our `unstable` branch.
- For `stable` releases, the image is tagged as `chainsafe/lodestar:latest`.
-
-
-Ensure you have Docker installed by issuing the command:
-
-```bash
-docker -v
-```
-
-It should return a non error message such as `Docker version xxxx, build xxxx`.
-
-Pull, run the image and Lodestar should now be ready to use
-
-```bash
-docker pull chainsafe/lodestar
-docker run chainsafe/lodestar --help
-```
-
-
-!!! info
- Docker is the recommended setup for Lodestar. Use our [Lodestar Quickstart scripts](https://github.com/ChainSafe/lodestar-quickstart) with Docker for detailed instructions.
-
diff --git a/docs/install/npm.md b/docs/install/npm.md
deleted file mode 100644
index 805141d01523..000000000000
--- a/docs/install/npm.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# Install from NPM [not recommended]
-
-
-!!! danger
- For mainnet (production) usage, we only recommend installing with docker due to [NPM supply chain attacks](https://hackaday.com/2021/10/22/supply-chain-attack-npm-library-used-by-facebook-and-others-was-compromised/). Until a [safer installation method has been found](https://github.com/ChainSafe/lodestar/issues/3596), do not use this install method except for experimental purposes only.
-
diff --git a/docs/install/source.md b/docs/install/source.md
deleted file mode 100644
index 4fba0a625111..000000000000
--- a/docs/install/source.md
+++ /dev/null
@@ -1,54 +0,0 @@
-# Install from source
-
-## Prerequisites
-
-Make sure to have [Yarn installed](https://classic.yarnpkg.com/en/docs/install). It is also recommended to [install NVM (Node Version Manager)](https://github.com/nvm-sh/nvm) and use the LTS version (currently v20) of [NodeJS](https://nodejs.org/en/).
-
-
-!!! info
- NodeJS versions older than the current LTS are not supported by Lodestar. We recommend running the latest Node LTS.
- It is important to make sure the NodeJS version is not changed after reboot by setting a default `nvm alias default && nvm use default`.
-
-!!! note
- Node Version Manager (NVM) will only install NodeJS for use with the active user. If you intend on setting up Lodestar to run under another user, we recommend using [NodeSource's source for NodeJS](https://github.com/nodesource/distributions/blob/master/README.md#installation-instructions) so you can install NodeJS globally.
-
-
-## Clone repository
-
-Clone the repository locally and build from the stable release branch.
-
-```bash
-git clone -b stable https://github.com/chainsafe/lodestar.git
-```
-
-Switch to created directory.
-
-```bash
-cd lodestar
-```
-
-## Install packages
-
-Install across all packages. Lodestar follows a [monorepo](https://github.com/lerna/lerna) structure, so all commands below must be run in the project root.
-
-```bash
-yarn install
-```
-
-## Build source code
-
-Build across all packages.
-
-```bash
-yarn run build
-```
-
-## Lodestar CLI
-
-Lodestar should now be ready for use.
-
-```bash
-./lodestar --help
-```
-
-See [Command Line Reference](./../reference/cli.md) for further information.
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
new file mode 100644
index 000000000000..270a01b311de
--- /dev/null
+++ b/docs/mkdocs.yml
@@ -0,0 +1,144 @@
+site_name: Lodestar Documentation
+site_description: Lodestar Documentation - TypeScript Ethereum Consensus client
+site_url: https://chainsafe.github.io/lodestar
+
+repo_name: chainsafe/lodestar
+repo_url: https://github.com/chainsafe/lodestar
+
+docs_dir: pages
+
+# Configuration
+theme:
+ name: material
+ logo: assets/lodestar_icon_300.png
+ favicon: assets/round-icon.ico
+ nav_style: dark
+ palette:
+ - scheme: preference
+ media: "(prefers-color-scheme: light)"
+ primary: black
+ accent: deep purple
+ toggle:
+ icon: material/weather-night
+ name: Switch to dark mode
+ - scheme: slate
+ media: "(prefers-color-scheme: dark)"
+ primary: black
+ accent: deep purple
+ toggle:
+ icon: material/weather-sunny
+ name: Switch to light mode
+
+plugins:
+ - search
+ - mermaid2:
+ version: 8.6.4
+ arguments:
+ theme: |
+ ^(window.matchMedia && window.matchMedia('(prefers-color-scheme: dark)').matches) ? 'dark' : 'light'
+
+markdown_extensions:
+ - meta
+ - codehilite:
+ guess_lang: false
+ - admonition
+ - toc:
+ permalink: true
+ - pymdownx.superfences:
+ # make exceptions to highlighting of code (for mermaid):
+ custom_fences:
+ - name: mermaid
+ class: mermaid
+ format: !!python/name:mermaid2.fence_mermaid
+ - pymdownx.emoji:
+ emoji_index: !!python/name:material.extensions.emoji.twemoji
+ emoji_generator: !!python/name:material.extensions.emoji.to_svg
+
+extra_css:
+ - stylesheets/extras.css
+
+# Socials
+extra:
+ social:
+ - icon: fontawesome/brands/github-alt
+ link: https://github.com/ChainSafe/lodestar
+ - icon: fontawesome/brands/twitter
+ link: https://twitter.com/lodestar_eth
+ - icon: fontawesome/brands/discord
+ link: https://discord.gg/yjyvFRP
+ - icon: fontawesome/brands/medium
+ link: https://blog.chainsafe.io
+
+# Customize left navigation menu
+nav:
+ - Home: index.md
+ - Introduction: introduction.md
+ - Security: security.md
+ - Getting Started:
+ - Quick Start: getting-started/quick-start.md
+ - Installation: getting-started/installation.md
+ # - Creating a JWT: getting-started/creating-a-jwt.md
+ - Starting a Node: getting-started/starting-a-node.md
+ - Data Retention: data-retention.md
+ - Beacon Node:
+ - Configuration: beacon-management/beacon-cli.md
+ - Networking: beacon-management/networking.md
+ - MEV and Builder Integration: beacon-management/mev-and-builder-integration.md
+ - Syncing: beacon-management/syncing.md
+ - Validator:
+ - Configuration: validator-management/validator-cli.md
+ # - Key Management: validator-management/key-management.md
+ # - Withdrawals: validator-management/withdrawals.md
+ # - Multiple and Fall-Back Validation: validator-management/multiple-and-fallback-validation.md
+ - Bootnode:
+ - Configuration: bootnode/bootnode-cli.md
+ - Light Client and Prover:
+ - Light Client: lightclient-prover/lightclient.md
+ - Light Client Configuration: lightclient-prover/lightclient-cli.md
+ - Prover: lightclient-prover/prover.md
+ # - Prover Configuration: lightclient-prover/prover-cli.md
+ - Logging and Metrics:
+ - Prometheus and Grafana: logging-and-metrics/prometheus-grafana.md
+ - Client Monitoring: logging-and-metrics/client-monitoring.md
+ # - Log Management: logging-and-metrics/log-management.md
+ # - Metrics Management: logging-and-metrics/metrics-management.md
+ # - Dashboards: logging-and-metrics/dashboards.md
+ # - Api:
+ # - Using the API: api/using-the-api.md
+ # - API Reference: api/api-reference.md // Auto-generate from API endpoint
+ # - Troubleshooting:
+ # - Installation Issues: troubleshooting/installation-issues.md
+ # - Syncing Issues: troubleshooting/syncing-issues.md
+ # - Validation Issues: troubleshooting/validation-issues.md
+ # - Execution Layer Issues: troubleshooting/execution-layer-issues.md
+ - Supporting Libraries: supporting-libraries/index.md
+ # - libp2p: supporting-libraries/libp2p.md
+ # - "@chainsafe/ssz": supporting-libraries/ssz.md
+ # - "@chainsafe/blst": supporting-libraries/blst.md
+ # - "@chainsafe/libp2p-gossipsub": supporting-libraries/gossipsub.md
+ - Contributing:
+ - Getting Started: contribution/getting-started.md
+ # - Bug Reports: contribution/bug-reports.md
+ - Dependency Graph: contribution/depgraph.md
+ # - Repo: contribution/repo.md
+ - Testing:
+ - Overview: contribution/testing/index.md
+ # - Unit Tests: contribution/testing/unit-tests.md
+ # - Integration Tests: contribution/testing/integration-tests.md
+ # - E2E Tests: contribution/testing/e2e-tests.md
+ - Simulation Tests: contribution/testing/simulation-tests.md
+ # - Spec Tests: contribution/testing/spec-tests.md
+ # - Performance Tests: contribution/testing/performance-tests.md
+ # - PR Submission: contribution/pr-submission.md
+ - Tools:
+ # - Debugging: tools/debugging.md
+ # - perf: tools/perf.md
+ - Flame Graphs: tools/flamegraphs.md
+ - Heap Dumps: tools/heap-dumps.md
+ - Core Dumps: tools/core-dumps.md
+ - Advanced Topics:
+ # - Migrating from Other Clients: advanced-topics/migrating-from-other-clients.md
+ # - Block Exploration: advanced-topics/block-exploration.md
+ # - Slashing Protection: advanced-topics/slashing-protection.md
+ - Setting Up a Testnet: advanced-topics/setting-up-a-testnet.md
+ # - Doppelganger Detection: advanced-topics/doppelganger-detection.md
\ No newline at end of file
diff --git a/docs/pages/advanced-topics/block-exploration.md b/docs/pages/advanced-topics/block-exploration.md
new file mode 100644
index 000000000000..05ee657bb607
--- /dev/null
+++ b/docs/pages/advanced-topics/block-exploration.md
@@ -0,0 +1 @@
+# Block Exploration
diff --git a/docs/pages/advanced-topics/doppelganger-detection.md b/docs/pages/advanced-topics/doppelganger-detection.md
new file mode 100644
index 000000000000..165590bda55a
--- /dev/null
+++ b/docs/pages/advanced-topics/doppelganger-detection.md
@@ -0,0 +1 @@
+# Doppelganger Detection
diff --git a/docs/pages/advanced-topics/migrating-from-other-clients.md b/docs/pages/advanced-topics/migrating-from-other-clients.md
new file mode 100644
index 000000000000..302314a27b23
--- /dev/null
+++ b/docs/pages/advanced-topics/migrating-from-other-clients.md
@@ -0,0 +1 @@
+# Migrating from Other Clients
diff --git a/docs/usage/local.md b/docs/pages/advanced-topics/setting-up-a-testnet.md
similarity index 99%
rename from docs/usage/local.md
rename to docs/pages/advanced-topics/setting-up-a-testnet.md
index 51465d68c92b..a6350b3a03de 100644
--- a/docs/usage/local.md
+++ b/docs/pages/advanced-topics/setting-up-a-testnet.md
@@ -1,4 +1,4 @@
-# Local testnet
+# Setting Up a Testnet
To quickly test and run Lodestar we recommend starting a local testnet. We recommend a simple configuration of two beacon nodes with multiple validators
diff --git a/docs/pages/advanced-topics/slashing-protection.md b/docs/pages/advanced-topics/slashing-protection.md
new file mode 100644
index 000000000000..527cbb06040a
--- /dev/null
+++ b/docs/pages/advanced-topics/slashing-protection.md
@@ -0,0 +1 @@
+# Slashing Protection
diff --git a/docs/pages/api/using-the-api.md b/docs/pages/api/using-the-api.md
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/docs/usage/mev-integration.md b/docs/pages/beacon-management/mev-and-builder-integration.md
similarity index 97%
rename from docs/usage/mev-integration.md
rename to docs/pages/beacon-management/mev-and-builder-integration.md
index c2f2529edbe6..c2f9db9b6846 100644
--- a/docs/usage/mev-integration.md
+++ b/docs/pages/beacon-management/mev-and-builder-integration.md
@@ -20,7 +20,7 @@ All you have to do is:
1. Provide lodestar beacon node with a Builder endpoint (which corresponds to the network you are running) via these additional flags:
```shell
- --builder --builder.urls
+ --builder --builder.url
```
2. Run lodestar validator client with these additional flags
```shell
diff --git a/docs/pages/beacon-management/networking.md b/docs/pages/beacon-management/networking.md
new file mode 100644
index 000000000000..993b1cdfda26
--- /dev/null
+++ b/docs/pages/beacon-management/networking.md
@@ -0,0 +1,91 @@
+# Networking
+
+Starting up Lodestar will automatically connect it to peers on the network. Peers are found through the discv5 protocol, and once connections are established, communication happens via gossipsub over libp2p. While not strictly necessary, having a basic understanding of how these protocols and transports work will help with debugging and troubleshooting, as some of the more common challenges involve [firewalls](#firewall-management) and [NAT traversal](#nat-traversal).
+
+## Networking Flags
+
+Some of the important Lodestar flags related to networking are:
+
+- [`--discv5`](./beacon-cli.md#-discv5)
+- [`--listenAddress`](./beacon-cli.md#-listenaddress)
+- [`--port`](./beacon-cli.md#-port)
+- [`--discoveryPort`](./beacon-cli.md#-discoveryport)
+- [`--listenAddress6`](./beacon-cli.md#-listenaddress6)
+- [`--port6`](./beacon-cli.md#-port6)
+- [`--discoveryPort6`](./beacon-cli.md#-discoveryport6)
+- [`--bootnodes`](./beacon-cli.md#-bootnodes)
+- [`--deterministicLongLivedAttnets`](./beacon-cli.md#-deterministiclonglivedattnets)
+- [`--subscribeAllSubnets`](./beacon-cli.md#-subscribeallsubnets)
+- [`--disablePeerScoring`](./beacon-cli.md#-disablepeerscoring)
+- [`--enr.ip`](./beacon-cli.md#-enrip)
+- [`--enr.tcp`](./beacon-cli.md#-enrtcp)
+- [`--enr.udp`](./beacon-cli.md#-enrudp)
+- [`--enr.ip6`](./beacon-cli.md#-enrip6)
+- [`--enr.tcp6`](./beacon-cli.md#-enrtcp6)
+- [`--enr.udp6`](./beacon-cli.md#-enrudp6)
+- [`--nat`](./beacon-cli.md#-nat)
+- [`--private`](./beacon-cli.md#`-private`)
+
+## Peer Discovery (Discv5)
+
+In Ethereum, discv5 plays a pivotal role in the peer discovery process, enabling nodes to find and locate each other in order to form the peer-to-peer network. The process begins with an interaction between new nodes and bootnodes at start-up. Bootnodes are nodes with hard-coded addresses (which can be overridden via the CLI flag [`--bootnodes`](./beacon-cli.md#-bootnodes)) that bootstrap the discovery process. Through FINDNODE requests, a new node establishes a bond with each bootnode, which returns a list of peers for the new node to connect to. Following this trail, the new node issues further FINDNODE requests to the provided peers to establish a wider web of connections.
+
+Discv5 operates as a peer advertisement medium in this network, where nodes can act as both providers and consumers of data. Every participating node in the Discv5 protocol discovers peer data from other nodes and later relays it, making the discovery process dynamic and efficient.
+
+Discv5 is designed to be a standalone protocol running via UDP on a dedicated port solely for peer discovery. Peer data is exchanged via self-certified, flexible peer records (ENRs). These key features suit the Ethereum network well, and being a good peer often means running a discv5 worker. Lodestar offers simple configuration to set up and run a bootnode independently of a beacon node. See the [bootnode cli](../bootnode/bootnode-cli.md) page for more information and configuration options.
+
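+As a minimal sketch, a node can be pointed at an explicit discovery port and a custom bootnode using the flags listed above (the ENR below is a truncated placeholder, not a live record):
+
+```bash
+# Use an explicit discovery port and a custom bootnode ENR (placeholder value)
+./lodestar beacon \
+  --port 9000 \
+  --discoveryPort 9000 \
+  --bootnodes "enr:-IS4QHCYrY..."
+```
+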
+## ENR
+
+Ethereum Node Records (ENRs) are a standardized format utilized for peer discovery - see [EIP-778](https://eips.ethereum.org/EIPS/eip-778) for the specification. An ENR consists of a set of key-value pairs. These pairs include crucial information such as the node's ID, IP address, the port on which it's listening, and the protocols it supports. This information helps other nodes in the network locate and connect to the node.
+
+The primary purpose of ENRs is to facilitate node discovery and connectivity in the Ethereum network. Nodes use ENRs to announce their presence and capabilities to other nodes, making it easier to establish and maintain a robust, interconnected network.
+
+Note that bootnodes are announced via ENR.
+
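+The contents of a record can be sketched as a handful of key-value pairs (values below are illustrative):
+
+```bash
+# Sketch of the key-value pairs inside a beacon node's ENR (illustrative values)
+#   id:        v4             (identity scheme)
+#   secp256k1: 0x03ca63...    (compressed public key that signs the record)
+#   ip:        203.0.113.7    (advertised IPv4 address)
+#   tcp:       9000           (libp2p port)
+#   udp:       9000           (discv5 port)
+#   eth2:      ...            (consensus-layer fork digest and next-fork info)
+```
+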
+## Peer Communication (gossipsub and ReqResp)
+
+Gossipsub and ReqResp are the two mechanisms that beacon nodes use to exchange chain data. Gossipsub is used to proactively disseminate the most recent relevant data throughout the network. ReqResp is used to directly ask specific peers for specific information (e.g. during syncing).
+
+### Gossipsub
+
+GossipSub is a foundational protocol in peer-to-peer (P2P) communication, particularly in decentralized networks like Ethereum and IPFS. At its core, GossipSub efficiently propagates data, filtered by topic, through a P2P network. It organizes peers into a collection of overlay networks, each associated with a distinct topic. By routing data through the relevant overlay networks based on topics of interest, large amounts of data can be disseminated efficiently without excessive bandwidth use or latency.
+
+In GossipSub, nodes can subscribe to topics, effectively joining the corresponding overlay to receive messages published to a specific topic. This topic-based structure enables nodes to congregate around shared interests, ensuring that relevant messages are delivered to all interested parties. Each message published to a topic gets disseminated and relayed to all subscribed peers, similar to a chat room.
+
+Messages are propagated through a blend of eager-push and lazy-pull models. Specifically, the protocol employs "mesh links" to carry full messages actively and "gossip links" to carry only message identifiers (the lazy-pull propagation model). This hybrid approach allows for both active message propagation and reactive message retrieval, which is an extension of the traditional hub-and-spoke pub/sub model.
+
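+The overlays a beacon node joins are identified by topic strings of the form `/eth2/<fork_digest>/<name>/<encoding>` defined in the consensus spec (the fork digest below is illustrative):
+
+```bash
+# Examples of gossipsub topics a beacon node subscribes to
+/eth2/b5303f2a/beacon_block/ssz_snappy
+/eth2/b5303f2a/beacon_aggregate_and_proof/ssz_snappy
+/eth2/b5303f2a/voluntary_exit/ssz_snappy
+```
+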
+### ReqResp
+
+ReqResp is the domain of protocols that establish a flexible, on-demand mechanism to retrieve historical data and data missed by gossip. This family of methods, implemented as separate libp2p protocols, operates between a single requester and a single responder. A method is initiated via a libp2p protocol ID, with the initiator sending a request message and the responder sending a response message. Every method defines a specific request and response message type, and a specific protocol ID. This framework also facilitates streaming responses and robust error handling.
+
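+Each method is addressed by a versioned libp2p protocol ID of the form `/ProtocolPrefix/MessageName/SchemaVersion/Encoding`, for example:
+
+```bash
+# ReqResp protocol IDs used between beacon nodes
+/eth2/beacon_chain/req/status/1/ssz_snappy
+/eth2/beacon_chain/req/beacon_blocks_by_range/2/ssz_snappy
+```
+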
+## Data Transport (libp2p)
+
+Libp2p is a modular and extensible network stack that serves as the data transport layer below both gossipsub and ReqResp and facilitates the lower-level peer-to-peer communications. It provides a suite of protocols for various networking functionalities including network transports, connection encryption and protocol multiplexing. Its modular design allows for the easy addition, replacement, or upgrading of protocols, ensuring an adaptable and evolving networking stack.
+
+Libp2p operates at the lower levels of the OSI model, particularly at the Transport and Network layers. Libp2p supports both TCP and UDP protocols for establishing connections and data transmission. Combined with libp2p's modular design, this allows it to integrate with various networking technologies to facilitate both routing and addressing.
+
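+Endpoints and peers are addressed with multiaddrs: self-describing strings that compose transport, address, port, and peer identity (the IP and peer ID below are illustrative and truncated):
+
+```bash
+# A libp2p multiaddr for a beacon node peer
+/ip4/203.0.113.7/tcp/9000/p2p/16Uiu2HAm...
+```
+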
+## Firewall Management
+
+If your setup is behind a firewall, there are a few ports that will need to be opened to allow for P2P discovery and communication. There are also some ports that should be protected to prevent unwanted access or DDoS attacks on your node.
+
+Ports that should be opened:
+
+- 30303/TCP+UDP - Execution layer p2p communication port
+- 9000/TCP+UDP - Beacon Node P2P communication port
+- 9090/TCP - Lodestar IPv6 P2P communication port
+- 13000/TCP - Prysm P2P communication port
+- 12000/UDP - Prysm P2P communication port
+
+Ports that should be inbound protected:
+
+- 9596/TCP - Lodestar Beacon-Node JSON RPC api calls
+- 5062/TCP - Lodestar validator key manager api calls
+- 18550/TCP - Lodestar MEV Boost/Builder port
+- 8008/TCP - Lodestar Metrics
+- 5064/TCP - Validator Metrics
+- 8545/TCP - Execution client JSON RPC port api calls
+- 8551/TCP - Execution engine port for Lodestar to communicate with the execution client
+
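+As a sketch using `ufw` on Ubuntu (the rules are illustrative; adapt them to your clients and network layout):
+
+```bash
+# Open consensus-layer and execution-layer P2P ports
+sudo ufw allow 9000/tcp
+sudo ufw allow 9000/udp
+sudo ufw allow 30303/tcp
+sudo ufw allow 30303/udp
+# Keep API and metrics ports closed to the public internet
+sudo ufw deny 9596/tcp
+sudo ufw deny 8008/tcp
+```
+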
+## NAT Traversal
+
+Lodestar does not support UPnP. If you are behind a NAT, you will need to manually forward the ports listed above.
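+
+A sketch of the manual alternative, assuming the router forwards 9000/TCP+UDP to this machine (the address is illustrative):
+
+```bash
+# Advertise the forwarded public endpoint in the node's ENR
+./lodestar beacon \
+  --nat \
+  --enr.ip 198.51.100.4 \
+  --enr.tcp 9000 \
+  --enr.udp 9000
+```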
diff --git a/docs/pages/beacon-management/syncing.md b/docs/pages/beacon-management/syncing.md
new file mode 100644
index 000000000000..40b5b4ba96b5
--- /dev/null
+++ b/docs/pages/beacon-management/syncing.md
@@ -0,0 +1,42 @@
+# Syncing
+
+Syncing an Ethereum node involves obtaining a copy of the blockchain data from other peers in the network to reach a consistent state. This process is crucial for new nodes or nodes that have been offline and need to catch up with the network's current state. Syncing can be performed for both the execution layer and the beacon chain, although the focus here will be primarily on the beacon chain.
+
+Lodestar allows for several methods of syncing; however, the recommended method is `checkpoint sync`, as it is the fastest and least resource intensive. It is generally a good idea to sync via a [`--checkpointSyncUrl`](./beacon-cli.md#-checkpointsyncurl). If starting at a specific point is necessary, specify the [`--checkpointState`](./beacon-cli.md#-checkpointstate) where the sync should begin.
+
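+A minimal example (the URL is a placeholder; use a checkpoint provider you trust):
+
+```bash
+# Checkpoint sync from a trusted beacon API endpoint
+./lodestar beacon --checkpointSyncUrl https://checkpoint.example.org
+```
+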
+## Weak Subjectivity
+
+Weak subjectivity is a concept specific to Proof of Stake (PoS) systems, addressing how new nodes can safely join the network and synchronize with the correct blockchain history. Unlike in Proof of Work (PoW) systems, where a node can trust the longest chain due to the significant computational effort required to forge it, PoS systems present different challenges. In PoS, the cost of creating or altering blockchain history is lower, as it is not based on computational work but on the stake held by validators. This difference raises the possibility that an attacker, if possessing sufficient stake, could feasibly create a misleading version of the blockchain history.
+
+The concept of weak subjectivity becomes particularly crucial in two scenarios: when new nodes join the network and when existing nodes reconnect after a significant period of being offline. During these times, the 'weak subjectivity period' defines a time frame within which a client, upon rejoining, can reliably process blocks to reach the consensus chain head. This approach is essential for mitigating the risks associated with long-range attacks, which could occur if nodes relied solely on the longest chain principle without any initial trust in a specific network state.
+
+To counter these risks, weak subjectivity requires new nodes to obtain a recent, trusted state of the blockchain from a reliable source upon joining the network. This state includes vital information about the current set of validators and their stakes. Starting from this trusted state helps new nodes avoid being misled by false histories, as any attempt to rewrite history beyond this point would require an unrealistically large portion of the total stake.
+
+## Syncing Methods
+
+### Checkpoint Sync
+
+Checkpoint sync, also known as state sync, allows a node to sync to a specific state checkpoint without having to process all historical data leading up to that point. In the context of a beacon node, this involves syncing to a recent finalized checkpoint, allowing the node to quickly join the network and participate in consensus activities. This is especially beneficial for new nodes or nodes that have been offline for a considerable duration.
+
+### Historical Sync
+
+Historical sync involves processing all blocks from the genesis block or from a specified starting point to the current block. This is the most comprehensive sync method but also the most resource and time-intensive. For beacon nodes, historical sync is crucial for nodes that aim to maintain a complete history of the beacon chain, facilitating a deeper understanding and analysis of the network's history. In the execution layer, it ensures a complete historical record of the execution layer data.
+
+### Range Sync
+
+Range sync involves syncing blocks within a specified range, beneficial when a node is only temporarily offline and needs to catch up over a short range. In the beacon node context, this entails requesting and processing blocks within a defined range, ensuring the node quickly gets updated to the current network state.
+
+### Backfill Sync
+
+This is another version of checkpoint sync that allows a node that has not been historically synchronized to verify data prior to the checkpoint. It works by downloading a checkpoint and then fetching blocks backwards from that point until the desired data can be verified. It is a relatively inexpensive sync from a CPU perspective because it only checks the block hashes and verifies the proposer signatures along the way.
+
+## Syncing Lodestar
+
+Under the hood, the different syncing styles in Lodestar are implemented as one of two types: range sync and unknown-parent sync. Range sync is used when the starting point of syncing is known. In the case of historical and checkpoint sync, the starting points are well defined: genesis and the last finalized epoch boundary. Snapshot sync is not supported by Lodestar. If the starting point for sync is not known, Lodestar must first determine where it is. While the discussion of how that happens is out of scope for this document, the gist is that the beacon node listens to gossipsub for blocks being broadcast on the network. It also requests [`MetaData`](https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#getmetadata) from its peers and uses that to start requesting the correct blocks from the network.
+
+There are several flags that can be used to configure the sync process; an example invocation follows the list below.
+
+- [`--checkpointSyncUrl`](./beacon-cli.md#-checkpointsyncurl)
+- [`--checkpointState`](./beacon-cli.md#-checkpointstate)
+- [`--wssCheckpoint`](./beacon-cli.md#-wsscheckpoint)
+- [`--forceCheckpointSync`](./beacon-cli.md#-forcecheckpointsync)
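+
+As a minimal sketch, a checkpoint-synced mainnet beacon node might be started like this (the checkpoint provider URL is a placeholder; use a provider you trust):
+
+```sh
+./lodestar beacon \
+  --network mainnet \
+  --checkpointSyncUrl https://beaconstate.example.org
+```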
diff --git a/docs/pages/contribution/bug-reports.md b/docs/pages/contribution/bug-reports.md
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/docs/design/depgraph.md b/docs/pages/contribution/depgraph.md
similarity index 100%
rename from docs/design/depgraph.md
rename to docs/pages/contribution/depgraph.md
diff --git a/docs/pages/contribution/pr-submission.md b/docs/pages/contribution/pr-submission.md
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/docs/pages/contribution/repo.md b/docs/pages/contribution/repo.md
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/docs/pages/contribution/testing/end-to-end-tests.md b/docs/pages/contribution/testing/end-to-end-tests.md
new file mode 100644
index 000000000000..3f405128c7cb
--- /dev/null
+++ b/docs/pages/contribution/testing/end-to-end-tests.md
@@ -0,0 +1,3 @@
+# End-To-End Tests
+
+Check back soon for more information!! We are in the process of updating our docs.
diff --git a/docs/pages/contribution/testing/index.md b/docs/pages/contribution/testing/index.md
new file mode 100644
index 000000000000..9de62895323c
--- /dev/null
+++ b/docs/pages/contribution/testing/index.md
@@ -0,0 +1,27 @@
+# Testing
+
+Testing is critical to the Lodestar project and there are many types of tests that are run to build a product that is both effective AND efficient. This page will help to break down the different types of tests you will find in the Lodestar repo.
+
+### Unit Tests
+
+This is the most fundamental type of test in most code bases. In all instances mocks, stubs and other forms of isolation are used to test code on a functional, unit level. See the [Unit Tests](./unit-tests.md) page for more information.
+
+### Spec Tests
+
+The Ethereum Consensus Specifications are what ensure that the various consensus clients do not diverge on critical computations and will work harmoniously on the network. See the [Spec Tests](./spec-tests.md) page for more information.
+
+### Performance Tests
+
+Node.js is an unforgiving virtual machine when it comes to high performance, multi-threaded applications. In order to ensure that Lodestar can not only keep up with the chain, but to push the boundary of what is possible, there are lots of performance tests that benchmark programming paradigms and prevent regression. See the [Performance Testing](./performance-tests.md) page for more information.
+
+### End-To-End Tests
+
+E2E tests are where Lodestar is run in its full form, often from the CLI as a user would, to check that the system as a whole works as expected. These tests are meant to exercise the entire system in isolation; there is no network interaction, nor interaction with any code outside of Lodestar. See the [End-To-End Testing](./end-to-end-tests.md) page for more information.
+
+### Integration Tests
+
+Integration tests are meant to test how Lodestar interacts with other clients, but are not considered full simulations. This is where Lodestar may make API calls or otherwise work across the process boundary, but some mocking, stubbing, or class isolation is still required. An example of this is using the `ExecutionEngine` class to make API calls to a Geth instance to check that the HTTP requests are properly formatted.
+
+### Simulation Tests
+
+These are the most comprehensive types of tests. They aim to test Lodestar in a fully functioning ephemeral devnet environment. See the [Simulation Testing](./simulation-tests.md) page for more information.
diff --git a/docs/pages/contribution/testing/integration-tests.md b/docs/pages/contribution/testing/integration-tests.md
new file mode 100644
index 000000000000..b45110033460
--- /dev/null
+++ b/docs/pages/contribution/testing/integration-tests.md
@@ -0,0 +1,27 @@
+# Integration Tests
+
+The following tests are found in `packages/beacon-node`.
+
+#### `test:sim:withdrawals`
+
+This test simulates Capella blocks with withdrawals. It tests Lodestar against Geth and EthereumJS.
+
+There are two ENV variables that are required to run this test:
+
+- `EL_BINARY_DIR`: the docker image setup to handle the test case
+- `EL_SCRIPT_DIR`: the script that will be used to start the EL client. All of the scripts can be found in `packages/beacon-node/test/scripts/el-interop` and the `EL_SCRIPT_DIR` is the sub-directory name in that root that should be used to run the test.
+
+The command to run this test is:
+
+```sh
+EL_BINARY_DIR=g11tech/geth:withdrawals EL_SCRIPT_DIR=gethdocker yarn mocha test/sim/withdrawal-interop.test.ts
+```
+
+The images used by this test during CI are:
+
+- `GETH_WITHDRAWALS_IMAGE: g11tech/geth:withdrawalsfeb8`
+- `ETHEREUMJS_WITHDRAWALS_IMAGE: g11tech/ethereumjs:blobs-b6b63`
+
+#### `test:sim:merge-interop`
+
+#### `test:sim:mergemock`
+
+#### `test:sim:blobs`
diff --git a/docs/pages/contribution/testing/performance-tests.md b/docs/pages/contribution/testing/performance-tests.md
new file mode 100644
index 000000000000..6e2d9c86319b
--- /dev/null
+++ b/docs/pages/contribution/testing/performance-tests.md
@@ -0,0 +1,3 @@
+# Performance Tests
+
+Check back soon for more information!! We are in the process of updating our docs.
diff --git a/docs/pages/contribution/testing/simulation-tests.md b/docs/pages/contribution/testing/simulation-tests.md
new file mode 100644
index 000000000000..c1059e5c4177
--- /dev/null
+++ b/docs/pages/contribution/testing/simulation-tests.md
@@ -0,0 +1,141 @@
+# Simulation Tests
+
+"Sim" testing for Lodestar is the most comprehensive, and complex, testing that is run. The goal is to fully simulate a testnet and to actuate the code in a way that closely mimics what will happen when turning on Lodestar in the wild. This is a very complex task and requires a lot of moving parts to work together. The following sections will describe the various components and how they work together.
+
+At a very high level, simulation testing sets up a testnet from genesis and lets it proceed through "normal" execution exactly as the nodes would under production circumstances. To get feedback, there are regular checks along the way to assess how the testnet nodes are working. These "assertions" can be added and removed at will, allowing developers to check for specific conditions in a tightly controlled, reproducible environment and to get high-quality, actionable feedback on how Lodestar performs. The end goal of these tests is to run a full Lodestar client in an environment that is as close as possible to what an end user would experience.
+
+These tests usually set up full testnets with multiple consensus clients and their paired execution nodes. In many instances we are just looking to exercise the Lodestar code, but in some places we also test how Lodestar works in relation to other consensus clients, like Lighthouse. As you can imagine, there is quite a bit of machinery responsible for setting up and managing the simulations and assertions. This section will help to go over those bits and pieces. Many, but not all, of these classes can be found in `packages/cli/test/utils/simulation`.
+
+## Running Sim Tests
+
+There are a number of sim tests that are available and each has a slightly different purpose. All are run by CI and must pass for a PR to be valid for merging. Most tests require a couple of environment variables to be set.
+
+### Environment Variables
+
+To see typical values for these, check out the `test-sim.yaml` workflow file in the `.github/workflows` directory.
+
+- `GETH_DOCKER_IMAGE`: The Geth docker image that will be used
+- `NETHERMIND_DOCKER_IMAGE`: The Nethermind docker image that will be used
+- `LIGHTHOUSE_DOCKER_IMAGE`: The Lighthouse docker image that will be used
+
+### `test:sim:multifork`
+
+The multi-fork sim test checks most of the functionality Lodestar provides. It verifies that Lodestar is capable of peering, moving through all of the forks and using various sync methods in a testnet environment. Lodestar is tested with both Geth and Nethermind as the execution client. It also checks a Lighthouse/Geth node for cross-client compatibility.
+
+```sh
+GETH_DOCKER_IMAGE=ethereum/client-go:v1.11.6 \
+ LIGHTHOUSE_DOCKER_IMAGE=sigp/lighthouse:latest-amd64-modern-dev \
+ NETHERMIND_DOCKER_IMAGE=nethermind/nethermind:1.18.0 \
+ yarn workspace @chainsafe/lodestar test:sim:multifork
+```
+
+### `test:sim:endpoints`
+
+This tests that various endpoints of the beacon node and validator client are working as expected.
+
+```sh
+GETH_DOCKER_IMAGE=ethereum/client-go:v1.11.6 \
+ yarn workspace @chainsafe/lodestar test:sim:endpoints
+```
+
+### `test:sim:deneb`
+
+This test is still included in our CI but is no longer as important as it once was. Lodestar is often the first client to implement new features, and this test was created before Geth was upgraded with the features required to support the Deneb fork. To test that Lodestar was ready, this test uses mocked Geth instances. It is left as a placeholder for when the next fork comes along that requires a similar approach.
+
+### `test:sim:mixedclient`
+
+Checks that Lodestar is compatible with validator clients from other consensus implementations, and vice versa. All tests use Geth as the EL.
+
+```sh
+GETH_DOCKER_IMAGE=ethereum/client-go:v1.11.6 \
+ LIGHTHOUSE_DOCKER_IMAGE=sigp/lighthouse:latest-amd64-modern-dev \
+ yarn workspace @chainsafe/lodestar test:sim:mixedclient
+```
+
+## Sim Test Infrastructure
+
+When setting up and running the simulations, interaction with the nodes is through the published node APIs. All functionality is actuated via HTTP requests, and by "plugging in" this way it is possible to run the nodes in a stand-alone fashion, as they would be run in production, while still achieving a tightly monitored and controlled environment. If code needs to be executed on a "class by class" basis or with mocking involved, then the test is not a simulation test and falls into one of the other testing categories. See the [Testing Overview](./index.md) page for more information on the other types of tests available for Lodestar.
+
+### Simulation Environment
+
+The simulation environment has many pieces, and those are orchestrated by the `SimulationEnvironment` class. The testnet nodes are run as a mixture of Docker containers and bare-metal code execution via Node.js. To monitor the various clients there is a `SimulationTracker` whose primary function is to `register` assertions that track and gauge how the nodes are doing during the simulation. See the section on [Simulation Assertions](#simulation-assertions) below for more information on them. There is an `EpochClock` with helper functions related to the timing of slots and epochs, and there is also a `Runner` that helps start/stop the various Docker containers and spawn the Node.js child processes as necessary.
+
+The `SimulationEnvironment` is the orchestrator of all the various functions that create the testnet and start it from genesis. It is also how the various forks are configured, to exercise code through the fork transitions.
+
+### Simulation Assertions
+
+These are the secret sauce for making the simulation tests meaningful. There are several predefined assertions that can be added to a simulation tracker and one can also create custom assertions and add them to the environment. Assertions can be added per slot, per epoch, per fork or per node. They can even be added to check conditions across nodes.
+
+Assertions are added to the `SimulationTracker` with the `register` method and the tracker follows the environment to make sure that assertions are run at the appropriate times, and on the correct targets.
+
+Assertions are implemented via API calls to the various targets; metadata from those calls is stored and used to assert that the desired conditions were met. Any information that can be retrieved via API call can be added to the assertion `stores` for validation, and validations can be asserted at a specific time or on an interval.
+
+There are a number of assertions that are added to simulations by default. They are:
+
+- `inclusionDelayAssertion`
+- `attestationsCountAssertion`
+- `attestationParticipationAssertion`
+- `connectedPeerCountAssertion`
+- `finalizedAssertion`
+- `headAssertion`
+- `missedBlocksAssertion`
+- `syncCommitteeParticipationAssertion`
+
+Because of their flexibility and complexity, there is a section below specifically on how to create custom assertions. See [custom assertions](#custom-assertions) for more info.
+
+### Custom Assertions
+
+Check back soon for more information on how to create custom assertions.
+
+### Simulation Reports
+
+Sim tests that are run using the simulation framework output a table of information to the console. The table summarizes the state of all of the nodes and the network at each slot.
+
+Here is an example of the table and how to interpret it:
+
+```sh
+┼─────────────────────────────────────────────────────────────────────────────────────────────────┼
+│ fork │ eph │ slot │ head │ finzed │ peers │ attCount │ incDelay │ errors │
+┼─────────────────────────────────────────────────────────────────────────────────────────────────┼
+│ capella │ 9/0 │ 72 │ 0x95c4.. │ 56 │ 3 │ 16 │ 1.00 │ 0 │
+│ capella │ 9/1 │ 73 │ 0x9dfc.. │ 56 │ 3 │ 16 │ 1.00 │ 0 │
+│ capella │ 9/2 │ 74 │ 0xdf3f.. │ 56 │ 3 │ 16 │ 1.00 │ 0 │
+│ capella │ 9/3 │ 75 │ 0xbeae.. │ 56 │ 3 │ 16 │ 1.00 │ 0 │
+│ capella │ 9/4 │ 76 │ 0x15fa.. │ 56 │ 3 │ 16 │ 1.00 │ 0 │
+│ capella │ 9/5 │ 77 │ 0xf8ff.. │ 56 │ 2,3,3,2 │ 16 │ 1.00 │ 0 │
+│ capella │ 9/6 │ 78 │ 0x8199.. │ 56 │ 2,3,3,2 │ 16 │ 1.20 │ 0 │
+│ capella │ 9/7 │ 79 │ different │ 56 │ 2,3,3,2 │ 16 │ 1.50 │ 2 │
+┼─────────────────────────────────────────────────────────────────────────────────────────────────┼
+│ Att Participation: H: 0.75, S: 1.00, T: 0.75 - SC Participation: 1.00 │
+┼─────────────────────────────────────────────────────────────────────────────────────────────────┼
+```
+
+#### Slot Information
+
+- `fork`: shows what fork is currently being tested
+- `eph`: During simulation tests the Lodestar repo is set up to use 8 slots per epoch, so what is shown is the epoch number and the slot number within that epoch, as `epoch/slot`
+- `slot`: The slot number that is currently being processed
+- `head`: If all clients have the same head, the first couple of bytes of the hash are shown. If clients do not all have the same head, `different` is reported.
+- `finzed`: Shows the number of the last finalized slot
+- `peers`: The number of peers each node is connected to. If all nodes have the same number, only a single value is shown; otherwise the count for each node is reported in a comma-separated list
+- `attCount`: The number of attestations that the node has seen
+- `incDelay`: The average inclusion delay, in slots, experienced for the attestations seen. Attestations for the current head often arrive more than one slot behind, and this value tracks that
+- `errors`: The number of errors that were encountered during the slot
+
+#### Epoch Information
+
+- `H`: The percentage of nodes, at epoch transition, that voted for the head block
+- `S`: The percentage of nodes, at epoch transition, that voted for the source block
+- `T`: The percentage of nodes, at epoch transition, that voted for the target block
+- `SC Participation`: The sync committee participation rate
+
+### Simulation Logging
+
+The simulation environment will capture all of the logs from all nodes that are running. The logs can be found in the `packages/cli/test-logs` directory. The logs are named with the following convention:
+
+`<node-name>-<node-layer>_<client-name>.log`
+
+Some examples are:
+
+- `node-1-beacon_lodestar.log`: This is the first node in the simulation. It is the consensus layer and is running the Lodestar validator client.
+- `range-sync-execution_geth.log`: This is the node that was added to test pulling history in range sync mode. It is the execution layer and is running the Geth execution client.
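+
+For example, to follow the consensus logs of the first node while a simulation is running (using the file name from the convention above):
+
+```sh
+tail -f packages/cli/test-logs/node-1-beacon_lodestar.log
+```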
diff --git a/docs/pages/contribution/testing/spec-tests.md b/docs/pages/contribution/testing/spec-tests.md
new file mode 100644
index 000000000000..b7a65dafd072
--- /dev/null
+++ b/docs/pages/contribution/testing/spec-tests.md
@@ -0,0 +1,3 @@
+# Specification Tests
+
+Check back soon for more information!! We are in the process of updating our docs.
diff --git a/docs/pages/contribution/testing/unit-tests.md b/docs/pages/contribution/testing/unit-tests.md
new file mode 100644
index 000000000000..cbf4b4ae2264
--- /dev/null
+++ b/docs/pages/contribution/testing/unit-tests.md
@@ -0,0 +1,3 @@
+# Unit Tests
+
+Check back soon for more information!! We are in the process of updating our docs.
diff --git a/docs/pages/data-retention.md b/docs/pages/data-retention.md
new file mode 100644
index 000000000000..41daa8dc458d
--- /dev/null
+++ b/docs/pages/data-retention.md
@@ -0,0 +1,54 @@
+# Data Retention
+
+An Ethereum node's database has two components: the execution client's and the beacon node's. Both need to hold data for a full node to work correctly. In particular, the execution client holds state, such as account balances and smart contract code, as well as the execution blocks with the transaction record. The beacon node is responsible for holding beacon blocks and state; the beacon state primarily holds the validator information.
+
+There are several processes that need to store data for Lodestar. These data sets can grow quite large over time, so it is important to understand how to manage them so the host machine can support operations effectively.
+
+```bash
+$executionDir # this changes depending on the execution client
+ └── execution-db
+
+$dataDir # specified by --dataDir on the beacon command
+├── .log_rotate_audit.json
+├── beacon.log # there can be many of these
+├── enr
+├── peer-id.json
+├── chain-db # default if --dbDir not specified
+│ └── (db files)
+└── peerstore # default if --peerStoreDir not specified
+ └── (peerstore files)
+
+$dataDir # specified by --dataDir on the validator command
+├── .log_rotate_audit.json
+├── validator.log # there can be many of these
+├── validator-db # default if --validatorsDbDir not specified
+│ └── (db files)
+├── proposerConfigs # default if --proposerDir not specified
+│ └── (config files)
+├── cache # default if --cacheDir not specified
+│ └── (cache files)
+├── secrets # default if --secretsDir not specified
+│ ├── 0x8e41b969493454318c27ec6fac90645769331c07ebc8db5037...
+│ └── 0xa329f988c16993768299643d918a2694892c012765d896a16f...
+├── keystores # default if --keystoresDir not specified
+│ ├── 0x8e41b969493454318c27ec6fac90645769331c07ebc8db5037...
+│ │ └── voting-keystore.json
+│ └── 0xa329f988c16993768299643d918a2694892c012765d896a16f...
+│ └── voting-keystore.json
+└── remoteKeys # default if --remoteKeysDir not specified
+ └── 0xa329f988c16993768299643d918a2694892c012765d896a16f.json
+```
+
+## Data Management
+
+Configuring your node to store and prune data is key to success. On average you can expect the databases to grow by the following amounts:
+
+- `execution-db` grows at 2-30GB per week
+- `chain-db` grows at 1GB per month
+- `validator-db` grows at less than 2MB per year, per key (2000 keys = 4GB per year)
+
+`keystores`, `keystore-cache` and `peerstore` are not usually very large and are not expected to grow much during normal operation.
+
+Logs can also become quite large so please check out the section on [log management](./logging-and-metrics/log-management.md) for more information.
+
+There is really only one flag needed to manage the data for Lodestar: [`--dataDir`](./beacon-management/beacon-cli.md#-datadir). Other than that, log management is really the heart of the data management story; beacon node data is what it is. Depending on the execution client that is chosen, there may be flags to help with data storage growth, but that is outside the scope of this document.
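+
+To keep an eye on growth, a quick disk-usage check against the directories above is usually enough (a sketch; substitute the locations configured by your own `--dataDir` flags):
+
+```sh
+du -sh $dataDir/chain-db      # beacon node database
+du -sh $dataDir/validator-db  # validator database
+du -sh $executionDir/execution-db
+```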
diff --git a/docs/pages/getting-started/installation.md b/docs/pages/getting-started/installation.md
new file mode 100644
index 000000000000..4fdfc3e82367
--- /dev/null
+++ b/docs/pages/getting-started/installation.md
@@ -0,0 +1,93 @@
+# Installation
+
+## Docker Installation
+
+The [`chainsafe/lodestar`](https://hub.docker.com/r/chainsafe/lodestar) Docker Hub repository is maintained actively. It contains the `lodestar` CLI preinstalled.
+
+
+!!! info
+ The Docker Hub image tagged as `chainsafe/lodestar:next` is run on CI every commit on our `unstable` branch.
+ For `stable` releases, the image is tagged as `chainsafe/lodestar:latest`.
+
+
+Ensure you have Docker installed by issuing the command:
+
+```bash
+docker -v
+```
+
+It should return a non-error message such as `Docker version xxxx, build xxxx`.
+
+Pull and run the image, and Lodestar should be ready to use:
+
+```bash
+docker pull chainsafe/lodestar
+docker run chainsafe/lodestar --help
+```
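+
+As a sketch of a more complete invocation (the volume path is illustrative; port 9000 is Lodestar's default P2P port):
+
+```sh
+docker run -d \
+  --name lodestar \
+  -v $HOME/lodestar-data:/data \
+  -p 9000:9000/tcp -p 9000:9000/udp \
+  chainsafe/lodestar beacon \
+  --dataDir /data \
+  --network mainnet
+```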
+
+
+!!! info
+ Docker is the recommended setup for Lodestar. Use our [Lodestar Quickstart scripts](https://github.com/ChainSafe/lodestar-quickstart) with Docker for detailed instructions.
+
+
+## Build from Source
+
+### Prerequisites
+
+Make sure to have [Yarn installed](https://classic.yarnpkg.com/en/docs/install). It is also recommended to [install NVM (Node Version Manager)](https://github.com/nvm-sh/nvm) and use the LTS version (currently v20) of [NodeJS](https://nodejs.org/en/).
+
+
+!!! info
+ NodeJS versions older than the current LTS are not supported by Lodestar. We recommend running the latest Node LTS.
+    It is important to make sure the NodeJS version is not changed after reboot by setting a default version: `nvm alias default <version> && nvm use default`.
+
+!!! note
+ Node Version Manager (NVM) will only install NodeJS for use with the active user. If you intend on setting up Lodestar to run under another user, we recommend using [NodeSource's source for NodeJS](https://github.com/nodesource/distributions/blob/master/README.md#installation-instructions) so you can install NodeJS globally.
+
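+A sketch of pinning the default Node version with `nvm` (the version number is illustrative; use the current LTS):
+
+```sh
+nvm install --lts
+nvm alias default 20
+nvm use default
+```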
+
+### Clone repository
+
+Clone the repository locally and build from the stable release branch.
+
+```bash
+git clone -b stable https://github.com/chainsafe/lodestar.git
+```
+
+Switch to the created directory.
+
+```bash
+cd lodestar
+```
+
+### Install packages
+
+Install across all packages. Lodestar follows a [monorepo](https://github.com/lerna/lerna) structure, so all commands below must be run in the project root.
+
+```bash
+yarn install
+```
+
+### Build source code
+
+Build across all packages.
+
+```bash
+yarn run build
+```
+
+### Lodestar CLI
+
+Lodestar should now be ready for use.
+
+```bash
+./lodestar --help
+```
+
+See the [`beacon` CLI Command](../beacon-management/beacon-cli.md) for further information.
+
+## Install from NPM [not recommended]
+
+
+!!! danger
+    For mainnet (production) usage, we only recommend installing with Docker due to [NPM supply chain attacks](https://hackaday.com/2021/10/22/supply-chain-attack-npm-library-used-by-facebook-and-others-was-compromised/). Until a [safer installation method has been found](https://github.com/ChainSafe/lodestar/issues/3596), do not use this install method except for experimental purposes.
+
diff --git a/docs/quickstart.md b/docs/pages/getting-started/quick-start.md
similarity index 100%
rename from docs/quickstart.md
rename to docs/pages/getting-started/quick-start.md
diff --git a/docs/usage/beacon-management.md b/docs/pages/getting-started/starting-a-node.md
similarity index 98%
rename from docs/usage/beacon-management.md
rename to docs/pages/getting-started/starting-a-node.md
index 46b6f2e456c8..dd11381bde10 100644
--- a/docs/usage/beacon-management.md
+++ b/docs/pages/getting-started/starting-a-node.md
@@ -14,7 +14,7 @@ Make sure Lodestar is installed in your local environment, following the chosen
./lodestar --help
```
-For a complete list of beacon node CLI commands and options, see the [Command Line Reference](../../reference/cli/)
+For a complete list of beacon node CLI commands and options, see the [`beacon` CLI Command](../beacon-management/beacon-cli.md) section.
To select a known testnet or mainnet, use the `--network` flag. `mainnet` is selected by default, and a list of available networks is listed with the `--help` flag. Setting the `--network` flag will conveniently configure the beacon node or validator client for the selected network. For power users, any configuration option should be able to be overridden.
@@ -181,4 +181,4 @@ Apr-20 15:16:17.017[] info: Synced - slot: 6264979 - head: 0xde9
6. Peer info: Current total number of outbound or inbound peers, for e.g.: `peers: 27`
-For more insight into how a Lodestar beacon node is functioning, you may setup lodestar metrics and use the prepared Grafana dashboards that are found in the repository. Check out our section on [Prometheus and Grafana](./prometheus-grafana.md) for more details.
+For more insight into how a Lodestar beacon node is functioning, you may setup lodestar metrics and use the prepared Grafana dashboards that are found in the repository. Check out our section on [Prometheus and Grafana](../logging-and-metrics/prometheus-grafana.md) for more details.
diff --git a/docs/pages/getting-started/starting-a-node.new.md b/docs/pages/getting-started/starting-a-node.new.md
new file mode 100644
index 000000000000..b66e797b29ed
--- /dev/null
+++ b/docs/pages/getting-started/starting-a-node.new.md
@@ -0,0 +1,21 @@
+# Starting a Node
+
+## Prerequisites
+
+### Creating a Client Communication JWT
+
+### Creating a Validator Keystore
+
+## Base Considerations
+
+### Execution Client
+
+### Beacon Node
+
+### Validator Client
+
+## Production Considerations
+
+### Ingress/Egress
+
+### Fail-Over
diff --git a/docs/pages/google0c42298b7ec08b7e.html b/docs/pages/google0c42298b7ec08b7e.html
new file mode 100644
index 000000000000..7edebde149af
--- /dev/null
+++ b/docs/pages/google0c42298b7ec08b7e.html
@@ -0,0 +1 @@
+google-site-verification: google0c42298b7ec08b7e.html
\ No newline at end of file
diff --git a/docs/index.md b/docs/pages/index.md
similarity index 62%
rename from docs/index.md
rename to docs/pages/index.md
index 82674eb89fe8..4af149a7a0ef 100644
--- a/docs/index.md
+++ b/docs/pages/index.md
@@ -1,19 +1,19 @@
![lodestar logo](assets/lodestar_icon_text_black_stroke.png)
-## Welcome to the Lodestar documentation!
+## Welcome to the Lodestar documentation
> **Lodestar is an open-source Ethereum Consensus client and Typescript ecosystem, maintained by ChainSafe Systems**
### Getting started
-- Follow the installation method for [source install](install/source.md), [NPM install](install/npm.md), or [Docker install](install/docker.md) to install Lodestar. Or use our [Lodestar Quickstart scripts](https://github.com/ChainSafe/lodestar-quickstart).
-- Use [Lodestar libraries](libraries) in your next Ethereum Typescript project.
-- Run a beacon node on [mainnet or a public testnet](usage/beacon-management.md).
-- Utilize the whole stack by [starting a local testnet](usage/local).
-- View the Lodestar [CLI commands and options](https://chainsafe.github.io/lodestar/reference/cli/)
-- Prospective contributors can read the [contributing section](https://chainsafe.github.io/lodestar/contributing/) to understand how we develop and test on Lodestar.
+- Follow the installation method for [source install](./getting-started/installation.md/#build-from-source) or [Docker install](./getting-started/installation.md/#docker-installation) to install Lodestar. Or use our [Lodestar Quickstart scripts](https://github.com/ChainSafe/lodestar-quickstart).
+- Use [Lodestar libraries](./supporting-libraries/index.md) in your next Ethereum Typescript project.
+- Run a beacon node on [mainnet or a public testnet](./getting-started/starting-a-node.md).
+- Utilize the whole stack by [starting a local testnet](./advanced-topics/setting-up-a-testnet.md).
+- View the Lodestar [CLI commands and options](./beacon-management/beacon-cli.md)
+- Prospective contributors can read the [contributing section](./contribution/getting-started.md) to understand how we develop and test on Lodestar.
- If you have questions [submit an issue](https://github.com/ChainSafe/lodestar/issues/new) or join us on [Discord](https://discord.gg/yjyvFRP)!
-- Please note our [security policy](https://github.com/ChainSafe/lodestar/blob/unstable/SECURITY.md).
+- Please note our [security policy](./security.md).
- Sign up to our [mailing list](https://chainsafe.typeform.com/lodestar) for announcements and any critical information about Lodestar.
## Specifications
diff --git a/docs/pages/introduction.md b/docs/pages/introduction.md
new file mode 100644
index 000000000000..776b018641b8
--- /dev/null
+++ b/docs/pages/introduction.md
@@ -0,0 +1,34 @@
+# Introduction
+
+Ethereum is one of the most profoundly important inventions in recent history. It is a decentralized, open-source blockchain featuring smart contract functionality. It is the second-largest cryptocurrency by market capitalization, after Bitcoin, and is the most actively used blockchain. Ethereum was proposed in 2013 by programmer Vitalik Buterin. Development was crowdfunded in 2014, and the network went live on 30 July 2015, with 72 million coins premined. ChainSafe was founded not too long afterwards and has been actively working in the Ethereum space ever since. We are proud to develop Lodestar and to present this documentation as a resource for the Ethereum community.
+
+## Proof of Stake
+
+In Ethereum's Proof of Stake (PoS) model, validators replace miners from the Proof of Work (PoW) system. Validators are Ethereum stakeholders who lock up a portion of their Ether as a stake. The protocol randomly selects these validators to propose new blocks. The chance of being chosen is tied to the size of their stake: the more Ether staked, the higher the probability of being selected to propose the block. Proposers receive transaction fees and block rewards as incentives. Validators are also responsible for voting on the validity of blocks proposed by other validators. However, they face penalties for misbehavior: provably malicious actions such as double-signing are punished by slashing, while going offline or voting against the eventual majority incurs smaller penalties. This keeps the network honest and reliable. The PoS mechanism significantly reduces energy consumption compared to PoW, because it does not require extensive computational power. Moreover, PoS tends to facilitate faster transaction validation and block creation, enhancing the overall performance and scalability of the network.
+
+## Consensus Clients
+
+In an effort to promote client diversity there are several beacon-nodes being developed. Each is programmed in a different language and by a different team. The following is a list of the current beacon-node clients:
+
+- [Lodestar](https://chainsafe.io/lodestar.html)
+- [Prysm](https://prysmaticlabs.com/)
+- [Lighthouse](https://lighthouse.sigmaprime.io/)
+- [Teku](https://consensys.net/knowledge-base/ethereum-2/teku/)
+- [Nimbus](https://nimbus.team/)
+
+## Why Client Diversity?
+
+The Ethereum network's robustness is significantly enhanced by its client diversity, whereby multiple, independently-developed clients conforming to a common specification facilitate seamless interaction and function equivalently across nodes. This client variety not only fosters a rich ecosystem but also provides a buffer against network-wide issues stemming from bugs or malicious attacks targeted at particular clients. For instance, during the Shanghai denial-of-service attack in 2016, the diversified client structure enabled the network to withstand the assault, underscoring the resilience afforded by multiple client configurations.
+
+On the consensus layer, client distribution is crucial for maintaining network integrity and finality, ensuring transactions are irreversible once validated. A balanced spread of nodes across various clients helps mitigate risks associated with potential bugs or attacks that could, in extreme cases, derail the consensus process or lead to incorrect chain splits, thereby jeopardizing the network's stability and trust. While the data suggests a dominance of Prysm client on the consensus layer, efforts are ongoing to promote a more even distribution among others like Lighthouse, Teku, and Nimbus. Encouraging the adoption of minority clients, bolstering their documentation, and leveraging real-time client diversity dashboards are among the strategies being employed to enhance client diversity, which in turn fortifies the Ethereum consensus layer against adversities and fosters a healthier decentralized network ecosystem.
+
+The non-finality event on the Ethereum network in May 2023 posed a significant challenge. The issue arose from attestations for a fork, which necessitated state replays to validate them, causing a notable strain on system resources. As a result, nodes fell out of sync, which hindered accurate tracking of the actual head of the chain. This situation was exacerbated by a decline in attestations during specific epochs, further hampering the consensus mechanism. The Lodestar team had noticed late attestations several weeks prior to the event and implemented a feature that addressed such challenges by not processing untimely attestations, thus avoiding expensive state replays. While it was done for slightly different reasons, the result was the same: Lodestar was able to follow the chain correctly and helped to stabilize the network. This example underscores the importance of client diversity and network resilience against potential forks and replay attacks, which are realistic threats, especially given the complexity of Ethereum's consensus mechanism.
+
+## Ethereum Reading List
+
+- [Ethereum Docs](https://ethereum.org/en/developers/docs/)
+- [Upgrading Ethereum](https://eth2book.info/capella/) by Ben Edgington
+- [Ethereum Book](https://github.com/ethereumbook/ethereumbook) by Andreas M. Antonopoulos and Gavin Wood
+- [Ethereum Consensus Specification](https://github.com/ethereum/consensus-specs)
+- [Casper the Friendly Finality Gadget](https://browse.arxiv.org/pdf/1710.09437.pdf) by Vitalik Buterin and Virgil Griffith
+- [LMD Ghost](https://github.com/protolambda/lmd-ghost) by protolambda
diff --git a/docs/pages/lightclient-prover/.gitkeep b/docs/pages/lightclient-prover/.gitkeep
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/docs/usage/client-monitoring.md b/docs/pages/logging-and-metrics/client-monitoring.md
similarity index 100%
rename from docs/usage/client-monitoring.md
rename to docs/pages/logging-and-metrics/client-monitoring.md
diff --git a/docs/pages/logging-and-metrics/dashboards.md b/docs/pages/logging-and-metrics/dashboards.md
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/docs/pages/logging-and-metrics/log-management.md b/docs/pages/logging-and-metrics/log-management.md
new file mode 100644
index 000000000000..a0ee1d5fec07
--- /dev/null
+++ b/docs/pages/logging-and-metrics/log-management.md
@@ -0,0 +1,3 @@
+# Log Management
+
+Check back soon for more information!!
diff --git a/docs/pages/logging-and-metrics/metrics-management.md b/docs/pages/logging-and-metrics/metrics-management.md
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/docs/usage/prometheus-grafana.md b/docs/pages/logging-and-metrics/prometheus-grafana.md
similarity index 100%
rename from docs/usage/prometheus-grafana.md
rename to docs/pages/logging-and-metrics/prometheus-grafana.md
diff --git a/docs/pages/reference/cli.md b/docs/pages/reference/cli.md
new file mode 100644
index 000000000000..1b57913b99fc
--- /dev/null
+++ b/docs/pages/reference/cli.md
@@ -0,0 +1,8 @@
+# Page relocated
+
+_**Welcome! This page has been moved. Please check out our new docs layout from the Table of Contents! Below are some helpful links to the CLI pages that were split out from this original document**_
+
+- [Beacon Node CLI](../beacon-management/beacon-cli.md)
+- [Validator CLI](../validator-management/validator-cli.md)
+- [Bootnode CLI](../bootnode/bootnode-cli.md)
+- [Light Client CLI](../lightclient-prover/lightclient-cli.md)
diff --git a/docs/pages/supporting-libraries/index.md b/docs/pages/supporting-libraries/index.md
new file mode 100644
index 000000000000..555294393ec1
--- /dev/null
+++ b/docs/pages/supporting-libraries/index.md
@@ -0,0 +1,27 @@
+# Supporting Libraries
+
+## Networking
+
+### LibP2P
+
+- [`@chainsafe/js-libp2p-noise`](https://github.com/NodeFactoryIo/js-libp2p-noise) - [Noise](https://noiseprotocol.org/noise.html) handshake for `js-libp2p`
+- [`@chainsafe/js-libp2p-gossipsub`](https://github.com/ChainSafe/js-libp2p-gossipsub) - [Gossipsub](https://github.com/libp2p/specs/tree/master/pubsub/gossipsub) protocol for `js-libp2p`
+- [`@chainsafe/libp2p-yamux`](https://github.com/ChainSafe/js-libp2p-yamux)
+
+### Discv5
+
+- [`discv5`](https://github.com/ChainSafe/discv5) - [Discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md) protocol
+
+## Serialization and Hashing
+
+- [`@chainsafe/ssz`](https://github.com/ChainSafe/ssz) - Simple Serialize (SSZ)
+- [`@chainsafe/persistent-merkle-tree`](https://github.com/ChainSafe/persistent-merkle-tree) - binary merkle tree implemented as a [persistent data structure](https://en.wikipedia.org/wiki/Persistent_data_structure)
+- [`@chainsafe/as-sha256`](https://github.com/ChainSafe/as-sha256) - Small AssemblyScript implementation of SHA256
+
+## BLS
+
+- [`@chainsafe/bls`](https://github.com/ChainSafe/bls) - Isomorphic Ethereum Consensus BLS sign / verify / aggregate
+- [`@chainsafe/blst-ts`](https://github.com/ChainSafe/blst-ts) - Node specific Ethereum Consensus BLS sign / verify / aggregate
+- [`@chainsafe/bls-keystore`](https://github.com/ChainSafe/bls-keystore) - store / retrieve a BLS secret key from an [EIP-2335](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2335.md) JSON keystore
+- [`@chainsafe/bls-keygen`](https://github.com/ChainSafe/bls-keygen) - utility functions to generate BLS secret keys, following [EIP-2333](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2333.md) and [EIP-2334](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2334.md)
+- [`@chainsafe/bls-hd-key`](https://github.com/ChainSafe/bls-hd-key) - low level [EIP-2333](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2333.md) and [EIP-2334](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2334.md) functionality
diff --git a/docs/libraries/index.md b/docs/pages/supporting-libraries/libraries.md
similarity index 100%
rename from docs/libraries/index.md
rename to docs/pages/supporting-libraries/libraries.md
diff --git a/docs/pages/tools/core-dumps.md b/docs/pages/tools/core-dumps.md
new file mode 100644
index 000000000000..98d564eb9308
--- /dev/null
+++ b/docs/pages/tools/core-dumps.md
@@ -0,0 +1,66 @@
+# Core Dump Analysis
+
+Core dump analysis is some ninja-level stuff. Once you get the hang of it, you will feel like you have superpowers. It will up your game to a whole new level because you will be able to debug issues that seemed impossible before. Post-crash analysis is a very powerful tool to have in your tool belt. A core dump contains all of the objects in memory as well as all of the stack frame information at the exact moment the dump was taken, usually when a hard crash occurs.
+
+It is important to note that debug symbols will greatly aid you when debugging issues related to native code like `C/C++`. When compiled languages are optimized, the compiler will often strip out identifiers, and all that remains are mangled symbols and addresses. Compiling with debug symbols leaves all of the identifiers, file names and line numbers intact.
+
+While it is not always practical to run a debug build of node, if you run across a persistent issue it will be helpful to recreate it on a debug build and use that for analysis.
+
+It is important to note that the EXACT binary that was running when the dump was created MUST be loaded when doing analysis. There is a lot of information in the dump that is specific to the binary that was running (like function offsets, etc). If you load a different binary you will get a lot of errors and the analysis will not be useful (if it loads at all).
+
+It is also nice to know that you can create the dump on Linux, using a Linux-compiled version of node, and then read it on a Mac. All that is needed is to download the node binary and dump file to the Mac; they can be loaded into a Mac-compiled version of `llnode` and everything will work as expected. It's just the metadata in the Linux binary that is needed for analysis; the code is never actually run.
+
+## Installing `llnode`
+
+`llnode` is a Node.js plugin for the [LLDB](https://lldb.llvm.org/) debugger. It is the officially sanctioned tool from Node and a powerful way to do postmortem analysis of Node.js processes. The install process is pretty straightforward unless you have an M1 Mac. XCode ships with an instance of `lldb`, and installing `llnode` is as simple as running `npm install -g llnode`.
+
+On an M1 Mac the install will work fine, but the plugin will crash at load time. See [this issue](https://github.com/nodejs/llnode/issues/430#issuecomment-1844628224) for updates. The workaround is to install `lldb` via Homebrew.
+
+```sh
+# should only be necessary on M1 macs at time of writing
+$ brew install llvm
+$ echo 'export PATH="/opt/homebrew/opt/llvm/bin:$PATH"' >> ~/.zshrc
+$ # note that llvm is prepended to PATH so its lldb resolves first
+$ source ~/.zshrc
+$ which llvm-config
+/opt/homebrew/opt/llvm/bin/llvm-config # if this is not what comes up restart the shell
+$ npm install -g llnode
+$ llnode
+(lldb) plugin load '/Users/ninja_user/.nvm/versions/node/v20.5.1/lib/node_modules/llnode/llnode.dylib'
+(lldb) settings set prompt '(llnode) '
+(llnode)
+```
+
+## Collecting a core dump
+
+Before a core dump can be created, the system must be configured to allow it.
+
+```sh
+ulimit -c unlimited
+```
+
+This is a critical step. If that command is not run, the core will not be dumped to disk.
+
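+Where the kernel writes the core file is controlled by the `kernel.core_pattern` sysctl; a quick way to check it (output varies by distro):
+
+```sh
+cat /proc/sys/kernel/core_pattern
+```
+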
+Core dumps are normally created by the kernel when certain process signals are encountered. `SIGSEGV` is the most common signal that will cause a dump; it's sent by the kernel to the process when a segfault occurs. `SIGSEGV` is not the only signal that works, and you can see the full list [here](https://man7.org/linux/man-pages/man7/signal.7.html) under the "Standard Signals" section (all the ones that say "Core" in the "Action" column).
+
+If you want to create a dump on demand you can use the `gcore` command on Linux. This will create a dump of the process without killing it. If you don't mind termination you can also use `kill -SIGSEGV <pid>` to send a dump signal to the process.
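+
+As a sketch (the PID is illustrative; find yours with `pgrep`):
+
+```sh
+# Find the process id of the running node process
+pgrep -f lodestar
+
+# Dump core without terminating the process (may require elevated privileges)
+sudo gcore -o lodestar.core 12345
+```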
+
+## Analyzing a core dump
+
+Once you collect the core dump you can load it into `llnode` for debugging.
+
+```sh
+# remember that the node binary must be the exact same one that was running when the core was created
+$ llnode -f /path/to/node_debug -c /Users/ninja_user/coredumps/node.coredump
+(lldb) target create "node_debug" --core "node.coredump"
+Core file '/Users/ninja_user/coredumps/node.coredump' (x86_64) was loaded.
+(lldb) plugin load '/Users/ninja_user/.nvm/versions/node/v20.5.1/lib/node_modules/llnode/llnode.dylib'
+(lldb) settings set prompt '(llnode) '
+(llnode)
+```
+
+Once the dump is loaded, the first few steps are to figure out what types of objects were in memory and what the processor was working on when the crash occurred. Let's start with the stack trace.
+
+There are two distinct commands for pulling the stack because node is both a native runtime and a virtual machine. The `bt` (backtrace) command will pull the native stack frames, and the `v8 bt` command will use the `llnode` plugin to pull the JavaScript stack frames. Newer versions of `llnode` will automatically pull the JavaScript stack frames when `bt` is run, but it is still good to know the difference. It is also possible to add the `all` verb to the `bt` command to pull the backtrace for all threads.
+
+To start looking through memory there are two commands that are helpful. The `v8 findjsobjects` command will list all of the JavaScript objects in memory. The `v8 findjsinstances` command will list all of the instances of a particular JavaScript object.
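+
+A typical first pass through a dump might look like this (the object name `PeerId` is illustrative; output omitted):
+
+```sh
+# Inside an llnode session
+(llnode) bt all
+(llnode) v8 bt
+(llnode) v8 findjsobjects
+(llnode) v8 findjsinstances PeerId
+```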
diff --git a/docs/pages/tools/debugging.md b/docs/pages/tools/debugging.md
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/docs/tools/flamegraphs.md b/docs/pages/tools/flamegraphs.md
similarity index 100%
rename from docs/tools/flamegraphs.md
rename to docs/pages/tools/flamegraphs.md
diff --git a/docs/pages/tools/heap-dumps.md b/docs/pages/tools/heap-dumps.md
new file mode 100644
index 000000000000..379f7e4de2f2
--- /dev/null
+++ b/docs/pages/tools/heap-dumps.md
@@ -0,0 +1,279 @@
+# Heap Dump Analysis
+
+There are a number of reasons why one would want to take a heap dump, but in particular they are helpful for finding memory-intensive operations and leaks. There are two major types of heap dumps available to node developers: JavaScript heap dumps and native heap dumps. The JS heap dump is much more common and is the default heap dump generated by `node`. It is useful when analyzing JS-generated objects that are managed by the runtime. However, the JS heap dump has one major limitation: it does not include native objects. This is where the native heap dump comes in handy. The native heap dump is a snapshot of the entire process memory and includes objects that are allocated by `C/C++` code, including native modules in use by the application. The limitation of the native heap dump is that it will not include any JS objects allocated by the `V8` runtime. Those are generally created within `mmap`'ed pages, and the native heap dump tools are specific to `C` objects created with `malloc` and destroyed via `free`. `C++` is also covered, as `new` and `delete` are wrappers around `malloc` and `free`. This is why it is important to understand how to analyze both types of memory usage.
+
+## JavaScript Heap Dump
+
+Node has built-in `V8` heap dump access, and it's a very powerful tool for analyzing memory usage. Understanding how the dump is created will help you understand both how it is displayed and how to use the analysis more effectively.
+
+The `V8` heap dump is a stop-the-world process, because walking the entire heap graph is necessary to create one. This is similar to a full, major garbage collection event. The VM starts at the heap entrance node, walks the entire graph, and makes note of every edge that connects each node along the way. Nodes are JSObjects and edges are references between those objects.
+
+By the time the whole heap is walked, the full size and values of all nodes are known and all of the connections between those nodes are well understood. The object that is returned is a set of three arrays: the nodes, the edges and the string values that were encountered (strings are themselves arrays of characters in `C`, so they are treated a bit differently by `V8`).
+
+### Creating a `V8` heap dump
+
+There are two functions for creating a heap dump, but both call the same functionality under the hood. One streams the result, `require("v8").getHeapSnapshot([options])`, and is primarily intended for the Chrome devtools "take a snapshot" button. The second writes the heap dump to a file, `require("v8").writeHeapSnapshot(filename[,options])`.
+
+The optional `options` argument is the same in both cases and contains two props, `exposeInternals` and `exposeNumericValues`, to enrich the dump. In many cases it is the application layer one wants to debug, so `exposeInternals` is not usually necessary. In `V8`, numbers are stored as 32-bit integers and the size of a pointer is also 32 bits. So, as an optimization, the pointer to a numeric value can be eliminated and the value itself stored in the `Address` of the `Value` instead. `exposeNumericValues` transcribes those "pointers" to the actual numeric values and appends them to the dump.
+
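+As a quick illustration of the file-writing variant (the script and file name are arbitrary):
+
+```sh
+# Writes example.heapsnapshot to the current working directory and prints the file name
+node -e 'const v8 = require("v8"); console.log(v8.writeHeapSnapshot("example.heapsnapshot"))'
+```
+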
+Because heap analysis happens frequently during Lodestar development, there is a helper API endpoint to capture a heap dump. **It is IMPORTANT** that this endpoint is not public facing, as it opens the threat of a DDoS attack.
+
+The endpoint accepts a `POST` request and you may include an optional `dirpath` query parameter to specify the directory where the heap dump will be written. If the `dirpath` is not specified then the heap dump will be written to the current working directory.
+
+To create a Lodestar heap dump you can use the following command:
+
+```sh
+curl -X POST http://localhost:9596/eth/v1/lodestar/write_heapdump?dirpath=/some/directory/path
+```
+
+### Viewing a `V8` heap dump
+
+It is best to analyze on a local development machine, so if Lodestar is running on a cloud instance, download the dump to the local environment. Open Chrome, or any Chromium-based browser (the example photos were taken using Brave). In the URL bar, type `chrome://inspect` to bring up the DevTools menu (in Brave the URL will be rewritten to `brave://inspect`).
+
+![DevTools](../images/heap-dumps/devtools.png)
+
+Click on the `Open dedicated DevTools for Node` link to open the node specific window and click on the `Memory` tab as shown below.
+
+![Memory Tab](../images/heap-dumps/memory-tab.png)
+
+Load the profile by either right clicking on the left pane or by clicking the `Load` button at the bottom.
+
+![Load Profile](../images/heap-dumps/load-profile.png)
+
+### Analyzing a `V8` heap dump
+
+Analysis is as much an art as it is a science, and the best way to learn is to do it a few times. Generally the goal is to look for memory leaks, though reducing memory overhead is also something that happens. This guide will focus on leaks. With memory leaks, one is looking for why objects have references that prevent them from being garbage collected.
+
+To spot sources of leaks, focus on objects that have large quantities or very large `retained size`. Retained size is the amount of memory that would be freed if the object was garbage collected. As an example if there is an object that has lots and lots of instances, like 100,000, and they are all pushed into an array then the array will have a very large retained size. This is because the array is holding references to all of the objects that it contains.
+
+
+If it is not immediately apparent what objects are being leaked then another tool in your arsenal will be to take a second snapshot and compare it to the first. This will show what objects have been created/changed since the first snapshot.
+
+If there is an object with a large retained size that stays roughly, but not exactly, the same over time, chances are it is NOT the leak. Some objects can get quite large during runtime; if an object's size is roughly stable over time while not exactly identical, it means the application is modifying the object (which is why the size is not identical), but if it hasn't grown significantly over time it can be assumed to be the working size of the instances.
+
+Try to focus on objects that are growing in size or in number over time. Growing in size means the object is holding references to other objects and growing in number means a function closure somewhere is retaining the small instances.
+
+
+That is the science part, but these clues are just breadcrumbs to follow. In order to actually resolve the leak, one needs to go into the code to figure out where those objects are being created, or more often, why the references to them are being retained. This is where the art comes in.
+
+Having a good understanding of the codebase will help narrow down where to look. It is also common that the leak is not coming directly from Lodestar code but rather from one of the dependencies, so be careful not to rule those out.
+
+## Native Heap Dump
+
+_**Note: collecting a native heap dump is only supported on Linux; analysis can be done from Linux or Mac**_
+
+There are several tools that can be used to do native heap dump analysis. The most common are [`massif`](https://valgrind.org/docs/manual/ms-manual.html) from the [`Valgrind`](https://valgrind.org/) suite, google's [`gperftools`](https://github.com/gperftools/gperftools) and `heaptrack` from [KDE](https://community.kde.org/Main_Page). Of the three, `heaptrack` is the most user friendly tool, and it is specifically designed for the task. It is much faster than `Valgrind`, easier to integrate than `gperftools` and also includes a gui for result analysis. Often times there are also memory allocations that are not related to memory leaks, and tools like `Valgrind` and `gperftools` become less useful. This is why `heaptrack` is the recommended tool for heap dump analysis on Lodestar.
+
+A few things will make the results with `heaptrack` far better. The most important is using debug builds of all libraries included in the binary, including the application itself. The results are not useless without debug symbols, but optimizing functions is very difficult without knowing the function names, file names, and line numbers.
+
+This is the heart of what `heaptrack` does for us. It hooks into memory allocation and records a stack trace for each `malloc` call site, so every time memory is reserved it can be traced back to where in the code it happened. `heaptrack` also hooks into `free` and matches it against the allocations, both to detect memory leaks and to flag temporary allocations that can be optimized. Identifying high-frequency allocations also makes it possible to reduce how many instances of each object are created.
+
+Generally the `.heapdump` file will be created on a cloud server and then copied to a local machine for analysis, mostly because the GUI is not available over SSH. The GUI is not required for analysis, but it is much easier to use than the command-line tools. The first step is to install `heaptrack` on the target server and capture a profile.
+
+### Build collection tools
+
+Assume the following directory structure:
+
+```sh
+├── beacon-node
+│ ├── db
+│ ├── logs
+│ ├── start-lodestar.sh
+│ └── rc-config.yml
+├── lodestar
+└── node # step below will clone this repo
+```
+
+We will start from the directory that contains `lodestar` and the `beacon-node` files.
+
+```sh
+# Install heaptrack
+$ sudo apt-get update
+$ sudo apt-get -y install heaptrack
+
+# Using a debug build of node is recommended and it can be built
+# from source. Clone the node repo to get started.
+$ git clone https://github.com/nodejs/node.git
+$ cd node
+
+# Use whichever version of node you prefer
+$ git checkout v20.10.0
+$ ./configure --debug
+
+# This command only builds the debug version of node and assumes
+# that a release version of node is already installed on the system
+$ make -C out BUILDTYPE=Debug -j$(nproc --all)
+
+# Move the debug version of node to the same folder that the release
+# version is installed in and name it `node_debug`. This will put the
+# debug binary on the path and allow you to run it with the
+# `node_debug` command
+$ cp out/Debug/node "$(which node)_debug"
+$ which node_debug
+/your/home/directory/.nvm/versions/node/v20.10.0/bin/node_debug
+
+# Return to the lodestar repo
+$ cd ../lodestar
+
+# Clean the build artifacts and node_modules
+$ yarn clean && yarn clean:nm
+
+# Install the dependencies
+$ yarn install
+
+# Ensure that all native modules are rebuilt with debug symbols. Some
+# modules are prebuilt, like classic-level, and the debug symbols may
+# not be included. If the debugging exercise is focused on one of
+# these dependencies, you will need to clone those repos and build
+# them manually with debug symbols.
+$ npm rebuild --debug
+```
+
+### Collect a heap dump
+
+```sh
+# Move to the `beacon-node` directory
+$ cd ../beacon-node
+
+# Start lodestar with profiling enabled
+$ heaptrack \
+    --output ./lodestar.heapdump \
+    node_debug \
+    --max-old-space-size=8192 \
+    ../lodestar/packages/cli/bin/lodestar.js \
+    beacon \
+    --rcConfig ./rc-config.yml \
+    > /dev/null 2>&1 &
+# Wait some period of time for the heap dump data to be collected
+
+# The data will not be persisted until the process is stopped. You can
+# gracefully stop the process with the command below; to hard-kill it
+# instead, append `-9` to the `kill` command, though that should not be necessary
+$ ps aux | grep lodestar | grep -v grep | awk '{print $2}' | head -n 1 | xargs kill
+```
+
+### Collecting a heap dump on a running process
+
+Collecting a heap dump can also be done on a running process. There are both advantages and disadvantages to this approach. The main advantage is that you can collect a heap dump without having to restart the process. The downside is that the dump will only include allocations and deallocations made while the tracker is attached: all non-paired `malloc`/`free` calls will register as leaks, and the dump will not give a true representation of how the heap is being used. On the upside, the dump will be much smaller.
+
+It is important to note a warning that is in the `heaptrack` source code:
+
+_WARNING: Runtime-attaching heaptrack is UNSTABLE and can lead to CRASHES in your application, especially after you detach heaptrack again. You are hereby warned, use it at your own risk!_
+
+```sh
+# Move to the `beacon-node` directory
+$ cd ../beacon-node
+
+# Start lodestar
+$ node_debug \
+    --max-old-space-size=8192 \
+    ../lodestar/packages/cli/bin/lodestar.js \
+    beacon \
+    --rcConfig ./rc-config.yml \
+    > /dev/null 2>&1 &
+# Wait some period of time to start collecting the dump
+
+# GDB is required to inject heaptrack into a running process
+# so you may need to install it
+$ sudo apt-get update
+$ sudo apt-get install -y gdb
+
+# Elevated `ptrace` permissions are also required depending on your
+# system configuration. This change lasts until the next reboot
+$ echo 0 | sudo tee /proc/sys/kernel/yama/ptrace_scope
+
+# Get the pid of the lodestar process
+$ export LODESTAR_PID=$(ps aux | grep lodestar | grep -v grep | awk '{print $2}' | head -n 1)
+
+# Inject heaptrack into the running process
+$ heaptrack --pid $LODESTAR_PID
+
+heaptrack output will be written to "/home/user/beacon-node/heaptrack.node_debug.111868.zst"
+/usr/lib/heaptrack/libheaptrack_preload.so
+injecting heaptrack into application via GDB, this might take some time...
+injection finished
+# Wait some period of time to collect the heap dump. See below
+# for the termination command that can be run from a separate
+# terminal when ready to stop collecting data
+Terminated
+removing heaptrack injection via GDB, this might take some time...
+Heaptrack finished! Now run the following to investigate the data:
+
+ heaptrack --analyze "/home/user/beacon-node/heaptrack.node_debug.111868.zst"
+```
+
+`heaptrack` installs a signal trap, but it uses a nested shell to perform the actual injection, so simply pressing Ctrl+C in the injected process will corrupt the output file. To stop collection properly, one needs to target the pid of the nested shell. Here is a helper command to target that process:
+
+```sh
+ps -ef | grep '[h]eaptrack --pid' | awk '$3 == '$(ps -ef | grep '[h]eaptrack --pid' | awk '$3 != 1 {print $2}' | head -n 1)' {print $2}' | xargs -r kill
+```
+
+After working with the injected process for a while, I cannot honestly recommend it. It can work in a pinch and is best suited to situations where the profiled process can be exited gracefully without repercussions (not on mainnet, for instance). The benefit, though, is that the heap dump will be much smaller and targeted at runtime behavior (it will not contain the transient startup allocations), which can make it easier to see what is happening.
+
+### Installing `heaptrack-gui` on Linux
+
+```sh
+# You can use apt, apt-get or aptitude to install the GUI
+$ sudo apt-get update
+$ sudo apt-get install -y heaptrack-gui
+```
+
+### Installing `heaptrack-gui` on OSX
+
+At the time of writing there is no official pre-built binary for OSX. Building one was a bit of a challenge, but it was well worth the effort as the tool works very well. There were a number of bugs along the way while following the docs, so your mileage may vary, but this is what worked for me.
+
+Most of the dependencies can be installed via Homebrew, but the tool itself, as well as one of its dependencies, needs to be built from source. This process assumes a working folder that the repos can be cloned into.
+
+```sh
+# Start in the root folder where the repos will be cloned
+$ brew install qt@5
+
+# prepare tap of kde-mac/kde
+$ brew tap kde-mac/kde https://invent.kde.org/packaging/homebrew-kde.git
+$ "$(brew --repo kde-mac/kde)/tools/do-caveats.sh"
+
+# install the kde-mac and other required dependencies
+$ brew install kde-mac/kde/kf5-kcoreaddons \
+    kde-mac/kde/kf5-kitemmodels \
+    kde-mac/kde/kf5-kconfigwidgets \
+    kde-mac/kde/kdiagram \
+    extra-cmake-modules \
+    ki18n \
+    threadweaver \
+    boost \
+    zstd \
+    gettext
+
+# There is a bug in the current version of kde-mac/kde and one dependency needs
+# to be built manually. This is the workaround to get it built.
+$ git clone https://invent.kde.org/frameworks/kio.git
+$ mkdir kio/build
+$ cd kio/build
+$ export CMAKE_PREFIX_PATH=$(brew --prefix qt@5)
+$ cmake -G Ninja -DCMAKE_BUILD_TYPE=Release ..
+$ ninja
+$ sudo ninja install
+$ cd ../..
+
+# Now make sure that the dependencies are available to the system during runtime
+$ ln -sfv "$(brew --prefix)/share/kf5" "$HOME/Library/Application Support"
+$ ln -sfv "$(brew --prefix)/share/knotifications5" "$HOME/Library/Application Support"
+$ ln -sfv "$(brew --prefix)/share/kservices5" "$HOME/Library/Application Support"
+$ ln -sfv "$(brew --prefix)/share/kservicetypes5" "$HOME/Library/Application Support"
+
+# We are now ready to build the heaptrack_gui binaries for analysis on OSX
+$ git clone https://invent.kde.org/sdk/heaptrack.git
+$ cd heaptrack
+$ mkdir build
+$ cd build
+$ CMAKE_PREFIX_PATH=$(brew --prefix qt@5) PATH=$PATH:/opt/homebrew/opt/gettext/bin cmake ..
+$ cmake -DCMAKE_BUILD_TYPE=Release ..
+$ make heaptrack_gui
+$ sudo make install
+# You can now find heaptrack_gui among your GUI applications. By default
+# it is placed at /Applications/KDE/heaptrack_gui.app
+```
diff --git a/docs/pages/tools/perf.md b/docs/pages/tools/perf.md
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/docs/pages/trouble-shooting.md b/docs/pages/trouble-shooting.md
new file mode 100644
index 000000000000..144aeb90ce20
--- /dev/null
+++ b/docs/pages/trouble-shooting.md
@@ -0,0 +1 @@
+# Troubleshooting
diff --git a/docs/pages/validator-management/key-management.md b/docs/pages/validator-management/key-management.md
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/docs/pages/validator-management/multiple-and-fallback-validation.md b/docs/pages/validator-management/multiple-and-fallback-validation.md
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/docs/usage/validator-management.md b/docs/pages/validator-management/validator-management.md
similarity index 100%
rename from docs/usage/validator-management.md
rename to docs/pages/validator-management/validator-management.md
diff --git a/docs/pages/validator-management/withdrawals.md b/docs/pages/validator-management/withdrawals.md
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/lerna.json b/lerna.json
index 4130f46a317b..bb0c43fed5fe 100644
--- a/lerna.json
+++ b/lerna.json
@@ -4,7 +4,7 @@
],
"npmClient": "yarn",
"useNx": true,
- "version": "1.12.0",
+ "version": "1.13.0",
"stream": true,
"command": {
"version": {
diff --git a/mkdocs.yml b/mkdocs.yml
deleted file mode 100644
index 759a8dfd7151..000000000000
--- a/mkdocs.yml
+++ /dev/null
@@ -1,85 +0,0 @@
-site_name: Lodestar Documentation
-site_description: Lodestar Documentation - Typescript Ethereum Consensus client
-site_url: https://chainsafe.github.io/lodestar
-
-repo_name: chainsafe/lodestar
-repo_url: https://github.com/chainsafe/lodestar
-
-# Configuration
-theme:
- name: material
- logo: assets/lodestar_icon_300.png
- favicon: assets/round-icon.ico
- palette:
- - scheme: preference
- media: "(prefers-color-scheme: light)"
- primary: black
- accent: deep purple
- toggle:
- icon: material/weather-night
- name: Switch to dark mode
- - scheme: slate
- media: "(prefers-color-scheme: dark)"
- primary: black
- accent: deep purple
- toggle:
- icon: material/weather-sunny
- name: Switch to light mode
- nav_style: dark
-
-plugins:
- - search
- - mermaid2:
- version: 8.6.4
- arguments:
- theme: |
- ^(window.matchMedia && window.matchMedia('(prefers-color-scheme: dark)').matches) ? 'dark' : 'light'
-
-markdown_extensions:
- - meta
- - codehilite:
- guess_lang: false
- - admonition
- - toc:
- permalink: true
- - pymdownx.superfences:
- # make exceptions to highlighting of code (for mermaid):
- custom_fences:
- - name: mermaid
- class: mermaid
- format: !!python/name:mermaid2.fence_mermaid
-extra_css:
- - stylesheets/extras.css
-
-# Socials
-extra:
- social:
- - icon: fontawesome/brands/github-alt
- link: https://github.com/ChainSafe/lodestar
- - icon: fontawesome/brands/twitter
- link: https://twitter.com/ChainSafeth
- - icon: fontawesome/brands/discord
- link: https://discord.gg/yjyvFRP
- - icon: fontawesome/brands/medium
- link: https://blog.chainsafe.io
-
-# Customize left navigation menu
-nav:
- - Getting Started: index.md
- - Installation:
- - Install from source: install/source.md
- - Install from NPM: install/npm.md
- - Install with Docker: install/docker.md
- - Using Lodestar:
- - Beacon management: usage/beacon-management.md
- - Local testnet: usage/local.md
- - Validator management: usage/validator-management.md
- - Prometheus & Grafana Setup: usage/prometheus-grafana.md
- - MEV Builder Integration: usage/mev-integration.md
- - Client monitoring: usage/client-monitoring.md
- - Reference:
- - Command line: reference/cli.md
- - Libraries: libraries/index.md
- - Design:
- - Lodestar package structure: design/depgraph.md
- - Contributing: contributing.md
diff --git a/package.json b/package.json
index c8910209a83b..8e6dad1fdea2 100644
--- a/package.json
+++ b/package.json
@@ -11,7 +11,7 @@
"clean": "rm -rf ./packages/*/lib ./packages/*/*.tsbuildinfo",
"clean:nm": "rm -rf ./packages/*/node_modules ./node_modules",
"build": "lerna run build",
- "build:docs": "lerna run build:refdocs && ./scripts/prepare-docs.sh",
+ "build:docs": "lerna run check-readme && lerna run build:docs && ./scripts/prepare-docs.sh",
"build:watch": "lerna exec --parallel -- 'yarn run build:watch'",
"build:ifchanged": "lerna exec -- ../../scripts/build_if_changed.sh",
"lint": "eslint --color --ext .ts packages/*/src packages/*/test",
@@ -22,6 +22,7 @@
"check-build": "lerna run check-build",
"check-readme": "lerna run check-readme",
"check-types": "lerna run check-types",
+ "check-spelling": "pyspelling -c .pyspelling.yml -v",
"coverage": "lerna run coverage",
"test": "lerna run test --concurrency 1",
"test:unit": "lerna run test:unit --concurrency 1",
@@ -52,6 +53,8 @@
"@types/sinon-chai": "^3.2.9",
"@typescript-eslint/eslint-plugin": "6.7.2",
"@typescript-eslint/parser": "6.7.2",
+ "@vitest/coverage-v8": "^1.1.0",
+ "@vitest/browser": "^1.1.0",
"c8": "^8.0.1",
"chai": "^4.3.8",
"chai-as-promised": "^7.1.1",
@@ -59,12 +62,13 @@
"crypto-browserify": "^3.12.0",
"electron": "^26.2.2",
"eslint": "^8.50.0",
- "eslint-plugin-import": "^2.28.1",
- "eslint-plugin-prettier": "^5.0.0",
+ "eslint-import-resolver-typescript": "^3.6.1",
"eslint-plugin-chai-expect": "^3.0.0",
+ "eslint-plugin-import": "^2.28.1",
"eslint-plugin-mocha": "^10.2.0",
- "eslint-import-resolver-typescript": "^3.6.1",
+ "eslint-plugin-prettier": "^5.0.0",
"https-browserify": "^1.0.0",
+ "jsdom": "^23.0.1",
"karma": "^6.4.2",
"karma-chai": "^0.1.0",
"karma-chrome-launcher": "^3.2.0",
@@ -93,13 +97,18 @@
"ts-node": "^10.9.1",
"typescript": "^5.2.2",
"typescript-docs-verifier": "^2.5.0",
- "webpack": "^5.88.2",
+ "vite-plugin-node-polyfills": "^0.18.0",
+ "vite-plugin-top-level-await": "^1.4.1",
+ "vitest": "^1.1.0",
+ "vitest-when": "^0.3.0",
"wait-port": "^1.1.0",
- "vitest": "^0.34.6",
- "vitest-when": "^0.2.0",
- "@vitest/coverage-v8": "^0.34.6"
+ "webdriverio": "^8.27.0",
+ "webpack": "^5.88.2"
},
"resolutions": {
- "dns-over-http-resolver": "^2.1.1"
+ "dns-over-http-resolver": "^2.1.1",
+ "chai": "^4.3.10",
+ "loupe": "^2.3.6",
+ "vite": "^5.0.0"
}
}
diff --git a/packages/api/.mocharc.yaml b/packages/api/.mocharc.yaml
deleted file mode 100644
index f9375365e517..000000000000
--- a/packages/api/.mocharc.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-colors: true
-timeout: 2000
-exit: true
-extension: ["ts"]
-require:
- - ./test/setup.ts
-node-option:
- - "loader=ts-node/esm"
diff --git a/packages/api/.nycrc.json b/packages/api/.nycrc.json
deleted file mode 100644
index 69aa626339a0..000000000000
--- a/packages/api/.nycrc.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- "extends": "../../.nycrc.json"
-}
diff --git a/packages/api/package.json b/packages/api/package.json
index 2f1c5953a673..2dfcbc73b65c 100644
--- a/packages/api/package.json
+++ b/packages/api/package.json
@@ -11,7 +11,7 @@
"bugs": {
"url": "https://github.com/ChainSafe/lodestar/issues"
},
- "version": "1.12.0",
+ "version": "1.13.0",
"type": "module",
"exports": {
".": {
@@ -65,16 +65,16 @@
"lint:fix": "yarn run lint --fix",
"pretest": "yarn run check-types",
"test": "yarn test:unit && yarn test:e2e",
- "test:unit": "nyc --cache-dir .nyc_output/.cache -e .ts mocha 'test/unit/**/*.test.ts'",
+ "test:unit": "vitest --run --dir test/unit/ --coverage",
"check-readme": "typescript-docs-verifier"
},
"dependencies": {
"@chainsafe/persistent-merkle-tree": "^0.6.1",
"@chainsafe/ssz": "^0.14.0",
- "@lodestar/config": "^1.12.0",
- "@lodestar/params": "^1.12.0",
- "@lodestar/types": "^1.12.0",
- "@lodestar/utils": "^1.12.0",
+ "@lodestar/config": "^1.13.0",
+ "@lodestar/params": "^1.13.0",
+ "@lodestar/types": "^1.13.0",
+ "@lodestar/utils": "^1.13.0",
"eventsource": "^2.0.2",
"qs": "^6.11.1"
},
diff --git a/packages/api/src/beacon/client/events.ts b/packages/api/src/beacon/client/events.ts
index 57ac3d5e2dcd..574a0290bb4d 100644
--- a/packages/api/src/beacon/client/events.ts
+++ b/packages/api/src/beacon/client/events.ts
@@ -65,4 +65,4 @@ export function getClient(config: ChainForkConfig, client: IHttpClient): ApiClie
}
// https://github.com/EventSource/eventsource/blob/82e034389bd2c08d532c63172b8e858c5b185338/lib/eventsource.js#L143
-type EventSourceError = {status: number; message: string};
+type EventSourceError = {status?: number; message: string};
diff --git a/packages/api/src/beacon/routes/beacon/block.ts b/packages/api/src/beacon/routes/beacon/block.ts
index afa0df31473d..023dcf2b80d9 100644
--- a/packages/api/src/beacon/routes/beacon/block.ts
+++ b/packages/api/src/beacon/routes/beacon/block.ts
@@ -1,16 +1,7 @@
/* eslint-disable @typescript-eslint/naming-convention */
import {ContainerType, ListCompositeType, ValueOf} from "@chainsafe/ssz";
import {ChainForkConfig} from "@lodestar/config";
-import {
- allForks,
- Slot,
- ssz,
- RootHex,
- deneb,
- phase0,
- isSignedBlockContents,
- isSignedBlindedBlockContents,
-} from "@lodestar/types";
+import {allForks, Slot, ssz, RootHex, deneb, phase0, isSignedBlockContents} from "@lodestar/types";
import {ForkName, ForkSeq} from "@lodestar/params";
import {Endpoint, RequestCodec, RouteDefinitions, Schema} from "../../../utils/index.js";
import {
@@ -40,18 +31,14 @@ export const RootResponseType = new ContainerType({
});
export const SignedBlockContentsType = new ContainerType({
signedBlock: ssz.deneb.SignedBeaconBlock,
- signedBlobSidecars: ssz.deneb.SignedBlobSidecars,
-});
-export const SignedBlindedBlockContentsType = new ContainerType({
- signedBlindedBlock: ssz.deneb.SignedBlindedBeaconBlock,
- signedBlindedBlobSidecars: ssz.deneb.SignedBlindedBlobSidecars,
+ kzgProofs: ssz.deneb.KZGProofs,
+ blobs: ssz.deneb.Blobs,
});
export type BlockHeaderResponse = ValueOf<typeof BlockHeaderResponseType>;
export type BlockHeadersResponse = ValueOf<typeof BlockHeadersResponseType>;
export type RootResponse = ValueOf<typeof RootResponseType>;
export type SignedBlockContents = ValueOf<typeof SignedBlockContentsType>;
-export type SignedBlindedBlockContents = ValueOf<typeof SignedBlindedBlockContentsType>;
export type BlockId = RootHex | Slot | "head" | "genesis" | "finalized";
@@ -191,7 +178,7 @@ export type Endpoints = {
*/
publishBlindedBlock: Endpoint<
"POST",
- {signedBlindedBlockOrContents: allForks.SignedBlindedBeaconBlockOrContents},
+ {signedBlindedBlock: allForks.SignedBlindedBeaconBlock},
{body: unknown; headers: {"Eth-Consensus-Version": ForkName}},
EmptyResponseData,
EmptyMeta
@@ -200,7 +187,7 @@ export type Endpoints = {
publishBlindedBlockV2: Endpoint<
"POST",
{
- signedBlindedBlockOrContents: allForks.SignedBlindedBeaconBlockOrContents;
+ signedBlindedBlock: allForks.SignedBlindedBeaconBlock;
broadcastValidation?: BroadcastValidation;
},
{body: unknown; headers: {"Eth-Consensus-Version": ForkName}; query: {broadcast_validation?: string}},
@@ -430,17 +417,10 @@ export function getDefinitions(config: ChainForkConfig): RouteDefinitions {
- const slot = isSignedBlindedBlockContents(signedBlindedBlockOrContents)
- ? signedBlindedBlockOrContents.signedBlindedBlock.message.slot
- : signedBlindedBlockOrContents.message.slot;
+ writeReqJson: ({signedBlindedBlock}) => {
+ const slot = signedBlindedBlock.message.slot;
return {
- body:
- config.getForkSeq(slot) < ForkSeq.deneb
- ? config
- .getBlindedForkTypes(slot)
- .SignedBeaconBlock.toJson(signedBlindedBlockOrContents as allForks.SignedBlindedBeaconBlock)
- : SignedBlindedBlockContentsType.toJson(signedBlindedBlockOrContents as SignedBlindedBlockContents),
+ body: config.getBlindedForkTypes(slot).SignedBeaconBlock.toJson(signedBlindedBlock),
headers: {
"Eth-Consensus-Version": config.getForkName(slot),
},
@@ -451,23 +431,13 @@ export function getDefinitions(config: ChainForkConfig): RouteDefinitions {
- const slot = isSignedBlindedBlockContents(signedBlindedBlockOrContents)
- ? signedBlindedBlockOrContents.signedBlindedBlock.message.slot
- : signedBlindedBlockOrContents.message.slot;
+ writeReqSsz: ({signedBlindedBlock}) => {
+ const slot = signedBlindedBlock.message.slot;
return {
- body:
- config.getForkSeq(slot) < ForkSeq.deneb
- ? config
- .getBlindedForkTypes(slot)
- .SignedBeaconBlock.serialize(signedBlindedBlockOrContents as allForks.SignedBlindedBeaconBlock)
- : SignedBlindedBlockContentsType.serialize(signedBlindedBlockOrContents as SignedBlindedBlockContents),
+ body: config.getBlindedForkTypes(slot).SignedBeaconBlock.serialize(signedBlindedBlock),
headers: {
"Eth-Consensus-Version": config.getForkName(slot),
},
@@ -478,10 +448,7 @@ export function getDefinitions(config: ChainForkConfig): RouteDefinitions {
- const slot = isSignedBlindedBlockContents(signedBlindedBlockOrContents)
- ? signedBlindedBlockOrContents.signedBlindedBlock.message.slot
- : signedBlindedBlockOrContents.message.slot;
+ writeReqJson: ({signedBlindedBlock, broadcastValidation}) => {
+ const slot = signedBlindedBlock.message.slot;
return {
- body:
- config.getForkSeq(slot) < ForkSeq.deneb
- ? config
- .getBlindedForkTypes(slot)
- .SignedBeaconBlock.toJson(signedBlindedBlockOrContents as allForks.SignedBlindedBeaconBlock)
- : SignedBlindedBlockContentsType.toJson(signedBlindedBlockOrContents as SignedBlindedBlockContents),
+ body: config.getBlindedForkTypes(slot).SignedBeaconBlock.toJson(signedBlindedBlock),
+
headers: {
"Eth-Consensus-Version": config.getForkName(slot),
},
@@ -519,24 +480,14 @@ export function getDefinitions(config: ChainForkConfig): RouteDefinitions {
- const slot = isSignedBlindedBlockContents(signedBlindedBlockOrContents)
- ? signedBlindedBlockOrContents.signedBlindedBlock.message.slot
- : signedBlindedBlockOrContents.message.slot;
+ writeReqSsz: ({signedBlindedBlock, broadcastValidation}) => {
+ const slot = signedBlindedBlock.message.slot;
return {
- body:
- config.getForkSeq(slot) < ForkSeq.deneb
- ? config
- .getBlindedForkTypes(slot)
- .SignedBeaconBlock.serialize(signedBlindedBlockOrContents as allForks.SignedBlindedBeaconBlock)
- : SignedBlindedBlockContentsType.serialize(signedBlindedBlockOrContents as SignedBlindedBlockContents),
+ body: config.getBlindedForkTypes(slot).SignedBeaconBlock.serialize(signedBlindedBlock),
headers: {
"Eth-Consensus-Version": config.getForkName(slot),
},
@@ -548,10 +499,7 @@ export function getDefinitions(config: ChainForkConfig): RouteDefinitions;
@@ -681,11 +680,9 @@ export const definitions: RouteDefinitions = {
fromJson: (val) => val as ProduceBlockMeta,
toHeadersObject: (meta) => ({
"Eth-Consensus-Version": meta.version,
- "Eth-Execution-Payload-Value": String(meta.executionPayloadValue),
}),
fromHeaders: (headers) => ({
version: headers.get("Eth-Consensus-Version")! as ForkName,
- executionPayloadValue: BigInt(headers.get("Eth-Execution-Payload-Value")!),
}),
},
},
@@ -702,6 +699,7 @@ export const definitions: RouteDefinitions = {
feeRecipient,
builderSelection,
strictFeeRecipientCheck,
+ blindedLocal,
}) => ({
params: {slot},
query: {
@@ -711,6 +709,7 @@ export const definitions: RouteDefinitions = {
fee_recipient: feeRecipient,
builder_selection: builderSelection,
strict_fee_recipient_check: strictFeeRecipientCheck,
+ blinded_local: blindedLocal,
},
}),
parseReq: ({params, query}) => ({
@@ -721,6 +720,7 @@ export const definitions: RouteDefinitions = {
feeRecipient: query.fee_recipient,
builderSelection: query.builder_selection as BuilderSelection,
strictFeeRecipientCheck: query.strict_fee_recipient_check,
+ blindedLocal: query.blinded_local,
}),
schema: {
params: {slot: Schema.UintRequired},
@@ -731,6 +731,7 @@ export const definitions: RouteDefinitions = {
fee_recipient: Schema.String,
builder_selection: Schema.String,
strict_fee_recipient_check: Schema.Boolean,
+ blinded_local: Schema.Boolean,
},
},
},
@@ -738,9 +739,7 @@ export const definitions: RouteDefinitions = {
data: WithMeta(
({version, executionPayloadBlinded}) =>
(executionPayloadBlinded
- ? isForkBlobs(version)
- ? BlindedBlockContentsType
- : ssz[version as ForkName.bellatrix].BlindedBeaconBlock
+ ? ssz.allForksBlinded[isForkExecution(version) ? version : ForkName.bellatrix].BeaconBlock
: isForkBlobs(version)
? BlockContentsType
: ssz[version].BeaconBlock) as Type
@@ -750,13 +749,17 @@ export const definitions: RouteDefinitions = {
fromJson: (val) => val as ProduceBlockV3Meta,
toHeadersObject: (meta) => ({
"Eth-Consensus-Version": meta.version,
- "Eth-Execution-Payload-Value": String(meta.executionPayloadValue),
+ "Eth-Execution-Payload-Source": String(meta.executionPayloadSource),
"Eth-Execution-Payload-Blinded": String(meta.executionPayloadBlinded),
+ "Eth-Execution-Payload-Value": String(meta.executionPayloadValue),
+ "Eth-Consensus-Block-Value": String(meta.consensusBlockValue),
}),
fromHeaders: (headers) => ({
version: headers.get("Eth-Consensus-Version")! as ForkName,
- executionPayloadValue: BigInt(headers.get("Eth-Execution-Payload-Value")!),
+ executionPayloadSource: headers.get("Eth-Execution-Payload-Source")! as ProducedBlockSource,
executionPayloadBlinded: Boolean(headers.get("Eth-Execution-Payload-Blinded")!),
+ executionPayloadValue: BigInt(headers.get("Eth-Execution-Payload-Value")!),
+ consensusBlockValue: BigInt(headers.get("Eth-Consensus-Block-Value")!),
}),
},
},
@@ -783,23 +786,15 @@ export const definitions: RouteDefinitions = {
},
},
resp: {
- data: WithVersion(
- (fork) =>
- // TODO fix the else branch
- (isForkBlobs(fork)
- ? BlindedBlockContentsType
- : ssz[fork as ForkName.bellatrix].BlindedBeaconBlock) as Type
- ),
+ data: WithVersion((fork) => ssz.allForksBlinded[isForkExecution(fork) ? fork : ForkName.bellatrix].BeaconBlock),
meta: {
toJson: (meta) => meta,
fromJson: (val) => val as ProduceBlockMeta,
toHeadersObject: (meta) => ({
"Eth-Consensus-Version": meta.version,
- "Eth-Execution-Payload-Value": String(meta.executionPayloadValue),
}),
fromHeaders: (headers) => ({
version: headers.get("Eth-Consensus-Version")! as ForkName,
- executionPayloadValue: BigInt(headers.get("Eth-Execution-Payload-Value")!),
}),
},
},
diff --git a/packages/api/src/beacon/server/validator.ts b/packages/api/src/beacon/server/validator.ts
index 6bf446e05a16..5d6c22557060 100644
--- a/packages/api/src/beacon/server/validator.ts
+++ b/packages/api/src/beacon/server/validator.ts
@@ -4,6 +4,28 @@ import {ServerRoutes, getGenericJsonServer} from "../../utils/server/index.js";
import {ServerApi} from "../../interfaces.js";
export function getRoutes(config: ChainForkConfig, api: ServerApi<Api>): ServerRoutes<Api, ReqTypes> {
- // All routes return JSON, use a server auto-generator
- return getGenericJsonServer<ServerApi<Api>, ReqTypes>({routesData, getReturnTypes, getReqSerializers}, config, api);
+ const reqSerializers = getReqSerializers();
+ const returnTypes = getReturnTypes();
+
+ // Most of routes return JSON, use a server auto-generator
+ const serverRoutes = getGenericJsonServer<ServerApi<Api>, ReqTypes>(
+ {routesData, getReturnTypes, getReqSerializers},
+ config,
+ api
+ );
+ return {
+ ...serverRoutes,
+ produceBlockV3: {
+ ...serverRoutes.produceBlockV3,
+ handler: async (req, res) => {
+ const response = await api.produceBlockV3(...reqSerializers.produceBlockV3.parseReq(req));
+ void res.header("Eth-Consensus-Version", response.version);
+ void res.header("Eth-Execution-Payload-Blinded", response.executionPayloadBlinded);
+ void res.header("Eth-Execution-Payload-Value", response.executionPayloadValue);
+ void res.header("Eth-Consensus-Block-Value", response.consensusBlockValue);
+
+ return returnTypes.produceBlockV3.toJson(response);
+ },
+ },
+ };
}
diff --git a/packages/api/src/builder/routes.ts b/packages/api/src/builder/routes.ts
index 0523e471e0f3..42f6a04273b0 100644
--- a/packages/api/src/builder/routes.ts
+++ b/packages/api/src/builder/routes.ts
@@ -18,11 +18,6 @@ import {
WithVersion,
} from "../utils/codecs.js";
import {WireFormat} from "../utils/headers.js";
-import {
- PreBlobSignedBlindedBeaconBlock,
- SignedBlindedBlockContents,
- SignedBlindedBlockContentsType,
-} from "../beacon/routes/beacon/block.js";
// See /packages/api/src/routes/index.ts for reasoning and instructions to add new routes
@@ -60,7 +55,7 @@ export type Endpoints = {
submitBlindedBlock: Endpoint<
"POST",
- {signedBlindedBlock: PreBlobSignedBlindedBeaconBlock} | SignedBlindedBlockContents,
+ {signedBlindedBlock: allForks.SignedBlindedBeaconBlock},
{body: unknown; headers: {"Eth-Consensus-Version": ForkName}},
allForks.ExecutionPayload | allForks.ExecutionPayloadAndBlobsBundle,
VersionMeta
@@ -125,13 +120,7 @@ export function getDefinitions(config: ChainForkConfig): RouteDefinitions {
const slot = args.signedBlindedBlock.message.slot;
return {
- body:
- config.getForkSeq(slot) < ForkSeq.deneb
- ? config.getBlindedForkTypes(slot).SignedBeaconBlock.toJson(args.signedBlindedBlock)
- : SignedBlindedBlockContentsType.toJson({
- signedBlindedBlock: args.signedBlindedBlock,
- signedBlindedBlobSidecars: (args as SignedBlindedBlockContents).signedBlindedBlobSidecars,
- }),
+ body: config.getBlindedForkTypes(slot).SignedBeaconBlock.toJson(args.signedBlindedBlock),
headers: {
"Eth-Consensus-Version": config.getForkName(slot),
},
@@ -141,22 +130,14 @@ export function getDefinitions(config: ChainForkConfig): RouteDefinitions {
const slot = args.signedBlindedBlock.message.slot;
return {
- body:
- config.getForkSeq(slot) < ForkSeq.deneb
- ? config.getBlindedForkTypes(slot).SignedBeaconBlock.serialize(args.signedBlindedBlock)
- : SignedBlindedBlockContentsType.serialize({
- signedBlindedBlock: args.signedBlindedBlock,
- signedBlindedBlobSidecars: (args as SignedBlindedBlockContents).signedBlindedBlobSidecars,
- }),
+ body: config.getBlindedForkTypes(slot).SignedBeaconBlock.serialize(args.signedBlindedBlock),
headers: {
"Eth-Consensus-Version": config.getForkName(slot),
},
@@ -166,11 +147,9 @@ export function getDefinitions(config: ChainForkConfig): RouteDefinitions ({
signed_block: blockSerializer(data.signedBlock).toJson(data.signedBlock),
- signed_blob_sidecars: ssz.deneb.SignedBlobSidecars.toJson(data.signedBlobSidecars),
+ kzg_proofs: ssz.deneb.KZGProofs.toJson(data.kzgProofs),
+ blobs: ssz.deneb.Blobs.toJson(data.blobs),
}),
- fromJson: (data: {signed_block: unknown; signed_blob_sidecars: unknown}) => ({
+ fromJson: (data: {signed_block: unknown; kzg_proofs: unknown; blobs: unknown}) => ({
signedBlock: blockSerializer(data.signed_block as allForks.SignedBeaconBlock).fromJson(data.signed_block),
- signedBlobSidecars: ssz.deneb.SignedBlobSidecars.fromJson(data.signed_blob_sidecars),
+ kzgProofs: ssz.deneb.KZGProofs.fromJson(data.kzg_proofs),
+ blobs: ssz.deneb.Blobs.fromJson(data.blobs),
}),
};
}
@@ -25,44 +27,13 @@ export function allForksBlockContentsResSerializer(fork: ForkBlobs): TypeJson ({
block: (ssz.allForks[fork].BeaconBlock as allForks.AllForksSSZTypes["BeaconBlock"]).toJson(data.block),
- blob_sidecars: ssz.deneb.BlobSidecars.toJson(data.blobSidecars),
+ kzg_proofs: ssz.deneb.KZGProofs.toJson(data.kzgProofs),
+ blobs: ssz.deneb.Blobs.toJson(data.blobs),
}),
- fromJson: (data: {block: unknown; blob_sidecars: unknown}) => ({
+ fromJson: (data: {block: unknown; blob_sidecars: unknown; kzg_proofs: unknown; blobs: unknown}) => ({
block: ssz.allForks[fork].BeaconBlock.fromJson(data.block),
- blobSidecars: ssz.deneb.BlobSidecars.fromJson(data.blob_sidecars),
- }),
- };
-}
-
-export function allForksSignedBlindedBlockContentsReqSerializer(
- blockSerializer: (data: allForks.SignedBlindedBeaconBlock) => TypeJson
-): TypeJson {
- return {
- toJson: (data) => ({
- signed_blinded_block: blockSerializer(data.signedBlindedBlock).toJson(data.signedBlindedBlock),
- signed_blinded_blob_sidecars: ssz.deneb.SignedBlindedBlobSidecars.toJson(data.signedBlindedBlobSidecars),
- }),
-
- fromJson: (data: {signed_blinded_block: unknown; signed_blinded_blob_sidecars: unknown}) => ({
- signedBlindedBlock: blockSerializer(data.signed_blinded_block as allForks.SignedBlindedBeaconBlock).fromJson(
- data.signed_blinded_block
- ),
- signedBlindedBlobSidecars: ssz.deneb.SignedBlindedBlobSidecars.fromJson(data.signed_blinded_blob_sidecars),
- }),
- };
-}
-
-export function allForksBlindedBlockContentsResSerializer(fork: ForkBlobs): TypeJson {
- return {
- toJson: (data) => ({
- blinded_block: (ssz.allForksBlinded[fork].BeaconBlock as allForks.AllForksBlindedSSZTypes["BeaconBlock"]).toJson(
- data.blindedBlock
- ),
- blinded_blob_sidecars: ssz.deneb.BlindedBlobSidecars.toJson(data.blindedBlobSidecars),
- }),
- fromJson: (data: {blinded_block: unknown; blinded_blob_sidecars: unknown}) => ({
- blindedBlock: ssz.allForksBlinded[fork].BeaconBlock.fromJson(data.blinded_block),
- blindedBlobSidecars: ssz.deneb.BlindedBlobSidecars.fromJson(data.blinded_blob_sidecars),
+ kzgProofs: ssz.deneb.KZGProofs.fromJson(data.kzg_proofs),
+ blobs: ssz.deneb.Blobs.fromJson(data.blobs),
}),
};
}
diff --git a/packages/api/src/utils/client/httpClient.ts b/packages/api/src/utils/client/httpClient.ts
index f40b93345a73..d0b3e3849439 100644
--- a/packages/api/src/utils/client/httpClient.ts
+++ b/packages/api/src/utils/client/httpClient.ts
@@ -52,8 +52,7 @@ export class HttpClient implements IHttpClient {
private readonly urlsScore: number[];
get baseUrl(): string {
- // Don't leak username/password to caller
- return new URL(this.urlsInits[0].baseUrl).origin;
+ return this.urlsInits[0].baseUrl;
}
constructor(opts: HttpClientOptions, {logger, metrics}: HttpClientModules = {}) {
diff --git a/packages/api/src/utils/client/metrics.ts b/packages/api/src/utils/client/metrics.ts
index c8bc3c0637a4..65089e92e7ec 100644
--- a/packages/api/src/utils/client/metrics.ts
+++ b/packages/api/src/utils/client/metrics.ts
@@ -1,49 +1,9 @@
+import {Gauge, GaugeExtra, Histogram} from "@lodestar/utils";
+
export type Metrics = {
- requestTime: Histogram<"routeId">;
- streamTime: Histogram<"routeId">;
- requestErrors: Gauge<"routeId">;
- requestToFallbacks: Gauge<"routeId">;
- urlsScore: Gauge<"urlIndex">;
+ requestTime: Histogram<{routeId: string}>;
+ streamTime: Histogram<{routeId: string}>;
+ requestErrors: Gauge<{routeId: string}>;
+ requestToFallbacks: Gauge<{routeId: string}>;
+ urlsScore: GaugeExtra<{urlIndex: number}>;
};
-
-type LabelValues<T extends string> = Partial<Record<T, string | number>>;
-type CollectFn<T extends string> = (metric: Gauge<T>) => void;
-
-export interface Gauge<T extends string = string> {
- /**
- * Increment gauge for given labels
- * @param labels Object with label keys and values
- * @param value The value to increment with
- */
- inc(labels: LabelValues<T>, value?: number): void;
-
- /**
- * Increment gauge
- * @param value The value to increment with
- */
- inc(value?: number): void;
-
- /**
- * Set gauge value for labels
- * @param labels Object with label keys and values
- * @param value The value to set
- */
- set(labels: LabelValues<T>, value: number): void;
-
- /**
- * Set gauge value
- * @param value The value to set
- */
- set(value: number): void;
-
- addCollect(collectFn: CollectFn<T>): void;
-}
-
-export interface Histogram<T extends string = string> {
- /**
- * Start a timer where the value in seconds will observed
- * @param labels Object with label keys and values
- * @return Function to invoke when timer should be stopped
- */
- startTimer(labels?: LabelValues<T>): (labels?: LabelValues<T>) => number;
-}
diff --git a/packages/api/test/globalSetup.ts b/packages/api/test/globalSetup.ts
new file mode 100644
index 000000000000..0ab57c057472
--- /dev/null
+++ b/packages/api/test/globalSetup.ts
@@ -0,0 +1,2 @@
+export async function setup(): Promise<void> {}
+export async function teardown(): Promise<void> {}
diff --git a/packages/api/test/setup.ts b/packages/api/test/setup.ts
deleted file mode 100644
index b83e6cb78511..000000000000
--- a/packages/api/test/setup.ts
+++ /dev/null
@@ -1,6 +0,0 @@
-import chai from "chai";
-import chaiAsPromised from "chai-as-promised";
-import sinonChai from "sinon-chai";
-
-chai.use(chaiAsPromised);
-chai.use(sinonChai);
diff --git a/packages/api/test/unit/beacon/genericServerTest/beacon.test.ts b/packages/api/test/unit/beacon/genericServerTest/beacon.test.ts
index ae4428b9fc8e..7972e4bfca65 100644
--- a/packages/api/test/unit/beacon/genericServerTest/beacon.test.ts
+++ b/packages/api/test/unit/beacon/genericServerTest/beacon.test.ts
@@ -1,3 +1,4 @@
+import {describe} from "vitest";
import {createChainForkConfig, defaultChainConfig} from "@lodestar/config";
import {Api, ReqTypes} from "../../../../src/beacon/routes/beacon/index.js";
import {getClient} from "../../../../src/beacon/client/beacon.js";
diff --git a/packages/api/test/unit/beacon/genericServerTest/config.test.ts b/packages/api/test/unit/beacon/genericServerTest/config.test.ts
index da791aa2c334..e11e4cbff6cb 100644
--- a/packages/api/test/unit/beacon/genericServerTest/config.test.ts
+++ b/packages/api/test/unit/beacon/genericServerTest/config.test.ts
@@ -1,4 +1,4 @@
-import {expect} from "chai";
+import {describe, it, expect} from "vitest";
import {config} from "@lodestar/config/default";
import {Api, ReqTypes, getReturnTypes} from "../../../../src/beacon/routes/config.js";
import {getClient} from "../../../../src/beacon/client/config.js";
@@ -27,6 +27,6 @@ describe("beacon / config", () => {
const jsonRes = returnTypes.getSpec.toJson({data: partialJsonSpec});
const specRes = returnTypes.getSpec.fromJson(jsonRes);
- expect(specRes).to.deep.equal({data: partialJsonSpec}, "Wrong toJson -> fromJson");
+ expect(specRes).toEqual({data: partialJsonSpec});
});
});
diff --git a/packages/api/test/unit/beacon/genericServerTest/debug.test.ts b/packages/api/test/unit/beacon/genericServerTest/debug.test.ts
index 44b080e29bf4..6f7889677ec6 100644
--- a/packages/api/test/unit/beacon/genericServerTest/debug.test.ts
+++ b/packages/api/test/unit/beacon/genericServerTest/debug.test.ts
@@ -1,4 +1,4 @@
-import {expect} from "chai";
+import {describe, it, expect, MockInstance} from "vitest";
import {toHexString} from "@chainsafe/ssz";
import {ssz} from "@lodestar/types";
import {config} from "@lodestar/config/default";
@@ -11,40 +11,42 @@ import {registerRoute} from "../../../../src/utils/server/registerRoute.js";
import {HttpClient} from "../../../../src/utils/client/httpClient.js";
import {testData} from "../testData/debug.js";
-describe("beacon / debug", function () {
+describe(
+ "beacon / debug",
+ function () {
+ describe("Run generic server test", () => {
+ runGenericServerTest(config, getClient, getRoutes, testData);
+ });
+
+ // Get state by SSZ
+
+ describe("getState() in SSZ format", () => {
+ const {baseUrl, server} = getTestServer();
+ const mockApi = getMockApi(routesData);
+ for (const route of Object.values(getRoutes(config, mockApi))) {
+ registerRoute(server, route);
+ }
+
+ for (const method of ["getState" as const, "getStateV2" as const]) {
+ it(method, async () => {
+ const state = ssz.phase0.BeaconState.defaultValue();
+ const stateSerialized = ssz.phase0.BeaconState.serialize(state);
+ (mockApi[method] as MockInstance).mockResolvedValue(stateSerialized);
+
+ const httpClient = new HttpClient({baseUrl});
+ const client = getClient(config, httpClient);
+
+ const res = await client[method]("head", "ssz");
+
+ expect(res.ok).toBe(true);
+
+ if (res.ok) {
+ expect(toHexString(res.response)).toBe(toHexString(stateSerialized));
+ }
+ });
+ }
+ });
+ },
// Extend timeout since states are very big
- this.timeout(30 * 1000);
-
- describe("Run generic server test", () => {
- runGenericServerTest(config, getClient, getRoutes, testData);
- });
-
- // Get state by SSZ
-
- describe("getState() in SSZ format", () => {
- const {baseUrl, server} = getTestServer();
- const mockApi = getMockApi(routesData);
- for (const route of Object.values(getRoutes(config, mockApi))) {
- registerRoute(server, route);
- }
-
- for (const method of ["getState" as const, "getStateV2" as const]) {
- it(method, async () => {
- const state = ssz.phase0.BeaconState.defaultValue();
- const stateSerialized = ssz.phase0.BeaconState.serialize(state);
- mockApi[method].resolves(stateSerialized);
-
- const httpClient = new HttpClient({baseUrl});
- const client = getClient(config, httpClient);
-
- const res = await client[method]("head", "ssz");
-
- expect(res.ok).to.be.true;
-
- if (res.ok) {
- expect(toHexString(res.response)).to.equal(toHexString(stateSerialized), "returned state value is not equal");
- }
- });
- }
- });
-});
+ {timeout: 30 * 1000}
+);
diff --git a/packages/api/test/unit/beacon/genericServerTest/events.test.ts b/packages/api/test/unit/beacon/genericServerTest/events.test.ts
index deaf0da9c1b9..48ff8ad3d157 100644
--- a/packages/api/test/unit/beacon/genericServerTest/events.test.ts
+++ b/packages/api/test/unit/beacon/genericServerTest/events.test.ts
@@ -1,4 +1,4 @@
-import {expect} from "chai";
+import {describe, it, expect, beforeEach, afterEach} from "vitest";
import {sleep} from "@lodestar/utils";
import {config} from "@lodestar/config/default";
import {Api, routesData, EventType, BeaconEvent} from "../../../../src/beacon/routes/events.js";
@@ -16,7 +16,9 @@ describe("beacon / events", () => {
}
let controller: AbortController;
- beforeEach(() => (controller = new AbortController()));
+ beforeEach(() => {
+ controller = new AbortController();
+ });
afterEach(() => controller.abort());
it("Receive events", async () => {
@@ -38,9 +40,9 @@ describe("beacon / events", () => {
const eventsReceived: BeaconEvent[] = [];
await new Promise((resolve, reject) => {
- mockApi.eventstream.callsFake(async (topics, signal, onEvent) => {
+ mockApi.eventstream.mockImplementation(async (topics, signal, onEvent) => {
try {
- expect(topics).to.deep.equal(topicsToRequest, "Wrong received topics");
+ expect(topics).toEqual(topicsToRequest);
for (const event of eventsToSend) {
onEvent(event);
await sleep(5);
@@ -58,6 +60,6 @@ describe("beacon / events", () => {
});
});
- expect(eventsReceived).to.deep.equal(eventsToSend, "Wrong received events");
+ expect(eventsReceived).toEqual(eventsToSend);
});
});
diff --git a/packages/api/test/unit/beacon/genericServerTest/lightclient.test.ts b/packages/api/test/unit/beacon/genericServerTest/lightclient.test.ts
index 888236dd32f7..10031a150490 100644
--- a/packages/api/test/unit/beacon/genericServerTest/lightclient.test.ts
+++ b/packages/api/test/unit/beacon/genericServerTest/lightclient.test.ts
@@ -1,3 +1,4 @@
+import {describe} from "vitest";
import {config} from "@lodestar/config/default";
import {Api, ReqTypes} from "../../../../src/beacon/routes/lightclient.js";
import {getClient} from "../../../../src/beacon/client/lightclient.js";
diff --git a/packages/api/test/unit/beacon/genericServerTest/node.test.ts b/packages/api/test/unit/beacon/genericServerTest/node.test.ts
index cf87e78da4c1..059bd4ca2c88 100644
--- a/packages/api/test/unit/beacon/genericServerTest/node.test.ts
+++ b/packages/api/test/unit/beacon/genericServerTest/node.test.ts
@@ -1,3 +1,4 @@
+import {describe} from "vitest";
import {config} from "@lodestar/config/default";
import {Api, ReqTypes} from "../../../../src/beacon/routes/node.js";
import {getClient} from "../../../../src/beacon/client/node.js";
diff --git a/packages/api/test/unit/beacon/genericServerTest/proofs.test.ts b/packages/api/test/unit/beacon/genericServerTest/proofs.test.ts
index 5b3a8dea5b91..4619d20d989f 100644
--- a/packages/api/test/unit/beacon/genericServerTest/proofs.test.ts
+++ b/packages/api/test/unit/beacon/genericServerTest/proofs.test.ts
@@ -1,3 +1,4 @@
+import {describe} from "vitest";
import {config} from "@lodestar/config/default";
import {Api, ReqTypes} from "../../../../src/beacon/routes/proof.js";
import {getClient} from "../../../../src/beacon/client/proof.js";
diff --git a/packages/api/test/unit/beacon/genericServerTest/validator.test.ts b/packages/api/test/unit/beacon/genericServerTest/validator.test.ts
index 399747a82d54..5a87ea9eee5f 100644
--- a/packages/api/test/unit/beacon/genericServerTest/validator.test.ts
+++ b/packages/api/test/unit/beacon/genericServerTest/validator.test.ts
@@ -1,3 +1,4 @@
+import {describe} from "vitest";
import {config} from "@lodestar/config/default";
import {Api, ReqTypes} from "../../../../src/beacon/routes/validator.js";
import {getClient} from "../../../../src/beacon/client/validator.js";
diff --git a/packages/api/test/unit/beacon/oapiSpec.test.ts b/packages/api/test/unit/beacon/oapiSpec.test.ts
index 5bfacce6a683..1a300eba6f36 100644
--- a/packages/api/test/unit/beacon/oapiSpec.test.ts
+++ b/packages/api/test/unit/beacon/oapiSpec.test.ts
@@ -1,12 +1,12 @@
import path from "node:path";
import {fileURLToPath} from "node:url";
-import {expect} from "chai";
+import {describe, it, beforeAll, expect} from "vitest";
import {createChainForkConfig, defaultChainConfig} from "@lodestar/config";
import {OpenApiFile} from "../../utils/parseOpenApiSpec.js";
import {routes} from "../../../src/beacon/index.js";
import {ReqSerializers} from "../../../src/utils/types.js";
import {Schema} from "../../../src/utils/schema.js";
-import {runTestCheckAgainstSpec} from "../../utils/checkAgainstSpec.js";
+import {IgnoredProperty, runTestCheckAgainstSpec} from "../../utils/checkAgainstSpec.js";
import {fetchOpenApiSpec} from "../../utils/fetchOpenApiSpec.js";
// Import all testData and merge below
import {testData as beaconTestData} from "./testData/beacon.js";
@@ -23,7 +23,7 @@ import {testData as validatorTestData} from "./testData/validator.js";
// eslint-disable-next-line @typescript-eslint/naming-convention
const __dirname = path.dirname(fileURLToPath(import.meta.url));
-const version = "v2.3.0";
+const version = "v2.4.2";
const openApiFile: OpenApiFile = {
url: `https://github.com/ethereum/beacon-APIs/releases/download/${version}/beacon-node-oapi.json`,
filepath: path.join(__dirname, "../../../oapi-schemas/beacon-node-oapi.json"),
@@ -84,11 +84,105 @@ const testDatas = {
...validatorTestData,
};
+const ignoredOperations = [
+ /* missing route */
+ /* https://github.com/ChainSafe/lodestar/issues/5694 */
+ "getSyncCommitteeRewards",
+ "getBlockRewards",
+ "getAttestationsRewards",
+ "getDepositSnapshot", // Won't fix for now, see https://github.com/ChainSafe/lodestar/issues/5697
+ "getBlindedBlock", // https://github.com/ChainSafe/lodestar/issues/5699
+ "getNextWithdrawals", // https://github.com/ChainSafe/lodestar/issues/5696
+ "getDebugForkChoice", // https://github.com/ChainSafe/lodestar/issues/5700
+ /* https://github.com/ChainSafe/lodestar/issues/6080 */
+ "getLightClientBootstrap",
+ "getLightClientUpdatesByRange",
+ "getLightClientFinalityUpdate",
+ "getLightClientOptimisticUpdate",
+ "getPoolBLSToExecutionChanges",
+ "submitPoolBLSToExecutionChange",
+];
+
+const ignoredProperties: Record = {
+ /*
+ https://github.com/ChainSafe/lodestar/issues/5693
+ missing finalized
+ */
+ getStateRoot: {response: ["finalized"]},
+ getStateFork: {response: ["finalized"]},
+ getStateFinalityCheckpoints: {response: ["finalized"]},
+ getStateValidators: {response: ["finalized"]},
+ getStateValidator: {response: ["finalized"]},
+ getStateValidatorBalances: {response: ["finalized"]},
+ getEpochCommittees: {response: ["finalized"]},
+ getEpochSyncCommittees: {response: ["finalized"]},
+ getStateRandao: {response: ["finalized"]},
+ getBlockHeaders: {response: ["finalized"]},
+ getBlockHeader: {response: ["finalized"]},
+ getBlockV2: {response: ["finalized"]},
+ getBlockRoot: {response: ["finalized"]},
+ getBlockAttestations: {response: ["finalized"]},
+ getStateV2: {response: ["finalized"]},
+
+ /*
+ https://github.com/ChainSafe/lodestar/issues/6168
+ /query/syncing_status - must be integer
+ */
+ getHealth: {request: ["query.syncing_status"]},
+
+ /**
+ * https://github.com/ChainSafe/lodestar/issues/6185
+ * - must have required property 'query'
+ */
+ getBlobSidecars: {request: ["query"]},
+
+ /*
+ https://github.com/ChainSafe/lodestar/issues/4638
+ /query - must have required property 'skip_randao_verification'
+ */
+ produceBlockV2: {request: ["query.skip_randao_verification"]},
+ produceBlindedBlock: {request: ["query.skip_randao_verification"]},
+};
+
const openApiJson = await fetchOpenApiSpec(openApiFile);
-runTestCheckAgainstSpec(openApiJson, routesData, reqSerializers, returnTypes, testDatas, {
- // TODO: Investigate why schema validation fails otherwise
- routesDropOneOf: ["produceBlockV2", "produceBlindedBlock", "publishBlindedBlock"],
-});
+runTestCheckAgainstSpec(
+ openApiJson,
+ routesData,
+ reqSerializers,
+ returnTypes,
+ testDatas,
+ {
+ // TODO: Investigate why schema validation fails otherwise (see https://github.com/ChainSafe/lodestar/issues/6187)
+ routesDropOneOf: [
+ "produceBlockV2",
+ "produceBlockV3",
+ "produceBlindedBlock",
+ "publishBlindedBlock",
+ "publishBlindedBlockV2",
+ ],
+ },
+ ignoredOperations,
+ ignoredProperties
+);
+
+const ignoredTopics = [
+ /*
+ https://github.com/ChainSafe/lodestar/issues/6167
+ eventTestData[bls_to_execution_change] does not match spec's example
+ */
+ "bls_to_execution_change",
+ /*
+ https://github.com/ChainSafe/lodestar/issues/6170
+ Error: Invalid slot=0 fork=phase0 for lightclient fork types
+ */
+ "light_client_finality_update",
+ "light_client_optimistic_update",
+ /*
+ https://github.com/ethereum/beacon-APIs/pull/379
+ SyntaxError: Unexpected non-whitespace character after JSON at position 629 (line 1 column 630)
+ */
+ "payload_attributes",
+];
// eventstream types are defined as comments in the description of "examples".
// The function runTestCheckAgainstSpec() can't handle those, so the custom code before:
@@ -104,7 +198,7 @@ describe("eventstream event data", () => {
const eventstreamExamples =
openApiJson.paths["/eth/v1/events"]["get"].responses["200"].content?.["text/event-stream"].examples;
- before("Check eventstreamExamples exists", () => {
+ beforeAll(() => {
if (!eventstreamExamples) {
throw Error(`eventstreamExamples not defined: ${eventstreamExamples}`);
}
@@ -113,7 +207,9 @@ describe("eventstream event data", () => {
const eventSerdes = routes.events.getEventSerdes(config);
const knownTopics = new Set(Object.values(routes.events.eventTypes));
- for (const [topic, {value}] of Object.entries(eventstreamExamples ?? {})) {
+ for (const [topic, {value}] of Object.entries(eventstreamExamples ?? {}).filter(
+ ([topic]) => !ignoredTopics.includes(topic)
+ )) {
it(topic, () => {
if (!knownTopics.has(topic)) {
throw Error(`topic ${topic} not implemented`);
@@ -130,13 +226,12 @@ describe("eventstream event data", () => {
if (testEvent == null) {
throw Error(`No eventTestData for ${topic}`);
}
-
const testEventJson = eventSerdes.toJson({
type: topic as routes.events.EventType,
message: testEvent,
} as routes.events.BeaconEvent);
- expect(testEventJson).deep.equals(exampleDataJson, `eventTestData[${topic}] does not match spec's example`);
+ expect(testEventJson).toEqual(exampleDataJson);
});
}
});
diff --git a/packages/api/test/unit/beacon/testData/beacon.ts b/packages/api/test/unit/beacon/testData/beacon.ts
index 54b2537648cb..7fa8368c590b 100644
--- a/packages/api/test/unit/beacon/testData/beacon.ts
+++ b/packages/api/test/unit/beacon/testData/beacon.ts
@@ -9,8 +9,8 @@ import {
} from "../../../../src/beacon/routes/beacon/index.js";
import {GenericServerTestCases} from "../../../utils/genericServerTest.js";
-const root = Buffer.alloc(32, 1);
-const randao = Buffer.alloc(32, 1);
+const root = new Uint8Array(32).fill(1);
+const randao = new Uint8Array(32).fill(1);
const balance = 32e9;
const pubkeyHex = toHexString(Buffer.alloc(48, 1));
diff --git a/packages/api/test/unit/beacon/testData/config.ts b/packages/api/test/unit/beacon/testData/config.ts
index 463d584880a3..642ed5e7e224 100644
--- a/packages/api/test/unit/beacon/testData/config.ts
+++ b/packages/api/test/unit/beacon/testData/config.ts
@@ -15,7 +15,7 @@ export const testData: GenericServerTestCases = {
res: {
data: {
chainId: 1,
- address: Buffer.alloc(20, 1),
+ address: new Uint8Array(20).fill(1),
},
},
},
diff --git a/packages/api/test/unit/beacon/testData/events.ts b/packages/api/test/unit/beacon/testData/events.ts
index 92e413037bcf..af33f4a2b011 100644
--- a/packages/api/test/unit/beacon/testData/events.ts
+++ b/packages/api/test/unit/beacon/testData/events.ts
@@ -4,7 +4,7 @@ import {Api, EventData, EventType, blobSidecarSSE} from "../../../../src/beacon/
import {GenericServerTestCases} from "../../../utils/genericServerTest.js";
const abortController = new AbortController();
-const root = Buffer.alloc(32, 0);
+const root = new Uint8Array(32);
/* eslint-disable @typescript-eslint/no-empty-function, @typescript-eslint/naming-convention */
diff --git a/packages/api/test/unit/beacon/testData/lightclient.ts b/packages/api/test/unit/beacon/testData/lightclient.ts
index 553f11d685d1..13e08e365987 100644
--- a/packages/api/test/unit/beacon/testData/lightclient.ts
+++ b/packages/api/test/unit/beacon/testData/lightclient.ts
@@ -46,6 +46,6 @@ export const testData: GenericServerTestCases = {
},
getCommitteeRoot: {
args: [1, 2],
- res: {data: [Buffer.alloc(32, 0), Buffer.alloc(32, 1)]},
+ res: {data: [Uint8Array.from(Buffer.alloc(32, 0)), Uint8Array.from(Buffer.alloc(32, 1))]},
},
};
diff --git a/packages/api/test/unit/beacon/testData/validator.ts b/packages/api/test/unit/beacon/testData/validator.ts
index da245646f8d5..c10f67fa4095 100644
--- a/packages/api/test/unit/beacon/testData/validator.ts
+++ b/packages/api/test/unit/beacon/testData/validator.ts
@@ -1,12 +1,12 @@
import {ForkName} from "@lodestar/params";
-import {ssz} from "@lodestar/types";
+import {ssz, ProducedBlockSource} from "@lodestar/types";
import {Api} from "../../../../src/beacon/routes/validator.js";
import {GenericServerTestCases} from "../../../utils/genericServerTest.js";
-const ZERO_HASH = Buffer.alloc(32, 0);
-const ZERO_HASH_HEX = "0x" + ZERO_HASH.toString("hex");
-const randaoReveal = Buffer.alloc(96, 1);
-const selectionProof = Buffer.alloc(96, 1);
+const ZERO_HASH = new Uint8Array(32);
+const ZERO_HASH_HEX = "0x" + Buffer.from(ZERO_HASH).toString("hex");
+const randaoReveal = new Uint8Array(96).fill(1);
+const selectionProof = new Uint8Array(96).fill(1);
const graffiti = "a".repeat(32);
const feeRecipient = "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb";
@@ -17,7 +17,7 @@ export const testData: GenericServerTestCases = {
executionOptimistic: true,
data: [
{
- pubkey: Buffer.alloc(48, 1),
+ pubkey: new Uint8Array(48).fill(1),
validatorIndex: 2,
committeeIndex: 3,
committeeLength: 4,
@@ -33,7 +33,7 @@ export const testData: GenericServerTestCases = {
args: [1000],
res: {
executionOptimistic: true,
- data: [{slot: 1, validatorIndex: 2, pubkey: Buffer.alloc(48, 3)}],
+ data: [{slot: 1, validatorIndex: 2, pubkey: new Uint8Array(48).fill(3)}],
dependentRoot: ZERO_HASH_HEX,
},
},
@@ -41,19 +41,32 @@ export const testData: GenericServerTestCases = {
args: [1000, [1, 2, 3]],
res: {
executionOptimistic: true,
- data: [{pubkey: Buffer.alloc(48, 1), validatorIndex: 2, validatorSyncCommitteeIndices: [3]}],
+ data: [{pubkey: Uint8Array.from(Buffer.alloc(48, 1)), validatorIndex: 2, validatorSyncCommitteeIndices: [3]}],
},
},
produceBlock: {
- args: [32000, randaoReveal, graffiti],
+ args: [
+ 32000,
+ randaoReveal,
+ graffiti,
+ undefined,
+ {feeRecipient: undefined, builderSelection: undefined, strictFeeRecipientCheck: undefined},
+ ] as unknown as GenericServerTestCases["produceBlock"]["args"],
res: {data: ssz.phase0.BeaconBlock.defaultValue()},
},
produceBlockV2: {
- args: [32000, randaoReveal, graffiti],
+ args: [
+ 32000,
+ randaoReveal,
+ graffiti,
+ undefined,
+ {feeRecipient: undefined, builderSelection: undefined, strictFeeRecipientCheck: undefined},
+ ] as unknown as GenericServerTestCases["produceBlockV2"]["args"],
res: {
data: ssz.altair.BeaconBlock.defaultValue(),
version: ForkName.altair,
executionPayloadValue: ssz.Wei.defaultValue(),
+ consensusBlockValue: ssz.Gwei.defaultValue(),
},
},
produceBlockV3: {
@@ -68,15 +81,24 @@ export const testData: GenericServerTestCases = {
data: ssz.altair.BeaconBlock.defaultValue(),
version: ForkName.altair,
executionPayloadValue: ssz.Wei.defaultValue(),
+ consensusBlockValue: ssz.Gwei.defaultValue(),
executionPayloadBlinded: false,
+ executionPayloadSource: ProducedBlockSource.engine,
},
},
produceBlindedBlock: {
- args: [32000, randaoReveal, graffiti],
+ args: [
+ 32000,
+ randaoReveal,
+ graffiti,
+ undefined,
+ {feeRecipient: undefined, builderSelection: undefined, strictFeeRecipientCheck: undefined},
+ ] as unknown as GenericServerTestCases["produceBlindedBlock"]["args"],
res: {
data: ssz.bellatrix.BlindedBeaconBlock.defaultValue(),
version: ForkName.bellatrix,
executionPayloadValue: ssz.Wei.defaultValue(),
+ consensusBlockValue: ssz.Gwei.defaultValue(),
},
},
produceAttestationData: {
diff --git a/packages/api/test/unit/builder/builder.test.ts b/packages/api/test/unit/builder/builder.test.ts
index 8a4766e64f00..56b8eee45ea5 100644
--- a/packages/api/test/unit/builder/builder.test.ts
+++ b/packages/api/test/unit/builder/builder.test.ts
@@ -1,3 +1,4 @@
+import {describe} from "vitest";
import {createChainForkConfig, defaultChainConfig} from "@lodestar/config";
import {Api, ReqTypes} from "../../../src/builder/routes.js";
import {getClient} from "../../../src/builder/client.js";
diff --git a/packages/api/test/unit/builder/testData.ts b/packages/api/test/unit/builder/testData.ts
index 94ef3c393b20..e198e6971905 100644
--- a/packages/api/test/unit/builder/testData.ts
+++ b/packages/api/test/unit/builder/testData.ts
@@ -7,7 +7,7 @@ import {GenericServerTestCases} from "../../utils/genericServerTest.js";
// randomly pregenerated pubkey
const pubkeyRand = "0x84105a985058fc8740a48bf1ede9d223ef09e8c6b1735ba0a55cf4a9ff2ff92376b778798365e488dab07a652eb04576";
-const root = Buffer.alloc(32, 1);
+const root = new Uint8Array(32).fill(1);
export const testData: GenericServerTestCases<Api> = {
status: {
diff --git a/packages/api/test/unit/client/fetch.test.ts b/packages/api/test/unit/client/fetch.test.ts
index e0f87e1c57e2..80e5f58b164a 100644
--- a/packages/api/test/unit/client/fetch.test.ts
+++ b/packages/api/test/unit/client/fetch.test.ts
@@ -1,6 +1,6 @@
import crypto from "node:crypto";
import http from "node:http";
-import {expect} from "chai";
+import {describe, it, expect, afterEach} from "vitest";
import {FetchError, FetchErrorType, fetch} from "../../../src/utils/client/fetch.js";
describe("FetchError", function () {
@@ -116,12 +116,16 @@ describe("FetchError", function () {
);
}
- await expect(fetch(url, {signal: signalHandler?.()})).to.be.rejected.then((error: FetchError) => {
- expect(error.type).to.be.equal(testCase.errorType);
- expect(error.code).to.be.equal(testCase.errorCode);
+ await expect(fetch(url, {signal: signalHandler?.()})).rejects.toSatisfy((err) => {
+ expect(err).toBeInstanceOf(FetchError);
+ expect((err as FetchError).code).toBe(testCase.errorCode);
+ expect((err as FetchError).type).toBe(testCase.errorType);
+
if (testCase.expectCause) {
- expect(error.cause).to.be.instanceof(Error);
+ expect((err as FetchError).cause).toBeInstanceOf(Error);
}
+
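+ // toSatisfy passes only if this callback returns true; the expect() calls above throw on mismatch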
+ return true;
});
});
}
diff --git a/packages/api/test/unit/client/format.test.ts b/packages/api/test/unit/client/format.test.ts
index 0e388c3cb825..2ab73c9295c5 100644
--- a/packages/api/test/unit/client/format.test.ts
+++ b/packages/api/test/unit/client/format.test.ts
@@ -1,9 +1,9 @@
-import {expect} from "chai";
+import {describe, expect, it} from "vitest";
import {EventType} from "../../../src/beacon/routes/events.js";
import {stringifyQuery} from "../../../src/utils/client/format.js";
describe("client / utils / format", () => {
it("Should repeat topic query", () => {
- expect(stringifyQuery({topics: [EventType.finalizedCheckpoint]})).to.equal("topics=finalized_checkpoint");
+ expect(stringifyQuery({topics: [EventType.finalizedCheckpoint]})).toBe("topics=finalized_checkpoint");
});
});
diff --git a/packages/api/test/unit/client/httpClient.test.ts b/packages/api/test/unit/client/httpClient.test.ts
index 85dd1106b996..b22727d6a22b 100644
--- a/packages/api/test/unit/client/httpClient.test.ts
+++ b/packages/api/test/unit/client/httpClient.test.ts
@@ -1,5 +1,5 @@
import {IncomingMessage} from "node:http";
-import {expect} from "chai";
+import {describe, it, afterEach, expect} from "vitest";
import fastify, {RouteOptions} from "fastify";
import {ErrorAborted, TimeoutError, toBase64} from "@lodestar/utils";
import {HttpClient, HttpError} from "../../../src/utils/client/index.js";
@@ -52,8 +52,8 @@ describe("httpClient json client", () => {
const {body: resBody, status} = await httpClient.json({url, method: "GET"});
- expect(status).to.equal(HttpStatusCode.OK);
- expect(resBody).to.deep.equal({test: 1}, "Wrong res body");
+ expect(status).toBe(HttpStatusCode.OK);
+ expect(resBody).toEqual({test: 1});
});
it("should handle successful POST request correctly", async () => {
@@ -76,10 +76,10 @@ describe("httpClient json client", () => {
const {body: resBodyReceived, status} = await httpClient.json({url, method: "POST", query, body});
- expect(status).to.equal(HttpStatusCode.OK);
- expect(resBodyReceived).to.deep.equal(resBody, "Wrong resBody");
- expect(queryReceived).to.deep.equal(query, "Wrong query");
- expect(bodyReceived).to.deep.equal(body, "Wrong body");
+ expect(status).toBe(HttpStatusCode.OK);
+ expect(resBodyReceived).toEqual(resBody);
+ expect(queryReceived).toEqual(query);
+ expect(bodyReceived).toEqual(body);
});
it("should handle http status code 404 correctly", async () => {
@@ -94,8 +94,8 @@ describe("httpClient json client", () => {
return Promise.reject(Error("did not throw")); // So it doesn't get caught by the catch block below
} catch (e) {
if (!(e instanceof HttpError)) throw Error(`Not an HttpError: ${(e as Error).message}`);
- expect(e.message).to.equal("Not Found: Route GET:/test-route not found", "Wrong error message");
- expect(e.status).to.equal(404, "Wrong error status code");
+ expect(e.message).toBe("Not Found: Route GET:/test-route not found");
+ expect(e.status).toBe(404);
}
});
@@ -112,8 +112,8 @@ describe("httpClient json client", () => {
return Promise.reject(Error("did not throw"));
} catch (e) {
if (!(e instanceof HttpError)) throw Error(`Not an HttpError: ${(e as Error).message}`);
- expect(e.message).to.equal("Internal Server Error: Test error");
- expect(e.status).to.equal(500, "Wrong error status code");
+ expect(e.message).toBe("Internal Server Error: Test error");
+ expect(e.status).toBe(500);
}
});
@@ -130,8 +130,8 @@ describe("httpClient json client", () => {
return Promise.reject(Error("did not throw"));
} catch (e) {
if (!(e instanceof HttpError)) throw Error(`Not an HttpError: ${(e as Error).message}`);
- expect(e.message).to.equal("Service Unavailable: Node is syncing");
- expect(e.status).to.equal(503, "Wrong error status code");
+ expect(e.message).toBe("Service Unavailable: Node is syncing");
+ expect(e.status).toBe(503);
}
});
@@ -139,7 +139,7 @@ describe("httpClient json client", () => {
const {baseUrl} = await getServer({
...testRoute,
handler: async (req) => {
- expect(req.headers.authorization).to.equal("Basic dXNlcjpwYXNzd29yZA==");
+ expect(req.headers.authorization).toBe("Basic dXNlcjpwYXNzd29yZA==");
return {};
},
});
@@ -159,13 +159,13 @@ describe("httpClient json client", () => {
let {baseUrl} = await getServer({
...testRoute,
handler: async (req) => {
- expect(req.headers.authorization).to.equal(`Basic ${toBase64(`${username}:${password}`)}`);
+ expect(req.headers.authorization).toBe(`Basic ${toBase64(`${username}:${password}`)}`);
return {};
},
});
// Since `new URL()` is what URI-encodes, we have to do string manipulation to set the username/password
// First validate the assumption that the URL starts with http://
- expect(baseUrl.indexOf("http://")).to.equal(0);
+ expect(baseUrl.indexOf("http://")).toBe(0);
// We avoid using baseUrl.replace() because it treats $ as a special character
baseUrl = `http://${username}:${password}@${baseUrl.substring("http://".length)}`;
@@ -174,16 +174,6 @@ describe("httpClient json client", () => {
await httpClient.json(testRoute);
});
- it("should not leak user credentials in baseUrl getter", () => {
- const url = new URL("http://localhost");
- url.username = "user";
- url.password = "password";
- const httpClient = new HttpClient({baseUrl: url.toString()});
-
- expect(httpClient.baseUrl.includes(url.username)).to.be.false;
- expect(httpClient.baseUrl.includes(url.password)).to.be.false;
- });
-
it("should handle aborting request with timeout", async () => {
const {baseUrl} = await getServer({
...testRoute,
diff --git a/packages/api/test/unit/client/httpClientFallback.test.ts b/packages/api/test/unit/client/httpClientFallback.test.ts
index 2c0846d00148..ff02095b1cc6 100644
--- a/packages/api/test/unit/client/httpClientFallback.test.ts
+++ b/packages/api/test/unit/client/httpClientFallback.test.ts
@@ -1,5 +1,4 @@
-import Sinon from "sinon";
-import {expect} from "chai";
+import {describe, it, beforeEach, afterEach, expect, vi} from "vitest";
import {HttpClient} from "../../../src/utils/client/index.js";
describe("httpClient fallback", () => {
@@ -8,7 +7,7 @@ describe("httpClient fallback", () => {
// Using fetchSub instead of actually setting up servers because there are some strange
// race conditions, where the server stub doesn't count the call in time before the test is over.
- const fetchStub = Sinon.stub<[URL], ReturnType<typeof fetch>>();
+ const fetchStub = vi.fn();
let httpClient: HttpClient;
@@ -37,7 +36,7 @@ describe("httpClient fallback", () => {
fetch: fetchStub as typeof fetch,
});
- fetchStub.callsFake(async (url) => {
+ fetchStub.mockImplementation(async (url) => {
// Simulate network delay
await new Promise((r) => setTimeout(r, 10));
const i = getServerIndex(url);
@@ -50,7 +49,6 @@ describe("httpClient fallback", () => {
});
afterEach(() => {
- fetchStub.reset();
serverErrors.clear();
});
@@ -58,13 +56,13 @@ describe("httpClient fallback", () => {
function assertServerCallCount(step: number, expectedCallCounts: number[]): void {
const callCounts: number[] = [];
for (let i = 0; i < serverCount; i++) callCounts[i] = 0;
- for (const call of fetchStub.getCalls()) {
- callCounts[getServerIndex(call.args[0])]++;
+ for (const call of fetchStub.mock.calls) {
+ callCounts[getServerIndex(call[0])]++;
}
- expect(callCounts.join(",")).equals(expectedCallCounts.join(","), `step ${step} - callCounts`);
+ expect(callCounts.join(",")).toBe(expectedCallCounts.join(","));
- fetchStub.resetHistory();
+ fetchStub.mockClear();
// eslint-disable-next-line no-console
if (DEBUG_LOGS) console.log("completed assertions step", step);
@@ -114,7 +112,7 @@ describe("httpClient fallback", () => {
serverErrors.set(0, true);
serverErrors.set(1, true);
serverErrors.set(2, true);
- await expect(requestTestRoute()).rejectedWith("test_error_server_2");
+ await expect(requestTestRoute()).rejects.toThrow("test_error_server_2");
assertServerCallCount(0, [1, 1, 1]);
});
});
diff --git a/packages/api/test/unit/client/httpClientOptions.test.ts b/packages/api/test/unit/client/httpClientOptions.test.ts
index 0409a41f5aa9..af0968777219 100644
--- a/packages/api/test/unit/client/httpClientOptions.test.ts
+++ b/packages/api/test/unit/client/httpClientOptions.test.ts
@@ -1,4 +1,4 @@
-import {expect} from "chai";
+import {describe, it, expect} from "vitest";
import {HttpClient} from "../../../src/index.js";
describe("HTTPClient options", () => {
@@ -10,7 +10,7 @@ describe("HTTPClient options", () => {
it("Single root baseUrl option", () => {
const httpClient = new HttpClient({baseUrl: baseUrl1, bearerToken: bearerToken1});
- expect(httpClient["urlsOpts"]).deep.equals([{baseUrl: baseUrl1, bearerToken: bearerToken1}]);
+ expect(httpClient["urlsOpts"]).toEqual([{baseUrl: baseUrl1, bearerToken: bearerToken1}]);
});
it("Multiple urls option with common bearerToken", () => {
@@ -19,7 +19,7 @@ describe("HTTPClient options", () => {
bearerToken: bearerToken1,
});
- expect(httpClient["urlsOpts"]).deep.equals([
+ expect(httpClient["urlsOpts"]).toEqual([
{baseUrl: baseUrl1, bearerToken: bearerToken1},
{baseUrl: baseUrl2, bearerToken: bearerToken1},
]);
@@ -33,7 +33,7 @@ describe("HTTPClient options", () => {
],
});
- expect(httpClient["urlsOpts"]).deep.equals([
+ expect(httpClient["urlsOpts"]).toEqual([
{baseUrl: baseUrl1, bearerToken: bearerToken1},
{baseUrl: baseUrl2, bearerToken: bearerToken2},
]);
@@ -46,7 +46,7 @@ describe("HTTPClient options", () => {
urls: [{baseUrl: baseUrl2, bearerToken: bearerToken2}],
});
- expect(httpClient["urlsOpts"]).deep.equals([
+ expect(httpClient["urlsOpts"]).toEqual([
{baseUrl: baseUrl1, bearerToken: bearerToken1},
{baseUrl: baseUrl2, bearerToken: bearerToken2},
]);
@@ -62,29 +62,29 @@ describe("HTTPClient options", () => {
{baseUrl: baseUrl2, bearerToken: bearerToken2},
],
});
- expect(httpClient["urlsOpts"]).deep.equals([
+ expect(httpClient["urlsOpts"]).toEqual([
{baseUrl: baseUrl1, bearerToken: bearerToken1},
{baseUrl: baseUrl2, bearerToken: bearerToken2},
]);
});
it("Throw if empty baseUrl", () => {
- expect(() => new HttpClient({baseUrl: ""})).to.throw(Error);
+ expect(() => new HttpClient({baseUrl: ""})).toThrow(Error);
});
it("Throw if invalid baseUrl", () => {
- expect(() => new HttpClient({baseUrl: "invalid"})).to.throw(Error);
+ expect(() => new HttpClient({baseUrl: "invalid"})).toThrow(Error);
});
it("Throw if empty value in urls option", () => {
- expect(() => new HttpClient({urls: [""]})).to.throw(Error);
+ expect(() => new HttpClient({urls: [""]})).toThrow(Error);
});
it("Throw if invalid value in urls option", () => {
- expect(() => new HttpClient({urls: ["invalid"]})).to.throw(Error);
+ expect(() => new HttpClient({urls: ["invalid"]})).toThrow(Error);
});
it("Throw if invalid username/password", () => {
- expect(() => new HttpClient({baseUrl: "http://hasa%:%can'tbedecoded@localhost"})).to.throw(Error);
+ expect(() => new HttpClient({baseUrl: "http://hasa%:%can'tbedecoded@localhost"})).toThrow(Error);
});
});
diff --git a/packages/api/test/unit/client/urlFormat.test.ts b/packages/api/test/unit/client/urlFormat.test.ts
index 851742ac1ed5..5b8e1f294976 100644
--- a/packages/api/test/unit/client/urlFormat.test.ts
+++ b/packages/api/test/unit/client/urlFormat.test.ts
@@ -1,4 +1,4 @@
-import {expect} from "chai";
+import {describe, it, expect} from "vitest";
import {
compileRouteUrlFormater,
toColonNotationPath,
@@ -55,14 +55,14 @@ describe("utils / urlFormat", () => {
for (const {urlTemplate, colonNotation, tokens, cases} of testCases) {
it(urlTemplate, () => {
- expect(urlToTokens(urlTemplate)).deep.equal(tokens, "Wrong tokens");
+ expect(urlToTokens(urlTemplate)).toEqual(tokens);
- expect(toColonNotationPath(urlTemplate)).equal(colonNotation, "Wrong colonNotation");
+ expect(toColonNotationPath(urlTemplate)).toBe(colonNotation);
const utlFormater = compileRouteUrlFormater(urlTemplate);
- for (const [i, {args, url}] of cases.entries()) {
- expect(utlFormater(args)).to.equal(url, `wrong case ${i}`);
+ for (const {args, url} of cases) {
+ expect(utlFormater(args)).toBe(url);
}
});
}
diff --git a/packages/api/test/unit/keymanager/keymanager.test.ts b/packages/api/test/unit/keymanager/keymanager.test.ts
index f00e6e754a51..1adf5b1e44da 100644
--- a/packages/api/test/unit/keymanager/keymanager.test.ts
+++ b/packages/api/test/unit/keymanager/keymanager.test.ts
@@ -1,3 +1,4 @@
+import {describe} from "vitest";
import {config} from "@lodestar/config/default";
import {Api, ReqTypes} from "../../../src/keymanager/routes.js";
import {getClient} from "../../../src/keymanager/client.js";
diff --git a/packages/api/test/unit/utils/acceptHeader.test.ts b/packages/api/test/unit/utils/acceptHeader.test.ts
index b92d39799743..67b77864eafa 100644
--- a/packages/api/test/unit/utils/acceptHeader.test.ts
+++ b/packages/api/test/unit/utils/acceptHeader.test.ts
@@ -1,4 +1,4 @@
-import {expect} from "chai";
+import {describe, it, expect} from "vitest";
import {parseAcceptHeader} from "../../../src/utils/acceptHeader.js";
import {ResponseFormat} from "../../../src/interfaces.js";
@@ -30,10 +30,8 @@ describe("utils / acceptHeader", () => {
{header: "application/json;q=1,application/octet-stream;q=1", expected: "ssz"},
];
- for (const testCase of testCases) {
- it(`should correctly parse the header ${testCase.header}`, () => {
- expect(parseAcceptHeader(testCase.header)).to.equal(testCase.expected);
- });
- }
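+ // it.each interpolates $header from each test case object into the test title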
+ it.each(testCases)("should correctly parse the header $header", ({header, expected}) => {
+ expect(parseAcceptHeader(header)).toBe(expected);
+ });
});
});
diff --git a/packages/api/test/unit/utils/serdes.test.ts b/packages/api/test/unit/utils/serdes.test.ts
index c390e3e6b6da..5b55ef66805e 100644
--- a/packages/api/test/unit/utils/serdes.test.ts
+++ b/packages/api/test/unit/utils/serdes.test.ts
@@ -1,70 +1,68 @@
-import {expect} from "chai";
+import {describe, it, expect} from "vitest";
import {fromGraffitiHex, toGraffitiHex} from "../../../src/utils/serdes.js";
describe("utils / serdes", () => {
describe("toGraffitiHex", () => {
it("should convert a UTF-8 graffiti to hex", () => {
- expect(toGraffitiHex("a".repeat(32))).to.equal(
- "0x6161616161616161616161616161616161616161616161616161616161616161"
- );
+ expect(toGraffitiHex("a".repeat(32))).toBe("0x6161616161616161616161616161616161616161616161616161616161616161");
});
it("should convert a graffiti with Unicode symbols to hex", () => {
- expect(toGraffitiHex("🦇🔊".repeat(4))).to.equal(
+ expect(toGraffitiHex("🦇🔊".repeat(4))).toBe(
"0xf09fa687f09f948af09fa687f09f948af09fa687f09f948af09fa687f09f948a"
);
});
it("should trim the hex graffiti if it is too long", () => {
- expect(toGraffitiHex("a".repeat(50))).to.equal(toGraffitiHex("a".repeat(32)));
+ expect(toGraffitiHex("a".repeat(50))).toBe(toGraffitiHex("a".repeat(32)));
});
it("should trim the hex graffiti if the last character is a Unicode symbol", () => {
- expect(toGraffitiHex("a".repeat(31) + "🐼")).to.equal(
+ expect(toGraffitiHex("a".repeat(31) + "🐼")).toBe(
"0x61616161616161616161616161616161616161616161616161616161616161f0"
);
});
it("should right-pad the hex graffiti with zeros if it is too short", () => {
- expect(toGraffitiHex("a")).to.equal("0x6100000000000000000000000000000000000000000000000000000000000000");
- expect(toGraffitiHex("ab")).to.equal("0x6162000000000000000000000000000000000000000000000000000000000000");
- expect(toGraffitiHex("abc")).to.equal("0x6162630000000000000000000000000000000000000000000000000000000000");
+ expect(toGraffitiHex("a")).toBe("0x6100000000000000000000000000000000000000000000000000000000000000");
+ expect(toGraffitiHex("ab")).toBe("0x6162000000000000000000000000000000000000000000000000000000000000");
+ expect(toGraffitiHex("abc")).toBe("0x6162630000000000000000000000000000000000000000000000000000000000");
});
});
describe("fromGraffitiHex", () => {
it("should convert a hex graffiti to UTF-8", () => {
- expect(fromGraffitiHex("0x6161616161616161616161616161616161616161616161616161616161616161")).to.equal(
+ expect(fromGraffitiHex("0x6161616161616161616161616161616161616161616161616161616161616161")).toBe(
"a".repeat(32)
);
});
it("should convert a hex graffiti with Unicode symbols to UTF-8", () => {
- expect(fromGraffitiHex("0xf09fa687f09f948af09fa687f09f948af09fa687f09f948af09fa687f09f948a")).to.equal(
+ expect(fromGraffitiHex("0xf09fa687f09f948af09fa687f09f948af09fa687f09f948af09fa687f09f948a")).toBe(
"🦇🔊".repeat(4)
);
});
it("should convert a padded hex graffiti to UTF-8", () => {
- expect(fromGraffitiHex("0x6100000000000000000000000000000000000000000000000000000000000000")).to.equal(
+ expect(fromGraffitiHex("0x6100000000000000000000000000000000000000000000000000000000000000")).toBe(
// null bytes will be ignored and not displayed later on
"a" + "\u0000".repeat(31)
);
});
it("should decode a hex graffiti with a cut off Unicode character at the end", () => {
- expect(fromGraffitiHex("0x61616161616161616161616161616161616161616161616161616161616161f0")).to.equal(
+ expect(fromGraffitiHex("0x61616161616161616161616161616161616161616161616161616161616161f0")).toBe(
// last character will be displayed as �
"a".repeat(31) + "\ufffd"
);
});
it("should not throw an error if an invalid hex graffiti is provided", () => {
- expect(() => fromGraffitiHex("a")).to.not.throw();
+ expect(() => fromGraffitiHex("a")).not.toThrow();
});
it("should return the provided graffiti string if decoding fails", () => {
- expect(fromGraffitiHex("a")).to.equal("a");
+ expect(fromGraffitiHex("a")).toBe("a");
});
});
});
diff --git a/packages/api/test/utils/checkAgainstSpec.ts b/packages/api/test/utils/checkAgainstSpec.ts
index eba274e16ef6..ed65279bca22 100644
--- a/packages/api/test/utils/checkAgainstSpec.ts
+++ b/packages/api/test/utils/checkAgainstSpec.ts
@@ -1,16 +1,16 @@
import Ajv, {ErrorObject} from "ajv";
-import {expect} from "chai";
+import {expect, describe, beforeAll, it} from "vitest";
import {ReqGeneric, ReqSerializer, ReturnTypes, RouteDef} from "../../src/utils/types.js";
-import {applyRecursively, OpenApiJson, parseOpenApiSpec, ParseOpenApiSpecOpts} from "./parseOpenApiSpec.js";
+import {applyRecursively, JsonSchema, OpenApiJson, parseOpenApiSpec, ParseOpenApiSpecOpts} from "./parseOpenApiSpec.js";
import {GenericServerTestCases} from "./genericServerTest.js";
const ajv = new Ajv({
- // strict: true,
- // strictSchema: true,
+ strict: true,
+ strictTypes: false, // TODO Enable once beacon-APIs is fixed. See https://github.com/ChainSafe/lodestar/issues/6206
allErrors: true,
});
-// TODO: Still necessary?
+// Ensure embedded schema 'example' fields do not fail validation
ajv.addKeyword({
keyword: "example",
validate: () => true,
@@ -19,24 +19,76 @@ ajv.addKeyword({
ajv.addFormat("hex", /^0x[a-fA-F0-9]+$/);
+/**
+ * A set of properties that will be ignored during tests execution.
+ * This provides a blacklist mechanism so a test can pass while some part of the spec is not yet implemented.
+ *
+ * Properties can be nested using dot notation, following JSONPath semantics.
+ *
+ * Example:
+ * - query
+ * - query.skip_randao_verification
+ */
+export type IgnoredProperty = {
+ /**
+ * Properties to ignore in the request schema
+ */
+ request?: string[];
+ /**
+ * Properties to ignore in the response schema
+ */
+ response?: string[];
+};
+
+/**
+ * Recursively remove a property from a schema
+ *
+ * @param schema Schema to remove a property from
+ * @param property JSONPath like property to remove from the schema
+ */
+function deleteNested(schema: JsonSchema | undefined, property: string): void {
+ const properties = schema?.properties;
+ if (property.includes(".")) {
+ // Extract first segment, keep the rest as dotted
+ const [key, ...rest] = property.split(".");
+ deleteNested(properties?.[key], rest.join("."));
+ } else {
+ // Remove property from 'required'
+ if (schema?.required) {
+ schema.required = schema.required?.filter((e) => property !== e);
+ }
+ // Remove property from 'properties'
+ delete properties?.[property];
+ }
+}
+
export function runTestCheckAgainstSpec(
openApiJson: OpenApiJson,
routesData: Record<string, RouteDef>,
reqSerializers: Record<string, ReqSerializer<any, any>>,
returnTypes: Record<string, ReturnTypes<any>[string]>,
testDatas: Record<string, GenericServerTestCases<any>[string]>,
- opts?: ParseOpenApiSpecOpts
+ opts?: ParseOpenApiSpecOpts,
+ ignoredOperations: string[] = [],
+ ignoredProperties: Record<string, IgnoredProperty> = {}
): void {
const openApiSpec = parseOpenApiSpec(openApiJson, opts);
for (const [operationId, routeSpec] of openApiSpec.entries()) {
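+ // Skip operations that are explicitly excluded from spec testing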
+ if (ignoredOperations.includes(operationId)) {
+ continue;
+ }
+
+ const ignoredProperty = ignoredProperties[operationId];
+
describe(operationId, () => {
const {requestSchema, responseOkSchema} = routeSpec;
const routeId = operationId;
const testData = testDatas[routeId];
const routeData = routesData[routeId];
- before("route is defined", () => {
+ beforeAll(() => {
if (routeData == null) {
throw Error(`No routeData for ${routeId}`);
}
@@ -68,7 +120,15 @@ export function runTestCheckAgainstSpec(
stringifyProperties((reqJson as ReqGeneric).params ?? {});
stringifyProperties((reqJson as ReqGeneric).query ?? {});
- // Validate response
+ const ignoredProperties = ignoredProperty?.request;
+ if (ignoredProperties) {
+ // Remove ignored properties from schema validation
+ for (const property of ignoredProperties) {
+ deleteNested(routeSpec.requestSchema, property);
+ }
+ }
+
+ // Validate request
validateSchema(routeSpec.requestSchema, reqJson, "request");
});
}
@@ -87,6 +147,13 @@ export function runTestCheckAgainstSpec(
}
}
+ const ignoredProperties = ignoredProperty?.response;
+ if (ignoredProperties) {
+ // Remove ignored properties from schema validation
+ for (const property of ignoredProperties) {
+ deleteNested(routeSpec.responseOkSchema, property);
+ }
+ }
// Validate response
validateSchema(responseOkSchema, resJson, "response");
});
diff --git a/packages/api/test/utils/genericServerTest.ts b/packages/api/test/utils/genericServerTest.ts
index d5e091bc25af..f0f805b7469a 100644
--- a/packages/api/test/utils/genericServerTest.ts
+++ b/packages/api/test/utils/genericServerTest.ts
@@ -1,4 +1,4 @@
-import {expect} from "chai";
+import {it, expect, MockInstance} from "vitest";
import {ChainForkConfig} from "@lodestar/config";
import {ReqGeneric, Resolves} from "../../src/utils/index.js";
import {FetchOpts, HttpClient, IHttpClient} from "../../src/utils/client/index.js";
@@ -44,30 +44,25 @@ export function runGenericServerTest<
it(routeId as string, async () => {
// Register mock data for this route
// TODO: Look for the type error
- // eslint-disable-next-line @typescript-eslint/ban-ts-comment
- // @ts-expect-error
- mockApi[routeId].resolves(testCases[routeId].res);
+ (mockApi[routeId] as MockInstance).mockResolvedValue(testCases[routeId].res);
// Do the call
const res = await (client[routeId] as APIClientHandler)(...(testCase.args as any[]));
// Use spy to assert argument serialization
if (testCase.query) {
- expect(httpClient.opts?.query).to.deep.equal(testCase.query, "Wrong fetch opts.query");
+ expect(httpClient.opts?.query).toEqual(testCase.query);
}
// Assert server handler called with correct args
- expect(mockApi[routeId].callCount).to.equal(1, `mockApi[${routeId as string}] must be called once`);
+ expect(mockApi[routeId] as MockInstance).toHaveBeenCalledTimes(1);
// if mock api args are > testcase args, there may be some undefined extra args passed towards the end
// to obtain a match, ignore the extra args
- expect(mockApi[routeId].getCall(0).args.slice(0, testCase.args.length)).to.deep.equal(
- testCase.args,
- `mockApi[${routeId as string}] wrong args`
- );
+ expect(mockApi[routeId] as MockInstance).toHaveBeenNthCalledWith(1, ...(testCase.args as any[]));
// Assert returned value is correct
- expect(res.response).to.deep.equal(testCase.res, "Wrong returned value");
+ expect(res.response).toEqual(testCase.res);
});
}
}
diff --git a/packages/api/test/utils/parseOpenApiSpec.ts b/packages/api/test/utils/parseOpenApiSpec.ts
index 5faf0082012d..84b024e5950e 100644
--- a/packages/api/test/utils/parseOpenApiSpec.ts
+++ b/packages/api/test/utils/parseOpenApiSpec.ts
@@ -11,7 +11,7 @@ type RouteUrl = string;
/** "get" | "post" */
type HttpMethod = string;
-type JsonSchema = {
+export type JsonSchema = {
type: "object";
properties?: Record<string, JsonSchema>;
required?: string[];
diff --git a/packages/api/test/utils/utils.ts b/packages/api/test/utils/utils.ts
index 793f8b2c61ef..8faa2c90d187 100644
--- a/packages/api/test/utils/utils.ts
+++ b/packages/api/test/utils/utils.ts
@@ -1,6 +1,6 @@
+import {beforeAll, afterAll, MockedObject, vi} from "vitest";
import qs from "qs";
import fastify, {FastifyInstance} from "fastify";
-import Sinon from "sinon";
import {mapValues} from "@lodestar/utils";
import {ServerApi} from "../../src/interfaces.js";
@@ -19,7 +19,7 @@ export function getTestServer(): {baseUrl: string; server: FastifyInstance} {
done();
});
- before("start server", async () => {
+ beforeAll(async () => {
await new Promise((resolve, reject) => {
server.listen({port}, function (err, address) {
if (err !== null && err != undefined) {
@@ -31,7 +31,7 @@ export function getTestServer(): {baseUrl: string; server: FastifyInstance} {
});
});
- after("stop server", async () => {
+ afterAll(async () => {
await server.close();
});
@@ -41,6 +41,6 @@ export function getTestServer(): {baseUrl: string; server: FastifyInstance} {
-/** Type helper to get a Sinon mock object type with Api */
+/** Type helper to get a vitest mock object type with Api */
export function getMockApi<Api extends Record<string, any>>(
  routeIds: Record<keyof Api, any>
-): Sinon.SinonStubbedInstance<ServerApi<Api>> & ServerApi<Api> {
-  return mapValues(routeIds, () => Sinon.stub()) as Sinon.SinonStubbedInstance<ServerApi<Api>> & ServerApi<Api>;
+): MockedObject<ServerApi<Api>> & ServerApi<Api> {
+  return mapValues(routeIds, () => vi.fn()) as MockedObject<ServerApi<Api>> & ServerApi<Api>;
}
diff --git a/packages/api/vitest.config.ts b/packages/api/vitest.config.ts
new file mode 100644
index 000000000000..9f325a6477e2
--- /dev/null
+++ b/packages/api/vitest.config.ts
@@ -0,0 +1,12 @@
+import {defineConfig, mergeConfig} from "vitest/config";
+import vitestConfig from "../../vitest.base.config";
+
+export default mergeConfig(
+ vitestConfig,
+ defineConfig({
+ test: {
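+      // Package-level global setup; restoreMocks restores the original implementation of mocks between tests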
+ globalSetup: ["./test/globalSetup.ts"],
+ restoreMocks: true,
+ },
+ })
+);
diff --git a/packages/beacon-node/package.json b/packages/beacon-node/package.json
index c8cff7cbf28c..396dac8c54eb 100644
--- a/packages/beacon-node/package.json
+++ b/packages/beacon-node/package.json
@@ -11,7 +11,7 @@
"bugs": {
"url": "https://github.com/ChainSafe/lodestar/issues"
},
- "version": "1.12.0",
+ "version": "1.13.0",
"type": "module",
"exports": {
".": {
@@ -80,7 +80,7 @@
"test:unit:minimal": "vitest --run --segfaultRetry 3 --dir test/unit/ --coverage",
"test:unit:mainnet": "LODESTAR_PRESET=mainnet nyc --cache-dir .nyc_output/.cache -e .ts mocha 'test/unit-mainnet/**/*.test.ts'",
"test:unit": "yarn test:unit:minimal && yarn test:unit:mainnet",
- "test:e2e": "LODESTAR_PRESET=minimal vitest --run --segfaultRetry 3 --single-thread --dir test/e2e",
+ "test:e2e": "LODESTAR_PRESET=minimal vitest --run --segfaultRetry 3 --poolOptions.threads.singleThread true --dir test/e2e",
"test:sim": "mocha 'test/sim/**/*.test.ts'",
"test:sim:merge-interop": "mocha 'test/sim/merge-interop.test.ts'",
"test:sim:mergemock": "mocha 'test/sim/mergemock.test.ts'",
@@ -100,7 +100,7 @@
"@chainsafe/bls": "7.1.1",
"@chainsafe/blst": "^0.2.9",
"@chainsafe/discv5": "^5.1.0",
- "@chainsafe/libp2p-gossipsub": "^10.1.0",
+ "@chainsafe/libp2p-gossipsub": "^10.1.1",
"@chainsafe/libp2p-noise": "^13.0.1",
"@chainsafe/persistent-merkle-tree": "^0.6.1",
"@chainsafe/prometheus-gc-stats": "^1.0.0",
@@ -119,18 +119,18 @@
"@libp2p/peer-id-factory": "^3.0.4",
"@libp2p/prometheus-metrics": "^2.0.7",
"@libp2p/tcp": "8.0.8",
- "@lodestar/api": "^1.12.0",
- "@lodestar/config": "^1.12.0",
- "@lodestar/db": "^1.12.0",
- "@lodestar/fork-choice": "^1.12.0",
- "@lodestar/light-client": "^1.12.0",
- "@lodestar/logger": "^1.12.0",
- "@lodestar/params": "^1.12.0",
- "@lodestar/reqresp": "^1.12.0",
- "@lodestar/state-transition": "^1.12.0",
- "@lodestar/types": "^1.12.0",
- "@lodestar/utils": "^1.12.0",
- "@lodestar/validator": "^1.12.0",
+ "@lodestar/api": "^1.13.0",
+ "@lodestar/config": "^1.13.0",
+ "@lodestar/db": "^1.13.0",
+ "@lodestar/fork-choice": "^1.13.0",
+ "@lodestar/light-client": "^1.13.0",
+ "@lodestar/logger": "^1.13.0",
+ "@lodestar/params": "^1.13.0",
+ "@lodestar/reqresp": "^1.13.0",
+ "@lodestar/state-transition": "^1.13.0",
+ "@lodestar/types": "^1.13.0",
+ "@lodestar/utils": "^1.13.0",
+ "@lodestar/validator": "^1.13.0",
"@multiformats/multiaddr": "^12.1.3",
"@types/datastore-level": "^3.0.0",
"buffer-xor": "^2.0.2",
@@ -145,7 +145,7 @@
"jwt-simple": "0.5.6",
"libp2p": "0.46.12",
"multiformats": "^11.0.1",
- "prom-client": "^14.2.0",
+ "prom-client": "^15.1.0",
"qs": "^6.11.1",
"snappyjs": "^0.7.0",
"strict-event-emitter-types": "^2.0.0",
diff --git a/packages/beacon-node/src/api/impl/beacon/blocks/index.ts b/packages/beacon-node/src/api/impl/beacon/blocks/index.ts
index 3fe93345ed32..a4775d523959 100644
--- a/packages/beacon-node/src/api/impl/beacon/blocks/index.ts
+++ b/packages/beacon-node/src/api/impl/beacon/blocks/index.ts
@@ -1,17 +1,13 @@
import {fromHexString, toHexString} from "@chainsafe/ssz";
import {ApplicationMethods, routes} from "@lodestar/api";
-import {
- computeTimeAtSlot,
- parseSignedBlindedBlockOrContents,
- reconstructFullBlockOrContents,
- DataAvailableStatus,
-} from "@lodestar/state-transition";
+import {computeTimeAtSlot, reconstructFullBlockOrContents} from "@lodestar/state-transition";
import {SLOTS_PER_HISTORICAL_ROOT} from "@lodestar/params";
import {sleep, toHex} from "@lodestar/utils";
import {allForks, deneb, isSignedBlockContents, ProducedBlockSource} from "@lodestar/types";
import {BlockSource, getBlockInput, ImportBlockOpts, BlockInput} from "../../../../chain/blocks/types.js";
import {promiseAllMaybeAsync} from "../../../../util/promises.js";
import {isOptimisticBlock} from "../../../../util/forkChoice.js";
+import {computeBlobSidecars} from "../../../../util/blobs.js";
import {BlockError, BlockErrorCode} from "../../../../chain/errors/index.js";
import {OpSource} from "../../../../metrics/validatorMonitor.js";
import {NetworkEvent} from "../../../../network/index.js";
@@ -49,22 +45,23 @@ export function getBeaconBlockApi({
opts: PublishBlockOpts = {}
) => {
const seenTimestampSec = Date.now() / 1000;
- let blockForImport: BlockInput, signedBlock: allForks.SignedBeaconBlock, signedBlobs: deneb.SignedBlobSidecars;
+ let blockForImport: BlockInput, signedBlock: allForks.SignedBeaconBlock, blobSidecars: deneb.BlobSidecars;
if (isSignedBlockContents(signedBlockOrContents)) {
- ({signedBlock, signedBlobSidecars: signedBlobs} = signedBlockOrContents);
+ ({signedBlock} = signedBlockOrContents);
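+      // Derive blob sidecars from the submitted block and its contents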
+ blobSidecars = computeBlobSidecars(config, signedBlock, signedBlockOrContents);
blockForImport = getBlockInput.postDeneb(
config,
signedBlock,
BlockSource.api,
- signedBlobs.map((sblob) => sblob.message),
+ blobSidecars,
// don't bundle any bytes for block and blobs
null,
- signedBlobs.map(() => null)
+ blobSidecars.map(() => null)
);
} else {
signedBlock = signedBlockOrContents;
- signedBlobs = [];
+ blobSidecars = [];
// TODO: Once API supports submitting data as SSZ, replace null with blockBytes
blockForImport = getBlockInput.preDeneb(config, signedBlock, BlockSource.api, null);
}
@@ -77,9 +74,11 @@ export function getBeaconBlockApi({
const slot = signedBlock.message.slot;
const fork = config.getForkName(slot);
const blockRoot = toHex(chain.config.getForkTypes(slot).BeaconBlock.hashTreeRoot(signedBlock.message));
+ // bodyRoot should be the same as in the produced block
+ const bodyRoot = toHex(chain.config.getForkTypes(slot).BeaconBlockBody.hashTreeRoot(signedBlock.message.body));
const blockLocallyProduced =
chain.producedBlockRoot.has(blockRoot) || chain.producedBlindedBlockRoot.has(blockRoot);
- const valLogMeta = {broadcastValidation, blockRoot, blockLocallyProduced, slot};
+ const valLogMeta = {broadcastValidation, blockRoot, bodyRoot, blockLocallyProduced, slot};
switch (broadcastValidation) {
case routes.beacon.BroadcastValidation.gossip: {
@@ -88,6 +87,11 @@ export function getBeaconBlockApi({
await validateGossipBlock(config, chain, signedBlock, fork);
} catch (error) {
chain.logger.error("Gossip validations failed while publishing the block", valLogMeta, error as Error);
+ chain.persistInvalidSszValue(
+ chain.config.getForkTypes(slot).SignedBeaconBlock,
+ signedBlock,
+ "api_reject_gossip_failure"
+ );
throw error;
}
}
@@ -105,6 +109,11 @@ export function getBeaconBlockApi({
blockInput: blockForImport,
peer: IDENTITY_PEER_ID,
});
+ chain.persistInvalidSszValue(
+ chain.config.getForkTypes(slot).SignedBeaconBlock,
+ signedBlock,
+ "api_reject_parent_unknown"
+ );
throw new BlockError(signedBlock, {
code: BlockErrorCode.PARENT_UNKNOWN,
parentRoot: toHexString(signedBlock.message.parentRoot),
@@ -112,20 +121,20 @@ export function getBeaconBlockApi({
}
try {
- await verifyBlocksInEpoch.call(
- chain as BeaconChain,
- parentBlock,
- [blockForImport],
- [DataAvailableStatus.available],
- {
- ...opts,
- verifyOnly: true,
- skipVerifyBlockSignatures: true,
- skipVerifyExecutionPayload: true,
- }
- );
+ await verifyBlocksInEpoch.call(chain as BeaconChain, parentBlock, [blockForImport], {
+ ...opts,
+ verifyOnly: true,
+ skipVerifyBlockSignatures: true,
+ skipVerifyExecutionPayload: true,
+ seenTimestampSec,
+ });
} catch (error) {
chain.logger.error("Consensus checks failed while publishing the block", valLogMeta, error as Error);
+ chain.persistInvalidSszValue(
+ chain.config.getForkTypes(slot).SignedBeaconBlock,
+ signedBlock,
+ "api_reject_consensus_failure"
+ );
throw error;
}
}
@@ -180,18 +189,15 @@ export function getBeaconBlockApi({
}
throw e;
}),
- ...signedBlobs.map((signedBlob) => () => network.publishBlobSidecar(signedBlob)),
+ ...blobSidecars.map((blobSidecar) => () => network.publishBlobSidecar(blobSidecar)),
];
await promiseAllMaybeAsync(publishPromises);
};
const publishBlindedBlock: ApplicationMethods["publishBlindedBlock"] = async (
- {signedBlindedBlockOrContents},
+ {signedBlindedBlock},
opts: PublishBlockOpts = {}
) => {
- const {signedBlindedBlock, signedBlindedBlobSidecars} =
- parseSignedBlindedBlockOrContents(signedBlindedBlockOrContents);
-
const slot = signedBlindedBlock.message.slot;
const blockRoot = toHex(
chain.config
@@ -202,28 +208,32 @@ export function getBeaconBlockApi({
// Either the payload/blobs are cached from i) engine locally or ii) they are from the builder
//
// executionPayload can be null or a real payload in locally produced so check for presence of root
- const source = chain.producedBlockRoot.has(blockRoot) ? ProducedBlockSource.engine : ProducedBlockSource.builder;
-
- const executionPayload = chain.producedBlockRoot.get(blockRoot) ?? null;
- const blobSidecars = executionPayload
- ? chain.producedBlobSidecarsCache.get(toHex(executionPayload.blockHash))
- : undefined;
- const blobs = blobSidecars ? blobSidecars.map((blobSidecar) => blobSidecar.blob) : null;
-
- chain.logger.debug("Assembling blinded block for publishing", {source, blockRoot, slot});
-
- const signedBlockOrContents =
- source === ProducedBlockSource.engine
- ? reconstructFullBlockOrContents({signedBlindedBlock, signedBlindedBlobSidecars}, {executionPayload, blobs})
- : await reconstructBuilderBlockOrContents(chain, signedBlindedBlockOrContents);
-
- // the full block is published by relay and it's possible that the block is already known to us
- // by gossip
- //
- // see: https://github.com/ChainSafe/lodestar/issues/5404
- chain.logger.info("Publishing assembled block", {blockRoot, slot, source});
- // TODO: opts are not type safe, add ServerOpts in Endpoint type definition?
- return publishBlock({signedBlockOrContents}, {...opts, ignoreIfKnown: true});
+ const executionPayload = chain.producedBlockRoot.get(blockRoot);
+ if (executionPayload !== undefined) {
+ const source = ProducedBlockSource.engine;
+ chain.logger.debug("Reconstructing signedBlockOrContents", {blockRoot, slot, source});
+
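+      // Contents (blobs and proofs) are cached by execution block hash at block production time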
+ const contents = executionPayload
+ ? chain.producedContentsCache.get(toHex(executionPayload.blockHash)) ?? null
+ : null;
+ const signedBlockOrContents = reconstructFullBlockOrContents(signedBlindedBlock, {executionPayload, contents});
+
+ chain.logger.info("Publishing assembled block", {blockRoot, slot, source});
+ return publishBlock({signedBlockOrContents}, opts);
+ } else {
+ const source = ProducedBlockSource.builder;
+ chain.logger.debug("Reconstructing signedBlockOrContents", {blockRoot, slot, source});
+
+ const signedBlockOrContents = await reconstructBuilderBlockOrContents(chain, signedBlindedBlock);
+
+ // the full block is published by relay and it's possible that the block is already known to us
+ // by gossip
+ //
+ // see: https://github.com/ChainSafe/lodestar/issues/5404
+ chain.logger.info("Publishing assembled block", {blockRoot, slot, source});
+ // TODO: opts are not type safe, add ServerOpts in Endpoint type definition?
+ return publishBlock({signedBlockOrContents}, {...opts, ignoreIfKnown: true});
+ }
};
return {
@@ -407,13 +417,13 @@ export function getBeaconBlockApi({
async function reconstructBuilderBlockOrContents(
chain: ApiModules["chain"],
- signedBlindedBlockOrContents: allForks.SignedBlindedBeaconBlockOrContents
+ signedBlindedBlock: allForks.SignedBlindedBeaconBlock
): Promise<allForks.SignedBeaconBlockOrContents> {
const executionBuilder = chain.executionBuilder;
if (!executionBuilder) {
throw Error("executionBuilder required to publish SignedBlindedBeaconBlock");
}
- const signedBlockOrContents = await executionBuilder.submitBlindedBlock(signedBlindedBlockOrContents);
+ const signedBlockOrContents = await executionBuilder.submitBlindedBlock(signedBlindedBlock);
return signedBlockOrContents;
}
diff --git a/packages/beacon-node/src/api/impl/validator/index.ts b/packages/beacon-node/src/api/impl/validator/index.ts
index d9c6906229b7..40276ebe2bfa 100644
--- a/packages/beacon-node/src/api/impl/validator/index.ts
+++ b/packages/beacon-node/src/api/impl/validator/index.ts
@@ -9,7 +9,6 @@ import {
computeEpochAtSlot,
getCurrentSlot,
beaconBlockToBlinded,
- blobSidecarsToBlinded,
} from "@lodestar/state-transition";
import {
GENESIS_SLOT,
@@ -31,11 +30,11 @@ import {
allForks,
BLSSignature,
isBlindedBeaconBlock,
- isBlindedBlockContents,
+ isBlockContents,
phase0,
} from "@lodestar/types";
import {ExecutionStatus} from "@lodestar/fork-choice";
-import {toHex, racePromisesWithCutoff, RaceEvent} from "@lodestar/utils";
+import {toHex, racePromisesWithCutoff, RaceEvent, gweiToWei} from "@lodestar/utils";
import {AttestationError, AttestationErrorCode, GossipAction, SyncCommitteeError} from "../../../chain/errors/index.js";
import {validateApiAggregateAndProof} from "../../../chain/validation/index.js";
import {ZERO_HASH} from "../../../constants/index.js";
@@ -280,7 +279,7 @@ export function getValidatorApi({
);
}
- const produceBlindedBlockOrContents = async function produceBlindedBlockOrContents(
+ const produceBuilderBlindedBlock = async function produceBuilderBlindedBlock(
slot: Slot,
randaoReveal: BLSSignature,
graffiti: string,
@@ -288,7 +287,12 @@ export function getValidatorApi({
{
skipHeadChecksAndUpdate,
}: Omit<routes.validator.ExtraProduceBlockOps, "builderSelection"> & {skipHeadChecksAndUpdate?: boolean} = {}
- ): Promise<routes.validator.ProduceBlindedBlockOrContentsRes> {
+ ): Promise<routes.validator.ProduceBlindedBlockRes> {
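+    // The builder flow only exists for execution (post-merge) forks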
+ const version = config.getForkName(slot);
+ if (!isForkExecution(version)) {
+ throw Error(`Invalid fork=${version} for produceBuilderBlindedBlock`);
+ }
+
const source = ProducedBlockSource.builder;
metrics?.blockProductionRequests.inc({source});
@@ -314,7 +318,7 @@ export function getValidatorApi({
let timer;
try {
timer = metrics?.blockProductionTime.startTimer();
- const {block, executionPayloadValue} = await chain.produceBlindedBlock({
+ const {block, executionPayloadValue, consensusBlockValue} = await chain.produceBlindedBlock({
slot,
randaoReveal,
graffiti: toGraffitiBuffer(graffiti || ""),
@@ -325,30 +329,21 @@ export function getValidatorApi({
logger.verbose("Produced blinded block", {
slot,
executionPayloadValue,
+ consensusBlockValue,
root: toHexString(config.getBlindedForkTypes(slot).BeaconBlock.hashTreeRoot(block)),
});
- const version = config.getForkName(block.slot);
- if (isForkBlobs(version)) {
- const blockHash = toHex((block as bellatrix.BlindedBeaconBlock).body.executionPayloadHeader.blockHash);
- const blindedBlobSidecars = chain.producedBlindedBlobSidecarsCache.get(blockHash);
- if (blindedBlobSidecars === undefined) {
- throw Error("blobSidecars missing in cache");
- }
- return {
- data: {blindedBlock: block, blindedBlobSidecars} as allForks.BlindedBlockContents,
- version,
- executionPayloadValue,
- };
- } else {
- return {data: block, version, executionPayloadValue};
+ if (chain.opts.persistProducedBlocks) {
+ void chain.persistBlock(block, "produced_builder_block");
}
+
+ return {data: block, version, executionPayloadValue, consensusBlockValue};
} finally {
if (timer) timer({source});
}
};
- const produceFullBlockOrContents = async function produceFullBlockOrContents(
+ const produceEngineFullBlockOrContents = async function produceEngineFullBlockOrContents(
slot: Slot,
randaoReveal: BLSSignature,
graffiti: string,
@@ -375,13 +370,12 @@ export function getValidatorApi({
let timer;
try {
timer = metrics?.blockProductionTime.startTimer();
- const {block, executionPayloadValue} = await chain.produceBlock({
+ const {block, executionPayloadValue, consensusBlockValue} = await chain.produceBlock({
slot,
randaoReveal,
graffiti: toGraffitiBuffer(graffiti || ""),
feeRecipient,
});
-
const version = config.getForkName(block.slot);
if (strictFeeRecipientCheck && feeRecipient && isForkExecution(version)) {
const blockFeeRecipient = toHexString((block as bellatrix.BeaconBlock).body.executionPayload.feeRecipient);
@@ -395,206 +389,238 @@ export function getValidatorApi({
logger.verbose("Produced execution block", {
slot,
executionPayloadValue,
+ consensusBlockValue,
root: toHexString(config.getForkTypes(slot).BeaconBlock.hashTreeRoot(block)),
});
+ if (chain.opts.persistProducedBlocks) {
+ void chain.persistBlock(block, "produced_engine_block");
+ }
if (isForkBlobs(version)) {
const blockHash = toHex((block as bellatrix.BeaconBlock).body.executionPayload.blockHash);
- const blobSidecars = chain.producedBlobSidecarsCache.get(blockHash);
- if (blobSidecars === undefined) {
- throw Error("blobSidecars missing in cache");
+ const contents = chain.producedContentsCache.get(blockHash);
+ if (contents === undefined) {
+ throw Error("contents missing in cache");
}
- return {data: {block, blobSidecars} as allForks.BlockContents, version, executionPayloadValue};
+
+ return {
+ data: {block, ...contents} as allForks.BlockContents,
+ version,
+ executionPayloadValue,
+ consensusBlockValue,
+ };
} else {
- return {data: block, version, executionPayloadValue};
+ return {data: block, version, executionPayloadValue, consensusBlockValue};
}
} finally {
if (timer) timer({source});
}
};
- const produceBlockV3: ServerApi["produceBlockV3"] = async function produceBlockV3(
- slot,
- randaoReveal,
- graffiti,
- // TODO deneb: skip randao verification
- _skipRandaoVerification?: boolean,
- {feeRecipient, builderSelection, strictFeeRecipientCheck}: routes.validator.ExtraProduceBlockOps = {}
- ) {
- notWhileSyncing();
- await waitForSlot(slot); // Must never request for a future slot > currentSlot
-
- // Process the queued attestations in the forkchoice for correct head estimation
- // forkChoice.updateTime() might have already been called by the onSlot clock
- // handler, in which case this should just return.
- chain.forkChoice.updateTime(slot);
- chain.recomputeForkChoiceHead();
-
- const fork = config.getForkName(slot);
- // set some sensible opts
- builderSelection = builderSelection ?? routes.validator.BuilderSelection.MaxProfit;
- const isBuilderEnabled =
- ForkSeq[fork] >= ForkSeq.bellatrix &&
- chain.executionBuilder !== undefined &&
- builderSelection !== routes.validator.BuilderSelection.ExecutionOnly;
-
- logger.verbose("Assembling block with produceBlockV3 ", {
- fork,
- builderSelection,
+ const produceEngineOrBuilderBlock: ServerApi["produceBlockV3"] =
+ async function produceEngineOrBuilderBlock(
slot,
- isBuilderEnabled,
- strictFeeRecipientCheck,
- });
- // Start calls for building execution and builder blocks
- const blindedBlockPromise = isBuilderEnabled
- ? // can't do fee recipient checks as builder bid doesn't return feeRecipient as of now
- produceBlindedBlockOrContents(slot, randaoReveal, graffiti, {
- feeRecipient,
- // skip checking and recomputing head in these individual produce calls
- skipHeadChecksAndUpdate: true,
- }).catch((e) => {
- logger.error("produceBlindedBlockOrContents failed to produce block", {slot}, e);
- return null;
- })
- : null;
+ randaoReveal,
+ graffiti,
+ // TODO deneb: skip randao verification
+ _skipRandaoVerification?: boolean,
+ {feeRecipient, builderSelection, strictFeeRecipientCheck}: routes.validator.ExtraProduceBlockOps = {}
+ ) {
+ notWhileSyncing();
+ await waitForSlot(slot); // Must never request a future slot > currentSlot
- const fullBlockPromise =
- // At any point either the builder or execution or both flows should be active.
- //
- // Ideally such a scenario should be prevented on startup, but proposerSettingsFile or keymanager
- // configurations could cause a validator pubkey to have builder disabled with builder selection builder only
- // (TODO: independently make sure such an options update is not successful for a validator pubkey)
- //
- // So if builder is disabled ignore builder selection of builderonly if caused by user mistake
- !isBuilderEnabled || builderSelection !== routes.validator.BuilderSelection.BuilderOnly
- ? // TODO deneb: builderSelection needs to be figured out if to be done beacon side
- // || builderSelection !== BuilderSelection.BuilderOnly
- produceFullBlockOrContents(slot, randaoReveal, graffiti, {
+ // Process the queued attestations in the forkchoice for correct head estimation
+ // forkChoice.updateTime() might have already been called by the onSlot clock
+ // handler, in which case this should just return.
+ chain.forkChoice.updateTime(slot);
+ chain.recomputeForkChoiceHead();
+
+ const fork = config.getForkName(slot);
+ // set some sensible opts
+ builderSelection = builderSelection ?? routes.validator.BuilderSelection.MaxProfit;
+ const isBuilderEnabled =
+ ForkSeq[fork] >= ForkSeq.bellatrix &&
+ chain.executionBuilder !== undefined &&
+ builderSelection !== routes.validator.BuilderSelection.ExecutionOnly;
+
+ logger.verbose("Assembling block with produceEngineOrBuilderBlock ", {
+ fork,
+ builderSelection,
+ slot,
+ isBuilderEnabled,
+ strictFeeRecipientCheck,
+ });
+ // Start calls for building execution and builder blocks
+ const blindedBlockPromise = isBuilderEnabled
+ ? // can't do fee recipient checks as builder bid doesn't return feeRecipient as of now
+ produceBuilderBlindedBlock(slot, randaoReveal, graffiti, {
feeRecipient,
- strictFeeRecipientCheck,
// skip checking and recomputing head in these individual produce calls
skipHeadChecksAndUpdate: true,
}).catch((e) => {
- logger.error("produceFullBlockOrContents failed to produce block", {slot}, e);
+ logger.error("produceBuilderBlindedBlock failed to produce block", {slot}, e);
return null;
})
: null;
- let blindedBlock, fullBlock;
- if (blindedBlockPromise !== null && fullBlockPromise !== null) {
- // reference index of promises in the race
- const promisesOrder = [ProducedBlockSource.builder, ProducedBlockSource.engine];
- [blindedBlock, fullBlock] = await racePromisesWithCutoff<
- routes.validator.ProduceBlockOrContentsRes | routes.validator.ProduceBlindedBlockOrContentsRes | null
- >(
- [blindedBlockPromise, fullBlockPromise],
- BLOCK_PRODUCTION_RACE_CUTOFF_MS,
- BLOCK_PRODUCTION_RACE_TIMEOUT_MS,
- // Callback to log the race events for better debugging capability
- (event: RaceEvent, delayMs: number, index?: number) => {
- const eventRef = index !== undefined ? {source: promisesOrder[index]} : {};
- logger.verbose("Block production race (builder vs execution)", {
- event,
- ...eventRef,
- delayMs,
- cutoffMs: BLOCK_PRODUCTION_RACE_CUTOFF_MS,
- timeoutMs: BLOCK_PRODUCTION_RACE_TIMEOUT_MS,
- slot,
- });
+ const fullBlockPromise =
+ // At any point either the builder or execution or both flows should be active.
+ //
+ // Ideally such a scenario should be prevented on startup, but proposerSettingsFile or keymanager
+ // configurations could cause a validator pubkey to have builder disabled with builder selection builder only
+ // (TODO: independently make sure such an options update is not successful for a validator pubkey)
+ //
+ // So if builder is disabled ignore builder selection of builderonly if caused by user mistake
+ !isBuilderEnabled || builderSelection !== routes.validator.BuilderSelection.BuilderOnly
+ ? // TODO deneb: builderSelection needs to be figured out if to be done beacon side
+ // || builderSelection !== BuilderSelection.BuilderOnly
+ produceEngineFullBlockOrContents(slot, randaoReveal, graffiti, {
+ feeRecipient,
+ strictFeeRecipientCheck,
+ // skip checking and recomputing head in these individual produce calls
+ skipHeadChecksAndUpdate: true,
+ }).catch((e) => {
+ logger.error("produceEngineFullBlockOrContents failed to produce block", {slot}, e);
+ return null;
+ })
+ : null;
+
+ let blindedBlock, fullBlock;
+ if (blindedBlockPromise !== null && fullBlockPromise !== null) {
+ // reference index of promises in the race
+ const promisesOrder = [ProducedBlockSource.builder, ProducedBlockSource.engine];
+ [blindedBlock, fullBlock] = await racePromisesWithCutoff<
+ routes.validator.ProduceBlockOrContentsRes | routes.validator.ProduceBlindedBlockRes | null
+ >(
+ [blindedBlockPromise, fullBlockPromise],
+ BLOCK_PRODUCTION_RACE_CUTOFF_MS,
+ BLOCK_PRODUCTION_RACE_TIMEOUT_MS,
+ // Callback to log the race events for better debugging capability
+ (event: RaceEvent, delayMs: number, index?: number) => {
+ const eventRef = index !== undefined ? {source: promisesOrder[index]} : {};
+ logger.verbose("Block production race (builder vs execution)", {
+ event,
+ ...eventRef,
+ delayMs,
+ cutoffMs: BLOCK_PRODUCTION_RACE_CUTOFF_MS,
+ timeoutMs: BLOCK_PRODUCTION_RACE_TIMEOUT_MS,
+ slot,
+ });
+ }
+ );
+ if (blindedBlock instanceof Error) {
+ // error here means race cutoff exceeded
+ logger.error("Failed to produce builder block", {slot}, blindedBlock);
+ blindedBlock = null;
}
- );
- if (blindedBlock instanceof Error) {
- // error here means race cutoff exceeded
- logger.error("Failed to produce builder block", {slot}, blindedBlock);
- blindedBlock = null;
- }
- if (fullBlock instanceof Error) {
- logger.error("Failed to produce execution block", {slot}, fullBlock);
+ if (fullBlock instanceof Error) {
+ logger.error("Failed to produce execution block", {slot}, fullBlock);
+ fullBlock = null;
+ }
+ } else if (blindedBlockPromise !== null && fullBlockPromise === null) {
+ blindedBlock = await blindedBlockPromise;
fullBlock = null;
+ } else if (blindedBlockPromise === null && fullBlockPromise !== null) {
+ blindedBlock = null;
+ fullBlock = await fullBlockPromise;
+ } else {
+ throw Error(
+ `Internal Error: Neither builder nor execution proposal flow activated isBuilderEnabled=${isBuilderEnabled} builderSelection=${builderSelection}`
+ );
}
- } else if (blindedBlockPromise !== null && fullBlockPromise === null) {
- blindedBlock = await blindedBlockPromise;
- fullBlock = null;
- } else if (blindedBlockPromise === null && fullBlockPromise !== null) {
- blindedBlock = null;
- fullBlock = await fullBlockPromise;
- } else {
- throw Error(
- `Internal Error: Neither builder nor execution proposal flow activated isBuilderEnabled=${isBuilderEnabled} builderSelection=${builderSelection}`
- );
- }
- const builderPayloadValue = blindedBlock?.executionPayloadValue ?? BigInt(0);
- const enginePayloadValue = fullBlock?.executionPayloadValue ?? BigInt(0);
+ const builderPayloadValue = blindedBlock?.executionPayloadValue ?? BigInt(0);
+ const enginePayloadValue = fullBlock?.executionPayloadValue ?? BigInt(0);
+ const consensusBlockValueBuilder = blindedBlock?.consensusBlockValue ?? BigInt(0);
+ const consensusBlockValueEngine = fullBlock?.consensusBlockValue ?? BigInt(0);
+
+ const blockValueBuilder = builderPayloadValue + gweiToWei(consensusBlockValueBuilder); // Total block value is in wei
+ const blockValueEngine = enginePayloadValue + gweiToWei(consensusBlockValueEngine); // Total block value is in wei
- let selectedSource: ProducedBlockSource | null = null;
+ let executionPayloadSource: ProducedBlockSource | null = null;
- if (fullBlock && blindedBlock) {
- switch (builderSelection) {
- case routes.validator.BuilderSelection.MaxProfit: {
- // If executionPayloadValues are zero, than choose builder as most likely beacon didn't provide executionPayloadValue
- // and builder blocks are most likely thresholded by a min bid
- if (enginePayloadValue >= builderPayloadValue && enginePayloadValue !== BigInt(0)) {
- selectedSource = ProducedBlockSource.engine;
- } else {
- selectedSource = ProducedBlockSource.builder;
+ if (fullBlock && blindedBlock) {
+ switch (builderSelection) {
+ case routes.validator.BuilderSelection.MaxProfit: {
+ if (blockValueEngine >= blockValueBuilder) {
+ executionPayloadSource = ProducedBlockSource.engine;
+ } else {
+ executionPayloadSource = ProducedBlockSource.builder;
+ }
+ break;
}
- break;
- }
- case routes.validator.BuilderSelection.ExecutionOnly: {
- selectedSource = ProducedBlockSource.engine;
- break;
- }
+ case routes.validator.BuilderSelection.ExecutionOnly: {
+ executionPayloadSource = ProducedBlockSource.engine;
+ break;
+ }
- // For everything else just select the builder
- default: {
- selectedSource = ProducedBlockSource.builder;
+ // For everything else just select the builder
+ default: {
+ executionPayloadSource = ProducedBlockSource.builder;
+ }
}
+ logger.verbose(`Selected executionPayloadSource=${executionPayloadSource} block`, {
+ builderSelection,
+ // winston logger doesn't like bigint
+ enginePayloadValue: `${enginePayloadValue}`,
+ builderPayloadValue: `${builderPayloadValue}`,
+ consensusBlockValueEngine: `${consensusBlockValueEngine}`,
+ consensusBlockValueBuilder: `${consensusBlockValueBuilder}`,
+ blockValueEngine: `${blockValueEngine}`,
+ blockValueBuilder: `${blockValueBuilder}`,
+ slot,
+ });
+ } else if (fullBlock && !blindedBlock) {
+ executionPayloadSource = ProducedBlockSource.engine;
+ logger.verbose("Selected engine block: no builder block produced", {
+ // winston logger doesn't like bigint
+ enginePayloadValue: `${enginePayloadValue}`,
+ consensusBlockValueEngine: `${consensusBlockValueEngine}`,
+ blockValueEngine: `${blockValueEngine}`,
+ slot,
+ });
+ } else if (blindedBlock && !fullBlock) {
+ executionPayloadSource = ProducedBlockSource.builder;
+ logger.verbose("Selected builder block: no engine block produced", {
+ // winston logger doesn't like bigint
+ builderPayloadValue: `${builderPayloadValue}`,
+ consensusBlockValueBuilder: `${consensusBlockValueBuilder}`,
+ blockValueBuilder: `${blockValueBuilder}`,
+ slot,
+ });
}
- logger.verbose(`Selected ${selectedSource} block`, {
- builderSelection,
- // winston logger doesn't like bigint
- enginePayloadValue: `${enginePayloadValue}`,
- builderPayloadValue: `${builderPayloadValue}`,
- slot,
- });
- } else if (fullBlock && !blindedBlock) {
- selectedSource = ProducedBlockSource.engine;
- logger.verbose("Selected engine block: no builder block produced", {
- // winston logger doesn't like bigint
- enginePayloadValue: `${enginePayloadValue}`,
- slot,
- });
- } else if (blindedBlock && !fullBlock) {
- selectedSource = ProducedBlockSource.builder;
- logger.verbose("Selected builder block: no engine block produced", {
- // winston logger doesn't like bigint
- builderPayloadValue: `${builderPayloadValue}`,
- slot,
- });
- }
- if (selectedSource === null) {
- throw Error(`Failed to produce engine or builder block for slot=${slot}`);
- }
+ if (executionPayloadSource === null) {
+ throw Error(`Failed to produce engine or builder block for slot=${slot}`);
+ }
- if (selectedSource === ProducedBlockSource.engine) {
- return {...fullBlock, executionPayloadBlinded: false} as routes.validator.ProduceBlockOrContentsRes & {
- executionPayloadBlinded: false;
- };
- } else {
- return {...blindedBlock, executionPayloadBlinded: true} as routes.validator.ProduceBlindedBlockOrContentsRes & {
- executionPayloadBlinded: true;
- };
- }
- };
+ if (executionPayloadSource === ProducedBlockSource.engine) {
+ return {
+ ...fullBlock,
+ executionPayloadBlinded: false,
+ executionPayloadSource,
+ } as routes.validator.ProduceBlockOrContentsRes & {
+ executionPayloadBlinded: false;
+ executionPayloadSource: ProducedBlockSource;
+ };
+ } else {
+ return {
+ ...blindedBlock,
+ executionPayloadBlinded: true,
+ executionPayloadSource,
+ } as routes.validator.ProduceBlindedBlockRes & {
+ executionPayloadBlinded: true;
+ executionPayloadSource: ProducedBlockSource;
+ };
+ }
+ };
const produceBlock: ServerApi<routes.validator.Api>["produceBlock"] = async function produceBlock(
slot,
randaoReveal,
graffiti
) {
- const producedData = await produceFullBlockOrContents(slot, randaoReveal, graffiti);
+ const producedData = await produceEngineFullBlockOrContents(slot, randaoReveal, graffiti);
if (isForkBlobs(producedData.version)) {
throw Error(`Invalid call to produceBlock for deneb+ fork=${producedData.version}`);
} else {
@@ -604,45 +630,85 @@ export function getValidatorApi({
}
};
- const produceBlindedBlock: ServerApi["produceBlindedBlock"] =
- async function produceBlindedBlock(slot, randaoReveal, graffiti) {
- const producedData = await produceBlockV3(slot, randaoReveal, graffiti);
- let blindedProducedData: routes.validator.ProduceBlindedBlockOrContentsRes;
-
- if (isForkBlobs(producedData.version)) {
- if (isBlindedBlockContents(producedData.data as allForks.FullOrBlindedBlockContents)) {
- blindedProducedData = producedData as routes.validator.ProduceBlindedBlockOrContentsRes;
- } else {
- //
- const {block, blobSidecars} = producedData.data as allForks.BlockContents;
- const blindedBlock = beaconBlockToBlinded(config, block as allForks.AllForksExecution["BeaconBlock"]);
- const blindedBlobSidecars = blobSidecarsToBlinded(blobSidecars);
+ const produceEngineOrBuilderBlindedBlock: ServerApi<routes.validator.Api>["produceBlindedBlock"] =
+ async function produceEngineOrBuilderBlindedBlock(slot, randaoReveal, graffiti) {
+ const {data, executionPayloadValue, consensusBlockValue, version} = await produceEngineOrBuilderBlock(
+ slot,
+ randaoReveal,
+ graffiti
+ );
+ if (!isForkExecution(version)) {
+ throw Error(`Invalid fork=${version} for produceEngineOrBuilderBlindedBlock`);
+ }
+ const executionPayloadBlinded = true;
+
+ if (isBlockContents(data)) {
+ const {block} = data;
+ const blindedBlock = beaconBlockToBlinded(config, block as allForks.AllForksExecution["BeaconBlock"]);
+ return {executionPayloadValue, consensusBlockValue, data: blindedBlock, executionPayloadBlinded, version};
+ } else if (isBlindedBeaconBlock(data)) {
+ return {executionPayloadValue, consensusBlockValue, data, executionPayloadBlinded, version};
+ } else {
+ const blindedBlock = beaconBlockToBlinded(config, data as allForks.AllForksExecution["BeaconBlock"]);
+ return {executionPayloadValue, consensusBlockValue, data: blindedBlock, executionPayloadBlinded, version};
+ }
+ };
- blindedProducedData = {
- ...producedData,
- data: {blindedBlock, blindedBlobSidecars},
- } as routes.validator.ProduceBlindedBlockOrContentsRes;
- }
+ const produceBlockV3: ServerApi<routes.validator.Api>["produceBlockV3"] = async function produceBlockV3(
+ slot,
+ randaoReveal,
+ graffiti,
+ skipRandaoVerification?: boolean,
+ opts: routes.validator.ExtraProduceBlockOps = {}
+ ) {
+ const produceBlockEngineOrBuilderRes = await produceEngineOrBuilderBlock(
+ slot,
+ randaoReveal,
+ graffiti,
+ skipRandaoVerification,
+ opts
+ );
+
+ if (opts.blindedLocal === true && ForkSeq[produceBlockEngineOrBuilderRes.version] >= ForkSeq.bellatrix) {
+ if (produceBlockEngineOrBuilderRes.executionPayloadBlinded) {
+ return produceBlockEngineOrBuilderRes;
} else {
- if (isBlindedBeaconBlock(producedData.data)) {
- blindedProducedData = producedData as routes.validator.ProduceBlindedBlockOrContentsRes;
- } else {
- const block = producedData.data;
+ if (isBlockContents(produceBlockEngineOrBuilderRes.data)) {
+ const {block} = produceBlockEngineOrBuilderRes.data;
const blindedBlock = beaconBlockToBlinded(config, block as allForks.AllForksExecution["BeaconBlock"]);
- blindedProducedData = {
- ...producedData,
+ return {
+ ...produceBlockEngineOrBuilderRes,
data: blindedBlock,
- } as routes.validator.ProduceBlindedBlockOrContentsRes;
+ executionPayloadBlinded: true,
+ } as routes.validator.ProduceBlindedBlockRes & {
+ executionPayloadBlinded: true;
+ executionPayloadSource: ProducedBlockSource;
+ };
+ } else {
+ const blindedBlock = beaconBlockToBlinded(
+ config,
+ produceBlockEngineOrBuilderRes.data as allForks.AllForksExecution["BeaconBlock"]
+ );
+ return {
+ ...produceBlockEngineOrBuilderRes,
+ data: blindedBlock,
+ executionPayloadBlinded: true,
+ } as routes.validator.ProduceBlindedBlockRes & {
+ executionPayloadBlinded: true;
+ executionPayloadSource: ProducedBlockSource;
+ };
}
}
- return blindedProducedData;
- };
+ } else {
+ return produceBlockEngineOrBuilderRes;
+ }
+ };
return {
produceBlock,
- produceBlockV2: produceFullBlockOrContents,
+ produceBlockV2: produceEngineFullBlockOrContents,
produceBlockV3,
- produceBlindedBlock,
+ produceBlindedBlock: produceEngineOrBuilderBlindedBlock,
async produceAttestationData(committeeIndex, slot) {
notWhileSyncing();
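Since the MaxProfit branch above now compares total block value (execution payload value in wei plus the consensus reward in gwei converted to wei), here is a minimal sketch of that comparison. It assumes gweiToWei is a plain bigint multiply by 10^9 (1 gwei = 10^9 wei); names are illustrative, not the actual exports:

```ts
// Sketch: pick the more profitable payload source by total block value in wei.
// Assumes gweiToWei(gwei) = gwei * 10^9, since 1 gwei = 10^9 wei.
const WEI_PER_GWEI = BigInt(1_000_000_000);

function gweiToWei(gwei: bigint): bigint {
  return gwei * WEI_PER_GWEI;
}

function selectMaxProfitSource(
  enginePayloadValueWei: bigint,
  builderPayloadValueWei: bigint,
  consensusValueEngineGwei: bigint,
  consensusValueBuilderGwei: bigint
): "engine" | "builder" {
  const blockValueEngine = enginePayloadValueWei + gweiToWei(consensusValueEngineGwei);
  const blockValueBuilder = builderPayloadValueWei + gweiToWei(consensusValueBuilderGwei);
  // Ties favor the local engine block, matching the >= comparison in the diff
  return blockValueEngine >= blockValueBuilder ? "engine" : "builder";
}
```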
diff --git a/packages/beacon-node/src/api/rest/activeSockets.ts b/packages/beacon-node/src/api/rest/activeSockets.ts
index ba8a35c80119..9f1b0f1a78a3 100644
--- a/packages/beacon-node/src/api/rest/activeSockets.ts
+++ b/packages/beacon-node/src/api/rest/activeSockets.ts
@@ -1,12 +1,11 @@
import http, {Server} from "node:http";
import {Socket} from "node:net";
-import {waitFor} from "@lodestar/utils";
-import {IGauge} from "../../metrics/index.js";
+import {Gauge, GaugeExtra, waitFor} from "@lodestar/utils";
export type SocketMetrics = {
- activeSockets: IGauge;
- socketsBytesRead: IGauge;
- socketsBytesWritten: IGauge;
+ activeSockets: GaugeExtra;
+ socketsBytesRead: Gauge;
+ socketsBytesWritten: Gauge;
};
// Use relatively short timeout to speed up shutdown
diff --git a/packages/beacon-node/src/api/rest/base.ts b/packages/beacon-node/src/api/rest/base.ts
index b9c86ff5f372..8a1f59ef3c27 100644
--- a/packages/beacon-node/src/api/rest/base.ts
+++ b/packages/beacon-node/src/api/rest/base.ts
@@ -3,9 +3,8 @@ import fastify, {FastifyBodyParser, FastifyContentTypeParser, FastifyInstance, F
import fastifyCors from "@fastify/cors";
import bearerAuthPlugin from "@fastify/bearer-auth";
import {RouteConfig} from "@lodestar/api/beacon/server";
-import {ErrorAborted, Logger} from "@lodestar/utils";
+import {ErrorAborted, Gauge, Histogram, Logger} from "@lodestar/utils";
import {isLocalhostIP} from "../../util/ip.js";
-import {IGauge, IHistogram} from "../../metrics/index.js";
import {ApiError, NodeIsSyncing} from "../impl/errors.js";
import {HttpActiveSocketsTracker, SocketMetrics} from "./activeSockets.js";
@@ -25,9 +24,9 @@ export type RestApiServerModules = {
};
export type RestApiServerMetrics = SocketMetrics & {
- requests: IGauge<"operationId">;
- responseTime: IHistogram<"operationId">;
- errors: IGauge<"operationId">;
+ requests: Gauge<{operationId: string}>;
+ responseTime: Histogram<{operationId: string}>;
+ errors: Gauge<{operationId: string}>;
};
/**
@@ -98,6 +97,11 @@ export class RestApiServer {
metrics?.requests.inc({operationId});
});
+ server.addHook("preHandler", async (req, _res) => {
+ const {operationId} = req.routeConfig as RouteConfig;
+ this.logger.debug(`Exec ${req.id as string} ${req.ip} ${operationId}`);
+ });
+
// Log after response
server.addHook("onResponse", async (req, res) => {
const {operationId} = req.routeConfig as RouteConfig;
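For context on the hook ordering above: fastify's preHandler runs after parsing and validation but before the route handler, so the debug line fires once per request that reached routing. A standalone sketch with a stand-in logger (the operationId lookup via routeConfig is elided here):

```ts
import fastify from "fastify";

// Stand-in for the node's Logger; only the call shape matters for this sketch
const logger = {debug: (msg: string): void => console.log(msg)};

const server = fastify();

// Runs before each route handler; mirrors the hook added in the diff
server.addHook("preHandler", async (req) => {
  logger.debug(`Exec ${String(req.id)} ${req.ip} ${req.url}`);
});
```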
diff --git a/packages/beacon-node/src/chain/blocks/importBlock.ts b/packages/beacon-node/src/chain/blocks/importBlock.ts
index feaddfbad39d..12b43359fa4e 100644
--- a/packages/beacon-node/src/chain/blocks/importBlock.ts
+++ b/packages/beacon-node/src/chain/blocks/importBlock.ts
@@ -7,7 +7,6 @@ import {
computeStartSlotAtEpoch,
isStateValidatorsNodesPopulated,
RootCache,
- kzgCommitmentToVersionedHash,
} from "@lodestar/state-transition";
import {routes} from "@lodestar/api";
import {ForkChoiceError, ForkChoiceErrorCode, EpochDifference, AncestorStatus} from "@lodestar/fork-choice";
@@ -16,6 +15,7 @@ import {ZERO_HASH_HEX} from "../../constants/index.js";
import {toCheckpointHex} from "../stateCache/index.js";
import {isOptimisticBlock} from "../../util/forkChoice.js";
import {isQueueErrorAborted} from "../../util/queue/index.js";
+import {kzgCommitmentToVersionedHash} from "../../util/blobs.js";
import {ChainEvent, ReorgEventData} from "../emitter.js";
import {REPROCESS_MIN_TIME_TO_NEXT_SLOT_SEC} from "../reprocess.js";
import type {BeaconChain} from "../chain.js";
diff --git a/packages/beacon-node/src/chain/blocks/index.ts b/packages/beacon-node/src/chain/blocks/index.ts
index 569fd0771022..8f4c7fa5f0f1 100644
--- a/packages/beacon-node/src/chain/blocks/index.ts
+++ b/packages/beacon-node/src/chain/blocks/index.ts
@@ -58,11 +58,7 @@ export async function processBlocks(
}
try {
- const {relevantBlocks, dataAvailabilityStatuses, parentSlots, parentBlock} = verifyBlocksSanityChecks(
- this,
- blocks,
- opts
- );
+ const {relevantBlocks, parentSlots, parentBlock} = verifyBlocksSanityChecks(this, blocks, opts);
// No relevant blocks, skip verifyBlocksInEpoch()
if (relevantBlocks.length === 0 || parentBlock === null) {
@@ -72,13 +68,8 @@ export async function processBlocks(
// Fully verify a block to be imported immediately after. Does not produce any side-effects besides adding intermediate
// states in the state cache through regen.
- const {postStates, proposerBalanceDeltas, segmentExecStatus} = await verifyBlocksInEpoch.call(
- this,
- parentBlock,
- relevantBlocks,
- dataAvailabilityStatuses,
- opts
- );
+ const {postStates, dataAvailabilityStatuses, proposerBalanceDeltas, segmentExecStatus} =
+ await verifyBlocksInEpoch.call(this, parentBlock, relevantBlocks, opts);
// If segmentExecStatus has lvhForkchoice then, the entire segment should be invalid
// and we need to further propagate
diff --git a/packages/beacon-node/src/chain/blocks/types.ts b/packages/beacon-node/src/chain/blocks/types.ts
index 5f1ac8833578..aff5a64c9929 100644
--- a/packages/beacon-node/src/chain/blocks/types.ts
+++ b/packages/beacon-node/src/chain/blocks/types.ts
@@ -1,14 +1,13 @@
-import {toHexString} from "@chainsafe/ssz";
import {CachedBeaconStateAllForks, computeEpochAtSlot, DataAvailableStatus} from "@lodestar/state-transition";
import {MaybeValidExecutionStatus} from "@lodestar/fork-choice";
-import {allForks, deneb, Slot, RootHex} from "@lodestar/types";
+import {allForks, deneb, Slot} from "@lodestar/types";
import {ForkSeq, MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS} from "@lodestar/params";
import {ChainForkConfig} from "@lodestar/config";
-import {pruneSetToMax} from "@lodestar/utils";
export enum BlockInputType {
preDeneb = "preDeneb",
postDeneb = "postDeneb",
+ blobsPromise = "blobsPromise",
}
/** Enum to represent where blocks come from */
@@ -19,9 +18,18 @@ export enum BlockSource {
byRoot = "req_resp_by_root",
}
+export enum GossipedInputType {
+ block = "block",
+ blob = "blob",
+}
+
+export type BlobsCache = Map<number, {blobSidecar: deneb.BlobSidecar; blobBytes: Uint8Array | null}>;
+export type BlockInputBlobs = {blobs: deneb.BlobSidecars; blobsBytes: (Uint8Array | null)[]};
+
export type BlockInput = {block: allForks.SignedBeaconBlock; source: BlockSource; blockBytes: Uint8Array | null} & (
| {type: BlockInputType.preDeneb}
- | {type: BlockInputType.postDeneb; blobs: deneb.BlobSidecars; blobsBytes: (Uint8Array | null)[]}
+ | ({type: BlockInputType.postDeneb} & BlockInputBlobs)
+ | {type: BlockInputType.blobsPromise; blobsCache: BlobsCache; availabilityPromise: Promise<BlockInputBlobs>}
);
export function blockRequiresBlobs(config: ChainForkConfig, blockSlot: Slot, clockSlot: Slot): boolean {
@@ -32,125 +40,7 @@ export function blockRequiresBlobs(config: ChainForkConfig, blockSlot: Slot, clo
);
}
-export enum GossipedInputType {
- block = "block",
- blob = "blob",
-}
-type GossipedBlockInput =
- | {type: GossipedInputType.block; signedBlock: allForks.SignedBeaconBlock; blockBytes: Uint8Array | null}
- | {type: GossipedInputType.blob; signedBlob: deneb.SignedBlobSidecar; blobBytes: Uint8Array | null};
-type BlockInputCacheType = {
- block?: allForks.SignedBeaconBlock;
- blockBytes?: Uint8Array | null;
- blobs: Map;
- blobsBytes: Map;
-};
-
-const MAX_GOSSIPINPUT_CACHE = 5;
-// ssz.deneb.BlobSidecars.elementType.fixedSize;
-const BLOBSIDECAR_FIXED_SIZE = 131256;
-
export const getBlockInput = {
- blockInputCache: new Map(),
-
- getGossipBlockInput(
- config: ChainForkConfig,
- gossipedInput: GossipedBlockInput
- ):
- | {blockInput: BlockInput; blockInputMeta: {pending: null; haveBlobs: number; expectedBlobs: number}}
- | {blockInput: null; blockInputMeta: {pending: GossipedInputType.block; haveBlobs: number; expectedBlobs: null}}
- | {blockInput: null; blockInputMeta: {pending: GossipedInputType.blob; haveBlobs: number; expectedBlobs: number}} {
- let blockHex;
- let blockCache;
-
- if (gossipedInput.type === GossipedInputType.block) {
- const {signedBlock, blockBytes} = gossipedInput;
-
- blockHex = toHexString(
- config.getForkTypes(signedBlock.message.slot).BeaconBlock.hashTreeRoot(signedBlock.message)
- );
- blockCache = this.blockInputCache.get(blockHex) ?? {
- blobs: new Map(),
- blobsBytes: new Map(),
- };
-
- blockCache.block = signedBlock;
- blockCache.blockBytes = blockBytes;
- } else {
- const {signedBlob, blobBytes} = gossipedInput;
- blockHex = toHexString(signedBlob.message.blockRoot);
- blockCache = this.blockInputCache.get(blockHex);
-
- // If a new entry is going to be inserted, prune out old ones
- if (blockCache === undefined) {
- pruneSetToMax(this.blockInputCache, MAX_GOSSIPINPUT_CACHE);
- blockCache = {blobs: new Map(), blobsBytes: new Map()};
- }
-
- // TODO: freetheblobs check if its the same blob or a duplicate and throw/take actions
- blockCache.blobs.set(signedBlob.message.index, signedBlob.message);
- // easily splice out the unsigned message as blob is a fixed length type
- blockCache.blobsBytes.set(signedBlob.message.index, blobBytes?.slice(0, BLOBSIDECAR_FIXED_SIZE) ?? null);
- }
-
- this.blockInputCache.set(blockHex, blockCache);
- const {block: signedBlock, blockBytes} = blockCache;
-
- if (signedBlock !== undefined) {
- // block is available, check if all blobs have shown up
- const {slot, body} = signedBlock.message;
- const {blobKzgCommitments} = body as deneb.BeaconBlockBody;
- const blockInfo = `blockHex=${blockHex}, slot=${slot}`;
-
- if (blobKzgCommitments.length < blockCache.blobs.size) {
- throw Error(
- `Received more blobs=${blockCache.blobs.size} than commitments=${blobKzgCommitments.length} for ${blockInfo}`
- );
- }
- if (blobKzgCommitments.length === blockCache.blobs.size) {
- const blobSidecars = [];
- const blobsBytes = [];
-
- for (let index = 0; index < blobKzgCommitments.length; index++) {
- const blobSidecar = blockCache.blobs.get(index);
- if (blobSidecar === undefined) {
- throw Error(`Missing blobSidecar at index=${index} for ${blockInfo}`);
- }
- blobSidecars.push(blobSidecar);
- blobsBytes.push(blockCache.blobsBytes.get(index) ?? null);
- }
-
- return {
- // TODO freetheblobs: collate and add serialized data for the postDeneb blockinput
- blockInput: getBlockInput.postDeneb(
- config,
- signedBlock,
- BlockSource.gossip,
- blobSidecars,
- blockBytes ?? null,
- blobsBytes
- ),
- blockInputMeta: {pending: null, haveBlobs: blockCache.blobs.size, expectedBlobs: blobKzgCommitments.length},
- };
- } else {
- return {
- blockInput: null,
- blockInputMeta: {
- pending: GossipedInputType.blob,
- haveBlobs: blockCache.blobs.size,
- expectedBlobs: blobKzgCommitments.length,
- },
- };
- }
- } else {
- // will need to wait for the block to showup
- return {
- blockInput: null,
- blockInputMeta: {pending: GossipedInputType.block, haveBlobs: blockCache.blobs.size, expectedBlobs: null},
- };
- }
- },
-
preDeneb(
config: ChainForkConfig,
block: allForks.SignedBeaconBlock,
@@ -188,6 +78,27 @@ export const getBlockInput = {
blobsBytes,
};
},
+
+ blobsPromise(
+ config: ChainForkConfig,
+ block: allForks.SignedBeaconBlock,
+ source: BlockSource,
+ blobsCache: BlobsCache,
+ blockBytes: Uint8Array | null,
+ availabilityPromise: Promise<BlockInputBlobs>
+ ): BlockInput {
+ if (config.getForkSeq(block.message.slot) < ForkSeq.deneb) {
+ throw Error(`Pre Deneb block slot ${block.message.slot}`);
+ }
+ return {
+ type: BlockInputType.blobsPromise,
+ block,
+ source,
+ blobsCache,
+ blockBytes,
+ availabilityPromise,
+ };
+ },
};
export enum AttestationImportOpt {
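The BlockInput union above is discriminated on `type`, so downstream code can branch safely and only await blobs for the blobsPromise variant. A reduced sketch of that narrowing (placeholder types, not the real lodestar ones):

```ts
// Placeholder shapes standing in for the real lodestar types
enum InputType {
  preDeneb = "preDeneb",
  postDeneb = "postDeneb",
  blobsPromise = "blobsPromise",
}
type Blobs = {blobs: unknown[]};
type Input =
  | {type: InputType.preDeneb}
  | ({type: InputType.postDeneb} & Blobs)
  | {type: InputType.blobsPromise; availabilityPromise: Promise<Blobs>};

async function resolveBlobs(input: Input): Promise<Blobs | null> {
  switch (input.type) {
    case InputType.preDeneb:
      return null; // no blobs exist before deneb
    case InputType.postDeneb:
      return input; // blobs arrived with the block
    case InputType.blobsPromise:
      return input.availabilityPromise; // resolves once all gossiped blobs are seen
  }
}
```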
diff --git a/packages/beacon-node/src/chain/blocks/verifyBlock.ts b/packages/beacon-node/src/chain/blocks/verifyBlock.ts
index 72db1d801b48..94a42a39a6ae 100644
--- a/packages/beacon-node/src/chain/blocks/verifyBlock.ts
+++ b/packages/beacon-node/src/chain/blocks/verifyBlock.ts
@@ -5,7 +5,7 @@ import {
isStateValidatorsNodesPopulated,
DataAvailableStatus,
} from "@lodestar/state-transition";
-import {bellatrix} from "@lodestar/types";
+import {bellatrix, deneb} from "@lodestar/types";
import {ForkName} from "@lodestar/params";
import {ProtoBlock, ExecutionStatus} from "@lodestar/fork-choice";
import {ChainForkConfig} from "@lodestar/config";
@@ -14,13 +14,14 @@ import {BlockError, BlockErrorCode} from "../errors/index.js";
import {BlockProcessOpts} from "../options.js";
import {RegenCaller} from "../regen/index.js";
import type {BeaconChain} from "../chain.js";
-import {BlockInput, ImportBlockOpts} from "./types.js";
+import {BlockInput, ImportBlockOpts, BlockInputType} from "./types.js";
import {POS_PANDA_MERGE_TRANSITION_BANNER} from "./utils/pandaMergeTransitionBanner.js";
import {CAPELLA_OWL_BANNER} from "./utils/ownBanner.js";
import {DENEB_BLOWFISH_BANNER} from "./utils/blowfishBanner.js";
import {verifyBlocksStateTransitionOnly} from "./verifyBlocksStateTransitionOnly.js";
import {verifyBlocksSignatures} from "./verifyBlocksSignatures.js";
import {verifyBlocksExecutionPayload, SegmentExecStatus} from "./verifyBlocksExecutionPayloads.js";
+import {verifyBlocksDataAvailability} from "./verifyBlocksDataAvailability.js";
import {writeBlockInputToDb} from "./writeBlockInputToDb.js";
/**
@@ -38,12 +39,12 @@ export async function verifyBlocksInEpoch(
this: BeaconChain,
parentBlock: ProtoBlock,
blocksInput: BlockInput[],
- dataAvailabilityStatuses: DataAvailableStatus[],
opts: BlockProcessOpts & ImportBlockOpts
): Promise<{
postStates: CachedBeaconStateAllForks[];
proposerBalanceDeltas: number[];
segmentExecStatus: SegmentExecStatus;
+ dataAvailabilityStatuses: DataAvailableStatus[];
}> {
const blocks = blocksInput.map(({block}) => block);
if (blocks.length === 0) {
@@ -88,7 +89,12 @@ export async function verifyBlocksInEpoch(
try {
// batch all I/O operations to reduce overhead
- const [segmentExecStatus, {postStates, proposerBalanceDeltas}] = await Promise.all([
+ const [
+ segmentExecStatus,
+ {dataAvailabilityStatuses, availableTime},
+ {postStates, proposerBalanceDeltas, verifyStateTime},
+ {verifySignaturesTime},
+ ] = await Promise.all([
// Execution payloads
opts.skipVerifyExecutionPayload !== true
? verifyBlocksExecutionPayload(this, parentBlock, blocks, preState0, abortController.signal, opts)
@@ -98,12 +104,16 @@ export async function verifyBlocksInEpoch(
mergeBlockFound: null,
} as SegmentExecStatus),
+ // data availability for the blobs
+ verifyBlocksDataAvailability(this, blocksInput, opts),
+
// Run state transition only
// TODO: Ensure it yields to allow flushing to workers and engine API
verifyBlocksStateTransitionOnly(
preState0,
blocksInput,
- dataAvailabilityStatuses,
+ // Availability is determined separately (see verifyBlocksDataAvailability), so mark it available for the state transition here
+ blocks.map(() => DataAvailableStatus.available),
this.logger,
this.metrics,
abortController.signal,
@@ -113,7 +123,7 @@ export async function verifyBlocksInEpoch(
// All signatures at once
opts.skipVerifyBlockSignatures !== true
? verifyBlocksSignatures(this.bls, this.logger, this.metrics, preState0, blocks, opts)
- : Promise.resolve(),
+ : Promise.resolve({verifySignaturesTime: Date.now()}),
// ideally we want to only persist blocks after verifying them however the reality is there are
// rarely invalid blocks we'll batch all I/O operation here to reduce the overhead if there's
@@ -151,7 +161,35 @@ export async function verifyBlocksInEpoch(
}
}
- return {postStates, proposerBalanceDeltas, segmentExecStatus};
+ if (segmentExecStatus.execAborted === null) {
+ const {executionStatuses, executionTime} = segmentExecStatus;
+ if (
+ blocksInput.length === 1 &&
+ // gossip blocks have seenTimestampSec
+ opts.seenTimestampSec !== undefined &&
+ blocksInput[0].type !== BlockInputType.preDeneb &&
+ executionStatuses[0] === ExecutionStatus.Valid
+ ) {
+ // Find the max time when the block was actually verified
+ const fullyVerifiedTime = Math.max(executionTime, verifyStateTime, verifySignaturesTime);
+ const recvToFullyVerifiedTime = fullyVerifiedTime / 1000 - opts.seenTimestampSec;
+ this.metrics?.gossipBlock.receivedToFullyVerifiedTime.observe(recvToFullyVerifiedTime);
+
+ const verifiedToBlobsAvailabilityTime = Math.max(availableTime - fullyVerifiedTime, 0) / 1000;
+ const numBlobs = (blocksInput[0].block as deneb.SignedBeaconBlock).message.body.blobKzgCommitments.length;
+
+ this.metrics?.gossipBlock.verifiedToBlobsAvailabilityTime.observe({numBlobs}, verifiedToBlobsAvailabilityTime);
+ this.logger.verbose("Verified blockInput fully with blobs availability", {
+ slot: blocksInput[0].block.message.slot,
+ recvToFullyVerifiedTime,
+ verifiedToBlobsAvailabilityTime,
+ type: blocksInput[0].type,
+ numBlobs,
+ });
+ }
+ }
+
+ return {postStates, dataAvailabilityStatuses, proposerBalanceDeltas, segmentExecStatus};
} finally {
abortController.abort();
}
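The receivedToFullyVerifiedTime observation above takes the latest of the three parallel verification clocks before converting to seconds relative to the gossip seen-time. The arithmetic, isolated as a sketch:

```ts
// All *Time arguments are Date.now() values (ms); seenTimestampSec is unix seconds.
function receivedToFullyVerifiedSec(
  executionTime: number,
  verifyStateTime: number,
  verifySignaturesTime: number,
  seenTimestampSec: number
): number {
  // A block is fully verified only when the slowest of the three checks finishes
  const fullyVerifiedTime = Math.max(executionTime, verifyStateTime, verifySignaturesTime);
  return fullyVerifiedTime / 1000 - seenTimestampSec;
}
```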
diff --git a/packages/beacon-node/src/chain/blocks/verifyBlocksDataAvailability.ts b/packages/beacon-node/src/chain/blocks/verifyBlocksDataAvailability.ts
new file mode 100644
index 000000000000..9c45469d56dd
--- /dev/null
+++ b/packages/beacon-node/src/chain/blocks/verifyBlocksDataAvailability.ts
@@ -0,0 +1,126 @@
+import {computeTimeAtSlot, DataAvailableStatus} from "@lodestar/state-transition";
+import {ChainForkConfig} from "@lodestar/config";
+import {deneb, UintNum64} from "@lodestar/types";
+import {Logger} from "@lodestar/utils";
+import {BlockError, BlockErrorCode} from "../errors/index.js";
+import {validateBlobSidecars} from "../validation/blobSidecar.js";
+import {Metrics} from "../../metrics/metrics.js";
+import {BlockInput, BlockInputType, ImportBlockOpts, BlobSidecarValidation} from "./types.js";
+
+// Proposer boost is not available after 3 sec into the slot, so wait at most 3 sec for blobs,
+// then throw the availability error so that an unknown block/blobs pull (by root) can be tried
+const BLOB_AVAILABILITY_TIMEOUT = 3_000;
+
+/**
+ * Verifies data availability of the blobs referenced by the given blocks:
+ *
+ * - Pre-deneb blocks are trivially available
+ * - Post-deneb blocks with attached blobs are validated against the block's kzg commitments
+ * - Blocks still awaiting blobs are raced against a cutoff before validation
+ */
+export async function verifyBlocksDataAvailability(
+ chain: {config: ChainForkConfig; genesisTime: UintNum64; logger: Logger; metrics: Metrics | null},
+ blocks: BlockInput[],
+ opts: ImportBlockOpts
+): Promise<{dataAvailabilityStatuses: DataAvailableStatus[]; availableTime: number}> {
+ if (blocks.length === 0) {
+ throw Error("Empty partiallyVerifiedBlocks");
+ }
+
+ const dataAvailabilityStatuses: DataAvailableStatus[] = [];
+ const seenTime = opts.seenTimestampSec !== undefined ? opts.seenTimestampSec * 1000 : Date.now();
+
+ for (const blockInput of blocks) {
+ // Only validate the status of not-yet-finalized blocks; there is no need to propagate the
+ // status yet as it is not used upstream anywhere
+ const dataAvailabilityStatus = await maybeValidateBlobs(chain, blockInput, opts);
+ dataAvailabilityStatuses.push(dataAvailabilityStatus);
+ }
+
+ const availableTime = blocks[blocks.length - 1].type === BlockInputType.blobsPromise ? Date.now() : seenTime;
+ if (blocks.length === 1 && opts.seenTimestampSec !== undefined && blocks[0].type !== BlockInputType.preDeneb) {
+ const recvToAvailableTime = availableTime / 1000 - opts.seenTimestampSec;
+ const numBlobs = (blocks[0].block as deneb.SignedBeaconBlock).message.body.blobKzgCommitments.length;
+
+ chain.metrics?.gossipBlock.receivedToBlobsAvailabilityTime.observe({numBlobs}, recvToAvailableTime);
+ chain.logger.verbose("Verified blobs availability", {
+ slot: blocks[0].block.message.slot,
+ recvToAvailableTime,
+ type: blocks[0].type,
+ });
+ }
+
+ return {dataAvailabilityStatuses, availableTime};
+}
+
+async function maybeValidateBlobs(
+ chain: {config: ChainForkConfig; genesisTime: UintNum64},
+ blockInput: BlockInput,
+ opts: ImportBlockOpts
+): Promise<DataAvailableStatus> {
+ switch (blockInput.type) {
+ case BlockInputType.preDeneb:
+ return DataAvailableStatus.preDeneb;
+
+ case BlockInputType.postDeneb:
+ if (opts.validBlobSidecars === BlobSidecarValidation.Full) {
+ return DataAvailableStatus.available;
+ }
+
+ // eslint-disable-next-line no-fallthrough
+ case BlockInputType.blobsPromise: {
+ // run full validation
+ const {block} = blockInput;
+ const blockSlot = block.message.slot;
+
+ const blobsData =
+ blockInput.type === BlockInputType.postDeneb
+ ? blockInput
+ : await raceWithCutoff(chain, blockInput, blockInput.availabilityPromise);
+ const {blobs} = blobsData;
+
+ const {blobKzgCommitments} = (block as deneb.SignedBeaconBlock).message.body;
+ const beaconBlockRoot = chain.config.getForkTypes(blockSlot).BeaconBlock.hashTreeRoot(block.message);
+
+ // If the blob sidecars have been individually verified then we can skip the kzg proof check,
+ // but other checks to match blobs with block data still need to be performed
+ const skipProofsCheck = opts.validBlobSidecars === BlobSidecarValidation.Individual;
+ validateBlobSidecars(blockSlot, beaconBlockRoot, blobKzgCommitments, blobs, {skipProofsCheck});
+
+ return DataAvailableStatus.available;
+ }
+ }
+}
+
+/**
+ * Wait for blobs to become available with a cutoff time. If fails then throw DATA_UNAVAILABLE error
+ * which may try unknownblock/blobs fill (by root).
+ */
+async function raceWithCutoff<T>(
+ chain: {config: ChainForkConfig; genesisTime: UintNum64},
+ blockInput: BlockInput,
+ availabilityPromise: Promise<T>
+): Promise<T> {
+ const {block} = blockInput;
+ const blockSlot = block.message.slot;
+
+ const cutoffTime = Math.max(
+ computeTimeAtSlot(chain.config, blockSlot, chain.genesisTime) * 1000 + BLOB_AVAILABILITY_TIMEOUT - Date.now(),
+ 0
+ );
+ const cutoffTimeout = new Promise((_resolve, reject) => setTimeout(reject, cutoffTime));
+
+ try {
+ await Promise.race([availabilityPromise, cutoffTimeout]);
+ } catch (e) {
+ // Throw DATA_UNAVAILABLE so that an unknown block/blobs pull (by root) can be triggered
+ throw new BlockError(block, {code: BlockErrorCode.DATA_UNAVAILABLE});
+ }
+ // We can only reach here if availabilityPromise has resolved; otherwise an error was thrown above
+ return availabilityPromise;
+}
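The cutoff race in raceWithCutoff generalizes to any promise that must settle before a slot-relative deadline. A self-contained sketch of the pattern, with a generic Error standing in for BlockError and an added timer cleanup:

```ts
// Race a promise against a deadline; reject with a marker error once the cutoff passes.
async function raceWithDeadline<T>(promise: Promise<T>, cutoffMs: number): Promise<T> {
  let timer: ReturnType<typeof setTimeout> | undefined;
  const cutoff = new Promise<never>((_resolve, reject) => {
    timer = setTimeout(() => reject(new Error("DATA_UNAVAILABLE")), cutoffMs);
  });
  try {
    // Resolves with the data if it arrives in time, otherwise rejects at the cutoff
    return await Promise.race([promise, cutoff]);
  } finally {
    if (timer !== undefined) clearTimeout(timer);
  }
}
```

The caller derives cutoffMs from the slot start, e.g. max(slotStartMs + BLOB_AVAILABILITY_TIMEOUT - Date.now(), 0), exactly as computeTimeAtSlot is used above.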
diff --git a/packages/beacon-node/src/chain/blocks/verifyBlocksExecutionPayloads.ts b/packages/beacon-node/src/chain/blocks/verifyBlocksExecutionPayloads.ts
index 7f4edd14c618..5dbe104c9541 100644
--- a/packages/beacon-node/src/chain/blocks/verifyBlocksExecutionPayloads.ts
+++ b/packages/beacon-node/src/chain/blocks/verifyBlocksExecutionPayloads.ts
@@ -5,7 +5,6 @@ import {
isExecutionBlockBodyType,
isMergeTransitionBlock as isMergeTransitionBlockFn,
isExecutionEnabled,
- kzgCommitmentToVersionedHash,
} from "@lodestar/state-transition";
import {bellatrix, allForks, Slot, deneb} from "@lodestar/types";
import {
@@ -24,6 +23,7 @@ import {ForkSeq, SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY} from "@lodestar/params";
import {IExecutionEngine} from "../../execution/engine/interface.js";
import {BlockError, BlockErrorCode} from "../errors/index.js";
import {IClock} from "../../util/clock.js";
+import {kzgCommitmentToVersionedHash} from "../../util/blobs.js";
import {BlockProcessOpts} from "../options.js";
import {ExecutionPayloadStatus} from "../../execution/engine/interface.js";
import {IEth1ForBlockProduction} from "../../eth1/index.js";
@@ -45,6 +45,7 @@ export type SegmentExecStatus =
| {
execAborted: null;
executionStatuses: MaybeValidExecutionStatus[];
+ executionTime: number;
mergeBlockFound: bellatrix.BeaconBlock | null;
}
| {execAborted: ExecAbortType; invalidSegmentLVH?: LVHInvalidResponse; mergeBlockFound: null};
@@ -243,8 +244,9 @@ export async function verifyBlocksExecutionPayload(
}
}
- if (blocks.length === 1 && opts.seenTimestampSec !== undefined) {
- const recvToVerifiedExecPayload = Date.now() / 1000 - opts.seenTimestampSec;
+ const executionTime = Date.now();
+ if (blocks.length === 1 && opts.seenTimestampSec !== undefined && executionStatuses[0] === ExecutionStatus.Valid) {
+ const recvToVerifiedExecPayload = executionTime / 1000 - opts.seenTimestampSec;
chain.metrics?.gossipBlock.receivedToExecutionPayloadVerification.observe(recvToVerifiedExecPayload);
chain.logger.verbose("Verified execution payload", {
slot: blocks[0].message.slot,
@@ -255,6 +257,7 @@ export async function verifyBlocksExecutionPayload(
return {
execAborted: null,
executionStatuses,
+ executionTime,
mergeBlockFound,
};
}
diff --git a/packages/beacon-node/src/chain/blocks/verifyBlocksSanityChecks.ts b/packages/beacon-node/src/chain/blocks/verifyBlocksSanityChecks.ts
index 9fb7d04f1ed8..e62355a4889d 100644
--- a/packages/beacon-node/src/chain/blocks/verifyBlocksSanityChecks.ts
+++ b/packages/beacon-node/src/chain/blocks/verifyBlocksSanityChecks.ts
@@ -1,12 +1,11 @@
-import {computeStartSlotAtEpoch, DataAvailableStatus} from "@lodestar/state-transition";
+import {computeStartSlotAtEpoch} from "@lodestar/state-transition";
import {ChainForkConfig} from "@lodestar/config";
import {IForkChoice, ProtoBlock} from "@lodestar/fork-choice";
-import {Slot, deneb} from "@lodestar/types";
+import {Slot} from "@lodestar/types";
import {toHexString} from "@lodestar/utils";
import {IClock} from "../../util/clock.js";
import {BlockError, BlockErrorCode} from "../errors/index.js";
-import {validateBlobSidecars} from "../validation/blobSidecar.js";
-import {BlockInput, BlockInputType, ImportBlockOpts, BlobSidecarValidation} from "./types.js";
+import {BlockInput, ImportBlockOpts} from "./types.js";
/**
* Verifies some early cheap sanity checks on the block before running the full state transition.
@@ -26,7 +25,6 @@ export function verifyBlocksSanityChecks(
opts: ImportBlockOpts
): {
relevantBlocks: BlockInput[];
- dataAvailabilityStatuses: DataAvailableStatus[];
parentSlots: Slot[];
parentBlock: ProtoBlock | null;
} {
@@ -35,7 +33,6 @@ export function verifyBlocksSanityChecks(
}
const relevantBlocks: BlockInput[] = [];
- const dataAvailabilityStatuses: DataAvailableStatus[] = [];
const parentSlots: Slot[] = [];
let parentBlock: ProtoBlock | null = null;
@@ -64,10 +61,6 @@ export function verifyBlocksSanityChecks(
}
}
- // Validate status of only not yet finalized blocks, we don't need yet to propogate the status
- // as it is not used upstream anywhere
- const dataAvailabilityStatus = maybeValidateBlobs(chain.config, blockInput, opts);
-
let parentBlockSlot: Slot;
if (relevantBlocks.length > 0) {
@@ -105,7 +98,6 @@ export function verifyBlocksSanityChecks(
// Block is relevant
relevantBlocks.push(blockInput);
- dataAvailabilityStatuses.push(dataAvailabilityStatus);
parentSlots.push(parentBlockSlot);
}
@@ -115,35 +107,5 @@ export function verifyBlocksSanityChecks(
throw Error(`Internal error, parentBlock should not be null for relevantBlocks=${relevantBlocks.length}`);
}
- return {relevantBlocks, dataAvailabilityStatuses, parentSlots, parentBlock};
-}
-
-function maybeValidateBlobs(
- config: ChainForkConfig,
- blockInput: BlockInput,
- opts: ImportBlockOpts
-): DataAvailableStatus {
- switch (blockInput.type) {
- case BlockInputType.postDeneb: {
- if (opts.validBlobSidecars === BlobSidecarValidation.Full) {
- return DataAvailableStatus.available;
- }
-
- // run full validation
- const {block, blobs} = blockInput;
- const blockSlot = block.message.slot;
- const {blobKzgCommitments} = (block as deneb.SignedBeaconBlock).message.body;
- const beaconBlockRoot = config.getForkTypes(blockSlot).BeaconBlock.hashTreeRoot(block.message);
-
- // if the blob siddecars have been individually verified then we can skip kzg proof check
- // but other checks to match blobs with block data still need to be performed
- const skipProofsCheck = opts.validBlobSidecars === BlobSidecarValidation.Individual;
- validateBlobSidecars(blockSlot, beaconBlockRoot, blobKzgCommitments, blobs, {skipProofsCheck});
-
- return DataAvailableStatus.available;
- }
-
- case BlockInputType.preDeneb:
- return DataAvailableStatus.preDeneb;
- }
+ return {relevantBlocks, parentSlots, parentBlock};
}
diff --git a/packages/beacon-node/src/chain/blocks/verifyBlocksSignatures.ts b/packages/beacon-node/src/chain/blocks/verifyBlocksSignatures.ts
index fbbef969b696..14ad46a35c1e 100644
--- a/packages/beacon-node/src/chain/blocks/verifyBlocksSignatures.ts
+++ b/packages/beacon-node/src/chain/blocks/verifyBlocksSignatures.ts
@@ -20,7 +20,7 @@ export async function verifyBlocksSignatures(
preState0: CachedBeaconStateAllForks,
blocks: allForks.SignedBeaconBlock[],
opts: ImportBlockOpts
-): Promise {
+): Promise<{verifySignaturesTime: number}> {
const isValidPromises: Promise<boolean>[] = [];
// Verifies signatures after running state transition, so all SyncCommittee signed roots are known at this point.
@@ -46,17 +46,20 @@ export async function verifyBlocksSignatures(
}
}
- if (blocks.length === 1 && opts.seenTimestampSec !== undefined) {
- const recvToSigVer = Date.now() / 1000 - opts.seenTimestampSec;
- metrics?.gossipBlock.receivedToSignaturesVerification.observe(recvToSigVer);
- logger.verbose("Verified block signatures", {slot: blocks[0].message.slot, recvToSigVer});
- }
-
// `rejectFirstInvalidResolveAllValid()` returns on isValid result with its index
const res = await rejectFirstInvalidResolveAllValid(isValidPromises);
if (!res.allValid) {
throw new BlockError(blocks[res.index], {code: BlockErrorCode.INVALID_SIGNATURE, state: preState0});
}
+
+ const verifySignaturesTime = Date.now();
+ if (blocks.length === 1 && opts.seenTimestampSec !== undefined) {
+ const recvToSigVer = verifySignaturesTime / 1000 - opts.seenTimestampSec;
+ metrics?.gossipBlock.receivedToSignaturesVerification.observe(recvToSigVer);
+ logger.verbose("Verified block signatures", {slot: blocks[0].message.slot, recvToSigVer});
+ }
+
+ return {verifySignaturesTime};
}
type AllValidRes = {allValid: true} | {allValid: false; index: number};
diff --git a/packages/beacon-node/src/chain/blocks/verifyBlocksStateTransitionOnly.ts b/packages/beacon-node/src/chain/blocks/verifyBlocksStateTransitionOnly.ts
index 2afc9543f847..7d15d4e4f6ce 100644
--- a/packages/beacon-node/src/chain/blocks/verifyBlocksStateTransitionOnly.ts
+++ b/packages/beacon-node/src/chain/blocks/verifyBlocksStateTransitionOnly.ts
@@ -3,6 +3,7 @@ import {
stateTransition,
ExecutionPayloadStatus,
DataAvailableStatus,
+ StateHashTreeRootSource,
} from "@lodestar/state-transition";
import {ErrorAborted, Logger, sleep} from "@lodestar/utils";
import {Metrics} from "../../metrics/index.js";
@@ -27,7 +28,7 @@ export async function verifyBlocksStateTransitionOnly(
metrics: Metrics | null,
signal: AbortSignal,
opts: BlockProcessOpts & ImportBlockOpts
-): Promise<{postStates: CachedBeaconStateAllForks[]; proposerBalanceDeltas: number[]}> {
+): Promise<{postStates: CachedBeaconStateAllForks[]; proposerBalanceDeltas: number[]; verifyStateTime: number}> {
const postStates: CachedBeaconStateAllForks[] = [];
const proposerBalanceDeltas: number[] = [];
@@ -57,7 +58,9 @@ export async function verifyBlocksStateTransitionOnly(
metrics
);
- const hashTreeRootTimer = metrics?.stateHashTreeRootTime.startTimer();
+ const hashTreeRootTimer = metrics?.stateHashTreeRootTime.startTimer({
+ source: StateHashTreeRootSource.blockTransition,
+ });
const stateRoot = postState.hashTreeRoot();
hashTreeRootTimer?.();
@@ -90,12 +93,13 @@ export async function verifyBlocksStateTransitionOnly(
}
}
+ const verifyStateTime = Date.now();
if (blocks.length === 1 && opts.seenTimestampSec !== undefined) {
const slot = blocks[0].block.message.slot;
- const recvToTransition = Date.now() / 1000 - opts.seenTimestampSec;
+ const recvToTransition = verifyStateTime / 1000 - opts.seenTimestampSec;
metrics?.gossipBlock.receivedToStateTransition.observe(recvToTransition);
- logger.verbose("Transitioned gossip block", {slot, recvToTransition});
+ logger.verbose("Verified block state transition", {slot, recvToTransition});
}
- return {postStates, proposerBalanceDeltas};
+ return {postStates, proposerBalanceDeltas, verifyStateTime};
}
diff --git a/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts b/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts
index 0603ed7e7f7e..0b94d32b84ec 100644
--- a/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts
+++ b/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts
@@ -13,7 +13,7 @@ export async function writeBlockInputToDb(this: BeaconChain, blocksInput: BlockI
const fnPromises: Promise<void>[] = [];
for (const blockInput of blocksInput) {
- const {block, blockBytes, type} = blockInput;
+ const {block, blockBytes} = blockInput;
const blockRoot = this.config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message);
const blockRootHex = toHex(blockRoot);
if (blockBytes) {
@@ -29,8 +29,13 @@ export async function writeBlockInputToDb(this: BeaconChain, blocksInput: BlockI
root: blockRootHex,
});
- if (type === BlockInputType.postDeneb) {
- const {blobs: blobSidecars} = blockInput;
+ if (blockInput.type === BlockInputType.postDeneb || blockInput.type === BlockInputType.blobsPromise) {
+ const blobSidecars =
+ blockInput.type === BlockInputType.postDeneb
+ ? blockInput.blobs
+ : // At this point in the import flow the blobs are available and can safely be awaited
+ (await blockInput.availabilityPromise).blobs;
+
// NOTE: Old blobs are pruned on archive
fnPromises.push(this.db.blobSidecars.add({blockRoot, slot: block.message.slot, blobSidecars}));
this.logger.debug("Persisted blobSidecars to hot DB", {
diff --git a/packages/beacon-node/src/chain/bls/index.ts b/packages/beacon-node/src/chain/bls/index.ts
index 3ee72ac66cbd..f9898b13776b 100644
--- a/packages/beacon-node/src/chain/bls/index.ts
+++ b/packages/beacon-node/src/chain/bls/index.ts
@@ -1,4 +1,4 @@
export type {IBlsVerifier} from "./interface.js";
-export type {BlsMultiThreadWorkerPoolModules} from "./multithread/index.js";
+export type {BlsMultiThreadWorkerPoolModules, JobQueueItemType} from "./multithread/index.js";
export {BlsMultiThreadWorkerPool} from "./multithread/index.js";
export {BlsSingleThreadVerifier} from "./singleThread.js";
diff --git a/packages/beacon-node/src/chain/bls/multithread/index.ts b/packages/beacon-node/src/chain/bls/multithread/index.ts
index 9b0006566253..235ec1536be7 100644
--- a/packages/beacon-node/src/chain/bls/multithread/index.ts
+++ b/packages/beacon-node/src/chain/bls/multithread/index.ts
@@ -41,6 +41,8 @@ export type BlsMultiThreadWorkerPoolOptions = {
blsVerifyAllMultiThread?: boolean;
};
+export type {JobQueueItemType};
+
// 1 worker for the main thread
const blsPoolSize = Math.max(defaultPoolSize - 1, 1);
diff --git a/packages/beacon-node/src/chain/bls/multithread/jobItem.ts b/packages/beacon-node/src/chain/bls/multithread/jobItem.ts
index 4ae05cdab913..8b5c63df2eeb 100644
--- a/packages/beacon-node/src/chain/bls/multithread/jobItem.ts
+++ b/packages/beacon-node/src/chain/bls/multithread/jobItem.ts
@@ -56,7 +56,7 @@ export function jobItemWorkReq(job: JobQueueItem, format: PointFormat, metrics:
opts: job.opts,
sets: job.sets.map((set) => ({
// this can throw, handled in the consumer code
- publicKey: getAggregatedPubkey(set).toBytes(format),
+ publicKey: getAggregatedPubkey(set, metrics).toBytes(format),
signature: set.signature,
message: set.signingRoot,
})),
diff --git a/packages/beacon-node/src/chain/bls/utils.ts b/packages/beacon-node/src/chain/bls/utils.ts
index 0b1010de27f6..4a3a027f31ac 100644
--- a/packages/beacon-node/src/chain/bls/utils.ts
+++ b/packages/beacon-node/src/chain/bls/utils.ts
@@ -1,14 +1,19 @@
import type {PublicKey} from "@chainsafe/bls/types";
import bls from "@chainsafe/bls";
import {ISignatureSet, SignatureSetType} from "@lodestar/state-transition";
+import {Metrics} from "../../metrics/metrics.js";
-export function getAggregatedPubkey(signatureSet: ISignatureSet): PublicKey {
+export function getAggregatedPubkey(signatureSet: ISignatureSet, metrics: Metrics | null = null): PublicKey {
switch (signatureSet.type) {
case SignatureSetType.single:
return signatureSet.pubkey;
- case SignatureSetType.aggregate:
- return bls.PublicKey.aggregate(signatureSet.pubkeys);
+ case SignatureSetType.aggregate: {
+ const timer = metrics?.blsThreadPool.pubkeysAggregationMainThreadDuration.startTimer();
+ const pubkeys = bls.PublicKey.aggregate(signatureSet.pubkeys);
+ timer?.();
+ return pubkeys;
+ }
default:
throw Error("Unknown signature set type");
diff --git a/packages/beacon-node/src/chain/chain.ts b/packages/beacon-node/src/chain/chain.ts
index 3464aad8b673..5e38cf23f5de 100644
--- a/packages/beacon-node/src/chain/chain.ts
+++ b/packages/beacon-node/src/chain/chain.ts
@@ -1,5 +1,5 @@
import path from "node:path";
-import {CompositeTypeAny, fromHexString, toHexString, TreeView, Type} from "@chainsafe/ssz";
+import {CompositeTypeAny, fromHexString, TreeView, Type, toHexString} from "@chainsafe/ssz";
import {
BeaconStateAllForks,
CachedBeaconStateAllForks,
@@ -26,6 +26,8 @@ import {
deneb,
Wei,
bellatrix,
+ isBlindedBeaconBlock,
+ Gwei,
} from "@lodestar/types";
import {CheckpointWithHex, ExecutionStatus, IForkChoice, ProtoBlock} from "@lodestar/fork-choice";
import {ProcessShutdownCallback} from "@lodestar/validator";
@@ -77,6 +79,7 @@ import {BlockInput} from "./blocks/types.js";
import {SeenAttestationDatas} from "./seenCache/seenAttestationData.js";
import {ShufflingCache} from "./shufflingCache.js";
import {StateContextCache} from "./stateCache/stateContextCache.js";
+import {SeenGossipBlockInput} from "./seenCache/index.js";
import {CheckpointStateCache} from "./stateCache/stateContextCheckpointsCache.js";
/**
@@ -85,7 +88,6 @@ import {CheckpointStateCache} from "./stateCache/stateContextCheckpointsCache.js
* allow some margin if the node overloads.
*/
const DEFAULT_MAX_CACHED_PRODUCED_ROOTS = 4;
-const DEFAULT_MAX_CACHED_BLOB_SIDECARS = 4;
export class BeaconChain implements IBeaconChain {
readonly genesisTime: UintNum64;
@@ -123,6 +125,7 @@ export class BeaconChain implements IBeaconChain {
readonly seenSyncCommitteeMessages = new SeenSyncCommitteeMessages();
readonly seenContributionAndProof: SeenContributionAndProof;
readonly seenAttestationDatas: SeenAttestationDatas;
+ readonly seenGossipBlockInput = new SeenGossipBlockInput();
// Seen cache for liveness checks
readonly seenBlockAttesters = new SeenBlockAttesters();
@@ -134,8 +137,7 @@ export class BeaconChain implements IBeaconChain {
readonly checkpointBalancesCache: CheckpointBalancesCache;
readonly shufflingCache: ShufflingCache;
/** Map keyed by executionPayload.blockHash of the block for those blobs */
- readonly producedBlobSidecarsCache = new Map();
- readonly producedBlindedBlobSidecarsCache = new Map();
+ readonly producedContentsCache = new Map<string, deneb.Contents>();
// Cache payload from the local execution so that produceBlindedBlock or produceBlockV3 and
// send and get signed/published blinded versions which beacon can assemble into full before
@@ -468,20 +470,22 @@ export class BeaconChain implements IBeaconChain {
return data && {block: data, executionOptimistic: false};
}
- produceBlock(blockAttributes: BlockAttributes): Promise<{block: allForks.BeaconBlock; executionPayloadValue: Wei}> {
+ produceBlock(
+ blockAttributes: BlockAttributes
+ ): Promise<{block: allForks.BeaconBlock; executionPayloadValue: Wei; consensusBlockValue: Gwei}> {
return this.produceBlockWrapper(BlockType.Full, blockAttributes);
}
produceBlindedBlock(
blockAttributes: BlockAttributes
- ): Promise<{block: allForks.BlindedBeaconBlock; executionPayloadValue: Wei}> {
+ ): Promise<{block: allForks.BlindedBeaconBlock; executionPayloadValue: Wei; consensusBlockValue: Gwei}> {
return this.produceBlockWrapper(BlockType.Blinded, blockAttributes);
}
async produceBlockWrapper(
blockType: T,
{randaoReveal, graffiti, slot, feeRecipient}: BlockAttributes
- ): Promise<{block: AssembledBlockType; executionPayloadValue: Wei}> {
+ ): Promise<{block: AssembledBlockType; executionPayloadValue: Wei; consensusBlockValue: Gwei}> {
const head = this.forkChoice.getHead();
const state = await this.regen.getBlockSlotState(
head.blockRoot,
@@ -522,7 +526,9 @@ export class BeaconChain implements IBeaconChain {
stateRoot: ZERO_HASH,
body,
} as AssembledBlockType;
- block.stateRoot = computeNewStateRoot(this.metrics, state, block);
+
+ const {newStateRoot, proposerReward} = computeNewStateRoot(this.metrics, state, block);
+ block.stateRoot = newStateRoot;
const blockRoot =
blockType === BlockType.Full
? this.config.getForkTypes(slot).BeaconBlock.hashTreeRoot(block)
@@ -546,35 +552,12 @@ export class BeaconChain implements IBeaconChain {
// publishing the blinded block's full version
if (blobs.type === BlobsResultType.produced) {
// body is of full type here
- const blockHash = blobs.blockHash;
- const blobSidecars = blobs.blobSidecars.map((blobSidecar) => ({
- ...blobSidecar,
- blockRoot,
- slot,
- blockParentRoot: parentBlockRoot,
- proposerIndex,
- }));
-
- this.producedBlobSidecarsCache.set(blockHash, blobSidecars);
- this.metrics?.blockProductionCaches.producedBlobSidecarsCache.set(this.producedBlobSidecarsCache.size);
- } else if (blobs.type === BlobsResultType.blinded) {
- // body is of blinded type here
- const blockHash = blobs.blockHash;
- const blindedBlobSidecars = blobs.blobSidecars.map((blindedBlobSidecar) => ({
- ...blindedBlobSidecar,
- blockRoot,
- slot,
- blockParentRoot: parentBlockRoot,
- proposerIndex,
- }));
-
- this.producedBlindedBlobSidecarsCache.set(blockHash, blindedBlobSidecars);
- this.metrics?.blockProductionCaches.producedBlindedBlobSidecarsCache.set(
- this.producedBlindedBlobSidecarsCache.size
- );
+ const {blockHash, contents} = blobs;
+ this.producedContentsCache.set(blockHash, contents);
+ this.metrics?.blockProductionCaches.producedContentsCache.set(this.producedContentsCache.size);
}
- return {block, executionPayloadValue};
+ return {block, executionPayloadValue, consensusBlockValue: proposerReward};
}
/**
@@ -587,14 +570,14 @@ export class BeaconChain implements IBeaconChain {
* kzg_aggregated_proof=compute_proof_from_blobs(blobs),
* )
*/
- getBlobSidecars(beaconBlock: deneb.BeaconBlock): deneb.BlobSidecars {
+ getContents(beaconBlock: deneb.BeaconBlock): deneb.Contents {
const blockHash = toHex(beaconBlock.body.executionPayload.blockHash);
- const blobSidecars = this.producedBlobSidecarsCache.get(blockHash);
- if (!blobSidecars) {
- throw Error(`No blobSidecars for executionPayload.blockHash ${blockHash}`);
+ const contents = this.producedContentsCache.get(blockHash);
+ if (!contents) {
+ throw Error(`No contents for executionPayload.blockHash ${blockHash}`);
}
- return blobSidecars;
+ return contents;
}
async processBlock(block: BlockInput, opts?: ImportBlockOpts): Promise<void> {
@@ -645,21 +628,32 @@ export class BeaconChain implements IBeaconChain {
return this.reprocessController.waitForBlockOfAttestation(slot, root);
}
+ persistBlock(data: allForks.BeaconBlock | allForks.BlindedBeaconBlock, suffix?: string): void {
+ const slot = data.slot;
+ if (isBlindedBeaconBlock(data)) {
+ const sszType = this.config.getBlindedForkTypes(slot).BeaconBlock;
+ void this.persistSszObject("BlindedBeaconBlock", sszType.serialize(data), sszType.hashTreeRoot(data), suffix);
+ } else {
+ const sszType = this.config.getForkTypes(slot).BeaconBlock;
+ void this.persistSszObject("BeaconBlock", sszType.serialize(data), sszType.hashTreeRoot(data), suffix);
+ }
+ }
+
persistInvalidSszValue<T>(type: Type<T>, sszObject: T, suffix?: string): void {
if (this.opts.persistInvalidSszObjects) {
- void this.persistInvalidSszObject(type.typeName, type.serialize(sszObject), type.hashTreeRoot(sszObject), suffix);
+ void this.persistSszObject(type.typeName, type.serialize(sszObject), type.hashTreeRoot(sszObject), suffix);
}
}
persistInvalidSszBytes(typeName: string, sszBytes: Uint8Array, suffix?: string): void {
if (this.opts.persistInvalidSszObjects) {
- void this.persistInvalidSszObject(typeName, sszBytes, sszBytes, suffix);
+ void this.persistSszObject(typeName, sszBytes, sszBytes, suffix);
}
}
persistInvalidSszView(view: TreeView<CompositeTypeAny>, suffix?: string): void {
if (this.opts.persistInvalidSszObjects) {
- void this.persistInvalidSszObject(view.type.typeName, view.serialize(), view.hashTreeRoot(), suffix);
+ void this.persistSszObject(view.type.typeName, view.serialize(), view.hashTreeRoot(), suffix);
}
}
@@ -795,16 +789,12 @@ export class BeaconChain implements IBeaconChain {
return {state: blockState, stateId: "block_state_any_epoch", shouldWarn: true};
}
- private async persistInvalidSszObject(
+ private async persistSszObject(
typeName: string,
bytes: Uint8Array,
root: Uint8Array,
suffix?: string
): Promise<void> {
- if (!this.opts.persistInvalidSszObjects) {
- return;
- }
-
const now = new Date();
// yyyy-MM-dd
const dateStr = now.toISOString().split("T")[0];
@@ -869,19 +859,8 @@ export class BeaconChain implements IBeaconChain {
this.metrics?.blockProductionCaches.producedBlindedBlockRoot.set(this.producedBlindedBlockRoot.size);
if (this.config.getForkSeq(slot) >= ForkSeq.deneb) {
- pruneSetToMax(
- this.producedBlobSidecarsCache,
- this.opts.maxCachedBlobSidecars ?? DEFAULT_MAX_CACHED_BLOB_SIDECARS
- );
- this.metrics?.blockProductionCaches.producedBlobSidecarsCache.set(this.producedBlobSidecarsCache.size);
-
- pruneSetToMax(
- this.producedBlindedBlobSidecarsCache,
- this.opts.maxCachedBlobSidecars ?? DEFAULT_MAX_CACHED_BLOB_SIDECARS
- );
- this.metrics?.blockProductionCaches.producedBlindedBlobSidecarsCache.set(
- this.producedBlindedBlobSidecarsCache.size
- );
+ pruneSetToMax(this.producedContentsCache, this.opts.maxCachedProducedRoots ?? DEFAULT_MAX_CACHED_PRODUCED_ROOTS);
+ this.metrics?.blockProductionCaches.producedContentsCache.set(this.producedContentsCache.size);
}
const metrics = this.metrics;
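producedContentsCache above is a plain Map bounded by pruneSetToMax on each prune pass. A sketch of that FIFO-style pruning, assuming (as the @lodestar/utils helper does) that Map insertion order approximates entry age:

```ts
// Delete oldest entries until the cache is back under maxItems.
// JS Maps iterate keys in insertion order, so the first keys are the oldest.
function pruneMapToMax<K, V>(cache: Map<K, V>, maxItems: number): number {
  let pruned = 0;
  for (const key of cache.keys()) {
    if (cache.size <= maxItems) break;
    cache.delete(key);
    pruned++;
  }
  return pruned;
}
```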
diff --git a/packages/beacon-node/src/chain/errors/blobSidecarError.ts b/packages/beacon-node/src/chain/errors/blobSidecarError.ts
index e242cbcb11ba..f38aa883002c 100644
--- a/packages/beacon-node/src/chain/errors/blobSidecarError.ts
+++ b/packages/beacon-node/src/chain/errors/blobSidecarError.ts
@@ -21,6 +21,7 @@ export enum BlobSidecarErrorCode {
PARENT_UNKNOWN = "BLOB_SIDECAR_ERROR_PARENT_UNKNOWN",
NOT_LATER_THAN_PARENT = "BLOB_SIDECAR_ERROR_NOT_LATER_THAN_PARENT",
PROPOSAL_SIGNATURE_INVALID = "BLOB_SIDECAR_ERROR_PROPOSAL_SIGNATURE_INVALID",
+ INCLUSION_PROOF_INVALID = "BLOB_SIDECAR_ERROR_INCLUSION_PROOF_INVALID",
INCORRECT_PROPOSER = "BLOB_SIDECAR_ERROR_INCORRECT_PROPOSER",
}
@@ -37,6 +38,7 @@ export type BlobSidecarErrorType =
| {code: BlobSidecarErrorCode.PARENT_UNKNOWN; parentRoot: RootHex}
| {code: BlobSidecarErrorCode.NOT_LATER_THAN_PARENT; parentSlot: Slot; slot: Slot}
| {code: BlobSidecarErrorCode.PROPOSAL_SIGNATURE_INVALID}
+ | {code: BlobSidecarErrorCode.INCLUSION_PROOF_INVALID; slot: Slot; blobIdx: number}
| {code: BlobSidecarErrorCode.INCORRECT_PROPOSER; proposerIndex: ValidatorIndex};
export class BlobSidecarGossipError extends GossipActionError {}
diff --git a/packages/beacon-node/src/chain/errors/blockError.ts b/packages/beacon-node/src/chain/errors/blockError.ts
index ee06927a4fc1..6ab15275934e 100644
--- a/packages/beacon-node/src/chain/errors/blockError.ts
+++ b/packages/beacon-node/src/chain/errors/blockError.ts
@@ -63,6 +63,8 @@ export enum BlockErrorCode {
/** The attestation head block is too far behind the attestation slot, causing many skip slots.
This is deemed a DoS risk */
TOO_MANY_SKIPPED_SLOTS = "TOO_MANY_SKIPPED_SLOTS",
+ /** The blobs are unavailable */
+ DATA_UNAVAILABLE = "BLOCK_ERROR_DATA_UNAVAILABLE",
}
type ExecutionErrorStatus = Exclude<
@@ -103,7 +105,8 @@ export type BlockErrorType =
| {code: BlockErrorCode.TOO_MUCH_GAS_USED; gasUsed: number; gasLimit: number}
| {code: BlockErrorCode.SAME_PARENT_HASH; blockHash: RootHex}
| {code: BlockErrorCode.TRANSACTIONS_TOO_BIG; size: number; max: number}
- | {code: BlockErrorCode.EXECUTION_ENGINE_ERROR; execStatus: ExecutionErrorStatus; errorMessage: string};
+ | {code: BlockErrorCode.EXECUTION_ENGINE_ERROR; execStatus: ExecutionErrorStatus; errorMessage: string}
+ | {code: BlockErrorCode.DATA_UNAVAILABLE};
export class BlockGossipError extends GossipActionError {}
diff --git a/packages/beacon-node/src/chain/interface.ts b/packages/beacon-node/src/chain/interface.ts
index 7fa60fd76ace..880a5e86071a 100644
--- a/packages/beacon-node/src/chain/interface.ts
+++ b/packages/beacon-node/src/chain/interface.ts
@@ -1,5 +1,17 @@
import {CompositeTypeAny, TreeView, Type} from "@chainsafe/ssz";
-import {allForks, UintNum64, Root, phase0, Slot, RootHex, Epoch, ValidatorIndex, deneb, Wei} from "@lodestar/types";
+import {
+ allForks,
+ UintNum64,
+ Root,
+ phase0,
+ Slot,
+ RootHex,
+ Epoch,
+ ValidatorIndex,
+ deneb,
+ Wei,
+ Gwei,
+} from "@lodestar/types";
import {
BeaconStateAllForks,
CachedBeaconStateAllForks,
@@ -37,6 +49,7 @@ import {CheckpointBalancesCache} from "./balancesCache.js";
import {IChainOptions} from "./options.js";
import {AssembledBlockType, BlockAttributes, BlockType} from "./produceBlock/produceBlockBody.js";
import {SeenAttestationDatas} from "./seenCache/seenAttestationData.js";
+import {SeenGossipBlockInput} from "./seenCache/index.js";
import {ShufflingCache} from "./shufflingCache.js";
export {BlockType, type AssembledBlockType};
@@ -90,14 +103,14 @@ export interface IBeaconChain {
readonly seenSyncCommitteeMessages: SeenSyncCommitteeMessages;
readonly seenContributionAndProof: SeenContributionAndProof;
readonly seenAttestationDatas: SeenAttestationDatas;
+ readonly seenGossipBlockInput: SeenGossipBlockInput;
// Seen cache for liveness checks
readonly seenBlockAttesters: SeenBlockAttesters;
readonly beaconProposerCache: BeaconProposerCache;
readonly checkpointBalancesCache: CheckpointBalancesCache;
- readonly producedBlobSidecarsCache: Map<RootHex, deneb.BlobSidecars>;
+ readonly producedContentsCache: Map<RootHex, deneb.Contents>;
readonly producedBlockRoot: Map<RootHex, allForks.ExecutionPayload | null>;
- readonly producedBlindedBlobSidecarsCache: Map<RootHex, deneb.BlindedBlobSidecars>;
readonly shufflingCache: ShufflingCache;
readonly producedBlindedBlockRoot: Set<RootHex>;
readonly opts: IChainOptions;
@@ -139,12 +152,14 @@ export interface IBeaconChain {
*/
getBlockByRoot(root: RootHex): Promise<{block: allForks.SignedBeaconBlock; executionOptimistic: boolean} | null>;
- getBlobSidecars(beaconBlock: deneb.BeaconBlock): deneb.BlobSidecars;
+ getContents(beaconBlock: deneb.BeaconBlock): deneb.Contents;
- produceBlock(blockAttributes: BlockAttributes): Promise<{block: allForks.BeaconBlock; executionPayloadValue: Wei}>;
+ produceBlock(
+ blockAttributes: BlockAttributes
+ ): Promise<{block: allForks.BeaconBlock; executionPayloadValue: Wei; consensusBlockValue: Gwei}>;
produceBlindedBlock(
blockAttributes: BlockAttributes
- ): Promise<{block: allForks.BlindedBeaconBlock; executionPayloadValue: Wei}>;
+ ): Promise<{block: allForks.BlindedBeaconBlock; executionPayloadValue: Wei; consensusBlockValue: Gwei}>;
/** Process a block until complete */
processBlock(block: BlockInput, opts?: ImportBlockOpts): Promise<void>;
@@ -159,6 +174,7 @@ export interface IBeaconChain {
updateBeaconProposerData(epoch: Epoch, proposers: ProposerPreparationData[]): Promise<void>;
+ persistBlock(data: allForks.BeaconBlock | allForks.BlindedBeaconBlock, suffix?: string): void;
persistInvalidSszValue<T>(type: Type<T>, sszObject: T | Uint8Array, suffix?: string): void;
persistInvalidSszBytes(type: string, sszBytes: Uint8Array, suffix?: string): void;
/** Persist bad items to persistInvalidSszObjectsDir dir, for example invalid state, attestations etc. */
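A hedged sketch of how a caller might combine the two values now returned by `produceBlock()`. Note the unit difference: `executionPayloadValue` is denominated in Wei while `consensusBlockValue` is in Gwei, so one must be scaled before summing. This helper is an illustration, not part of the PR.

```ts
import {Gwei, Wei} from "@lodestar/types";

// Illustrative only: scale consensusBlockValue (Gwei) to Wei before adding
export function totalBlockValueWei(executionPayloadValue: Wei, consensusBlockValue: Gwei): Wei {
  return executionPayloadValue + consensusBlockValue * 1_000_000_000n;
}
```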
diff --git a/packages/beacon-node/src/chain/opPools/aggregatedAttestationPool.ts b/packages/beacon-node/src/chain/opPools/aggregatedAttestationPool.ts
index f9911275b6ee..00309d322a11 100644
--- a/packages/beacon-node/src/chain/opPools/aggregatedAttestationPool.ts
+++ b/packages/beacon-node/src/chain/opPools/aggregatedAttestationPool.ts
@@ -166,10 +166,16 @@ export class AggregatedAttestationPool {
}
}
- return attestationsByScore
- .sort((a, b) => b.score - a.score)
- .slice(0, MAX_ATTESTATIONS)
- .map((attestation) => attestation.attestation);
+ const sortedAttestationsByScore = attestationsByScore.sort((a, b) => b.score - a.score);
+ const attestationsForBlock: phase0.Attestation[] = [];
+ for (const [i, attestationWithScore] of sortedAttestationsByScore.entries()) {
+ if (i >= MAX_ATTESTATIONS) {
+ break;
+ }
+ // attestations could be modified in this op pool, so we need to clone for block
+ attestationsForBlock.push(ssz.phase0.Attestation.clone(attestationWithScore.attestation));
+ }
+ return attestationsForBlock;
}
/**
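The clone above matters because the pool keeps aggregating into its own `Attestation` objects after block production. A minimal demonstration of the aliasing hazard avoided here (not lodestar code):

```ts
import {ssz} from "@lodestar/types";

// The pool mutates its attestations in place; a block holding a bare reference
// would observe mutations made after the block was produced.
const pooled = ssz.phase0.Attestation.defaultValue();
const forBlock = ssz.phase0.Attestation.clone(pooled);

pooled.data.slot = 100; // the pool keeps mutating its copy later
console.log(forBlock.data.slot); // 0 — the deep-cloned copy is unaffected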
diff --git a/packages/beacon-node/src/chain/opPools/opPool.ts b/packages/beacon-node/src/chain/opPools/opPool.ts
index cee8d0614c30..bb436319cd53 100644
--- a/packages/beacon-node/src/chain/opPools/opPool.ts
+++ b/packages/beacon-node/src/chain/opPools/opPool.ts
@@ -19,6 +19,7 @@ import {IBeaconDb} from "../../db/index.js";
import {SignedBLSToExecutionChangeVersioned} from "../../util/types.js";
import {BlockType} from "../interface.js";
import {Metrics} from "../../metrics/metrics.js";
+import {BlockProductionStep} from "../produceBlock/produceBlockBody.js";
import {isValidBlsToExecutionChangeForBlockInclusion} from "./utils.js";
type HexRoot = string;
@@ -201,7 +202,7 @@ export class OpPool {
}
}
endProposerSlashing?.({
- step: "proposerSlashing",
+ step: BlockProductionStep.proposerSlashing,
});
const endAttesterSlashings = stepsMetrics?.startTimer();
@@ -235,7 +236,7 @@ export class OpPool {
}
}
endAttesterSlashings?.({
- step: "attesterSlashings",
+ step: BlockProductionStep.attesterSlashings,
});
const endVoluntaryExits = stepsMetrics?.startTimer();
@@ -256,7 +257,7 @@ export class OpPool {
}
}
endVoluntaryExits?.({
- step: "voluntaryExits",
+ step: BlockProductionStep.voluntaryExits,
});
const endBlsToExecutionChanges = stepsMetrics?.startTimer();
@@ -270,7 +271,7 @@ export class OpPool {
}
}
endBlsToExecutionChanges?.({
- step: "blsToExecutionChanges",
+ step: BlockProductionStep.blsToExecutionChanges,
});
return [attesterSlashings, proposerSlashings, voluntaryExits, blsToExecutionChanges];
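Switching the metric's `step` label from string literals to `BlockProductionStep` keeps every label value defined in one place. A sketch of the benefit, under an assumed metric shape (the real histogram wiring is elsewhere):

```ts
import {BlockProductionStep} from "../produceBlock/produceBlockBody.js";

// Assumed shape for illustration: typing the timer's label against the enum turns a
// renamed or mistyped step into a compile error instead of a stray Prometheus label.
type StepLabels = {step: BlockProductionStep};
declare const stepsMetrics: {startTimer(): (labels: StepLabels) => number};

const endStep = stepsMetrics.startTimer();
// ... build the proposer slashings ...
endStep({step: BlockProductionStep.proposerSlashing});
```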
diff --git a/packages/beacon-node/src/chain/options.ts b/packages/beacon-node/src/chain/options.ts
index 518c73e072d7..cc7795ade0a1 100644
--- a/packages/beacon-node/src/chain/options.ts
+++ b/packages/beacon-node/src/chain/options.ts
@@ -14,6 +14,7 @@ export type IChainOptions = BlockProcessOpts &
LightClientServerOpts & {
blsVerifyAllMainThread?: boolean;
blsVerifyAllMultiThread?: boolean;
+ persistProducedBlocks?: boolean;
persistInvalidSszObjects?: boolean;
persistInvalidSszObjectsDir?: string;
skipCreateStateCacheIfAvailable?: boolean;
diff --git a/packages/beacon-node/src/chain/prepareNextSlot.ts b/packages/beacon-node/src/chain/prepareNextSlot.ts
index 43fac1d1b120..e2bffd5bc8c6 100644
--- a/packages/beacon-node/src/chain/prepareNextSlot.ts
+++ b/packages/beacon-node/src/chain/prepareNextSlot.ts
@@ -1,4 +1,9 @@
-import {computeEpochAtSlot, isExecutionStateType, computeTimeAtSlot} from "@lodestar/state-transition";
+import {
+ computeEpochAtSlot,
+ isExecutionStateType,
+ computeTimeAtSlot,
+ StateHashTreeRootSource,
+} from "@lodestar/state-transition";
import {ChainForkConfig} from "@lodestar/config";
import {ForkSeq, SLOTS_PER_EPOCH, ForkExecution} from "@lodestar/params";
import {Slot} from "@lodestar/types";
@@ -104,6 +109,14 @@ export class PrepareNextSlotScheduler {
RegenCaller.precomputeEpoch
);
+ // cache HashObjects for faster hashTreeRoot() later, especially for computeNewStateRoot() if we need to produce a block at slot 0 of epoch
+ // see https://github.com/ChainSafe/lodestar/issues/6194
+ const hashTreeRootTimer = this.metrics?.stateHashTreeRootTime.startTimer({
+ source: StateHashTreeRootSource.prepareNextSlot,
+ });
+ prepareState.hashTreeRoot();
+ hashTreeRootTimer?.();
+
// assuming there is no reorg, it caches the checkpoint state & helps avoid doing a full state transition in the next slot
// + when gossip block comes, we need to validate and run state transition
// + if next slot is a skipped slot, it'd help getting target checkpoint state faster to validate attestations
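The point of the new `prepareState.hashTreeRoot()` call is memoization: the first call hashes the whole tree and caches per-node HashObjects, so a later `computeNewStateRoot()` mostly hits the cache. A rough illustration, assuming `prepareState` from the surrounding scope (timings are illustrative, not a benchmark):

```ts
let t = Date.now();
prepareState.hashTreeRoot(); // cold: walks and hashes every node, caching HashObjects
const coldMs = Date.now() - t;

t = Date.now();
prepareState.hashTreeRoot(); // warm: only re-hashes nodes dirtied since, near-instant here
const warmMs = Date.now() - t;
```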
diff --git a/packages/beacon-node/src/chain/produceBlock/computeNewStateRoot.ts b/packages/beacon-node/src/chain/produceBlock/computeNewStateRoot.ts
index bac501ed725c..ccc0595d0db6 100644
--- a/packages/beacon-node/src/chain/produceBlock/computeNewStateRoot.ts
+++ b/packages/beacon-node/src/chain/produceBlock/computeNewStateRoot.ts
@@ -2,9 +2,10 @@ import {
CachedBeaconStateAllForks,
DataAvailableStatus,
ExecutionPayloadStatus,
+ StateHashTreeRootSource,
stateTransition,
} from "@lodestar/state-transition";
-import {allForks, Root} from "@lodestar/types";
+import {allForks, Gwei, Root} from "@lodestar/types";
import {ZERO_HASH} from "../../constants/index.js";
import {Metrics} from "../../metrics/index.js";
@@ -17,7 +18,7 @@ export function computeNewStateRoot(
metrics: Metrics | null,
state: CachedBeaconStateAllForks,
block: allForks.FullOrBlindedBeaconBlock
-): Root {
+): {newStateRoot: Root; proposerReward: Gwei} {
// Set signature to zero to re-use stateTransition() function which requires the SignedBeaconBlock type
const blockEmptySig = {message: block, signature: ZERO_HASH} as allForks.FullOrBlindedSignedBeaconBlock;
@@ -41,5 +42,14 @@ export function computeNewStateRoot(
metrics
);
- return postState.hashTreeRoot();
+ const {attestations, syncAggregate, slashing} = postState.proposerRewards;
+ const proposerReward = BigInt(attestations + syncAggregate + slashing);
+
+ const hashTreeRootTimer = metrics?.stateHashTreeRootTime.startTimer({
+ source: StateHashTreeRootSource.computeNewStateRoot,
+ });
+ const newStateRoot = postState.hashTreeRoot();
+ hashTreeRootTimer?.();
+
+ return {newStateRoot, proposerReward};
}
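A hedged sketch of the new call shape; the actual caller lives outside this diff, and `currentState` and `block` are assumed inputs:

```ts
const {newStateRoot, proposerReward} = computeNewStateRoot(metrics, currentState, block);
block.stateRoot = newStateRoot; // fill in the state root the block commits to
// proposerReward (Gwei) is what produceBlock() surfaces as consensusBlockValue
```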
diff --git a/packages/beacon-node/src/chain/produceBlock/produceBlockBody.ts b/packages/beacon-node/src/chain/produceBlock/produceBlockBody.ts
index 1c522c54a93d..3c2bec223eca 100644
--- a/packages/beacon-node/src/chain/produceBlock/produceBlockBody.ts
+++ b/packages/beacon-node/src/chain/produceBlock/produceBlockBody.ts
@@ -35,17 +35,30 @@ import {PayloadId, IExecutionEngine, IExecutionBuilder, PayloadAttributes} from
import {ZERO_HASH, ZERO_HASH_HEX} from "../../constants/index.js";
import {IEth1ForBlockProduction} from "../../eth1/index.js";
import {numToQuantity} from "../../eth1/provider/utils.js";
-import {
- validateBlobsAndKzgCommitments,
- validateBlindedBlobsAndKzgCommitments,
-} from "./validateBlobsAndKzgCommitments.js";
+import {validateBlobsAndKzgCommitments} from "./validateBlobsAndKzgCommitments.js";
// Time to provide the EL to generate a payload from new payload id
const PAYLOAD_GENERATION_TIME_MS = 500;
-enum PayloadPreparationType {
+
+export enum PayloadPreparationType {
Fresh = "Fresh",
Cached = "Cached",
Reorged = "Reorged",
+ Blinded = "Blinded",
+}
+
+/**
+ * Block production steps tracked in metrics
+ */
+export enum BlockProductionStep {
+ proposerSlashing = "proposerSlashing",
+ attesterSlashings = "attesterSlashings",
+ voluntaryExits = "voluntaryExits",
+ blsToExecutionChanges = "blsToExecutionChanges",
+ attestations = "attestations",
+ eth1DataAndDeposits = "eth1DataAndDeposits",
+ syncAggregate = "syncAggregate",
+ executionPayload = "executionPayload",
}
export type BlockAttributes = {
@@ -74,8 +87,8 @@ export enum BlobsResultType {
export type BlobsResult =
| {type: BlobsResultType.preDeneb}
- | {type: BlobsResultType.produced; blobSidecars: deneb.BlobSidecars; blockHash: RootHex}
- | {type: BlobsResultType.blinded; blobSidecars: deneb.BlindedBlobSidecars; blockHash: RootHex};
+ | {type: BlobsResultType.produced; contents: deneb.Contents; blockHash: RootHex}
+ | {type: BlobsResultType.blinded};
export async function produceBlockBody(
this: BeaconChain,
@@ -134,13 +147,13 @@ export async function produceBlockBody(
const endAttestations = stepsMetrics?.startTimer();
const attestations = this.aggregatedAttestationPool.getAttestationsForBlock(this.forkChoice, currentState);
endAttestations?.({
- step: "attestations",
+ step: BlockProductionStep.attestations,
});
const endEth1DataAndDeposits = stepsMetrics?.startTimer();
const {eth1Data, deposits} = await this.eth1.getEth1DataAndDeposits(currentState);
endEth1DataAndDeposits?.({
- step: "eth1DataAndDeposits",
+ step: BlockProductionStep.eth1DataAndDeposits,
});
const blockBody: phase0.BeaconBlockBody = {
@@ -165,7 +178,7 @@ export async function produceBlockBody(
(blockBody as altair.BeaconBlockBody).syncAggregate = syncAggregate;
}
endSyncAggregate?.({
- step: "syncAggregate",
+ step: BlockProductionStep.syncAggregate,
});
Object.assign(logMeta, {
@@ -221,7 +234,7 @@ export async function produceBlockBody(
executionPayloadValue = builderRes.executionPayloadValue;
const fetchedTime = Date.now() / 1000 - computeTimeAtSlot(this.config, blockSlot, this.genesisTime);
- const prepType = "blinded";
+ const prepType = PayloadPreparationType.Blinded;
this.metrics?.blockPayload.payloadFetchedTime.observe({prepType}, fetchedTime);
this.logger.verbose("Fetched execution payload header from builder", {
slot: blockSlot,
@@ -231,35 +244,14 @@ export async function produceBlockBody(
});
if (ForkSeq[fork] >= ForkSeq.deneb) {
- const {blindedBlobsBundle} = builderRes;
- if (blindedBlobsBundle === undefined) {
- throw Error(`Invalid builder getHeader response for fork=${fork}, missing blindedBlobsBundle`);
- }
-
- // validate blindedBlobsBundle
- if (this.opts.sanityCheckExecutionEngineBlobs) {
- validateBlindedBlobsAndKzgCommitments(builderRes.header, blindedBlobsBundle);
+ const {blobKzgCommitments} = builderRes;
+ if (blobKzgCommitments === undefined) {
+ throw Error(`Invalid builder getHeader response for fork=${fork}, missing blobKzgCommitments`);
}
- (blockBody as deneb.BlindedBeaconBlockBody).blobKzgCommitments = blindedBlobsBundle.commitments;
- const blockHash = toHex(builderRes.header.blockHash);
-
- const blobSidecars = Array.from({length: blindedBlobsBundle.blobRoots.length}, (_v, index) => {
- const blobRoot = blindedBlobsBundle.blobRoots[index];
- const commitment = blindedBlobsBundle.commitments[index];
- const proof = blindedBlobsBundle.proofs[index];
- const blindedBlobSidecar = {
- index,
- blobRoot,
- kzgProof: proof,
- kzgCommitment: commitment,
- };
- // Other fields will be injected after postState is calculated
- return blindedBlobSidecar;
- }) as deneb.BlindedBlobSidecars;
- blobsResult = {type: BlobsResultType.blinded, blobSidecars, blockHash};
-
- Object.assign(logMeta, {blobs: blindedBlobsBundle.commitments.length});
+ (blockBody as deneb.BlindedBeaconBlockBody).blobKzgCommitments = blobKzgCommitments;
+ blobsResult = {type: BlobsResultType.blinded};
+ Object.assign(logMeta, {blobs: blobKzgCommitments.length});
} else {
blobsResult = {type: BlobsResultType.preDeneb};
}
@@ -332,23 +324,10 @@ export async function produceBlockBody(
(blockBody as deneb.BeaconBlockBody).blobKzgCommitments = blobsBundle.commitments;
const blockHash = toHex(executionPayload.blockHash);
+ const contents = {kzgProofs: blobsBundle.proofs, blobs: blobsBundle.blobs};
+ blobsResult = {type: BlobsResultType.produced, contents, blockHash};
- const blobSidecars = Array.from({length: blobsBundle.blobs.length}, (_v, index) => {
- const blob = blobsBundle.blobs[index];
- const commitment = blobsBundle.commitments[index];
- const proof = blobsBundle.proofs[index];
- const blobSidecar = {
- index,
- blob,
- kzgProof: proof,
- kzgCommitment: commitment,
- };
- // Other fields will be injected after postState is calculated
- return blobSidecar;
- }) as deneb.BlobSidecars;
- blobsResult = {type: BlobsResultType.produced, blobSidecars, blockHash};
-
- Object.assign(logMeta, {blobs: blobSidecars.length});
+ Object.assign(logMeta, {blobs: blobsBundle.commitments.length});
} else {
blobsResult = {type: BlobsResultType.preDeneb};
}
@@ -380,7 +359,7 @@ export async function produceBlockBody(
executionPayloadValue = BigInt(0);
}
endExecutionPayload?.({
- step: "executionPayload",
+ step: BlockProductionStep.executionPayload,
});
if (ForkSeq[fork] >= ForkSeq.capella) {
@@ -502,7 +481,7 @@ async function prepareExecutionPayloadHeader(
): Promise<{
header: allForks.ExecutionPayloadHeader;
executionPayloadValue: Wei;
- blindedBlobsBundle?: deneb.BlindedBlobsBundle;
+ blobKzgCommitments?: deneb.BlobKzgCommitments;
}> {
if (!chain.executionBuilder) {
throw Error("executionBuilder required");
diff --git a/packages/beacon-node/src/chain/produceBlock/validateBlobsAndKzgCommitments.ts b/packages/beacon-node/src/chain/produceBlock/validateBlobsAndKzgCommitments.ts
index 0d00d0c8bd72..54e90672d189 100644
--- a/packages/beacon-node/src/chain/produceBlock/validateBlobsAndKzgCommitments.ts
+++ b/packages/beacon-node/src/chain/produceBlock/validateBlobsAndKzgCommitments.ts
@@ -1,4 +1,4 @@
-import {allForks, deneb} from "@lodestar/types";
+import {allForks} from "@lodestar/types";
import {BlobsBundle} from "../../execution/index.js";
/**
@@ -13,15 +13,3 @@ export function validateBlobsAndKzgCommitments(payload: allForks.ExecutionPayloa
);
}
}
-
-export function validateBlindedBlobsAndKzgCommitments(
- payload: allForks.ExecutionPayloadHeader,
- blindedBlobsBundle: deneb.BlindedBlobsBundle
-): void {
- // sanity-check that the KZG commitments match the blobs (as produced by the execution engine)
- if (blindedBlobsBundle.blobRoots.length !== blindedBlobsBundle.commitments.length) {
- throw Error(
- `BlindedBlobs bundle blobs len ${blindedBlobsBundle.blobRoots.length} != commitments len ${blindedBlobsBundle.commitments.length}`
- );
- }
-}
diff --git a/packages/beacon-node/src/chain/regen/queued.ts b/packages/beacon-node/src/chain/regen/queued.ts
index 5305502c8c05..dfda56cc1eea 100644
--- a/packages/beacon-node/src/chain/regen/queued.ts
+++ b/packages/beacon-node/src/chain/regen/queued.ts
@@ -221,7 +221,7 @@ export class QueuedStateRegenerator implements IStateRegenerator {
private jobQueueProcessor = async (regenRequest: RegenRequest): Promise<void> => {
const metricsLabels = {
caller: regenRequest.args[regenRequest.args.length - 1] as RegenCaller,
- entrypoint: regenRequest.key,
+ entrypoint: regenRequest.key as RegenFnName,
};
let timer;
try {
diff --git a/packages/beacon-node/src/chain/reprocess.ts b/packages/beacon-node/src/chain/reprocess.ts
index 3ab6056fb3af..4c91ef07ff69 100644
--- a/packages/beacon-node/src/chain/reprocess.ts
+++ b/packages/beacon-node/src/chain/reprocess.ts
@@ -11,7 +11,7 @@ export const REPROCESS_MIN_TIME_TO_NEXT_SLOT_SEC = 2;
/**
* Reprocess status for metrics
*/
-enum ReprocessStatus {
+export enum ReprocessStatus {
/**
* There are too many attestations that have unknown block root.
*/
@@ -140,7 +140,10 @@ export class ReprocessController {
for (const awaitingPromise of awaitingPromisesByRoot.values()) {
const {resolve, addedTimeMs} = awaitingPromise;
resolve(false);
- this.metrics?.reprocessApiAttestations.waitSecBeforeReject.set((now - addedTimeMs) / 1000);
+ this.metrics?.reprocessApiAttestations.waitSecBeforeReject.set(
+ {reason: ReprocessStatus.expired},
+ (now - addedTimeMs) / 1000
+ );
this.metrics?.reprocessApiAttestations.reject.inc({reason: ReprocessStatus.expired});
}
diff --git a/packages/beacon-node/src/chain/seenCache/index.ts b/packages/beacon-node/src/chain/seenCache/index.ts
index f354a37f93ee..250e6581c312 100644
--- a/packages/beacon-node/src/chain/seenCache/index.ts
+++ b/packages/beacon-node/src/chain/seenCache/index.ts
@@ -2,3 +2,4 @@ export {SeenAggregators, SeenAttesters} from "./seenAttesters.js";
export {SeenBlockProposers} from "./seenBlockProposers.js";
export {SeenSyncCommitteeMessages} from "./seenCommittee.js";
export {SeenContributionAndProof} from "./seenCommitteeContribution.js";
+export {SeenGossipBlockInput} from "./seenGossipBlockInput.js";
diff --git a/packages/beacon-node/src/chain/seenCache/seenAttestationData.ts b/packages/beacon-node/src/chain/seenCache/seenAttestationData.ts
index ded54a5b4a54..a19476497e9f 100644
--- a/packages/beacon-node/src/chain/seenCache/seenAttestationData.ts
+++ b/packages/beacon-node/src/chain/seenCache/seenAttestationData.ts
@@ -17,7 +17,7 @@ export type AttestationDataCacheEntry = {
subnet: number;
};
-enum RejectReason {
+export enum RejectReason {
// attestation data reaches MAX_CACHE_SIZE_PER_SLOT
reached_limit = "reached_limit",
// attestation data is too old
diff --git a/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts b/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts
new file mode 100644
index 000000000000..8b767975c112
--- /dev/null
+++ b/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts
@@ -0,0 +1,170 @@
+import {toHexString} from "@chainsafe/ssz";
+import {deneb, RootHex, ssz, allForks} from "@lodestar/types";
+import {ChainForkConfig} from "@lodestar/config";
+import {pruneSetToMax} from "@lodestar/utils";
+import {BLOBSIDECAR_FIXED_SIZE} from "@lodestar/params";
+
+import {
+ BlockInput,
+ getBlockInput,
+ BlockSource,
+ BlockInputBlobs,
+ BlobsCache,
+ GossipedInputType,
+} from "../blocks/types.js";
+
+type GossipedBlockInput =
+ | {type: GossipedInputType.block; signedBlock: allForks.SignedBeaconBlock; blockBytes: Uint8Array | null}
+ | {type: GossipedInputType.blob; blobSidecar: deneb.BlobSidecar; blobBytes: Uint8Array | null};
+
+type BlockInputCacheType = {
+ block?: allForks.SignedBeaconBlock;
+ blockBytes?: Uint8Array | null;
+ blobsCache: BlobsCache;
+ // promise and its callback cached for delayed resolution
+ availabilityPromise: Promise<BlockInputBlobs>;
+ resolveAvailability: (blobs: BlockInputBlobs) => void;
+};
+
+const MAX_GOSSIPINPUT_CACHE = 5;
+
+/**
+ * SeenGossipBlockInput tracks and caches the live blobs and blocks on the network to solve data availability
+ * for the blockInput. If no block has been seen yet for some already seen blobs, it responds will null, but
+ * on the first block or the consequent blobs it responds with blobs promise till all blobs become available.
+ *
+ * One can start processing block on blobs promise blockInput response and can await on the promise before
+ * fully importing the block. The blobs promise is gets resolved as soon as all blobs corresponding to that
+ * block are seen by SeenGossipBlockInput
+ */
+export class SeenGossipBlockInput {
+ private blockInputCache = new Map<RootHex, BlockInputCacheType>();
+
+ prune(): void {
+ pruneSetToMax(this.blockInputCache, MAX_GOSSIPINPUT_CACHE);
+ }
+
+ getGossipBlockInput(
+ config: ChainForkConfig,
+ gossipedInput: GossipedBlockInput
+ ):
+ | {
+ blockInput: BlockInput;
+ blockInputMeta: {pending: GossipedInputType.blob | null; haveBlobs: number; expectedBlobs: number};
+ }
+ | {blockInput: null; blockInputMeta: {pending: GossipedInputType.block; haveBlobs: number; expectedBlobs: null}} {
+ let blockHex;
+ let blockCache;
+
+ if (gossipedInput.type === GossipedInputType.block) {
+ const {signedBlock, blockBytes} = gossipedInput;
+
+ blockHex = toHexString(
+ config.getForkTypes(signedBlock.message.slot).BeaconBlock.hashTreeRoot(signedBlock.message)
+ );
+ blockCache = this.blockInputCache.get(blockHex) ?? getEmptyBlockInputCacheEntry();
+
+ blockCache.block = signedBlock;
+ blockCache.blockBytes = blockBytes;
+ } else {
+ const {blobSidecar, blobBytes} = gossipedInput;
+ const blockRoot = ssz.phase0.BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message);
+ blockHex = toHexString(blockRoot);
+ blockCache = this.blockInputCache.get(blockHex) ?? getEmptyBlockInputCacheEntry();
+
+ // TODO: freetheblobs - check if it's the same blob or a duplicate and throw/take actions
+ blockCache.blobsCache.set(blobSidecar.index, {
+ blobSidecar,
+ // easily splice out the unsigned message as blob is a fixed length type
+ blobBytes: blobBytes?.slice(0, BLOBSIDECAR_FIXED_SIZE) ?? null,
+ });
+ }
+
+ if (!this.blockInputCache.has(blockHex)) {
+ this.blockInputCache.set(blockHex, blockCache);
+ }
+ const {block: signedBlock, blockBytes, blobsCache, availabilityPromise, resolveAvailability} = blockCache;
+
+ if (signedBlock !== undefined) {
+ // block is available, check if all blobs have shown up
+ const {slot, body} = signedBlock.message;
+ const {blobKzgCommitments} = body as deneb.BeaconBlockBody;
+ const blockInfo = `blockHex=${blockHex}, slot=${slot}`;
+
+ if (blobKzgCommitments.length < blobsCache.size) {
+ throw Error(
+ `Received more blobs=${blobsCache.size} than commitments=${blobKzgCommitments.length} for ${blockInfo}`
+ );
+ }
+
+ if (blobKzgCommitments.length === blobsCache.size) {
+ const allBlobs = getBlockInputBlobs(blobsCache);
+ resolveAvailability(allBlobs);
+ const {blobs, blobsBytes} = allBlobs;
+ return {
+ blockInput: getBlockInput.postDeneb(
+ config,
+ signedBlock,
+ BlockSource.gossip,
+ blobs,
+ blockBytes ?? null,
+ blobsBytes
+ ),
+ blockInputMeta: {pending: null, haveBlobs: blobs.length, expectedBlobs: blobKzgCommitments.length},
+ };
+ } else {
+ return {
+ blockInput: getBlockInput.blobsPromise(
+ config,
+ signedBlock,
+ BlockSource.gossip,
+ blobsCache,
+ blockBytes ?? null,
+ availabilityPromise
+ ),
+ blockInputMeta: {
+ pending: GossipedInputType.blob,
+ haveBlobs: blobsCache.size,
+ expectedBlobs: blobKzgCommitments.length,
+ },
+ };
+ }
+ } else {
+ // will need to wait for the block to showup
+ return {
+ blockInput: null,
+ blockInputMeta: {pending: GossipedInputType.block, haveBlobs: blobsCache.size, expectedBlobs: null},
+ };
+ }
+ }
+}
+
+function getEmptyBlockInputCacheEntry(): BlockInputCacheType {
+ // Capture both the promise and its callbacks.
+ // It is not spec'ed, but in tests on Firefox and NodeJS the promise constructor runs immediately
+ let resolveAvailability: ((blobs: BlockInputBlobs) => void) | null = null;
+ const availabilityPromise = new Promise<BlockInputBlobs>((resolveCB) => {
+ resolveAvailability = resolveCB;
+ });
+ if (resolveAvailability === null) {
+ throw Error("Promise Constructor was not executed immediately");
+ }
+ const blobsCache = new Map();
+ return {availabilityPromise, resolveAvailability, blobsCache};
+}
+
+function getBlockInputBlobs(blobsCache: BlobsCache): BlockInputBlobs {
+ const blobs = [];
+ const blobsBytes = [];
+
+ for (let index = 0; index < blobsCache.size; index++) {
+ const blobCache = blobsCache.get(index);
+ if (blobCache === undefined) {
+ throw Error(`Missing blobSidecar at index=${index}`);
+ }
+ const {blobSidecar, blobBytes} = blobCache;
+ blobs.push(blobSidecar);
+ blobsBytes.push(blobBytes);
+ }
+ return {blobs, blobsBytes};
+}
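A hedged usage sketch of the new cache; the real gossip-handler wiring is elsewhere in this PR, and the inputs are declared here only to keep the snippet self-contained:

```ts
import {ChainForkConfig} from "@lodestar/config";
import {allForks, deneb} from "@lodestar/types";
import {GossipedInputType} from "../blocks/types.js";
import {SeenGossipBlockInput} from "./seenGossipBlockInput.js";

declare const config: ChainForkConfig;
declare const blobSidecar: deneb.BlobSidecar;
declare const signedBlock: allForks.SignedBeaconBlock;

const seenCache = new SeenGossipBlockInput();

// A blob arrives before its block: no BlockInput yet, meta says the block is pending
const blobRes = seenCache.getGossipBlockInput(config, {
  type: GossipedInputType.blob,
  blobSidecar,
  blobBytes: null,
});
// blobRes.blockInput === null, blobRes.blockInputMeta.pending === GossipedInputType.block

// The block arrives: with commitments still unmatched we get a blobs-promise BlockInput;
// processing may start now and await availabilityPromise before the final import
const blockRes = seenCache.getGossipBlockInput(config, {
  type: GossipedInputType.block,
  signedBlock,
  blockBytes: null,
});
```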
diff --git a/packages/beacon-node/src/chain/shufflingCache.ts b/packages/beacon-node/src/chain/shufflingCache.ts
index c8468f3b6db5..23177142d846 100644
--- a/packages/beacon-node/src/chain/shufflingCache.ts
+++ b/packages/beacon-node/src/chain/shufflingCache.ts
@@ -167,6 +167,23 @@ export class ShufflingCache {
}
}
+ /**
+ * Same as the get() function but synchronous.
+ */
+ getSync(shufflingEpoch: Epoch, decisionRootHex: RootHex): EpochShuffling | null {
+ const cacheItem = this.itemsByDecisionRootByEpoch.getOrDefault(shufflingEpoch).get(decisionRootHex);
+ if (cacheItem === undefined) {
+ return null;
+ }
+
+ if (isShufflingCacheItem(cacheItem)) {
+ return cacheItem.shuffling;
+ }
+
+ // ignore promise
+ return null;
+ }
+
private add(shufflingEpoch: Epoch, decisionBlock: RootHex, cacheItem: CacheItem): void {
this.itemsByDecisionRootByEpoch.getOrDefault(shufflingEpoch).set(decisionBlock, cacheItem);
pruneSetToMax(this.itemsByDecisionRootByEpoch, this.maxEpochs);
diff --git a/packages/beacon-node/src/chain/stateCache/datastore/db.ts b/packages/beacon-node/src/chain/stateCache/datastore/db.ts
new file mode 100644
index 000000000000..fef38a7f8dd2
--- /dev/null
+++ b/packages/beacon-node/src/chain/stateCache/datastore/db.ts
@@ -0,0 +1,38 @@
+import {CachedBeaconStateAllForks} from "@lodestar/state-transition";
+import {phase0, ssz} from "@lodestar/types";
+import {IBeaconDb} from "../../../db/interface.js";
+import {CPStateDatastore, DatastoreKey} from "./types.js";
+
+/**
+ * Implementation of CPStateDatastore using db.
+ */
+export class DbCPStateDatastore implements CPStateDatastore {
+ constructor(private readonly db: IBeaconDb) {}
+
+ async write(cpKey: phase0.Checkpoint, state: CachedBeaconStateAllForks): Promise<DatastoreKey> {
+ const serializedCheckpoint = checkpointToDatastoreKey(cpKey);
+ const stateBytes = state.serialize();
+ await this.db.checkpointState.putBinary(serializedCheckpoint, stateBytes);
+ return serializedCheckpoint;
+ }
+
+ async remove(serializedCheckpoint: DatastoreKey): Promise<void> {
+ await this.db.checkpointState.delete(serializedCheckpoint);
+ }
+
+ async read(serializedCheckpoint: DatastoreKey): Promise<Uint8Array | null> {
+ return this.db.checkpointState.getBinary(serializedCheckpoint);
+ }
+
+ async readKeys(): Promise<DatastoreKey[]> {
+ return this.db.checkpointState.keys();
+ }
+}
+
+export function datastoreKeyToCheckpoint(key: DatastoreKey): phase0.Checkpoint {
+ return ssz.phase0.Checkpoint.deserialize(key);
+}
+
+export function checkpointToDatastoreKey(cp: phase0.Checkpoint): DatastoreKey {
+ return ssz.phase0.Checkpoint.serialize(cp);
+}
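A round-trip sketch of the datastore, assuming a wired `IBeaconDb` instance plus a checkpoint and state already in scope:

```ts
const datastore = new DbCPStateDatastore(db);

const key = await datastore.write(checkpoint, state); // key = SSZ-serialized checkpoint
const bytes = await datastore.read(key); // raw state bytes; the cache deserializes on reload
const cp = datastoreKeyToCheckpoint(key); // keys are reversible back into checkpoints
await datastore.remove(key); // e.g. once the epoch is finalized and archived
```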
diff --git a/packages/beacon-node/src/chain/stateCache/datastore/index.ts b/packages/beacon-node/src/chain/stateCache/datastore/index.ts
new file mode 100644
index 000000000000..c37de5292a38
--- /dev/null
+++ b/packages/beacon-node/src/chain/stateCache/datastore/index.ts
@@ -0,0 +1,2 @@
+export * from "./types.js";
+export * from "./db.js";
diff --git a/packages/beacon-node/src/chain/stateCache/datastore/types.ts b/packages/beacon-node/src/chain/stateCache/datastore/types.ts
new file mode 100644
index 000000000000..66ea67f93500
--- /dev/null
+++ b/packages/beacon-node/src/chain/stateCache/datastore/types.ts
@@ -0,0 +1,13 @@
+import {CachedBeaconStateAllForks} from "@lodestar/state-transition";
+import {phase0} from "@lodestar/types";
+
+// With db implementation, persistedKey is serialized data of a checkpoint
+export type DatastoreKey = Uint8Array;
+
+// Make this generic to support testing
+export interface CPStateDatastore {
+ write: (cpKey: phase0.Checkpoint, state: CachedBeaconStateAllForks) => Promise<DatastoreKey>;
+ remove: (key: DatastoreKey) => Promise<void>;
+ read: (key: DatastoreKey) => Promise<Uint8Array | null>;
+ readKeys: () => Promise<DatastoreKey[]>;
+}
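Since the comment above says the interface is generic to support testing, a minimal in-memory implementation might look like the following. This exact class is an illustration, not part of the PR:

```ts
import {toHexString} from "@chainsafe/ssz";
import {CachedBeaconStateAllForks} from "@lodestar/state-transition";
import {phase0} from "@lodestar/types";
import {CPStateDatastore, DatastoreKey} from "./types.js";
import {checkpointToDatastoreKey} from "./db.js";

// Satisfies CPStateDatastore for unit tests without touching the db
export class InMemoryCPStateDatastore implements CPStateDatastore {
  private readonly states = new Map<string, {key: DatastoreKey; bytes: Uint8Array}>();

  async write(cpKey: phase0.Checkpoint, state: CachedBeaconStateAllForks): Promise<DatastoreKey> {
    const key = checkpointToDatastoreKey(cpKey);
    this.states.set(toHexString(key), {key, bytes: state.serialize()});
    return key;
  }

  async remove(key: DatastoreKey): Promise<void> {
    this.states.delete(toHexString(key));
  }

  async read(key: DatastoreKey): Promise<Uint8Array | null> {
    return this.states.get(toHexString(key))?.bytes ?? null;
  }

  async readKeys(): Promise<DatastoreKey[]> {
    return Array.from(this.states.values()).map(({key}) => key);
  }
}
```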
diff --git a/packages/beacon-node/src/chain/stateCache/fifoBlockStateCache.ts b/packages/beacon-node/src/chain/stateCache/fifoBlockStateCache.ts
new file mode 100644
index 000000000000..854983101c04
--- /dev/null
+++ b/packages/beacon-node/src/chain/stateCache/fifoBlockStateCache.ts
@@ -0,0 +1,181 @@
+import {toHexString} from "@chainsafe/ssz";
+import {RootHex} from "@lodestar/types";
+import {CachedBeaconStateAllForks} from "@lodestar/state-transition";
+import {routes} from "@lodestar/api";
+import {Metrics} from "../../metrics/index.js";
+import {LinkedList} from "../../util/array.js";
+import {MapTracker} from "./mapMetrics.js";
+import {BlockStateCache} from "./types.js";
+
+export type FIFOBlockStateCacheOpts = {
+ maxBlockStates?: number;
+};
+
+/**
+ * Regen state if there's a reorg distance > 32 slots.
+ */
+export const DEFAULT_MAX_BLOCK_STATES = 32;
+
+/**
+ * New implementation of BlockStateCache that keeps the most recent n states consistently
+ * - Maintain a linked list (FIFO) with special handling for head state, which is always the first item in the list
+ * - Prune per add() instead of per checkpoint so it only keeps n historical states consistently, prune from tail
+ * - No need to prune per finalized checkpoint
+ *
+ * Given this block tree with Block 11 as head:
+ * ```
+ Block 10
+ |
+ +-----+-----+
+ | |
+ Block 11 Block 12
+ ^ |
+ | |
+ head Block 13
+ * ```
+ * The maintained key order would be: 11 -> 13 -> 12 -> 10, and state 10 will be pruned first.
+ */
+export class FIFOBlockStateCache implements BlockStateCache {
+ /**
+ * Max number of states allowed in the cache
+ */
+ readonly maxStates: number;
+
+ private readonly cache: MapTracker<string, CachedBeaconStateAllForks>;
+ /**
+ * Key order to implement FIFO cache
+ */
+ private readonly keyOrder: LinkedList<string>;
+ private readonly metrics: Metrics["stateCache"] | null | undefined;
+
+ constructor(opts: FIFOBlockStateCacheOpts, {metrics}: {metrics?: Metrics | null}) {
+ this.maxStates = opts.maxBlockStates ?? DEFAULT_MAX_BLOCK_STATES;
+ this.cache = new MapTracker(metrics?.stateCache);
+ if (metrics) {
+ this.metrics = metrics.stateCache;
+ metrics.stateCache.size.addCollect(() => metrics.stateCache.size.set(this.cache.size));
+ }
+ this.keyOrder = new LinkedList();
+ }
+
+ /**
+ * Set a state as head, happens when importing a block and head block is changed.
+ */
+ setHeadState(item: CachedBeaconStateAllForks | null): void {
+ if (item !== null) {
+ this.add(item, true);
+ }
+ }
+
+ /**
+ * Get a state from this cache given a state root hex.
+ */
+ get(rootHex: RootHex): CachedBeaconStateAllForks | null {
+ this.metrics?.lookups.inc();
+ const item = this.cache.get(rootHex);
+ if (!item) {
+ return null;
+ }
+
+ this.metrics?.hits.inc();
+ this.metrics?.stateClonedCount.observe(item.clonedCount);
+
+ return item;
+ }
+
+ /**
+ * Add a state to this cache.
+ * @param isHead if true, move it to the head of the list. Otherwise add to the 2nd position.
+ * In importBlock() steps, normally it'll call add() with isHead = false first. Then call setHeadState() to set the head.
+ */
+ add(item: CachedBeaconStateAllForks, isHead = false): void {
+ const key = toHexString(item.hashTreeRoot());
+ if (this.cache.get(key) != null) {
+ if (!this.keyOrder.has(key)) {
+ throw Error(`State exists but key not found in keyOrder: ${key}`);
+ }
+ if (isHead) {
+ this.keyOrder.moveToHead(key);
+ } else {
+ this.keyOrder.moveToSecond(key);
+ }
+ // same size, no prune
+ return;
+ }
+
+ // new state
+ this.metrics?.adds.inc();
+ this.cache.set(key, item);
+ if (isHead) {
+ this.keyOrder.unshift(key);
+ } else {
+ // insert after head
+ const head = this.keyOrder.first();
+ if (head == null) {
+ // should not happen, however handle just in case
+ this.keyOrder.unshift(key);
+ } else {
+ this.keyOrder.insertAfter(head, key);
+ }
+ }
+ this.prune(key);
+ }
+
+ get size(): number {
+ return this.cache.size;
+ }
+
+ /**
+ * Prune the cache from tail to keep the most recent n states consistently.
+ * The tail of the list is the oldest state; in case regen adds back the same state,
+ * it stays next to head so that it won't be pruned right away.
+ * The FIFO key order makes this cheap to maintain.
+ */
+ prune(lastAddedKey: string): void {
+ while (this.keyOrder.length > this.maxStates) {
+ const key = this.keyOrder.last();
+ // it does not make sense to prune the last added state
+ // this only happens when maxStates is 1, within a short period of time
+ if (key === lastAddedKey) {
+ break;
+ }
+ if (!key) {
+ // should not happen
+ throw new Error("No key");
+ }
+ this.keyOrder.pop();
+ this.cache.delete(key);
+ }
+ }
+
+ /**
+ * Not needed in this implementation; only here to conform to the old API.
+ */
+ deleteAllBeforeEpoch(): void {}
+
+ /**
+ * ONLY FOR DEBUGGING PURPOSES. For lodestar debug API.
+ */
+ clear(): void {
+ this.cache.clear();
+ }
+
+ /** ONLY FOR DEBUGGING PURPOSES. For lodestar debug API */
+ dumpSummary(): routes.lodestar.StateCacheItem[] {
+ return Array.from(this.cache.entries()).map(([key, state]) => ({
+ slot: state.slot,
+ root: toHexString(state.hashTreeRoot()),
+ reads: this.cache.readCount.get(key) ?? 0,
+ lastRead: this.cache.lastRead.get(key) ?? 0,
+ checkpointState: false,
+ }));
+ }
+
+ /**
+ * For unit test only.
+ */
+ dumpKeyOrder(): string[] {
+ return this.keyOrder.toArray();
+ }
+}
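An ordering sketch matching the class doc above. `s10`..`s13` stand for `CachedBeaconStateAllForks` fixtures, and `"10"`..`"13"` abbreviate their root hex (both are assumptions for illustration):

```ts
import {CachedBeaconStateAllForks} from "@lodestar/state-transition";
import {FIFOBlockStateCache} from "./fifoBlockStateCache.js";

declare const s10: CachedBeaconStateAllForks;
declare const s11: CachedBeaconStateAllForks;
declare const s12: CachedBeaconStateAllForks;
declare const s13: CachedBeaconStateAllForks;

const cache = new FIFOBlockStateCache({maxBlockStates: 3}, {});
cache.add(s10); //            keyOrder: 10
cache.add(s11, true); // head keyOrder: 11 -> 10
cache.add(s12); //            keyOrder: 11 -> 12 -> 10 (inserted after head)
cache.add(s13); //            keyOrder: 11 -> 13 -> 12 -> 10, then 10 pruned from tail
cache.dumpKeyOrder(); // ["11", "13", "12"]
```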
diff --git a/packages/beacon-node/src/chain/stateCache/index.ts b/packages/beacon-node/src/chain/stateCache/index.ts
index 69fb34a77e4c..b16d87c3fa0d 100644
--- a/packages/beacon-node/src/chain/stateCache/index.ts
+++ b/packages/beacon-node/src/chain/stateCache/index.ts
@@ -1,2 +1,3 @@
export * from "./stateContextCache.js";
export * from "./stateContextCheckpointsCache.js";
+export * from "./fifoBlockStateCache.js";
diff --git a/packages/beacon-node/src/chain/stateCache/mapMetrics.ts b/packages/beacon-node/src/chain/stateCache/mapMetrics.ts
index eb52755bfc00..bb33323015d4 100644
--- a/packages/beacon-node/src/chain/stateCache/mapMetrics.ts
+++ b/packages/beacon-node/src/chain/stateCache/mapMetrics.ts
@@ -1,8 +1,8 @@
-import {IAvgMinMax} from "../../metrics/index.js";
+import {AvgMinMax} from "@lodestar/utils";
type MapTrackerMetrics = {
- reads: IAvgMinMax;
- secondsSinceLastRead: IAvgMinMax;
+ reads: AvgMinMax;
+ secondsSinceLastRead: AvgMinMax;
};
export class MapTracker<K, V> extends Map<K, V> {
diff --git a/packages/beacon-node/src/chain/stateCache/persistentCheckpointsCache.ts b/packages/beacon-node/src/chain/stateCache/persistentCheckpointsCache.ts
new file mode 100644
index 000000000000..8ad5c5098118
--- /dev/null
+++ b/packages/beacon-node/src/chain/stateCache/persistentCheckpointsCache.ts
@@ -0,0 +1,645 @@
+import {fromHexString, toHexString} from "@chainsafe/ssz";
+import {phase0, Epoch, RootHex} from "@lodestar/types";
+import {CachedBeaconStateAllForks, computeStartSlotAtEpoch, getBlockRootAtSlot} from "@lodestar/state-transition";
+import {Logger, MapDef} from "@lodestar/utils";
+import {routes} from "@lodestar/api";
+import {loadCachedBeaconState} from "@lodestar/state-transition";
+import {Metrics} from "../../metrics/index.js";
+import {IClock} from "../../util/clock.js";
+import {ShufflingCache} from "../shufflingCache.js";
+import {MapTracker} from "./mapMetrics.js";
+import {CheckpointHex, CheckpointStateCache, CacheItemType} from "./types.js";
+import {CPStateDatastore, DatastoreKey, datastoreKeyToCheckpoint} from "./datastore/index.js";
+
+type GetHeadStateFn = () => CachedBeaconStateAllForks;
+
+type PersistentCheckpointStateCacheModules = {
+ metrics?: Metrics | null;
+ logger: Logger;
+ clock?: IClock | null;
+ shufflingCache: ShufflingCache;
+ datastore: CPStateDatastore;
+ getHeadState?: GetHeadStateFn;
+};
+
+type PersistentCheckpointStateCacheOpts = {
+ // Keep max n states in memory, persist the rest to disk
+ maxCPStateEpochsInMemory?: number;
+};
+
+/** checkpoint serialized as a string */
+type CacheKey = string;
+
+type InMemoryCacheItem = {
+ type: CacheItemType.inMemory;
+ state: CachedBeaconStateAllForks;
+ // if a cp state is reloaded from disk, it'll keep track of persistedKey to allow us to remove it from disk later
+ // it also helps not to persist it again
+ persistedKey?: DatastoreKey;
+};
+
+type PersistedCacheItem = {
+ type: CacheItemType.persisted;
+ value: DatastoreKey;
+};
+
+type CacheItem = InMemoryCacheItem | PersistedCacheItem;
+
+type LoadedStateBytesData = {persistedKey: DatastoreKey; stateBytes: Uint8Array};
+
+/**
+ * Before n-historical states, lodestar kept mostly 3 states in memory, with 1 finalized state
+ * Since Jan 2024, lodestar stores the finalized state on disk and keeps up to 2 epochs in memory
+ */
+export const DEFAULT_MAX_CP_STATE_EPOCHS_IN_MEMORY = 2;
+
+/**
+ * An implementation of CheckpointStateCache that keeps up to n epochs of checkpoint states in memory and persists the rest to disk
+ * - If a state is more than `maxEpochsInMemory` epochs old, it is persisted to disk based on the view of the block
+ * - Once the chain finalizes we prune all states from memory and disk for epochs < finalizedEpoch
+ * - In get*() apis, if shouldReload is true it will reload from disk. The reload() api is expensive and should only be called in some important flows:
+ *   - Get state for block processing
+ *   - updateHeadState
+ *   - as with any cache, the state could be evicted from memory at any time, so we should always check if the state is in memory or not
+ * - Each time we process a state, we persist exactly 1 checkpoint state per epoch based on the view of the block and prune all others. The persisted
+ *   checkpoint state could be finalized and used later by the archive task; it's also used to regen states.
+ * - When we process multiple states in the same epoch, we could persist different checkpoint states of the same epoch because each block could have its
+ *   own view. See the unit test of this file `packages/beacon-node/test/unit/chain/stateCache/persistentCheckpointsCache.test.ts` for more details.
+ *
+ * The below diagram shows Previous Root Checkpoint State is persisted for epoch (n-2) and Current Root Checkpoint State is persisted for epoch (n-1)
+ * while at epoch (n) and (n+1) we have both of them in memory
+ *
+ * ╔════════════════════════════════════╗═══════════════╗
+ * ║ persisted to db or fs ║ in memory ║
+ * ║ reload if needed ║ ║
+ * ║ -----------------------------------║---------------║
+ * ║ epoch: (n-2) (n-1) ║ n (n+1) ║
+ * ║ |-------|-------|----║--|-------|----║
+ * ║ ^ ^ ║ ^ ^ ║
+ * ║ ║ ^ ^ ║
+ * ╚════════════════════════════════════╝═══════════════╝
+ *
+ * The "in memory" checkpoint states are similar to the old implementation: we have both Previous Root Checkpoint State and Current Root Checkpoint State per epoch.
+ * However in the "persisted to db or fs" part, we usually only persist 1 checkpoint state per epoch, the one that could potentially be justified/finalized later
+ * based on the view of blocks.
+ */
+export class PersistentCheckpointStateCache implements CheckpointStateCache {
+ private readonly cache: MapTracker;
+ /** Epoch -> Set<RootHex> */
+ private readonly epochIndex = new MapDef<Epoch, Set<RootHex>>(() => new Set<RootHex>());
+ private readonly metrics: Metrics["cpStateCache"] | null | undefined;
+ private readonly logger: Logger;
+ private readonly clock: IClock | null | undefined;
+ private preComputedCheckpoint: string | null = null;
+ private preComputedCheckpointHits: number | null = null;
+ private readonly maxEpochsInMemory: number;
+ private readonly datastore: CPStateDatastore;
+ private readonly shufflingCache: ShufflingCache;
+ private readonly getHeadState?: GetHeadStateFn;
+
+ constructor(
+ {metrics, logger, clock, shufflingCache, datastore, getHeadState}: PersistentCheckpointStateCacheModules,
+ opts: PersistentCheckpointStateCacheOpts
+ ) {
+ this.cache = new MapTracker(metrics?.cpStateCache);
+ if (metrics) {
+ this.metrics = metrics.cpStateCache;
+ metrics.cpStateCache.size.addCollect(() => {
+ let persistCount = 0;
+ let inMemoryCount = 0;
+ const memoryEpochs = new Set<Epoch>();
+ const persistentEpochs = new Set<Epoch>();
+ for (const [key, cacheItem] of this.cache.entries()) {
+ const {epoch} = fromCacheKey(key);
+ if (isPersistedCacheItem(cacheItem)) {
+ persistCount++;
+ persistentEpochs.add(epoch);
+ } else {
+ inMemoryCount++;
+ memoryEpochs.add(epoch);
+ }
+ }
+ metrics.cpStateCache.size.set({type: CacheItemType.persisted}, persistCount);
+ metrics.cpStateCache.size.set({type: CacheItemType.inMemory}, inMemoryCount);
+ metrics.cpStateCache.epochSize.set({type: CacheItemType.persisted}, persistentEpochs.size);
+ metrics.cpStateCache.epochSize.set({type: CacheItemType.inMemory}, memoryEpochs.size);
+ });
+ }
+ this.logger = logger;
+ this.clock = clock;
+ if (opts.maxCPStateEpochsInMemory !== undefined && opts.maxCPStateEpochsInMemory < 0) {
+ throw new Error("maxEpochsInMemory must be >= 0");
+ }
+ this.maxEpochsInMemory = opts.maxCPStateEpochsInMemory ?? DEFAULT_MAX_CP_STATE_EPOCHS_IN_MEMORY;
+ // Specify different datastore for testing
+ this.datastore = datastore;
+ this.shufflingCache = shufflingCache;
+ this.getHeadState = getHeadState;
+ }
+
+ /**
+ * Reload checkpoint state keys from the last run.
+ */
+ async init(): Promise<void> {
+ const persistedKeys = await this.datastore.readKeys();
+ for (const persistedKey of persistedKeys) {
+ const cp = datastoreKeyToCheckpoint(persistedKey);
+ this.cache.set(toCacheKey(cp), {type: CacheItemType.persisted, value: persistedKey});
+ this.epochIndex.getOrDefault(cp.epoch).add(toHexString(cp.root));
+ }
+ this.logger.info("Loaded persisted checkpoint states from the last run", {
+ count: persistedKeys.length,
+ maxEpochsInMemory: this.maxEpochsInMemory,
+ });
+ }
+
+ /**
+ * Get a state from the cache; it may reload from disk.
+ * This is an expensive api and should only be called in some important flows:
+ * - Validate a gossip block
+ * - Get block for processing
+ * - Regen head state
+ */
+ async getOrReload(cp: CheckpointHex): Promise<CachedBeaconStateAllForks | null> {
+ const stateOrStateBytesData = await this.getStateOrLoadDb(cp);
+ if (stateOrStateBytesData === null || isCachedBeaconState(stateOrStateBytesData)) {
+ return stateOrStateBytesData;
+ }
+ const {persistedKey, stateBytes} = stateOrStateBytesData;
+ const logMeta = {persistedKey: toHexString(persistedKey)};
+ this.logger.debug("Reload: read state successful", logMeta);
+ this.metrics?.stateReloadSecFromSlot.observe(this.clock?.secFromSlot(this.clock?.currentSlot ?? 0) ?? 0);
+ const seedState = this.findSeedStateToReload(cp) ?? this.getHeadState?.();
+ if (seedState == null) {
+ throw new Error("No seed state found for cp " + toCacheKey(cp));
+ }
+ this.metrics?.stateReloadEpochDiff.observe(Math.abs(seedState.epochCtx.epoch - cp.epoch));
+ this.logger.debug("Reload: found seed state", {...logMeta, seedSlot: seedState.slot});
+
+ try {
+ const timer = this.metrics?.stateReloadDuration.startTimer();
+ const newCachedState = loadCachedBeaconState(seedState, stateBytes, {
+ shufflingGetter: this.shufflingCache.getSync.bind(this.shufflingCache),
+ });
+ newCachedState.commit();
+ const stateRoot = toHexString(newCachedState.hashTreeRoot());
+ timer?.();
+ this.logger.debug("Reload: cached state load successful", {
+ ...logMeta,
+ stateSlot: newCachedState.slot,
+ stateRoot,
+ seedSlot: seedState.slot,
+ });
+
+ // only remove persisted state once we reload successfully
+ const cpKey = toCacheKey(cp);
+ this.cache.set(cpKey, {type: CacheItemType.inMemory, state: newCachedState, persistedKey});
+ this.epochIndex.getOrDefault(cp.epoch).add(cp.rootHex);
+ // don't prune from memory here, call it at the last 1/3 of slot 0 of an epoch
+ return newCachedState;
+ } catch (e) {
+ this.logger.debug("Reload: error loading cached state", logMeta, e as Error);
+ return null;
+ }
+ }
+
+ /**
+ * Return either state or state bytes loaded from db.
+ */
+ async getStateOrBytes(cp: CheckpointHex): Promise<CachedBeaconStateAllForks | Uint8Array | null> {
+ const stateOrLoadedState = await this.getStateOrLoadDb(cp);
+ if (stateOrLoadedState === null || isCachedBeaconState(stateOrLoadedState)) {
+ return stateOrLoadedState;
+ }
+ return stateOrLoadedState.stateBytes;
+ }
+
+ /**
+ * Return either state or state bytes with persisted key loaded from db.
+ */
+ async getStateOrLoadDb(cp: CheckpointHex): Promise<CachedBeaconStateAllForks | LoadedStateBytesData | null> {
+ const cpKey = toCacheKey(cp);
+ const inMemoryState = this.get(cpKey);
+ if (inMemoryState) {
+ return inMemoryState;
+ }
+
+ const cacheItem = this.cache.get(cpKey);
+ if (cacheItem === undefined) {
+ return null;
+ }
+
+ if (isInMemoryCacheItem(cacheItem)) {
+ // should not happen, in-memory state is handled above
+ throw new Error("Expected persistent key");
+ }
+
+ const persistedKey = cacheItem.value;
+ const dbReadTimer = this.metrics?.stateReloadDbReadTime.startTimer();
+ const stateBytes = await this.datastore.read(persistedKey);
+ dbReadTimer?.();
+
+ if (stateBytes === null) {
+ return null;
+ }
+ return {persistedKey, stateBytes};
+ }
+
+ /**
+ * Similar to get() api without reloading from disk
+ */
+ get(cpOrKey: CheckpointHex | string): CachedBeaconStateAllForks | null {
+ this.metrics?.lookups.inc();
+ const cpKey = typeof cpOrKey === "string" ? cpOrKey : toCacheKey(cpOrKey);
+ const cacheItem = this.cache.get(cpKey);
+
+ if (cacheItem === undefined) {
+ return null;
+ }
+
+ this.metrics?.hits.inc();
+
+ if (cpKey === this.preComputedCheckpoint) {
+ this.preComputedCheckpointHits = (this.preComputedCheckpointHits ?? 0) + 1;
+ }
+
+ if (isInMemoryCacheItem(cacheItem)) {
+ const {state} = cacheItem;
+ this.metrics?.stateClonedCount.observe(state.clonedCount);
+ return state;
+ }
+
+ return null;
+ }
+
+ /**
+ * Add a state of a checkpoint to this cache, prune from memory if necessary.
+ */
+ add(cp: phase0.Checkpoint, state: CachedBeaconStateAllForks): void {
+ const cpHex = toCheckpointHex(cp);
+ const key = toCacheKey(cpHex);
+ const cacheItem = this.cache.get(key);
+ this.metrics?.adds.inc();
+ if (cacheItem !== undefined && isPersistedCacheItem(cacheItem)) {
+ const persistedKey = cacheItem.value;
+ // was persisted to disk, set back to memory
+ this.cache.set(key, {type: CacheItemType.inMemory, state, persistedKey});
+ this.logger.verbose("Added checkpoint state to memory but a persisted key existed", {
+ epoch: cp.epoch,
+ rootHex: cpHex.rootHex,
+ persistedKey: toHexString(persistedKey),
+ });
+ } else {
+ this.cache.set(key, {type: CacheItemType.inMemory, state});
+ this.logger.verbose("Added checkpoint state to memory", {epoch: cp.epoch, rootHex: cpHex.rootHex});
+ }
+ this.epochIndex.getOrDefault(cp.epoch).add(cpHex.rootHex);
+ }
+
+ /**
+ * Searches in-memory states for the latest cached state with `root`, without reloading from disk, starting at `maxEpoch` and descending
+ */
+ getLatest(rootHex: RootHex, maxEpoch: Epoch): CachedBeaconStateAllForks | null {
+ // sort epochs in descending order, only consider epochs lte `epoch`
+ const epochs = Array.from(this.epochIndex.keys())
+ .sort((a, b) => b - a)
+ .filter((e) => e <= maxEpoch);
+ for (const epoch of epochs) {
+ if (this.epochIndex.get(epoch)?.has(rootHex)) {
+ const inMemoryState = this.get({rootHex, epoch});
+ if (inMemoryState) {
+ return inMemoryState;
+ }
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Searches for the latest cached state with `root`, reloading from disk if needed, starting at `maxEpoch` and descending
+ * This is an expensive api and should only be called in some important flows:
+ * - Validate a gossip block
+ * - Get block for processing
+ * - Regen head state
+ */
+ async getOrReloadLatest(rootHex: RootHex, maxEpoch: Epoch): Promise<CachedBeaconStateAllForks | null> {
+ // sort epochs in descending order, only consider epochs lte `epoch`
+ const epochs = Array.from(this.epochIndex.keys())
+ .sort((a, b) => b - a)
+ .filter((e) => e <= maxEpoch);
+ for (const epoch of epochs) {
+ if (this.epochIndex.get(epoch)?.has(rootHex)) {
+ try {
+ const state = await this.getOrReload({rootHex, epoch});
+ if (state) {
+ return state;
+ }
+ } catch (e) {
+ this.logger.debug("Error get or reload state", {epoch, rootHex}, e as Error);
+ }
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Update the precomputed checkpoint and return the number of hits for the
+ * previous one (if any).
+ */
+ updatePreComputedCheckpoint(rootHex: RootHex, epoch: Epoch): number | null {
+ const previousHits = this.preComputedCheckpointHits;
+ this.preComputedCheckpoint = toCacheKey({rootHex, epoch});
+ this.preComputedCheckpointHits = 0;
+ return previousHits;
+ }
+
+ /**
+ * This is just to conform to the old implementation
+ */
+ prune(): void {
+ // do nothing
+ }
+
+ /**
+ * Prune all checkpoint states before the provided finalized epoch.
+ */
+ pruneFinalized(finalizedEpoch: Epoch): void {
+ for (const epoch of this.epochIndex.keys()) {
+ if (epoch < finalizedEpoch) {
+ this.deleteAllEpochItems(epoch).catch((e) =>
+ this.logger.debug("Error delete all epoch items", {epoch, finalizedEpoch}, e as Error)
+ );
+ }
+ }
+ }
+
+ /**
+ * After processing a block, prune from memory based on the view of that block.
+ * This will likely persist 1 state per epoch, at the last 1/3 of slot 0 of an epoch, although it's called at the last 1/3 of every slot.
+ * Given the following block b was processed, with ancestors b2, b1, b0 in epochs (n-2), (n-1), n respectively
+ *
+ * epoch: (n-2) (n-1) n (n+1)
+ * |-----------|-----------|-----------|-----------|
+ * ^ ^ ^ ^
+ * | | | |
+ * block chain: b2---------->b1--------->b0-->b
+ *
+ * After processing block b, if maxEpochsInMemory is:
+ * - 2 then we'll persist {root: b2, epoch n-2} checkpoint state to disk
+ * - 1 then we'll persist {root: b2, epoch n-2} and {root: b1, epoch n-1} checkpoint state to disk
+ * - 0 then we'll persist {root: b2, epoch n-2} and {root: b1, epoch n-1} and {root: b0, epoch n} checkpoint state to disk
+ * - if any old epochs checkpoint states are persisted, no need to do it again
+ *
+ * Note that for each epoch there could be multiple checkpoint states, usually 2, one for Previous Root Checkpoint State and one for Current Root Checkpoint State.
+ * We normally only persist 1 checkpoint state per epoch, the one that could potentially be justified/finalized later based on the view of the block.
+ * Other checkpoint states are pruned from memory.
+ *
+ * This design also covers the reorg scenario. Given block c in the same epoch n where c.slot > b.slot, c is not a descendant of b, and c is built on top of c0
+ * instead of b0 (epoch (n - 1))
+ *
+ * epoch: (n-2) (n-1) n (n+1)
+ * |-----------|-----------|-----------|-----------|
+ * ^ ^ ^ ^ ^ ^
+ * | | | | | |
+ * block chain: b2---------->b1----->c0->b0-->b |
+ * ║ |
+ * ╚═══════════>c (reorg)
+ *
+ * After processing block c, if maxEpochsInMemory is:
+ * - 0 then we'll persist {root: c0, epoch: n} checkpoint state to disk. Note that regen should populate {root: c0, epoch: n} checkpoint state before.
+ *
+ * epoch: (n-1) n (n+1)
+ * |-------------------------------------------------------------|-------------------------------------------------------------|
+ * ^ ^ ^ ^
+ * _______ | | | |
+ * | | | | | |
+ * | db |====== reload ======> {root: b1, epoch: n-1} cp state ======> c0 block state ======> {root: c0, epoch: n} cp state =====> c block state
+ * |_______|
+ *
+ *
+ *
+ * - 1 then we'll persist {root: b1, epoch n-1} checkpoint state to disk. Note that at epoch n there are both {root: b0, epoch: n} and {root: c0, epoch: n} checkpoint states in memory
+ * - 2 then we'll persist {root: b2, epoch n-2} checkpoint state to disk; there are also 2 checkpoint states in memory at epoch n, same as the above (maxEpochsInMemory=1)
+ *
+ * As of Nov 2023, it takes 1.3s to 1.5s to persist a state on holesky on a fast server. TODO:
+ * - improve state serialization time
+ * - or research how to only store diff against the finalized state
+ */
+ async processState(blockRootHex: RootHex, state: CachedBeaconStateAllForks): Promise<number> {
+ let persistCount = 0;
+ // it's important to sort the epochs in ascending order; in case of a big reorg we always want to keep the most recent checkpoint states
+ const sortedEpochs = Array.from(this.epochIndex.keys()).sort((a, b) => a - b);
+ if (sortedEpochs.length <= this.maxEpochsInMemory) {
+ return 0;
+ }
+
+ for (const lowestEpoch of sortedEpochs.slice(0, sortedEpochs.length - this.maxEpochsInMemory)) {
+ const epochBoundarySlot = computeStartSlotAtEpoch(lowestEpoch);
+ const epochBoundaryRoot =
+ epochBoundarySlot === state.slot ? fromHexString(blockRootHex) : getBlockRootAtSlot(state, epochBoundarySlot);
+ const epochBoundaryHex = toHexString(epochBoundaryRoot);
+
+ // for each epoch, usually there are 2 rootHex respective to the 2 checkpoint states: Previous Root Checkpoint State and Current Root Checkpoint State
+ for (const rootHex of this.epochIndex.get(lowestEpoch) ?? []) {
+ const cpKey = toCacheKey({epoch: lowestEpoch, rootHex});
+ const cacheItem = this.cache.get(cpKey);
+
+ if (cacheItem !== undefined && isInMemoryCacheItem(cacheItem)) {
+ // this is state in memory, we don't care if the checkpoint state is already persisted
+ let {persistedKey} = cacheItem;
+ const {state} = cacheItem;
+ const logMeta = {
+ stateSlot: state.slot,
+ rootHex,
+ epochBoundaryHex,
+ persistedKey: persistedKey ? toHexString(persistedKey) : "",
+ };
+
+ if (rootHex === epochBoundaryHex) {
+ if (persistedKey) {
+ // no need to persist
+ this.logger.verbose("Pruned checkpoint state from memory but no need to persist", logMeta);
+ } else {
+ // persist and do not update epochIndex
+ this.metrics?.statePersistSecFromSlot.observe(this.clock?.secFromSlot(this.clock?.currentSlot ?? 0) ?? 0);
+ const timer = this.metrics?.statePersistDuration.startTimer();
+ const cpPersist = {epoch: lowestEpoch, root: epochBoundaryRoot};
+ persistedKey = await this.datastore.write(cpPersist, state);
+ timer?.();
+ persistCount++;
+ this.logger.verbose("Pruned checkpoint state from memory and persisted to disk", {
+ ...logMeta,
+ persistedKey: toHexString(persistedKey),
+ });
+ }
+ // overwrite cpKey, this means the state is deleted from memory
+ this.cache.set(cpKey, {type: CacheItemType.persisted, value: persistedKey});
+ } else {
+ if (persistedKey) {
+ // persisted file will be eventually deleted by the archive task
+ // this also means the state is deleted from memory
+ this.cache.set(cpKey, {type: CacheItemType.persisted, value: persistedKey});
+ // do not update epochIndex
+ } else {
+ // delete the state from memory
+ this.cache.delete(cpKey);
+ this.epochIndex.get(lowestEpoch)?.delete(rootHex);
+ }
+ this.metrics?.statePruneFromMemoryCount.inc();
+ this.logger.verbose("Pruned checkpoint state from memory", logMeta);
+ }
+ }
+ }
+ }
+
+ return persistCount;
+ }
+
+ /**
+ * Find a seed state to reload the state of the provided checkpoint. Based on the design of n-historical states:
+ *
+ * ╔════════════════════════════════════╗═══════════════╗
+ * ║ persisted to db or fs ║ in memory ║
+ * ║ reload if needed ║ ║
+ * ║ -----------------------------------║---------------║
+ * ║ epoch: (n-2) (n-1) ║ n (n+1) ║
+ * ║ |-------|-------|----║--|-------|----║
+ * ║ ^ ^ ║ ^ ^ ║
+ * ║ ║ ^ ^ ║
+ * ╚════════════════════════════════════╝═══════════════╝
+ *
+ * we always reload an epoch in the past. We'll start with epoch n, then (n+1), prioritizing ones with the same view of `reloadedCp`.
+ *
+ * This could return null and we should get head state in that case.
+ */
+ findSeedStateToReload(reloadedCp: CheckpointHex): CachedBeaconStateAllForks | null {
+ const maxEpoch = Math.max(...Array.from(this.epochIndex.keys()));
+ const reloadedCpSlot = computeStartSlotAtEpoch(reloadedCp.epoch);
+ let firstState: CachedBeaconStateAllForks | null = null;
+ // no need to check epochs before `maxEpoch - this.maxEpochsInMemory + 1` because they are all persisted
+ for (let epoch = maxEpoch - this.maxEpochsInMemory + 1; epoch <= maxEpoch; epoch++) {
+ // if there's at least 1 state in memory in an epoch, just return the 1st one
+ if (firstState !== null) {
+ return firstState;
+ }
+
+ for (const rootHex of this.epochIndex.get(epoch) || []) {
+ const cpKey = toCacheKey({rootHex, epoch});
+ const cacheItem = this.cache.get(cpKey);
+ if (cacheItem === undefined) {
+ // should not happen
+ continue;
+ }
+ if (isInMemoryCacheItem(cacheItem)) {
+ const {state} = cacheItem;
+ if (firstState === null) {
+ firstState = state;
+ }
+
+ // amongst states of the same epoch, choose the one with the same view of reloadedCp
+ if (
+ reloadedCpSlot < state.slot &&
+ toHexString(getBlockRootAtSlot(state, reloadedCpSlot)) === reloadedCp.rootHex
+ ) {
+ return state;
+ }
+ }
+ }
+ }
+
+ return firstState;
+ }
+
+ clear(): void {
+ this.cache.clear();
+ this.epochIndex.clear();
+ }
+
+ /** ONLY FOR DEBUGGING PURPOSES. For lodestar debug API */
+ dumpSummary(): routes.lodestar.StateCacheItem[] {
+ return Array.from(this.cache.keys()).map((key) => {
+ const cp = fromCacheKey(key);
+ // TODO: add checkpoint key and persistent key to the summary
+ return {
+ slot: computeStartSlotAtEpoch(cp.epoch),
+ root: cp.rootHex,
+ reads: this.cache.readCount.get(key) ?? 0,
+ lastRead: this.cache.lastRead.get(key) ?? 0,
+ checkpointState: true,
+ };
+ });
+ }
+
+ /** ONLY FOR DEBUGGING PURPOSES. For spec tests on error */
+ dumpCheckpointKeys(): string[] {
+ return Array.from(this.cache.keys());
+ }
+
+ /**
+ * Delete all items of an epoch from disk and memory
+ */
+ private async deleteAllEpochItems(epoch: Epoch): Promise<void> {
+ let persistCount = 0;
+ const rootHexes = this.epochIndex.get(epoch) || [];
+ for (const rootHex of rootHexes) {
+ const key = toCacheKey({rootHex, epoch});
+ const cacheItem = this.cache.get(key);
+
+ if (cacheItem) {
+ const persistedKey = isPersistedCacheItem(cacheItem) ? cacheItem.value : cacheItem.persistedKey;
+ if (persistedKey) {
+ await this.datastore.remove(persistedKey);
+ persistCount++;
+ this.metrics?.persistedStateRemoveCount.inc();
+ }
+ }
+ this.cache.delete(key);
+ }
+ this.epochIndex.delete(epoch);
+ this.logger.verbose("Pruned finalized checkpoints states for epoch", {
+ epoch,
+ persistCount,
+ rootHexes: Array.from(rootHexes).join(","),
+ });
+ }
+}
+
+function toCheckpointHex(checkpoint: phase0.Checkpoint): CheckpointHex {
+ return {
+ epoch: checkpoint.epoch,
+ rootHex: toHexString(checkpoint.root),
+ };
+}
+
+function toCacheKey(cp: CheckpointHex | phase0.Checkpoint): CacheKey {
+ if (isCheckpointHex(cp)) {
+ return `${cp.rootHex}_${cp.epoch}`;
+ }
+ return `${toHexString(cp.root)}_${cp.epoch}`;
+}
+
+function fromCacheKey(key: CacheKey): CheckpointHex {
+ const [rootHex, epoch] = key.split("_");
+ return {
+ rootHex,
+ epoch: Number(epoch),
+ };
+}
+
+function isCachedBeaconState(
+ stateOrBytes: CachedBeaconStateAllForks | LoadedStateBytesData
+): stateOrBytes is CachedBeaconStateAllForks {
+ return (stateOrBytes as CachedBeaconStateAllForks).slot !== undefined;
+}
+
+function isInMemoryCacheItem(cacheItem: CacheItem): cacheItem is InMemoryCacheItem {
+ return cacheItem.type === CacheItemType.inMemory;
+}
+
+function isPersistedCacheItem(cacheItem: CacheItem): cacheItem is PersistedCacheItem {
+ return cacheItem.type === CacheItemType.persisted;
+}
+
+function isCheckpointHex(cp: CheckpointHex | phase0.Checkpoint): cp is CheckpointHex {
+ return (cp as CheckpointHex).rootHex !== undefined;
+}
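The pruning loop earlier in this file reduces to a small decision table: at the lowest in-memory epoch, only the state whose root matches the epoch boundary root is guaranteed a copy on disk, while forked states are simply evicted. A condensed, dependency-free sketch of that decision (the action names here are ours, not Lodestar's):

```ts
// Sketch only: summarizes the persist-or-evict branch above, not actual Lodestar code.
type PruneAction =
  | "replace-with-persisted-ref" // drop from memory, keep pointing at the existing file
  | "persist-then-replace" // write the state to the datastore first, then keep the pointer
  | "delete-entry"; // forked state that was never persisted: drop it entirely

function pruneAction(isEpochBoundaryRoot: boolean, alreadyPersisted: boolean): PruneAction {
  if (isEpochBoundaryRoot) {
    // the canonical state of this epoch must survive on disk for later reloads
    return alreadyPersisted ? "replace-with-persisted-ref" : "persist-then-replace";
  }
  // non-boundary (forked) states are never persisted here; an existing file is kept
  // as a reference and eventually removed by the archive task
  return alreadyPersisted ? "replace-with-persisted-ref" : "delete-entry";
}
```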
diff --git a/packages/beacon-node/src/chain/stateCache/stateContextCache.ts b/packages/beacon-node/src/chain/stateCache/stateContextCache.ts
index 44523abf799c..3a04c4f4a258 100644
--- a/packages/beacon-node/src/chain/stateCache/stateContextCache.ts
+++ b/packages/beacon-node/src/chain/stateCache/stateContextCache.ts
@@ -4,15 +4,16 @@ import {CachedBeaconStateAllForks} from "@lodestar/state-transition";
import {routes} from "@lodestar/api";
import {Metrics} from "../../metrics/index.js";
import {MapTracker} from "./mapMetrics.js";
+import {BlockStateCache} from "./types.js";
const MAX_STATES = 3 * 32;
/**
- * In memory cache of CachedBeaconState
- *
- * Similar API to Repository
+ * Old implementation of StateCache
+ * - Prune per checkpoint so number of states ranges from 96 to 128
+ * - Keep a separate head state to make sure it is always available
*/
-export class StateContextCache {
+export class StateContextCache implements BlockStateCache {
/**
* Max number of states allowed in the cache
*/
diff --git a/packages/beacon-node/src/chain/stateCache/stateContextCheckpointsCache.ts b/packages/beacon-node/src/chain/stateCache/stateContextCheckpointsCache.ts
index 0cb48f0e2ded..a177db9b7c87 100644
--- a/packages/beacon-node/src/chain/stateCache/stateContextCheckpointsCache.ts
+++ b/packages/beacon-node/src/chain/stateCache/stateContextCheckpointsCache.ts
@@ -5,6 +5,7 @@ import {MapDef} from "@lodestar/utils";
import {routes} from "@lodestar/api";
import {Metrics} from "../../metrics/index.js";
import {MapTracker} from "./mapMetrics.js";
+import {CheckpointStateCache as CheckpointStateCacheInterface, CacheItemType} from "./types.js";
export type CheckpointHex = {epoch: Epoch; rootHex: RootHex};
const MAX_EPOCHS = 10;
@@ -14,8 +15,9 @@ const MAX_EPOCHS = 10;
* belonging to checkpoint
*
* Similar API to Repository
+ * TODO: rename to MemoryCheckpointStateCache in the next PR of n-historical states
*/
-export class CheckpointStateCache {
+export class CheckpointStateCache implements CheckpointStateCacheInterface {
private readonly cache: MapTracker<string, CachedBeaconStateAllForks>;
/** Epoch -> Set<hex root> */
private readonly epochIndex = new MapDef<Epoch, Set<string>>(() => new Set());
@@ -27,11 +29,32 @@ export class CheckpointStateCache {
this.cache = new MapTracker(metrics?.cpStateCache);
if (metrics) {
this.metrics = metrics.cpStateCache;
- metrics.cpStateCache.size.addCollect(() => metrics.cpStateCache.size.set(this.cache.size));
- metrics.cpStateCache.epochSize.addCollect(() => metrics.cpStateCache.epochSize.set(this.epochIndex.size));
+ metrics.cpStateCache.size.addCollect(() =>
+ metrics.cpStateCache.size.set({type: CacheItemType.inMemory}, this.cache.size)
+ );
+ metrics.cpStateCache.epochSize.addCollect(() =>
+ metrics.cpStateCache.epochSize.set({type: CacheItemType.inMemory}, this.epochIndex.size)
+ );
}
}
+ async getOrReload(cp: CheckpointHex): Promise<CachedBeaconStateAllForks | null> {
+ return this.get(cp);
+ }
+
+ async getStateOrBytes(cp: CheckpointHex): Promise<CachedBeaconStateAllForks | null> {
+ return this.get(cp);
+ }
+
+ async getOrReloadLatest(rootHex: string, maxEpoch: number): Promise<CachedBeaconStateAllForks | null> {
+ return this.getLatest(rootHex, maxEpoch);
+ }
+
+ async processState(): Promise<number> {
+ // do nothing, this class does not support pruning
+ return 0;
+ }
+
get(cp: CheckpointHex): CachedBeaconStateAllForks | null {
this.metrics?.lookups.inc();
const cpKey = toCheckpointKey(cp);
diff --git a/packages/beacon-node/src/chain/stateCache/types.ts b/packages/beacon-node/src/chain/stateCache/types.ts
new file mode 100644
index 000000000000..5867d7d356c1
--- /dev/null
+++ b/packages/beacon-node/src/chain/stateCache/types.ts
@@ -0,0 +1,73 @@
+import {CachedBeaconStateAllForks} from "@lodestar/state-transition";
+import {Epoch, RootHex, phase0} from "@lodestar/types";
+import {routes} from "@lodestar/api";
+
+export type CheckpointHex = {epoch: Epoch; rootHex: RootHex};
+
+/**
+ * Lodestar currently keeps two state caches around.
+ *
+ * 1. BlockStateCache is keyed by state root, and intended to keep extremely recent states around (e.g. post states from the latest blocks).
+ * These states are most likely to be useful for state transition of new blocks.
+ *
+ * 2. CheckpointStateCache is keyed by checkpoint, and intended to keep states which have just undergone an epoch transition.
+ * These states are useful for gossip verification and for avoiding an epoch transition during state transition of first-in-epoch blocks
+ */
+
+/**
+ * Store up to n recent block states.
+ *
+ * The cache key is state root
+ */
+export interface BlockStateCache {
+ get(rootHex: RootHex): CachedBeaconStateAllForks | null;
+ add(item: CachedBeaconStateAllForks): void;
+ setHeadState(item: CachedBeaconStateAllForks | null): void;
+ clear(): void;
+ size: number;
+ prune(headStateRootHex: RootHex): void;
+ deleteAllBeforeEpoch(finalizedEpoch: Epoch): void;
+ dumpSummary(): routes.lodestar.StateCacheItem[];
+}
+
+/**
+ * Store checkpoint states to preserve epoch transitions; this helps lodestar run exactly one epoch transition per epoch under normal network conditions.
+ *
+ * There are 2 types of checkpoint states:
+ *
+ * - Previous Root Checkpoint State: where root is from the previous epoch; this is added when we prepare for the next slot,
+ * or to validate a gossip block
+ * ```
+ * epoch: (n-2) (n-1) n (n+1)
+ * |-------|-------|-------|-------|
+ * root --------------^
+ * ```
+ *
+ * - Current Root Checkpoint State: this is added when we process the block at slot 0 of epoch n; note that this block could
+ * be skipped, so we don't always have this checkpoint state
+ * ```
+ * epoch: (n-2) (n-1) n (n+1)
+ * |-------|-------|-------|-------|
+ * root -----------------^
+ * ```
+ */
+export interface CheckpointStateCache {
+ init?: () => Promise<void>;
+ getOrReload(cp: CheckpointHex): Promise<CachedBeaconStateAllForks | null>;
+ getStateOrBytes(cp: CheckpointHex): Promise<CachedBeaconStateAllForks | Uint8Array | null>;
+ get(cpOrKey: CheckpointHex | string): CachedBeaconStateAllForks | null;
+ add(cp: phase0.Checkpoint, state: CachedBeaconStateAllForks): void;
+ getLatest(rootHex: RootHex, maxEpoch: Epoch): CachedBeaconStateAllForks | null;
+ getOrReloadLatest(rootHex: RootHex, maxEpoch: Epoch): Promise<CachedBeaconStateAllForks | null>;
+ updatePreComputedCheckpoint(rootHex: RootHex, epoch: Epoch): number | null;
+ prune(finalizedEpoch: Epoch, justifiedEpoch: Epoch): void;
+ pruneFinalized(finalizedEpoch: Epoch): void;
+ processState(blockRootHex: RootHex, state: CachedBeaconStateAllForks): Promise<number>;
+ clear(): void;
+ dumpSummary(): routes.lodestar.StateCacheItem[];
+}
+
+export enum CacheItemType {
+ persisted = "persisted",
+ inMemory = "in-memory",
+}
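The `CacheItemType` enum above is the discriminant that the persistent checkpoint cache earlier in this diff switches on (`isInMemoryCacheItem`, `isPersistedCacheItem`). A minimal self-contained sketch of that union, with field names taken from the code above and the state typed loosely:

```ts
enum CacheItemType {
  persisted = "persisted",
  inMemory = "in-memory",
}

type PersistedKey = Uint8Array;

// Persisted items hold only a datastore key; in-memory items hold the state plus,
// optionally, the key of an earlier persisted copy.
type PersistedCacheItem = {type: CacheItemType.persisted; value: PersistedKey};
type InMemoryCacheItem = {type: CacheItemType.inMemory; state: unknown; persistedKey?: PersistedKey};
type CacheItem = PersistedCacheItem | InMemoryCacheItem;

function isInMemoryCacheItem(item: CacheItem): item is InMemoryCacheItem {
  return item.type === CacheItemType.inMemory;
}
```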
diff --git a/packages/beacon-node/src/chain/validation/attestation.ts b/packages/beacon-node/src/chain/validation/attestation.ts
index 31e105911ab4..eae171631025 100644
--- a/packages/beacon-node/src/chain/validation/attestation.ts
+++ b/packages/beacon-node/src/chain/validation/attestation.ts
@@ -541,7 +541,7 @@ export function verifyHeadBlockAndTargetRoot(
targetRoot: Root,
attestationSlot: Slot,
attestationEpoch: Epoch,
- caller: string,
+ caller: RegenCaller,
maxSkipSlots?: number
): ProtoBlock {
const headBlock = verifyHeadBlockIsKnown(chain, beaconBlockRoot);
diff --git a/packages/beacon-node/src/chain/validation/blobSidecar.ts b/packages/beacon-node/src/chain/validation/blobSidecar.ts
index b5aab323c269..f1ea7bfa95c8 100644
--- a/packages/beacon-node/src/chain/validation/blobSidecar.ts
+++ b/packages/beacon-node/src/chain/validation/blobSidecar.ts
@@ -1,7 +1,7 @@
-import {ChainForkConfig} from "@lodestar/config";
-import {deneb, Root, Slot} from "@lodestar/types";
-import {toHex} from "@lodestar/utils";
-import {getBlobProposerSignatureSet, computeStartSlotAtEpoch} from "@lodestar/state-transition";
+import {deneb, Root, Slot, ssz} from "@lodestar/types";
+import {toHex, verifyMerkleBranch} from "@lodestar/utils";
+import {computeStartSlotAtEpoch, getBlockHeaderProposerSignatureSet} from "@lodestar/state-transition";
+import {KZG_COMMITMENT_INCLUSION_PROOF_DEPTH, KZG_COMMITMENT_SUBTREE_INDEX0} from "@lodestar/params";
import {BlobSidecarGossipError, BlobSidecarErrorCode} from "../errors/blobSidecarError.js";
import {GossipAction} from "../errors/gossipValidation.js";
@@ -11,13 +11,11 @@ import {IBeaconChain} from "../interface.js";
import {RegenCaller} from "../regen/index.js";
export async function validateGossipBlobSidecar(
- config: ChainForkConfig,
chain: IBeaconChain,
- signedBlob: deneb.SignedBlobSidecar,
+ blobSidecar: deneb.BlobSidecar,
gossipIndex: number
): Promise<void> {
- const blobSidecar = signedBlob.message;
- const blobSlot = blobSidecar.slot;
+ const blobSlot = blobSidecar.signedBlockHeader.message.slot;
// [REJECT] The sidecar is for the correct topic -- i.e. sidecar.index matches the topic {index}.
if (blobSidecar.index !== gossipIndex) {
@@ -58,9 +56,10 @@ export async function validateGossipBlobSidecar(
// reboot if the `observed_block_producers` cache is empty. In that case, without this
// check, we will load the parent and state from disk only to find out later that we
// already know this block.
- const blockRoot = toHex(blobSidecar.blockRoot);
- if (chain.forkChoice.getBlockHex(blockRoot) !== null) {
- throw new BlobSidecarGossipError(GossipAction.IGNORE, {code: BlobSidecarErrorCode.ALREADY_KNOWN, root: blockRoot});
+ const blockRoot = ssz.phase0.BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message);
+ const blockHex = toHex(blockRoot);
+ if (chain.forkChoice.getBlockHex(blockHex) !== null) {
+ throw new BlobSidecarGossipError(GossipAction.IGNORE, {code: BlobSidecarErrorCode.ALREADY_KNOWN, root: blockHex});
}
// TODO: freetheblobs - check for badblock
@@ -69,7 +68,7 @@ export async function validateGossipBlobSidecar(
// _[IGNORE]_ The blob's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both
// gossip and non-gossip sources) (a client MAY queue blocks for processing once the parent block is
// retrieved).
- const parentRoot = toHex(blobSidecar.blockParentRoot);
+ const parentRoot = toHex(blobSidecar.signedBlockHeader.message.parentRoot);
const parentBlock = chain.forkChoice.getBlockHex(parentRoot);
if (parentBlock === null) {
// If fork choice does *not* consider the parent to be a descendant of the finalized block,
@@ -97,18 +96,16 @@ export async function validateGossipBlobSidecar(
// getBlockSlotState also checks for whether the current finalized checkpoint is an ancestor of the block.
// As a result, we throw an IGNORE (whereas the spec says we should REJECT for this scenario).
// this is something we should change this in the future to make the code airtight to the spec.
- // _[IGNORE]_ The blob's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both
- // gossip and non-gossip sources) // _[REJECT]_ The blob's block's parent (defined by `sidecar.block_parent_root`) passes validation
- // The above validation will happen while importing
+ // [IGNORE] The block's parent (defined by block.parent_root) has been seen (via both gossip and non-gossip sources) (a client MAY queue blocks for processing once the parent block is retrieved).
+ // [REJECT] The block's parent (defined by block.parent_root) passes validation.
const blockState = await chain.regen
- .getBlockSlotState(parentRoot, blobSlot, {dontTransferCache: true}, RegenCaller.validateGossipBlob)
+ .getBlockSlotState(parentRoot, blobSlot, {dontTransferCache: true}, RegenCaller.validateGossipBlock)
.catch(() => {
throw new BlobSidecarGossipError(GossipAction.IGNORE, {code: BlobSidecarErrorCode.PARENT_UNKNOWN, parentRoot});
});
- // _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid with respect to the
- // `sidecar.proposer_index` pubkey.
- const signatureSet = getBlobProposerSignatureSet(blockState, signedBlob);
+ // [REJECT] The proposer signature, signed_beacon_block.signature, is valid with respect to the proposer_index pubkey.
+ const signatureSet = getBlockHeaderProposerSignatureSet(blockState, blobSidecar.signedBlockHeader);
// Don't batch so verification is not delayed
if (!(await chain.bls.verifySignatureSets([signatureSet], {verifyOnMainThread: true}))) {
throw new BlobSidecarGossipError(GossipAction.REJECT, {
@@ -116,6 +113,15 @@ export async function validateGossipBlobSidecar(
});
}
+ // verify if the blob inclusion proof is correct
+ if (!validateInclusionProof(blobSidecar)) {
+ throw new BlobSidecarGossipError(GossipAction.REJECT, {
+ code: BlobSidecarErrorCode.INCLUSION_PROOF_INVALID,
+ slot: blobSidecar.signedBlockHeader.message.slot,
+ blobIdx: blobSidecar.index,
+ });
+ }
+
// _[IGNORE]_ The sidecar is the only sidecar with valid signature received for the tuple
// `(sidecar.block_root, sidecar.index)`
//
@@ -127,7 +133,7 @@ export async function validateGossipBlobSidecar(
// If the `proposer_index` cannot immediately be verified against the expected shuffling, the sidecar
// MAY be queued for later processing while proposers for the block's branch are calculated -- in such
// a case _do not_ `REJECT`, instead `IGNORE` this message.
- const proposerIndex = blobSidecar.proposerIndex;
+ const proposerIndex = blobSidecar.signedBlockHeader.message.proposerIndex;
if (blockState.epochCtx.getBeaconProposer(blobSlot) !== proposerIndex) {
throw new BlobSidecarGossipError(GossipAction.REJECT, {
code: BlobSidecarErrorCode.INCORRECT_PROPOSER,
@@ -168,16 +174,18 @@ export function validateBlobSidecars(
const proofs = [];
for (let index = 0; index < blobSidecars.length; index++) {
const blobSidecar = blobSidecars[index];
+ const blobBlockHeader = blobSidecar.signedBlockHeader.message;
+ const blobBlockRoot = ssz.phase0.BeaconBlockHeader.hashTreeRoot(blobBlockHeader);
if (
- blobSidecar.slot !== blockSlot ||
- !byteArrayEquals(blobSidecar.blockRoot, blockRoot) ||
+ blobBlockHeader.slot !== blockSlot ||
+ !byteArrayEquals(blobBlockRoot, blockRoot) ||
blobSidecar.index !== index ||
!byteArrayEquals(expectedKzgCommitments[index], blobSidecar.kzgCommitment)
) {
throw new Error(
- `Invalid blob with slot=${blobSidecar.slot} blockRoot=${toHex(blockRoot)} index=${
+ `Invalid blob with slot=${blobBlockHeader.slot} blobBlockRoot=${toHex(blobBlockRoot)} index=${
blobSidecar.index
- } for the block root=${toHex(blockRoot)} slot=${blockSlot} index=${index}`
+ } for the block blockRoot=${toHex(blockRoot)} slot=${blockSlot} index=${index}`
);
}
blobs.push(blobSidecar.blob);
@@ -207,3 +215,13 @@ function validateBlobsAndProofs(
throw Error("Invalid verifyBlobKzgProofBatch");
}
}
+
+function validateInclusionProof(blobSidecar: deneb.BlobSidecar): boolean {
+ return verifyMerkleBranch(
+ ssz.deneb.KZGCommitment.hashTreeRoot(blobSidecar.kzgCommitment),
+ blobSidecar.kzgCommitmentInclusionProof,
+ KZG_COMMITMENT_INCLUSION_PROOF_DEPTH,
+ KZG_COMMITMENT_SUBTREE_INDEX0 + blobSidecar.index,
+ blobSidecar.signedBlockHeader.message.bodyRoot
+ );
+}
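`verifyMerkleBranch` walks the proof from the commitment leaf up to the block body root, choosing hash order at each level from the bits of the leaf's index within the subtree. A minimal sketch of that loop, following the standard SSZ Merkle-proof check with node's sha256 (not the `@lodestar/utils` implementation):

```ts
import {createHash} from "node:crypto";

function sha256Pair(a: Uint8Array, b: Uint8Array): Uint8Array {
  return new Uint8Array(createHash("sha256").update(a).update(b).digest());
}

// `leaf` sits at position `index` within the subtree rooted `depth` levels below `root`
function verifyMerkleBranchSketch(
  leaf: Uint8Array,
  proof: Uint8Array[],
  depth: number,
  index: number,
  root: Uint8Array
): boolean {
  let node = leaf;
  for (let i = 0; i < depth; i++) {
    // bit i of `index` tells whether our node is the right (1) or left (0) child at level i
    node = (index >> i) & 1 ? sha256Pair(proof[i], node) : sha256Pair(node, proof[i]);
  }
  return Buffer.from(node).equals(Buffer.from(root));
}
```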
diff --git a/packages/beacon-node/src/db/beacon.ts b/packages/beacon-node/src/db/beacon.ts
index 58b99f2a37e0..07cc47fa54d8 100644
--- a/packages/beacon-node/src/db/beacon.ts
+++ b/packages/beacon-node/src/db/beacon.ts
@@ -21,6 +21,7 @@ import {
BLSToExecutionChangeRepository,
} from "./repositories/index.js";
import {PreGenesisState, PreGenesisStateLastProcessedBlock} from "./single/index.js";
+import {CheckpointStateRepository} from "./repositories/checkpointState.js";
export type BeaconDbModules = {
config: ChainForkConfig;
@@ -35,6 +36,7 @@ export class BeaconDb implements IBeaconDb {
blobSidecarsArchive: BlobSidecarsArchiveRepository;
stateArchive: StateArchiveRepository;
+ checkpointState: CheckpointStateRepository;
voluntaryExit: VoluntaryExitRepository;
proposerSlashing: ProposerSlashingRepository;
@@ -67,6 +69,7 @@ export class BeaconDb implements IBeaconDb {
this.blobSidecarsArchive = new BlobSidecarsArchiveRepository(config, db);
this.stateArchive = new StateArchiveRepository(config, db);
+ this.checkpointState = new CheckpointStateRepository(config, db);
this.voluntaryExit = new VoluntaryExitRepository(config, db);
this.blsToExecutionChange = new BLSToExecutionChangeRepository(config, db);
this.proposerSlashing = new ProposerSlashingRepository(config, db);
diff --git a/packages/beacon-node/src/db/buckets.ts b/packages/beacon-node/src/db/buckets.ts
index 1a3abfa33623..9dffd0608d52 100644
--- a/packages/beacon-node/src/db/buckets.ts
+++ b/packages/beacon-node/src/db/buckets.ts
@@ -28,6 +28,8 @@ export enum Bucket {
phase0_proposerSlashing = 14, // ValidatorIndex -> ProposerSlashing
phase0_attesterSlashing = 15, // Root -> AttesterSlashing
capella_blsToExecutionChange = 16, // ValidatorIndex -> SignedBLSToExecutionChange
+ // checkpoint states
+ allForks_checkpointState = 17, // Root -> allForks.BeaconState
// allForks_pendingBlock = 25, // Root -> SignedBeaconBlock // DEPRECATED on v0.30.0
phase0_depositEvent = 19, // depositIndex -> DepositEvent
diff --git a/packages/beacon-node/src/db/interface.ts b/packages/beacon-node/src/db/interface.ts
index 58bf25c57aa7..6ffb8992f635 100644
--- a/packages/beacon-node/src/db/interface.ts
+++ b/packages/beacon-node/src/db/interface.ts
@@ -19,6 +19,7 @@ import {
BLSToExecutionChangeRepository,
} from "./repositories/index.js";
import {PreGenesisState, PreGenesisStateLastProcessedBlock} from "./single/index.js";
+import {CheckpointStateRepository} from "./repositories/checkpointState.js";
/**
* The DB service manages the data layer of the beacon chain
@@ -36,6 +37,8 @@ export interface IBeaconDb {
// finalized states
stateArchive: StateArchiveRepository;
+ // checkpoint states
+ checkpointState: CheckpointStateRepository;
// op pool
voluntaryExit: VoluntaryExitRepository;
diff --git a/packages/beacon-node/src/db/repositories/blobSidecars.ts b/packages/beacon-node/src/db/repositories/blobSidecars.ts
index 576a03df9e61..e5750ed31b58 100644
--- a/packages/beacon-node/src/db/repositories/blobSidecars.ts
+++ b/packages/beacon-node/src/db/repositories/blobSidecars.ts
@@ -2,6 +2,7 @@ import {ValueOf, ContainerType} from "@chainsafe/ssz";
import {ChainForkConfig} from "@lodestar/config";
import {Db, Repository} from "@lodestar/db";
import {ssz} from "@lodestar/types";
+
import {Bucket, getBucketNameByValue} from "../buckets.js";
export const blobSidecarsWrapperSsz = new ContainerType(
@@ -14,10 +15,7 @@ export const blobSidecarsWrapperSsz = new ContainerType(
);
export type BlobSidecarsWrapper = ValueOf<typeof blobSidecarsWrapperSsz>;
-
export const BLOB_SIDECARS_IN_WRAPPER_INDEX = 44;
-// ssz.deneb.BlobSidecars.elementType.fixedSize;
-export const BLOBSIDECAR_FIXED_SIZE = 131256;
/**
* blobSidecarsWrapper by block root (= hash_tree_root(SignedBeaconBlock.message))
diff --git a/packages/beacon-node/src/db/repositories/checkpointState.ts b/packages/beacon-node/src/db/repositories/checkpointState.ts
new file mode 100644
index 000000000000..8848f4d26d3a
--- /dev/null
+++ b/packages/beacon-node/src/db/repositories/checkpointState.ts
@@ -0,0 +1,31 @@
+import {ChainForkConfig} from "@lodestar/config";
+import {Db, Repository} from "@lodestar/db";
+import {BeaconStateAllForks} from "@lodestar/state-transition";
+import {ssz} from "@lodestar/types";
+import {Bucket, getBucketNameByValue} from "../buckets.js";
+
+/**
+ * Store temporary checkpoint states.
+ * We should only put/get binary data from this repository; the consumer will load it into an existing state ViewDU object.
+ */
+export class CheckpointStateRepository extends Repository<Uint8Array, BeaconStateAllForks> {
+ constructor(config: ChainForkConfig, db: Db) {
+ // Pick some type, but it won't be used. Cast to any because no type can match `BeaconStateAllForks`
+ // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment, @typescript-eslint/no-explicit-any
+ const type = ssz.phase0.BeaconState as any;
+ const bucket = Bucket.allForks_checkpointState;
+ super(config, db, bucket, type, getBucketNameByValue(bucket));
+ }
+
+ getId(): Uint8Array {
+ throw Error("CheckpointStateRepository does not work with value");
+ }
+
+ encodeValue(): Uint8Array {
+ throw Error("CheckpointStateRepository does not work with value");
+ }
+
+ decodeValue(): BeaconStateAllForks {
+ throw Error("CheckpointStateRepository does not work with value");
+ }
+}
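Because the value codec is deliberately disabled, consumers are expected to stay on the binary path end to end. A hypothetical usage sketch, structurally typed so it assumes nothing of the `Repository` base class beyond a `putBinary`/`getBinary` pair (an assumption on our part about its API):

```ts
// Hypothetical: persist and reload a checkpoint state as raw SSZ bytes only.
type BinaryRepo = {
  putBinary(id: Uint8Array, value: Uint8Array): Promise<void>;
  getBinary(id: Uint8Array): Promise<Uint8Array | null>;
};

async function roundTripCheckpointState(
  repo: BinaryRepo,
  root: Uint8Array,
  stateBytes: Uint8Array
): Promise<Uint8Array | null> {
  await repo.putBinary(root, stateBytes);
  // the caller deserializes these bytes into an existing state ViewDU, not via the repo
  return repo.getBinary(root);
}
```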
diff --git a/packages/beacon-node/src/eth1/provider/eth1Provider.ts b/packages/beacon-node/src/eth1/provider/eth1Provider.ts
index 2d1feeb8d1e7..d594c74a3abc 100644
--- a/packages/beacon-node/src/eth1/provider/eth1Provider.ts
+++ b/packages/beacon-node/src/eth1/provider/eth1Provider.ts
@@ -1,7 +1,7 @@
import {toHexString} from "@chainsafe/ssz";
import {phase0} from "@lodestar/types";
import {ChainConfig} from "@lodestar/config";
-import {fromHex, isErrorAborted, createElapsedTimeTracker} from "@lodestar/utils";
+import {fromHex, isErrorAborted, createElapsedTimeTracker, toSafePrintableUrl} from "@lodestar/utils";
import {Logger} from "@lodestar/logger";
import {FetchError, isFetchError} from "@lodestar/api";
@@ -75,7 +75,6 @@ export class Eth1Provider implements IEth1Provider {
this.depositContractAddress = toHexString(config.DEPOSIT_CONTRACT_ADDRESS);
const providerUrls = opts.providerUrls ?? DEFAULT_PROVIDER_URLS;
- this.logger?.info("Eth1 provider", {urls: providerUrls.toString()});
this.rpc = new JsonRpcHttpClient(providerUrls, {
signal,
// Don't fallback with is truncated error. Throw early and let the retry on this class handle it
@@ -85,6 +84,7 @@ export class Eth1Provider implements IEth1Provider {
jwtVersion: opts.jwtVersion,
metrics: metrics,
});
+ this.logger?.info("Eth1 provider", {urls: providerUrls.map(toSafePrintableUrl).toString()});
this.rpc.emitter.on(JsonRpcHttpClientEvent.RESPONSE, () => {
const oldState = this.state;
diff --git a/packages/beacon-node/src/eth1/provider/jsonRpcHttpClient.ts b/packages/beacon-node/src/eth1/provider/jsonRpcHttpClient.ts
index 3a1b4ddb0ce1..faa4e310e10a 100644
--- a/packages/beacon-node/src/eth1/provider/jsonRpcHttpClient.ts
+++ b/packages/beacon-node/src/eth1/provider/jsonRpcHttpClient.ts
@@ -1,8 +1,7 @@
import {EventEmitter} from "events";
import StrictEventEmitter from "strict-event-emitter-types";
import {fetch} from "@lodestar/api";
-import {ErrorAborted, TimeoutError, isValidHttpUrl, retry} from "@lodestar/utils";
-import {IGauge, IHistogram} from "../../metrics/interface.js";
+import {ErrorAborted, Gauge, Histogram, TimeoutError, isValidHttpUrl, retry} from "@lodestar/utils";
import {IJson, RpcPayload} from "../interface.js";
import {JwtClaim, encodeJwtToken} from "./jwt.js";
@@ -58,13 +57,13 @@ export type ReqOpts = {
};
export type JsonRpcHttpClientMetrics = {
- requestTime: IHistogram<"routeId">;
- streamTime: IHistogram<"routeId">;
- requestErrors: IGauge<"routeId">;
- requestUsedFallbackUrl: IGauge<"routeId">;
- activeRequests: IGauge<"routeId">;
- configUrlsCount: IGauge;
- retryCount: IGauge<"routeId">;
+ requestTime: Histogram<{routeId: string}>;
+ streamTime: Histogram<{routeId: string}>;
+ requestErrors: Gauge<{routeId: string}>;
+ requestUsedFallbackUrl: Gauge<{routeId: string}>;
+ activeRequests: Gauge<{routeId: string}>;
+ configUrlsCount: Gauge;
+ retryCount: Gauge<{routeId: string}>;
};
export interface IJsonRpcHttpClient {
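This hunk is part of a PR-wide migration of metric generics from label-name string unions (the deleted `IGauge<"routeId">` style) to label-object shapes (`Gauge<{routeId: string}>`). A small standalone sketch of why the object form is stricter, using stand-in types rather than the real `@lodestar/utils` definitions:

```ts
// Old style: the generic lists label *names* only; label values stay loosely typed.
type OldGauge<L extends string> = {inc(labels: Record<L, string | number>, value?: number): void};
// New style: the generic carries label names and value types together.
type NewGauge<L extends Record<string, string | number>> = {inc(labels: L, value?: number): void};

declare const oldRetryCount: OldGauge<"routeId">;
declare const newRetryCount: NewGauge<{routeId: string}>;

oldRetryCount.inc({routeId: "eth_getLogs"}); // label name checked, value type loose
newRetryCount.inc({routeId: "eth_getLogs"}); // label name and value type both checked
// newRetryCount.inc({route: "eth_getLogs"}); // would not compile: unknown label name
```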
diff --git a/packages/beacon-node/src/execution/builder/http.ts b/packages/beacon-node/src/execution/builder/http.ts
index 43710bca83e1..c47e8471f199 100644
--- a/packages/beacon-node/src/execution/builder/http.ts
+++ b/packages/beacon-node/src/execution/builder/http.ts
@@ -1,22 +1,18 @@
import {byteArrayEquals, toHexString} from "@chainsafe/ssz";
import {allForks, bellatrix, Slot, Root, BLSPubkey, ssz, deneb, Wei} from "@lodestar/types";
-import {
- parseSignedBlindedBlockOrContents,
- parseExecutionPayloadAndBlobsBundle,
- reconstructFullBlockOrContents,
-} from "@lodestar/state-transition";
+import {parseExecutionPayloadAndBlobsBundle, reconstructFullBlockOrContents} from "@lodestar/state-transition";
import {ChainForkConfig} from "@lodestar/config";
import {Logger} from "@lodestar/logger";
import {getClient, Api as BuilderApi} from "@lodestar/api/builder";
import {SLOTS_PER_EPOCH, ForkExecution} from "@lodestar/params";
-
+import {toSafePrintableUrl} from "@lodestar/utils";
import {ApiError} from "@lodestar/api";
import {Metrics} from "../../metrics/metrics.js";
import {IExecutionBuilder} from "./interface.js";
export type ExecutionBuilderHttpOpts = {
enabled: boolean;
- urls: string[];
+ url: string;
timeout?: number;
faultInspectionWindow?: number;
allowedFaults?: number;
@@ -29,7 +25,7 @@ export type ExecutionBuilderHttpOpts = {
export const defaultExecutionBuilderHttpOpts: ExecutionBuilderHttpOpts = {
enabled: false,
- urls: ["http://localhost:8661"],
+ url: "http://localhost:8661",
timeout: 12000,
};
@@ -48,9 +44,8 @@ export class ExecutionBuilderHttp implements IExecutionBuilder {
metrics: Metrics | null = null,
logger?: Logger
) {
- const baseUrl = opts.urls[0];
+ const baseUrl = opts.url;
if (!baseUrl) throw Error("No Url provided for executionBuilder");
- logger?.info("External builder", {urls: opts.urls.toString()});
this.api = getClient(
{
baseUrl,
@@ -59,6 +54,7 @@ export class ExecutionBuilderHttp implements IExecutionBuilder {
},
{config, metrics: metrics?.builderHttpClient}
);
+ logger?.info("External builder", {url: toSafePrintableUrl(baseUrl)});
this.config = config;
this.issueLocalFcUWithFeeRecipient = opts.issueLocalFcUWithFeeRecipient;
@@ -110,26 +106,23 @@ export class ExecutionBuilderHttp implements IExecutionBuilder {
): Promise<{
header: allForks.ExecutionPayloadHeader;
executionPayloadValue: Wei;
- blindedBlobsBundle?: deneb.BlindedBlobsBundle;
+ blobKzgCommitments?: deneb.BlobKzgCommitments;
}> {
const res = await this.api.getHeader(slot, parentHash, proposerPubKey);
ApiError.assert(res, "execution.builder.getheader");
const {header, value: executionPayloadValue} = res.response.data.message;
- const {blindedBlobsBundle} = res.response.data.message as deneb.BuilderBid;
- return {header, executionPayloadValue, blindedBlobsBundle};
+ const {blobKzgCommitments} = res.response.data.message as deneb.BuilderBid;
+ return {header, executionPayloadValue, blobKzgCommitments};
}
async submitBlindedBlock(
- signedBlindedBlockOrContents: allForks.SignedBlindedBeaconBlockOrContents
+ signedBlindedBlock: allForks.SignedBlindedBeaconBlock
): Promise<allForks.SignedBeaconBlockOrContents> {
- const res = await this.api.submitBlindedBlock(signedBlindedBlockOrContents);
+ const res = await this.api.submitBlindedBlock(signedBlindedBlock);
ApiError.assert(res, "execution.builder.submitBlindedBlock");
const {data} = res.response;
const {executionPayload, blobsBundle} = parseExecutionPayloadAndBlobsBundle(data);
- const {signedBlindedBlock, signedBlindedBlobSidecars} =
- parseSignedBlindedBlockOrContents(signedBlindedBlockOrContents);
-
// some validations for execution payload
const expectedTransactionsRoot = signedBlindedBlock.message.body.executionPayloadHeader.transactionsRoot;
const actualTransactionsRoot = ssz.bellatrix.Transactions.hashTreeRoot(executionPayload.transactions);
@@ -141,7 +134,7 @@ export class ExecutionBuilderHttp implements IExecutionBuilder {
);
}
- const blobs = blobsBundle ? blobsBundle.blobs : null;
- return reconstructFullBlockOrContents({signedBlindedBlock, signedBlindedBlobSidecars}, {executionPayload, blobs});
+ const contents = blobsBundle ? {blobs: blobsBundle.blobs, kzgProofs: blobsBundle.proofs} : null;
+ return reconstructFullBlockOrContents(signedBlindedBlock, {executionPayload, contents});
}
}
diff --git a/packages/beacon-node/src/execution/builder/interface.ts b/packages/beacon-node/src/execution/builder/interface.ts
index e9a2cabb69ef..8754a3616610 100644
--- a/packages/beacon-node/src/execution/builder/interface.ts
+++ b/packages/beacon-node/src/execution/builder/interface.ts
@@ -25,9 +25,7 @@ export interface IExecutionBuilder {
): Promise<{
header: allForks.ExecutionPayloadHeader;
executionPayloadValue: Wei;
- blindedBlobsBundle?: deneb.BlindedBlobsBundle;
+ blobKzgCommitments?: deneb.BlobKzgCommitments;
}>;
- submitBlindedBlock(
- signedBlock: allForks.SignedBlindedBeaconBlockOrContents
- ): Promise<allForks.SignedBeaconBlockOrContents>;
+ submitBlindedBlock(signedBlock: allForks.SignedBlindedBeaconBlock): Promise<allForks.SignedBeaconBlockOrContents>;
}
diff --git a/packages/beacon-node/src/execution/engine/index.ts b/packages/beacon-node/src/execution/engine/index.ts
index 1692ea61cf92..2d92a439c86d 100644
--- a/packages/beacon-node/src/execution/engine/index.ts
+++ b/packages/beacon-node/src/execution/engine/index.ts
@@ -1,4 +1,4 @@
-import {fromHex} from "@lodestar/utils";
+import {fromHex, toSafePrintableUrl} from "@lodestar/utils";
import {JsonRpcHttpClient} from "../../eth1/provider/jsonRpcHttpClient.js";
import {IExecutionEngine} from "./interface.js";
import {ExecutionEngineDisabled} from "./disabled.js";
@@ -31,7 +31,6 @@ export function getExecutionEngineHttp(
opts: ExecutionEngineHttpOpts,
modules: ExecutionEngineModules
): IExecutionEngine {
- modules.logger.info("Execution client", {urls: opts.urls.toString()});
const rpc = new JsonRpcHttpClient(opts.urls, {
...opts,
signal: modules.signal,
@@ -40,6 +39,7 @@ export function getExecutionEngineHttp(
jwtId: opts.jwtId,
jwtVersion: opts.jwtVersion,
});
+ modules.logger.info("Execution client", {urls: opts.urls.map(toSafePrintableUrl).toString()});
return new ExecutionEngineHttp(rpc, modules);
}
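As with the Eth1 provider above, logging moves after client construction and every URL passes through `toSafePrintableUrl`, presumably so that credentials embedded in connection URLs never reach the logs. A minimal sketch of that redaction idea (our assumption about the helper's behavior, not its actual implementation):

```ts
// Assumed behavior: strip userinfo from a URL before it is printed.
function toSafePrintableUrlSketch(urlStr: string): string {
  const url = new URL(urlStr);
  url.username = "";
  url.password = "";
  return url.toString();
}

console.log(toSafePrintableUrlSketch("http://user:secret@localhost:8551"));
// -> "http://localhost:8551/"
```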
diff --git a/packages/beacon-node/src/execution/engine/mock.ts b/packages/beacon-node/src/execution/engine/mock.ts
index 83a5ea3a7ed6..5779713435a5 100644
--- a/packages/beacon-node/src/execution/engine/mock.ts
+++ b/packages/beacon-node/src/execution/engine/mock.ts
@@ -1,5 +1,4 @@
import crypto from "node:crypto";
-import {kzgCommitmentToVersionedHash} from "@lodestar/state-transition";
import {bellatrix, deneb, RootHex, ssz} from "@lodestar/types";
import {fromHex, toHex} from "@lodestar/utils";
import {
@@ -12,6 +11,7 @@ import {
} from "@lodestar/params";
import {ZERO_HASH_HEX} from "../../constants/index.js";
import {ckzg} from "../../util/kzg.js";
+import {kzgCommitmentToVersionedHash} from "../../util/blobs.js";
import {quantityToNum} from "../../eth1/provider/utils.js";
import {
EngineApiRpcParamTypes,
diff --git a/packages/beacon-node/src/metrics/index.ts b/packages/beacon-node/src/metrics/index.ts
index fb2781333d66..a56591a04090 100644
--- a/packages/beacon-node/src/metrics/index.ts
+++ b/packages/beacon-node/src/metrics/index.ts
@@ -1,5 +1,4 @@
export * from "./metrics.js";
export * from "./server/index.js";
-export * from "./interface.js";
export * from "./nodeJsMetrics.js";
export {RegistryMetricCreator} from "./utils/registryMetricCreator.js";
diff --git a/packages/beacon-node/src/metrics/interface.ts b/packages/beacon-node/src/metrics/interface.ts
deleted file mode 100644
index 2e2a267ca13c..000000000000
--- a/packages/beacon-node/src/metrics/interface.ts
+++ /dev/null
@@ -1,14 +0,0 @@
-import {Gauge, Histogram} from "prom-client";
-
-type CollectFn<T extends string> = (metric: IGauge<T>) => void;
-
-export type IGauge<T extends string = string> = Pick<Gauge<T>, "inc" | "dec" | "set"> & {
- addCollect: (collectFn: CollectFn<T>) => void;
-};
-
-export type IHistogram<T extends string = string> = Pick<Histogram<T>, "observe" | "startTimer">;
-
-export type IAvgMinMax = {
- addGetValuesFn(getValuesFn: () => number[]): void;
- set(values: number[]): void;
-};
diff --git a/packages/beacon-node/src/metrics/metrics/beacon.ts b/packages/beacon-node/src/metrics/metrics/beacon.ts
index 8d9094f19a25..9366174ef6c6 100644
--- a/packages/beacon-node/src/metrics/metrics/beacon.ts
+++ b/packages/beacon-node/src/metrics/metrics/beacon.ts
@@ -1,4 +1,6 @@
+import {ProducedBlockSource} from "@lodestar/types";
import {RegistryMetricCreator} from "../utils/registryMetricCreator.js";
+import {BlockProductionStep, PayloadPreparationType} from "../../chain/produceBlock/index.js";
export type BeaconMetrics = ReturnType<typeof createBeaconMetrics>;
@@ -46,7 +48,7 @@ export function createBeaconMetrics(register: RegistryMetricCreator) {
// Additional Metrics
// TODO: Implement
- currentValidators: register.gauge<"status">({
+ currentValidators: register.gauge<{status: string}>({
name: "beacon_current_validators",
labelNames: ["status"],
help: "number of validators in current epoch",
@@ -115,55 +117,35 @@ export function createBeaconMetrics(register: RegistryMetricCreator) {
buckets: [1, 2, 3, 5, 7, 10, 20, 30, 50, 100],
}),
- blockProductionTime: register.histogram<"source">({
+ blockProductionTime: register.histogram<{source: ProducedBlockSource}>({
name: "beacon_block_production_seconds",
help: "Full runtime of block production",
buckets: [0.1, 1, 2, 4, 10],
labelNames: ["source"],
}),
- executionBlockProductionTimeSteps: register.histogram<"step">({
+ executionBlockProductionTimeSteps: register.histogram<{step: BlockProductionStep}>({
name: "beacon_block_production_execution_steps_seconds",
help: "Detailed steps runtime of execution block production",
buckets: [0.01, 0.1, 0.2, 0.5, 1],
- /**
- * - proposerSlashing
- * - attesterSlashings
- * - voluntaryExits
- * - blsToExecutionChanges
- * - attestations
- * - eth1DataAndDeposits
- * - syncAggregate
- * - executionPayload
- */
labelNames: ["step"],
}),
- builderBlockProductionTimeSteps: register.histogram<"step">({
+ builderBlockProductionTimeSteps: register.histogram<{step: BlockProductionStep}>({
name: "beacon_block_production_builder_steps_seconds",
help: "Detailed steps runtime of builder block production",
buckets: [0.01, 0.1, 0.2, 0.5, 1],
- /**
- * - proposerSlashing
- * - attesterSlashings
- * - voluntaryExits
- * - blsToExecutionChanges
- * - attestations
- * - eth1DataAndDeposits
- * - syncAggregate
- * - executionPayload
- */
labelNames: ["step"],
}),
- blockProductionRequests: register.gauge<"source">({
+ blockProductionRequests: register.gauge<{source: ProducedBlockSource}>({
name: "beacon_block_production_requests_total",
help: "Count of all block production requests",
labelNames: ["source"],
}),
- blockProductionSuccess: register.gauge<"source">({
+ blockProductionSuccess: register.gauge<{source: ProducedBlockSource}>({
name: "beacon_block_production_successes_total",
help: "Count of blocks successfully produced",
labelNames: ["source"],
}),
- blockProductionNumAggregated: register.histogram<"source">({
+ blockProductionNumAggregated: register.histogram<{source: ProducedBlockSource}>({
name: "beacon_block_production_num_aggregated_total",
help: "Count of all aggregated attestations in our produced block",
buckets: [32, 64, 96, 128],
@@ -173,34 +155,30 @@ export function createBeaconMetrics(register: RegistryMetricCreator) {
blockProductionCaches: {
producedBlockRoot: register.gauge({
name: "beacon_blockroot_produced_cache_total",
- help: "Count of cached produded block roots",
+ help: "Count of cached produced block roots",
}),
producedBlindedBlockRoot: register.gauge({
name: "beacon_blinded_blockroot_produced_cache_total",
- help: "Count of cached produded blinded block roots",
+ help: "Count of cached produced blinded block roots",
}),
- producedBlobSidecarsCache: register.gauge({
- name: "beacon_blobsidecars_produced_cache_total",
- help: "Count of cached produced blob sidecars",
- }),
- producedBlindedBlobSidecarsCache: register.gauge({
- name: "beacon_blinded_blobsidecars_produced_cache_total",
- help: "Count of cached produced blinded blob sidecars",
+ producedContentsCache: register.gauge({
+ name: "beacon_contents_produced_cache_total",
+ help: "Count of cached produced blob contents",
}),
},
blockPayload: {
payloadAdvancePrepTime: register.histogram({
name: "beacon_block_payload_prepare_time",
- help: "Time for perparing payload in advance",
+ help: "Time for preparing payload in advance",
buckets: [0.1, 1, 3, 5, 10],
}),
- payloadFetchedTime: register.histogram<"prepType">({
+ payloadFetchedTime: register.histogram<{prepType: PayloadPreparationType}>({
name: "beacon_block_payload_fetched_time",
help: "Time to fetch the payload from EL",
labelNames: ["prepType"],
}),
- emptyPayloads: register.gauge<"prepType">({
+ emptyPayloads: register.gauge<{prepType: PayloadPreparationType}>({
name: "beacon_block_payload_empty_total",
help: "Count of payload with empty transactions",
labelNames: ["prepType"],
diff --git a/packages/beacon-node/src/metrics/metrics/lodestar.ts b/packages/beacon-node/src/metrics/metrics/lodestar.ts
index a68fdae0551f..ea2251b3dce5 100644
--- a/packages/beacon-node/src/metrics/metrics/lodestar.ts
+++ b/packages/beacon-node/src/metrics/metrics/lodestar.ts
@@ -1,6 +1,22 @@
+import {EpochTransitionStep, StateCloneSource, StateHashTreeRootSource} from "@lodestar/state-transition";
import {allForks} from "@lodestar/types";
-import {RegistryMetricCreator} from "../utils/registryMetricCreator.js";
+import {BlockSource} from "../../chain/blocks/types.js";
+import {JobQueueItemType} from "../../chain/bls/index.js";
+import {BlockErrorCode} from "../../chain/errors/index.js";
+import {InsertOutcome} from "../../chain/opPools/types.js";
+import {RegenCaller, RegenFnName} from "../../chain/regen/interface.js";
+import {ReprocessStatus} from "../../chain/reprocess.js";
+import {RejectReason} from "../../chain/seenCache/seenAttestationData.js";
+import {ExecutionPayloadStatus} from "../../execution/index.js";
+import {GossipType} from "../../network/index.js";
+import {CannotAcceptWorkReason, ReprocessRejectReason} from "../../network/processor/index.js";
+import {BackfillSyncMethod} from "../../sync/backfill/backfill.js";
+import {PendingBlockType} from "../../sync/interface.js";
+import {PeerSyncType, RangeSyncType} from "../../sync/utils/remoteSyncType.js";
import {LodestarMetadata} from "../options.js";
+import {RegistryMetricCreator} from "../utils/registryMetricCreator.js";
+import {OpSource} from "../validatorMonitor.js";
+import {CacheItemType} from "../../chain/stateCache/types.js";
export type LodestarMetrics = ReturnType<typeof createLodestarMetrics>;
@@ -14,7 +30,7 @@ export function createLodestarMetrics(
anchorState?: Pick<allForks.BeaconState, "genesisTime">
) {
if (metadata) {
- register.static<keyof LodestarMetadata>({
+ register.static<LodestarMetadata>({
name: "lodestar_version",
help: "Lodestar version",
value: metadata,
@@ -33,34 +49,34 @@ export function createLodestarMetrics(
return {
gossipValidationQueue: {
- length: register.gauge<"topic">({
+ length: register.gauge<{topic: GossipType}>({
name: "lodestar_gossip_validation_queue_length",
help: "Count of total gossip validation queue length",
labelNames: ["topic"],
}),
- keySize: register.gauge<"topic">({
+ keySize: register.gauge<{topic: GossipType}>({
name: "lodestar_gossip_validation_queue_key_size",
help: "Count of total gossip validation queue key size",
labelNames: ["topic"],
}),
- droppedJobs: register.gauge<"topic">({
+ droppedJobs: register.gauge<{topic: GossipType}>({
name: "lodestar_gossip_validation_queue_dropped_jobs_total",
help: "Count of total gossip validation queue dropped jobs",
labelNames: ["topic"],
}),
- jobTime: register.histogram<"topic">({
+ jobTime: register.histogram<{topic: GossipType}>({
name: "lodestar_gossip_validation_queue_job_time_seconds",
help: "Time to process gossip validation queue job in seconds",
labelNames: ["topic"],
buckets: [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10],
}),
- jobWaitTime: register.histogram<"topic">({
+ jobWaitTime: register.histogram<{topic: GossipType}>({
name: "lodestar_gossip_validation_queue_job_wait_time_seconds",
help: "Time from job added to the queue to starting the job in seconds",
labelNames: ["topic"],
buckets: [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10],
}),
- concurrency: register.gauge<"topic">({
+ concurrency: register.gauge<{topic: GossipType}>({
name: "lodestar_gossip_validation_queue_concurrency",
help: "Current count of jobs being run on network processor for topic",
labelNames: ["topic"],
@@ -79,22 +95,22 @@ export function createLodestarMetrics(
},
networkProcessor: {
- gossipValidationAccept: register.gauge<"topic">({
+ gossipValidationAccept: register.gauge<{topic: GossipType}>({
name: "lodestar_gossip_validation_accept_total",
help: "Count of total gossip validation accept",
labelNames: ["topic"],
}),
- gossipValidationIgnore: register.gauge<"topic">({
+ gossipValidationIgnore: register.gauge<{topic: GossipType}>({
name: "lodestar_gossip_validation_ignore_total",
help: "Count of total gossip validation ignore",
labelNames: ["topic"],
}),
- gossipValidationReject: register.gauge<"topic">({
+ gossipValidationReject: register.gauge<{topic: GossipType}>({
name: "lodestar_gossip_validation_reject_total",
help: "Count of total gossip validation reject",
labelNames: ["topic"],
}),
- gossipValidationError: register.gauge<"topic" | "error">({
+ gossipValidationError: register.gauge<{topic: GossipType; error: string}>({
name: "lodestar_gossip_validation_error_total",
help: "Count of total gossip validation errors detailed",
labelNames: ["topic", "error"],
@@ -108,7 +124,7 @@ export function createLodestarMetrics(
help: "Total calls to network processor execute work fn",
buckets: [0, 1, 5, 128],
}),
- canNotAcceptWork: register.gauge<"reason">({
+ canNotAcceptWork: register.gauge<{reason: CannotAcceptWorkReason}>({
name: "lodestar_network_processor_can_not_accept_work_total",
help: "Total times network processor can not accept work on executeWork",
labelNames: ["reason"],
@@ -121,7 +137,7 @@ export function createLodestarMetrics(
help: "Current count of pending items in reqRespBridgeReqCaller data structure",
}),
},
- networkWorkerWireEventsOnMainThreadLatency: register.histogram<"eventName">({
+ networkWorkerWireEventsOnMainThreadLatency: register.histogram<{eventName: string}>({
name: "lodestar_network_worker_wire_events_on_main_thread_latency_seconds",
help: "Latency in seconds to transmit network events to main thread across worker port",
labelNames: ["eventName"],
@@ -206,19 +222,19 @@ export function createLodestarMetrics(
},
apiRest: {
- responseTime: register.histogram<"operationId">({
+ responseTime: register.histogram<{operationId: string}>({
name: "lodestar_api_rest_response_time_seconds",
help: "REST API time to fulfill a request by operationId",
labelNames: ["operationId"],
// Request times range between 1ms to 100ms in normal conditions. Can get to 1-5 seconds if overloaded
buckets: [0.01, 0.1, 1],
}),
- requests: register.gauge<"operationId">({
+ requests: register.gauge<{operationId: string}>({
name: "lodestar_api_rest_requests_total",
help: "REST API total count requests by operationId",
labelNames: ["operationId"],
}),
- errors: register.gauge<"operationId">({
+ errors: register.gauge<{operationId: string}>({
name: "lodestar_api_rest_errors_total",
help: "REST API total count of errors by operationId",
labelNames: ["operationId"],
@@ -286,6 +302,12 @@ export function createLodestarMetrics(
help: "Time to call commit after process a single epoch transition in seconds",
buckets: [0.01, 0.05, 0.1, 0.2, 0.5, 0.75, 1],
}),
+ epochTransitionStepTime: register.histogram<{step: EpochTransitionStep}>({
+ name: "lodestar_stfn_epoch_transition_step_seconds",
+ help: "Time to call each step of epoch transition in seconds",
+ labelNames: ["step"],
+ buckets: [0.01, 0.05, 0.1, 0.2, 0.5, 0.75, 1],
+ }),
processBlockTime: register.histogram({
name: "lodestar_stfn_process_block_seconds",
help: "Time to process a single block in seconds",
@@ -298,27 +320,28 @@ export function createLodestarMetrics(
help: "Time to call commit after process a single block in seconds",
buckets: [0.005, 0.01, 0.02, 0.05, 0.1, 1],
}),
- stateHashTreeRootTime: register.histogram({
+ stateHashTreeRootTime: register.histogram<{source: StateHashTreeRootSource}>({
name: "lodestar_stfn_hash_tree_root_seconds",
help: "Time to compute the hash tree root of a post state in seconds",
- buckets: [0.005, 0.01, 0.02, 0.05, 0.1, 1],
+ buckets: [0.05, 0.1, 0.2, 0.5, 1, 1.5],
+ labelNames: ["source"],
}),
- preStateBalancesNodesPopulatedMiss: register.gauge<"source">({
+ preStateBalancesNodesPopulatedMiss: register.gauge<{source: StateCloneSource}>({
name: "lodestar_stfn_balances_nodes_populated_miss_total",
help: "Total count state.balances nodesPopulated is false on stfn",
labelNames: ["source"],
}),
- preStateBalancesNodesPopulatedHit: register.gauge<"source">({
+ preStateBalancesNodesPopulatedHit: register.gauge<{source: StateCloneSource}>({
name: "lodestar_stfn_balances_nodes_populated_hit_total",
help: "Total count state.balances nodesPopulated is true on stfn",
labelNames: ["source"],
}),
- preStateValidatorsNodesPopulatedMiss: register.gauge<"source">({
+ preStateValidatorsNodesPopulatedMiss: register.gauge<{source: StateCloneSource}>({
name: "lodestar_stfn_validators_nodes_populated_miss_total",
help: "Total count state.validators nodesPopulated is false on stfn",
labelNames: ["source"],
}),
- preStateValidatorsNodesPopulatedHit: register.gauge<"source">({
+ preStateValidatorsNodesPopulatedHit: register.gauge<{source: StateCloneSource}>({
name: "lodestar_stfn_validators_nodes_populated_hit_total",
help: "Total count state.validators nodesPopulated is true on stfn",
labelNames: ["source"],
@@ -355,7 +378,7 @@ export function createLodestarMetrics(
},
blsThreadPool: {
- jobsWorkerTime: register.gauge<"workerId">({
+ jobsWorkerTime: register.gauge<{workerId: number}>({
name: "lodestar_bls_thread_pool_time_seconds_sum",
help: "Total time spent verifying signature sets measured on the worker",
labelNames: ["workerId"],
@@ -364,7 +387,7 @@ export function createLodestarMetrics(
name: "lodestar_bls_thread_pool_success_jobs_signature_sets_count",
help: "Count of total verified signature sets",
}),
- errorAggregateSignatureSetsCount: register.gauge<"type">({
+ errorAggregateSignatureSetsCount: register.gauge<{type: JobQueueItemType}>({
name: "lodestar_bls_thread_pool_error_aggregate_signature_sets_count",
help: "Count of error when aggregating pubkeys or signatures",
labelNames: ["type"],
@@ -390,12 +413,12 @@ export function createLodestarMetrics(
name: "lodestar_bls_thread_pool_job_groups_started_total",
help: "Count of total jobs groups started in bls thread pool, job groups include +1 jobs",
}),
- totalJobsStarted: register.gauge<"type">({
+ totalJobsStarted: register.gauge<{type: JobQueueItemType}>({
name: "lodestar_bls_thread_pool_jobs_started_total",
help: "Count of total jobs started in bls thread pool, jobs include +1 signature sets",
labelNames: ["type"],
}),
- totalSigSetsStarted: register.gauge<"type">({
+ totalSigSetsStarted: register.gauge<{type: JobQueueItemType}>({
name: "lodestar_bls_thread_pool_sig_sets_started_total",
help: "Count of total signature sets started in bls thread pool, sig sets include 1 pk, msg, sig",
labelNames: ["type"],
@@ -453,9 +476,15 @@ export function createLodestarMetrics(
name: "lodestar_bls_thread_pool_batchable_sig_sets_total",
help: "Count of total batchable signature sets",
}),
- signatureDeserializationMainThreadDuration: register.gauge({
+ signatureDeserializationMainThreadDuration: register.histogram({
name: "lodestar_bls_thread_pool_signature_deserialization_main_thread_time_seconds",
help: "Total time spent deserializing signatures on main thread",
+ buckets: [0.001, 0.005, 0.01, 0.1],
+ }),
+ pubkeysAggregationMainThreadDuration: register.histogram({
+ name: "lodestar_bls_thread_pool_pubkeys_aggregation_main_thread_time_seconds",
+ help: "Total time spent aggregating pubkeys on main thread",
+ buckets: [0.001, 0.005, 0.01, 0.1],
}),
},
@@ -480,29 +509,29 @@ export function createLodestarMetrics(
name: "lodestar_sync_status",
help: "Range sync status: [Stalled, SyncingFinalized, SyncingHead, Synced]",
}),
- syncPeersBySyncType: register.gauge<"syncType">({
+ syncPeersBySyncType: register.gauge<{syncType: PeerSyncType}>({
name: "lodestar_sync_range_sync_peers",
help: "Count of peers by sync type [FullySynced, Advanced, Behind]",
labelNames: ["syncType"],
}),
- syncSwitchGossipSubscriptions: register.gauge<"action">({
+ syncSwitchGossipSubscriptions: register.gauge<{action: string}>({
name: "lodestar_sync_switch_gossip_subscriptions",
help: "Sync switched gossip subscriptions on/off",
labelNames: ["action"],
}),
syncRange: {
- syncChainsEvents: register.gauge<"syncType" | "event">({
+ syncChainsEvents: register.gauge<{syncType: RangeSyncType; event: string}>({
name: "lodestar_sync_chains_events_total",
help: "Total number of sync chains events events, labeled by syncType",
labelNames: ["syncType", "event"],
}),
- syncChains: register.gauge<"syncType">({
+ syncChains: register.gauge<{syncType: RangeSyncType}>({
name: "lodestar_sync_chains_count",
help: "Count of sync chains by syncType",
labelNames: ["syncType"],
}),
- syncChainsPeers: register.histogram<"syncType">({
+ syncChainsPeers: register.histogram<{syncType: RangeSyncType}>({
name: "lodestar_sync_chains_peer_count_by_type",
help: "Count of sync chain peers by syncType",
labelNames: ["syncType"],
@@ -515,12 +544,12 @@ export function createLodestarMetrics(
},
syncUnknownBlock: {
- switchNetworkSubscriptions: register.gauge<"action">({
+ switchNetworkSubscriptions: register.gauge<{action: string}>({
name: "lodestar_sync_unknown_block_network_subscriptions_count",
help: "Switch network subscriptions on/off",
labelNames: ["action"],
}),
- requests: register.gauge<"type">({
+ requests: register.gauge<{type: PendingBlockType}>({
name: "lodestar_sync_unknown_block_requests_total",
help: "Total number of unknown block events or requests",
labelNames: ["type"],
@@ -574,43 +603,43 @@ export function createLodestarMetrics(
// Gossip attestation
gossipAttestation: {
- useHeadBlockState: register.gauge<"caller">({
+ useHeadBlockState: register.gauge<{caller: RegenCaller}>({
name: "lodestar_gossip_attestation_use_head_block_state_count",
help: "Count of gossip attestation verification using head block state",
labelNames: ["caller"],
}),
- useHeadBlockStateDialedToTargetEpoch: register.gauge<"caller">({
+ useHeadBlockStateDialedToTargetEpoch: register.gauge<{caller: RegenCaller}>({
name: "lodestar_gossip_attestation_use_head_block_state_dialed_to_target_epoch_count",
help: "Count of gossip attestation verification using head block state and dialed to target epoch",
labelNames: ["caller"],
}),
- headSlotToAttestationSlot: register.histogram<"caller">({
+ headSlotToAttestationSlot: register.histogram<{caller: RegenCaller}>({
name: "lodestar_gossip_attestation_head_slot_to_attestation_slot",
help: "Slot distance between attestation slot and head slot",
labelNames: ["caller"],
buckets: [0, 1, 2, 4, 8, 16, 32, 64],
}),
- shufflingCacheHit: register.gauge<"caller">({
+ shufflingCacheHit: register.gauge<{caller: RegenCaller}>({
name: "lodestar_gossip_attestation_shuffling_cache_hit_count",
help: "Count of gossip attestation verification shuffling cache hit",
labelNames: ["caller"],
}),
- shufflingCacheMiss: register.gauge<"caller">({
+ shufflingCacheMiss: register.gauge<{caller: RegenCaller}>({
name: "lodestar_gossip_attestation_shuffling_cache_miss_count",
help: "Count of gossip attestation verification shuffling cache miss",
labelNames: ["caller"],
}),
- shufflingCacheRegenHit: register.gauge<"caller">({
+ shufflingCacheRegenHit: register.gauge<{caller: RegenCaller}>({
name: "lodestar_gossip_attestation_shuffling_cache_regen_hit_count",
help: "Count of gossip attestation verification shuffling cache regen hit",
labelNames: ["caller"],
}),
- shufflingCacheRegenMiss: register.gauge<"caller">({
+ shufflingCacheRegenMiss: register.gauge<{caller: RegenCaller}>({
name: "lodestar_gossip_attestation_shuffling_cache_regen_miss_count",
help: "Count of gossip attestation verification shuffling cache regen miss",
labelNames: ["caller"],
}),
- attestationSlotToClockSlot: register.histogram<"caller">({
+ attestationSlotToClockSlot: register.histogram<{caller: RegenCaller}>({
name: "lodestar_gossip_attestation_attestation_slot_to_clock_slot",
help: "Slot distance between clock slot and attestation slot",
labelNames: ["caller"],
@@ -642,29 +671,46 @@ export function createLodestarMetrics(
receivedToGossipValidate: register.histogram({
name: "lodestar_gossip_block_received_to_gossip_validate",
help: "Time elapsed between block received and block validated",
- buckets: [0.05, 0.1, 0.2, 0.5, 1, 1.5, 2, 4],
+ buckets: [0.05, 0.1, 0.3, 0.5, 0.7, 1, 1.3, 1.6, 2, 2.5, 3, 3.5, 4],
}),
receivedToStateTransition: register.histogram({
name: "lodestar_gossip_block_received_to_state_transition",
help: "Time elapsed between block received and block state transition",
- buckets: [0.05, 0.1, 0.2, 0.5, 1, 1.5, 2, 4],
+ buckets: [0.05, 0.1, 0.3, 0.5, 0.7, 1, 1.3, 1.6, 2, 2.5, 3, 3.5, 4],
}),
receivedToSignaturesVerification: register.histogram({
name: "lodestar_gossip_block_received_to_signatures_verification",
help: "Time elapsed between block received and block signatures verification",
- buckets: [0.05, 0.1, 0.2, 0.5, 1, 1.5, 2, 4],
+ buckets: [0.05, 0.1, 0.3, 0.5, 0.7, 1, 1.3, 1.6, 2, 2.5, 3, 3.5, 4],
}),
receivedToExecutionPayloadVerification: register.histogram({
name: "lodestar_gossip_block_received_to_execution_payload_verification",
help: "Time elapsed between block received and execution payload verification",
- buckets: [0.05, 0.1, 0.2, 0.5, 1, 1.5, 2, 4],
+ buckets: [0.05, 0.1, 0.3, 0.5, 0.7, 1, 1.3, 1.6, 2, 2.5, 3, 3.5, 4],
+ }),
+ receivedToBlobsAvailabilityTime: register.histogram<{numBlobs: number}>({
+ name: "lodestar_gossip_block_received_to_blobs_availability_time",
+ help: "Time elapsed between block received and blobs became available",
+ buckets: [0.05, 0.1, 0.3, 0.5, 0.7, 1, 1.3, 1.6, 2, 2.5, 3, 3.5, 4],
+ labelNames: ["numBlobs"],
+ }),
+ receivedToFullyVerifiedTime: register.histogram({
+ name: "lodestar_gossip_block_received_to_fully_verified_time",
+ help: "Time elapsed between block received and fully verified state, signatures and payload",
+ buckets: [0.05, 0.1, 0.3, 0.5, 0.7, 1, 1.3, 1.6, 2, 2.5, 3, 3.5, 4],
+ }),
+ verifiedToBlobsAvailabiltyTime: register.histogram<{numBlobs: number}>({
+ name: "lodestar_gossip_block_verified_to_blobs_availability_time",
+ help: "Time elapsed between block verified and blobs became available",
+ buckets: [0.05, 0.1, 0.3, 0.5, 0.7, 1, 1.3, 1.6, 2, 2.5, 3, 3.5, 4],
+ labelNames: ["numBlobs"],
}),
receivedToBlockImport: register.histogram({
name: "lodestar_gossip_block_received_to_block_import",
help: "Time elapsed between block received and block import",
- buckets: [0.05, 0.1, 0.2, 0.5, 1, 1.5, 2, 4],
+ buckets: [0.05, 0.1, 0.3, 0.5, 0.7, 1, 1.3, 1.6, 2, 2.5, 3, 3.5, 4],
}),
- processBlockErrors: register.gauge<"error">({
+ processBlockErrors: register.gauge<{error: BlockErrorCode | "NOT_BLOCK_ERROR"}>({
name: "lodestar_gossip_block_process_block_errors",
help: "Count of errors, by error type, while processing blocks",
labelNames: ["error"],
@@ -695,13 +741,13 @@ export function createLodestarMetrics(
name: "lodestar_import_block_set_head_after_first_interval_total",
help: "Total times an imported block is set as head after the first slot interval",
}),
- bySource: register.gauge<"source">({
+ bySource: register.gauge<{source: BlockSource}>({
name: "lodestar_import_block_by_source_total",
help: "Total number of imported blocks by source",
labelNames: ["source"],
}),
},
- engineNotifyNewPayloadResult: register.gauge<"result">({
+ engineNotifyNewPayloadResult: register.gauge<{result: ExecutionPayloadStatus}>({
name: "lodestar_execution_engine_notify_new_payload_result_total",
help: "The total result of calling notifyNewPayload execution engine api",
labelNames: ["result"],
@@ -715,7 +761,7 @@ export function createLodestarMetrics(
name: "lodestar_backfill_prev_fin_or_ws_slot",
help: "Slot of previous finalized or wsCheckpoint block to be validated",
}),
- totalBlocks: register.gauge<"method">({
+ totalBlocks: register.gauge<{method: BackfillSyncMethod}>({
name: "lodestar_backfill_sync_blocks_total",
help: "Total amount of backfilled blocks",
labelNames: ["method"],
@@ -746,7 +792,7 @@ export function createLodestarMetrics(
name: "lodestar_oppool_attestation_pool_size",
help: "Current size of the AttestationPool = total attestations unique by data and slot",
}),
- attestationPoolInsertOutcome: register.counter<"insertOutcome">({
+ attestationPoolInsertOutcome: register.counter<{insertOutcome: InsertOutcome}>({
name: "lodestar_attestation_pool_insert_outcome_total",
help: "Total number of InsertOutcome as a result of adding an attestation in a pool",
labelNames: ["insertOutcome"],
@@ -771,7 +817,7 @@ export function createLodestarMetrics(
name: "lodestar_oppool_sync_committee_message_pool_size",
help: "Current size of the SyncCommitteeMessagePool unique by slot subnet and block root",
}),
- syncCommitteeMessagePoolInsertOutcome: register.counter<"insertOutcome">({
+ syncCommitteeMessagePoolInsertOutcome: register.counter<{insertOutcome: InsertOutcome}>({
name: "lodestar_oppool_sync_committee_message_insert_outcome_total",
help: "Total number of InsertOutcome as a result of adding a SyncCommitteeMessage to pool",
labelNames: ["insertOutcome"],
@@ -797,7 +843,7 @@ export function createLodestarMetrics(
// Validator Monitor Metrics (per-epoch summaries)
// Only track prevEpochOnChainBalance per index
- prevEpochOnChainBalance: register.gauge<"index">({
+ prevEpochOnChainBalance: register.gauge<{index: number}>({
name: "validator_monitor_prev_epoch_on_chain_balance",
help: "Balance of validator after an epoch",
labelNames: ["index"],
@@ -906,12 +952,12 @@ export function createLodestarMetrics(
help: "The count of times a sync signature was seen inside an aggregate",
buckets: [0, 1, 2, 3, 5, 10],
}),
- prevEpochAttestationSummary: register.gauge<"summary">({
+ prevEpochAttestationSummary: register.gauge<{summary: string}>({
name: "validator_monitor_prev_epoch_attestation_summary",
help: "Best guess of the node of the result of previous epoch validators attestation actions and causality",
labelNames: ["summary"],
}),
- prevEpochBlockProposalSummary: register.gauge<"summary">({
+ prevEpochBlockProposalSummary: register.gauge<{summary: string}>({
name: "validator_monitor_prev_epoch_block_proposal_summary",
help: "Best guess of the node of the result of previous epoch validators block proposal actions and causality",
labelNames: ["summary"],
@@ -919,12 +965,12 @@ export function createLodestarMetrics(
// Validator Monitor Metrics (real-time)
- unaggregatedAttestationTotal: register.gauge<"src">({
+ unaggregatedAttestationTotal: register.gauge<{src: OpSource}>({
name: "validator_monitor_unaggregated_attestation_total",
help: "Number of unaggregated attestations seen",
labelNames: ["src"],
}),
- unaggregatedAttestationDelaySeconds: register.histogram<"src">({
+ unaggregatedAttestationDelaySeconds: register.histogram<{src: OpSource}>({
name: "validator_monitor_unaggregated_attestation_delay_seconds",
help: "The delay between when the validator should send the attestation and when it was received",
labelNames: ["src"],
@@ -938,23 +984,23 @@ export function createLodestarMetrics(
// refine if we want more reasonable values
buckets: [0, 10, 20, 30],
}),
- aggregatedAttestationTotal: register.gauge<"src">({
+ aggregatedAttestationTotal: register.gauge<{src: OpSource}>({
name: "validator_monitor_aggregated_attestation_total",
help: "Number of aggregated attestations seen",
labelNames: ["src"],
}),
- aggregatedAttestationDelaySeconds: register.histogram<"src">({
+ aggregatedAttestationDelaySeconds: register.histogram<{src: OpSource}>({
name: "validator_monitor_aggregated_attestation_delay_seconds",
help: "The delay between then the validator should send the aggregate and when it was received",
labelNames: ["src"],
buckets: [0.1, 0.25, 0.5, 1, 2, 5, 10],
}),
- attestationInAggregateTotal: register.gauge<"src">({
+ attestationInAggregateTotal: register.gauge<{src: OpSource}>({
name: "validator_monitor_attestation_in_aggregate_total",
help: "Number of times an attestation has been seen in an aggregate",
labelNames: ["src"],
}),
- attestationInAggregateDelaySeconds: register.histogram<"src">({
+ attestationInAggregateDelaySeconds: register.histogram<{src: OpSource}>({
name: "validator_monitor_attestation_in_aggregate_delay_seconds",
help: "The delay between when the validator should send the aggregate and when it was received",
labelNames: ["src"],
@@ -978,12 +1024,12 @@ export function createLodestarMetrics(
name: "validator_monitor_sync_signature_in_aggregate_total",
help: "Number of times a sync signature has been seen in an aggregate",
}),
- beaconBlockTotal: register.gauge<"src">({
+ beaconBlockTotal: register.gauge<{src: OpSource}>({
name: "validator_monitor_beacon_block_total",
help: "Total number of beacon blocks seen",
labelNames: ["src"],
}),
- beaconBlockDelaySeconds: register.histogram<"src">({
+ beaconBlockDelaySeconds: register.histogram<{src: OpSource}>({
name: "validator_monitor_beacon_block_delay_seconds",
help: "The delay between when the validator should send the block and when it was received",
labelNames: ["src"],
@@ -1053,13 +1099,15 @@ export function createLodestarMetrics(
name: "lodestar_cp_state_cache_adds_total",
help: "Total number of items added in checkpoint state cache",
}),
- size: register.gauge({
+ size: register.gauge<{type: CacheItemType}>({
name: "lodestar_cp_state_cache_size",
help: "Checkpoint state cache size",
+ labelNames: ["type"],
}),
- epochSize: register.gauge({
+ epochSize: register.gauge<{type: CacheItemType}>({
name: "lodestar_cp_state_epoch_size",
help: "Checkpoint state cache size",
+ labelNames: ["type"],
}),
reads: register.avgMinMax({
name: "lodestar_cp_state_epoch_reads",
@@ -1074,6 +1122,44 @@ export function createLodestarMetrics(
help: "Histogram of cloned count per state every time state.clone() is called",
buckets: [1, 2, 5, 10, 50, 250],
}),
+ statePersistDuration: register.histogram({
+ name: "lodestar_cp_state_cache_state_persist_seconds",
+ help: "Histogram of time to persist state to db",
+ buckets: [0.1, 0.5, 1, 2, 3, 4],
+ }),
+ statePruneFromMemoryCount: register.gauge({
+ name: "lodestar_cp_state_cache_state_prune_from_memory_count",
+ help: "Total number of states pruned from memory",
+ }),
+ statePersistSecFromSlot: register.histogram({
+ name: "lodestar_cp_state_cache_state_persist_seconds_from_slot",
+ help: "Histogram of time to persist state to db since the clock slot",
+ buckets: [0, 2, 4, 6, 8, 10, 12],
+ }),
+ stateReloadDuration: register.histogram({
+ name: "lodestar_cp_state_cache_state_reload_seconds",
+ help: "Histogram of time to load state from db",
+ buckets: [0, 2, 4, 6, 8, 10, 12],
+ }),
+ stateReloadEpochDiff: register.histogram({
+ name: "lodestar_cp_state_cache_state_reload_epoch_diff",
+ help: "Histogram of epoch difference between seed state epoch and loaded state epoch",
+ buckets: [0, 1, 2, 4, 8, 16, 32],
+ }),
+ stateReloadSecFromSlot: register.histogram({
+ name: "lodestar_cp_state_cache_state_reload_seconds_from_slot",
+ help: "Histogram of time to load state from db since the clock slot",
+ buckets: [0, 2, 4, 6, 8, 10, 12],
+ }),
+ stateReloadDbReadTime: register.histogram({
+ name: "lodestar_cp_state_cache_state_reload_db_read_seconds",
+ help: "Histogram of time to load state bytes from db",
+ buckets: [0.01, 0.05, 0.1, 0.2, 0.5],
+ }),
+ persistedStateRemoveCount: register.gauge({
+ name: "lodestar_cp_state_cache_persisted_state_remove_count",
+ help: "Total number of persisted states removed",
+ }),
},
balancesCache: {
@@ -1085,7 +1171,7 @@ export function createLodestarMetrics(
name: "lodestar_balances_cache_misses_total",
help: "Total number of balances cache misses",
}),
- closestStateResult: register.counter<"stateId">({
+ closestStateResult: register.counter<{stateId: string}>({
name: "lodestar_balances_cache_closest_state_result_total",
help: "Total number of stateIds returned as closest justified balances state by id",
labelNames: ["stateId"],
@@ -1163,7 +1249,7 @@ export function createLodestarMetrics(
name: "lodestar_seen_cache_attestation_data_miss_total",
help: "Total number of attestation data miss in SeenAttestationData",
}),
- reject: register.gauge<"reason">({
+ reject: register.gauge<{reason: RejectReason}>({
name: "lodestar_seen_cache_attestation_data_reject_total",
help: "Total number of attestation data rejected in SeenAttestationData",
labelNames: ["reason"],
@@ -1171,23 +1257,23 @@ export function createLodestarMetrics(
},
},
- regenFnCallTotal: register.gauge<"entrypoint" | "caller">({
+ regenFnCallTotal: register.gauge<{entrypoint: RegenFnName; caller: RegenCaller}>({
name: "lodestar_regen_fn_call_total",
help: "Total number of calls for regen functions",
labelNames: ["entrypoint", "caller"],
}),
- regenFnQueuedTotal: register.gauge<"entrypoint" | "caller">({
+ regenFnQueuedTotal: register.gauge<{entrypoint: RegenFnName; caller: RegenCaller}>({
name: "lodestar_regen_fn_queued_total",
help: "Total number of calls queued for regen functions",
labelNames: ["entrypoint", "caller"],
}),
- regenFnCallDuration: register.histogram<"entrypoint" | "caller">({
+ regenFnCallDuration: register.histogram<{entrypoint: RegenFnName; caller: RegenCaller}>({
name: "lodestar_regen_fn_call_duration",
help: "regen function duration",
labelNames: ["entrypoint", "caller"],
buckets: [0.1, 1, 10, 100],
}),
- regenFnTotalErrors: register.gauge<"entrypoint" | "caller">({
+ regenFnTotalErrors: register.gauge<{entrypoint: RegenFnName; caller: RegenCaller}>({
name: "lodestar_regen_fn_errors_total",
help: "regen function total errors",
labelNames: ["entrypoint", "caller"],
@@ -1199,7 +1285,7 @@ export function createLodestarMetrics(
// Precompute next epoch transition
precomputeNextEpochTransition: {
- count: register.counter<"result">({
+ count: register.counter<{result: string}>({
name: "lodestar_precompute_next_epoch_transition_result_total",
labelNames: ["result"],
help: "Total number of precomputeNextEpochTransition runs by result",
@@ -1228,14 +1314,15 @@ export function createLodestarMetrics(
name: "lodestar_reprocess_attestations_wait_time_resolve_seconds",
help: "Time to wait for unknown block in seconds",
}),
- reject: register.gauge<"reason">({
+ reject: register.gauge<{reason: ReprocessStatus}>({
name: "lodestar_reprocess_attestations_reject_total",
help: "Total number of attestations are rejected to reprocess",
labelNames: ["reason"],
}),
- waitSecBeforeReject: register.gauge<"reason">({
+ waitSecBeforeReject: register.gauge<{reason: ReprocessStatus}>({
name: "lodestar_reprocess_attestations_wait_time_reject_seconds",
help: "Time to wait for unknown block before being rejected",
+ labelNames: ["reason"],
}),
},
@@ -1257,24 +1344,25 @@ export function createLodestarMetrics(
name: "lodestar_reprocess_gossip_attestations_wait_time_resolve_seconds",
help: "Time to wait for unknown block in seconds",
}),
- reject: register.gauge<"reason">({
+ reject: register.gauge<{reason: ReprocessRejectReason}>({
name: "lodestar_reprocess_gossip_attestations_reject_total",
help: "Total number of attestations are rejected to reprocess",
labelNames: ["reason"],
}),
- waitSecBeforeReject: register.gauge<"reason">({
+ waitSecBeforeReject: register.gauge<{reason: ReprocessRejectReason}>({
name: "lodestar_reprocess_gossip_attestations_wait_time_reject_seconds",
help: "Time to wait for unknown block before being rejected",
+ labelNames: ["reason"],
}),
},
lightclientServer: {
- onSyncAggregate: register.gauge<"event">({
+ onSyncAggregate: register.gauge<{event: string}>({
name: "lodestar_lightclient_server_on_sync_aggregate_event_total",
help: "Total number of relevant events onSyncAggregate fn",
labelNames: ["event"],
}),
- highestSlot: register.gauge<"item">({
+ highestSlot: register.gauge<{item: string}>({
name: "lodestar_lightclient_server_highest_slot",
help: "Current highest slot of items stored by LightclientServer",
labelNames: ["item"],
@@ -1385,7 +1473,11 @@ export function createLodestarMetrics(
}),
// Merge details
- eth1MergeBlockDetails: register.gauge<"terminalBlockHash" | "terminalBlockNumber" | "terminalBlockTD">({
+ eth1MergeBlockDetails: register.gauge<{
+ terminalBlockHash: string;
+ terminalBlockNumber: string;
+ terminalBlockTD: string;
+ }>({
name: "lodestar_eth1_merge_block_details",
help: "If found then 1 with terminal block details",
labelNames: ["terminalBlockHash", "terminalBlockNumber", "terminalBlockTD"],
@@ -1393,36 +1485,36 @@ export function createLodestarMetrics(
},
eth1HttpClient: {
- requestTime: register.histogram<"routeId">({
+ requestTime: register.histogram<{routeId: string}>({
name: "lodestar_eth1_http_client_request_time_seconds",
help: "eth1 JsonHttpClient - histogram or roundtrip request times",
labelNames: ["routeId"],
// Provide max resolution on problematic values around 1 second
buckets: [0.1, 0.5, 1, 2, 5, 15],
}),
- streamTime: register.histogram<"routeId">({
+ streamTime: register.histogram<{routeId: string}>({
name: "lodestar_eth1_http_client_stream_time_seconds",
help: "eth1 JsonHttpClient - streaming time by routeId",
labelNames: ["routeId"],
// Provide max resolution on problematic values around 1 second
buckets: [0.1, 0.5, 1, 2, 5, 15],
}),
- requestErrors: register.gauge<"routeId">({
+ requestErrors: register.gauge<{routeId: string}>({
name: "lodestar_eth1_http_client_request_errors_total",
help: "eth1 JsonHttpClient - total count of request errors",
labelNames: ["routeId"],
}),
- retryCount: register.gauge<"routeId">({
+ retryCount: register.gauge<{routeId: string}>({
name: "lodestar_eth1_http_client_request_retries_total",
help: "eth1 JsonHttpClient - total count of request retries",
labelNames: ["routeId"],
}),
- requestUsedFallbackUrl: register.gauge({
+ requestUsedFallbackUrl: register.gauge<{routeId: string}>({
name: "lodestar_eth1_http_client_request_used_fallback_url_total",
help: "eth1 JsonHttpClient - total count of requests on fallback url(s)",
labelNames: ["routeId"],
}),
- activeRequests: register.gauge({
+ activeRequests: register.gauge<{routeId: string}>({
name: "lodestar_eth1_http_client_active_requests",
help: "eth1 JsonHttpClient - current count of active requests",
labelNames: ["routeId"],
@@ -1434,36 +1526,36 @@ export function createLodestarMetrics(
},
executionEnginerHttpClient: {
- requestTime: register.histogram<"routeId">({
+ requestTime: register.histogram<{routeId: string}>({
name: "lodestar_execution_engine_http_client_request_time_seconds",
help: "ExecutionEngineHttp client - histogram or roundtrip request times",
labelNames: ["routeId"],
// Provide max resolution on problematic values around 1 second
buckets: [0.1, 0.5, 1, 2, 5, 15],
}),
- streamTime: register.histogram<"routeId">({
+ streamTime: register.histogram<{routeId: string}>({
name: "lodestar_execution_engine_http_client_stream_time_seconds",
help: "ExecutionEngineHttp client - streaming time by routeId",
labelNames: ["routeId"],
// Provide max resolution on problematic values around 1 second
buckets: [0.1, 0.5, 1, 2, 5, 15],
}),
- requestErrors: register.gauge<"routeId">({
+ requestErrors: register.gauge<{routeId: string}>({
name: "lodestar_execution_engine_http_client_request_errors_total",
help: "ExecutionEngineHttp client - total count of request errors",
labelNames: ["routeId"],
}),
- retryCount: register.gauge<"routeId">({
+ retryCount: register.gauge<{routeId: string}>({
name: "lodestar_execution_engine_http_client_request_retries_total",
help: "ExecutionEngineHttp client - total count of request retries",
labelNames: ["routeId"],
}),
- requestUsedFallbackUrl: register.gauge({
+ requestUsedFallbackUrl: register.gauge<{routeId: string}>({
name: "lodestar_execution_engine_http_client_request_used_fallback_url_total",
help: "ExecutionEngineHttp client - total count of requests on fallback url(s)",
labelNames: ["routeId"],
}),
- activeRequests: register.gauge({
+ activeRequests: register.gauge<{routeId: string}>({
name: "lodestar_execution_engine_http_client_active_requests",
help: "ExecutionEngineHttp client - current count of active requests",
labelNames: ["routeId"],
@@ -1475,32 +1567,32 @@ export function createLodestarMetrics(
},
builderHttpClient: {
- requestTime: register.histogram<"routeId">({
+ requestTime: register.histogram<{routeId: string}>({
name: "lodestar_builder_http_client_request_time_seconds",
help: "Histogram of builder http client request time by routeId",
labelNames: ["routeId"],
// Expected times are ~ 50-500ms, but in an overload NodeJS they can be greater
buckets: [0.01, 0.1, 1, 5],
}),
- streamTime: register.histogram<"routeId">({
+ streamTime: register.histogram<{routeId: string}>({
name: "lodestar_builder_http_client_stream_time_seconds",
help: "Builder api - streaming time by routeId",
labelNames: ["routeId"],
// Provide max resolution on problematic values around 1 second
buckets: [0.1, 0.5, 1, 2, 5, 15],
}),
- requestErrors: register.gauge<"routeId">({
+ requestErrors: register.gauge<{routeId: string}>({
name: "lodestar_builder_http_client_request_errors_total",
help: "Total count of errors on builder http client requests by routeId",
labelNames: ["routeId"],
}),
- requestToFallbacks: register.gauge<"routeId">({
+ requestToFallbacks: register.gauge<{routeId: string}>({
name: "lodestar_builder_http_client_request_to_fallbacks_total",
help: "Total count of requests to fallback URLs on builder http API by routeId",
labelNames: ["routeId"],
}),
- urlsScore: register.gauge<"urlIndex">({
+ urlsScore: register.gauge<{urlIndex: number}>({
name: "lodestar_builder_http_client_urls_score",
help: "Current score of builder http URLs by url index",
labelNames: ["urlIndex"],
@@ -1508,22 +1600,22 @@ export function createLodestarMetrics(
},
db: {
- dbReadReq: register.gauge<"bucket">({
+ dbReadReq: register.gauge<{bucket: string}>({
name: "lodestar_db_read_req_total",
help: "Total count of db read requests, may read 0 or more items",
labelNames: ["bucket"],
}),
- dbReadItems: register.gauge<"bucket">({
+ dbReadItems: register.gauge<{bucket: string}>({
name: "lodestar_db_read_items_total",
help: "Total count of db read items, item = key | value | entry",
labelNames: ["bucket"],
}),
- dbWriteReq: register.gauge<"bucket">({
+ dbWriteReq: register.gauge<{bucket: string}>({
name: "lodestar_db_write_req_total",
help: "Total count of db write requests, may write 0 or more items",
labelNames: ["bucket"],
}),
- dbWriteItems: register.gauge<"bucket">({
+ dbWriteItems: register.gauge<{bucket: string}>({
name: "lodestar_db_write_items_total",
help: "Total count of db write items",
labelNames: ["bucket"],
diff --git a/packages/beacon-node/src/metrics/server/http.ts b/packages/beacon-node/src/metrics/server/http.ts
index b699471e07d5..d8fbb289e951 100644
--- a/packages/beacon-node/src/metrics/server/http.ts
+++ b/packages/beacon-node/src/metrics/server/http.ts
@@ -15,6 +15,11 @@ export type HttpMetricsServer = {
close(): Promise<void>;
};
+enum RequestStatus {
+ success = "success",
+ error = "error",
+}
+
export async function getHttpMetricsServer(
opts: HttpMetricsServerOpts,
{
@@ -26,7 +31,7 @@ export async function getHttpMetricsServer(
// New registry to metric the metrics. Using the same registry would deadlock the .metrics promise
const httpServerRegister = new RegistryMetricCreator();
- const scrapeTimeMetric = httpServerRegister.histogram<"status">({
+ const scrapeTimeMetric = httpServerRegister.histogram<{status: RequestStatus}>({
name: "lodestar_metrics_scrape_seconds",
help: "Lodestar metrics server async time to scrape metrics",
labelNames: ["status"],
@@ -40,7 +45,7 @@ export async function getHttpMetricsServer(
if (req.method === "GET" && req.url && req.url.includes("/metrics")) {
const timer = scrapeTimeMetric.startTimer();
const metricsRes = await Promise.all([wrapError(register.metrics()), getOtherMetrics()]);
- timer({status: metricsRes[0].err ? "error" : "success"});
+ timer({status: metricsRes[0].err ? RequestStatus.error : RequestStatus.success});
// Ensure we only writeHead once
if (metricsRes[0].err) {
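
The `wrapError` call above presumably converts a rejected promise into a plain value, so the `Promise.all` cannot throw and the scrape timer always resolves with a status. A sketch of the assumed shape, not the actual Lodestar implementation:

```ts
// Assumed shape of the wrapError helper referenced above (illustrative only).
type Result<T> = {err: null; result: T} | {err: Error};

async function wrapError<T>(promise: Promise<T>): Promise<Result<T>> {
  try {
    return {err: null, result: await promise};
  } catch (err) {
    return {err: err as Error};
  }
}
```
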
diff --git a/packages/beacon-node/src/metrics/utils/avgMinMax.ts b/packages/beacon-node/src/metrics/utils/avgMinMax.ts
index 43f51c821790..709c83ee38d6 100644
--- a/packages/beacon-node/src/metrics/utils/avgMinMax.ts
+++ b/packages/beacon-node/src/metrics/utils/avgMinMax.ts
@@ -1,21 +1,21 @@
import {GaugeConfiguration} from "prom-client";
+import {AvgMinMax as IAvgMinMax, LabelKeys, LabelsGeneric} from "@lodestar/utils";
import {GaugeExtra} from "./gauge.js";
type GetValuesFn = () => number[];
-type Labels<T extends string> = Partial<Record<T, string>>;
/**
* Special non-standard "Histogram" that captures the avg, min and max of values
*/
-export class AvgMinMax<T extends string> {
- private readonly sum: GaugeExtra<T>;
- private readonly avg: GaugeExtra<T>;
- private readonly min: GaugeExtra<T>;
- private readonly max: GaugeExtra<T>;
+export class AvgMinMax<Labels extends LabelsGeneric> implements IAvgMinMax<Labels> {
+ private readonly sum: GaugeExtra<Labels>;
+ private readonly avg: GaugeExtra<Labels>;
+ private readonly min: GaugeExtra<Labels>;
+ private readonly max: GaugeExtra<Labels>;
private getValuesFn: GetValuesFn | null = null;
- constructor(configuration: GaugeConfiguration<T>) {
+ constructor(configuration: GaugeConfiguration<LabelKeys<Labels>>) {
this.sum = new GaugeExtra({...configuration, name: `${configuration.name}_sum`});
this.avg = new GaugeExtra({...configuration, name: `${configuration.name}_avg`});
this.min = new GaugeExtra({...configuration, name: `${configuration.name}_min`});
@@ -33,8 +33,8 @@ export class AvgMinMax {
}
set(values: number[]): void;
- set(labels: Labels<T>, values: number[]): void;
- set(arg1?: Labels<T> | number[], arg2?: number[]): void {
+ set(labels: Labels, values: number[]): void;
+ set(arg1?: Labels | number[], arg2?: number[]): void {
if (arg2 === undefined) {
const values = arg1 as number[];
const {sum, avg, min, max} = getStats(values);
@@ -44,7 +44,7 @@ export class AvgMinMax {
this.max.set(max);
} else {
const values = (arg2 !== undefined ? arg2 : arg1) as number[];
- const labels = arg1 as Labels<T>;
+ const labels = arg1 as Labels;
const {sum, avg, min, max} = getStats(values);
this.sum.set(labels, sum);
this.avg.set(labels, avg);
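
The two `set()` overloads above keep both styles working: unlabeled metrics pass only the values, labeled metrics pass the label object first. Illustrative usage, assuming `AvgMinMax` instances registered elsewhere:

```ts
// Illustrative only; assumes these metrics are constructed and registered elsewhere.
declare const stateReads: AvgMinMax<{source: "db" | "cache"}>;
declare const epochReads: AvgMinMax<Record<string, never>>;

epochReads.set([3, 5, 8]);                 // records sum=16, avg≈5.33, min=3, max=8
stateReads.set({source: "db"}, [3, 5, 8]); // same stats, labeled by source
```
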
diff --git a/packages/beacon-node/src/metrics/utils/gauge.ts b/packages/beacon-node/src/metrics/utils/gauge.ts
index fb95fe25d24d..1f527adfcb64 100644
--- a/packages/beacon-node/src/metrics/utils/gauge.ts
+++ b/packages/beacon-node/src/metrics/utils/gauge.ts
@@ -1,29 +1,16 @@
-import {Gauge, GaugeConfiguration} from "prom-client";
-import {IGauge} from "../interface.js";
-
-type CollectFn<T extends string> = (metric: IGauge<T>) => void;
-type Labels<T extends string> = Partial<Record<T, string>>;
+import {Gauge} from "prom-client";
+import {CollectFn, Gauge as IGauge, LabelKeys, LabelsGeneric} from "@lodestar/utils";
/**
- * Extends the prom-client Gauge with extra features:
- * - Add multiple collect functions after instantiation
- * - Create child gauges with fixed labels
+ * Extends the prom-client Gauge to be able to add multiple collect functions after instantiation
*/
-export class GaugeExtra<T extends string> extends Gauge<T> implements IGauge<T> {
- private collectFns: CollectFn<T>[] = [];
-
- constructor(configuration: GaugeConfiguration<T>) {
- super(configuration);
- }
+export class GaugeExtra<Labels extends LabelsGeneric> extends Gauge<LabelKeys<Labels>> implements IGauge<Labels> {
+ private collectFns: CollectFn<Labels>[] = [];
- addCollect(collectFn: CollectFn<T>): void {
+ addCollect(collectFn: CollectFn<Labels>): void {
this.collectFns.push(collectFn);
}
- child(labels: Labels<T>): GaugeChild<T> {
- return new GaugeChild(labels, this);
- }
-
/**
* @override Metric.collect
*/
@@ -33,48 +20,3 @@ export class GaugeExtra extends Gauge implements IGauge {
}
}
}
-
-export class GaugeChild<T extends string> implements IGauge<T> {
- gauge: GaugeExtra<T>;
- labelsParent: Labels<T>;
- constructor(labelsParent: Labels<T>, gauge: GaugeExtra<T>) {
- this.gauge = gauge;
- this.labelsParent = labelsParent;
- }
-
- // Sorry for this mess, `prom-client` API choices are not great
- // If the function signature was `inc(value: number, labels?: Labels)`, this would be simpler
- inc(value?: number): void;
- inc(labels: Labels<T>, value?: number): void;
- inc(arg1?: Labels<T> | number, arg2?: number): void {
- if (typeof arg1 === "object") {
- this.gauge.inc({...this.labelsParent, ...arg1}, arg2 ?? 1);
- } else {
- this.gauge.inc(this.labelsParent, arg1 ?? 1);
- }
- }
-
- dec(value?: number): void;
- dec(labels: Labels