diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index c347c5daaea..04229255fab 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -36,7 +36,7 @@ jobs:
       - uses: actions/checkout@v4
         with:
           fetch-depth: 0
-      - uses: actions/setup-go@v4
+      - uses: actions/setup-go@v5
        with:
          go-version: '1.21'
      - name: Install dependencies on Linux
@@ -58,7 +58,7 @@ jobs:
      - name: Install golangci-lint
        if: runner.os == 'Linux'
-        uses: golangci/golangci-lint-action@v4
+        uses: golangci/golangci-lint-action@v6
        with:
          version: v1.59.1
          skip-build-cache: true
@@ -73,7 +73,7 @@ jobs:
      - name: SonarCloud
        if: runner.os == 'Linux'
-        uses: SonarSource/sonarcloud-github-action@v2.1.1
+        uses: SonarSource/sonarcloud-github-action@v2.3.0
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Needed to get PR information, if any
          SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
@@ -92,11 +92,11 @@ jobs:
 #        with:
 #          minimum-size: 8GB
 #      - uses: actions/checkout@v4
-#      - uses: actions/setup-go@v4
+#      - uses: actions/setup-go@v5
 #        with:
 #          go-version: '1.21'
-#      - uses: actions/cache@v3
+#      - uses: actions/cache@v4
 #        with:
 #          path: |
 #            C:\ProgramData\chocolatey\lib\mingw
diff --git a/.github/workflows/docker-tags.yml b/.github/workflows/docker-tags.yml
index a74dd476551..d169a287bb9 100644
--- a/.github/workflows/docker-tags.yml
+++ b/.github/workflows/docker-tags.yml
@@ -16,18 +16,18 @@ jobs:
          fetch-depth: 0

      - name: dockerhub-login
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB }}
          password: ${{ secrets.DOCKERHUB_KEY }}

      - name: ghcr-login
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}

-      - uses: docker/setup-qemu-action@v2
+      - uses: docker/setup-qemu-action@v3

      - run: |
          make release-dry-run
@@ -38,4 +38,4 @@ jobs:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          VERSION: ${GITHUB_REF#refs/tags/}
          DOCKER_USERNAME: ${{ secrets.DOCKERHUB }}
-          DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_KEY }}
\ No newline at end of file
+          DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_KEY }}
diff --git a/.github/workflows/hive-nightly.yml b/.github/workflows/hive-nightly.yml
index cf04bee73bc..a96f579570d 100644
--- a/.github/workflows/hive-nightly.yml
+++ b/.github/workflows/hive-nightly.yml
@@ -38,8 +38,8 @@ jobs:
          done

      - name: parse hive results
-        uses: phoenix-actions/test-reporting@v10
+        uses: phoenix-actions/test-reporting@v15
        with:
          name: Tests
          path: results-${{ github.run_id }}/*.xml
-          reporter: java-junit
\ No newline at end of file
+          reporter: java-junit
diff --git a/.github/workflows/qa-sync-from-scratch.yml b/.github/workflows/qa-sync-from-scratch.yml
index 3708c390b54..18bb19aaf10 100644
--- a/.github/workflows/qa-sync-from-scratch.yml
+++ b/.github/workflows/qa-sync-from-scratch.yml
@@ -8,16 +8,16 @@ on:
 jobs:
   sync-from-scratch-test:
     runs-on: self-hosted
-    timeout-minutes: 800
+    timeout-minutes: 1100 # 18 hours plus 20 minutes
     strategy:
       fail-fast: false
       matrix:
-        chain: [ sepolia, amoy ] # Chain name as specified on the erigon command line
+        chain: [ sepolia, holesky, amoy ] # Chain name as specified on the erigon command line
     env:
       ERIGON_DATA_DIR: ${{ github.workspace }}/erigon_data
       ERIGON_QA_PATH: /home/qarunner/erigon-qa
       TRACKING_TIME_SECONDS: 7200 # 2 hours
-      TOTAL_TIME_SECONDS: 43200 # 12 hours
+      TOTAL_TIME_SECONDS: 57600 # 16 hours
       CHAIN: ${{ matrix.chain }}

     steps:
diff --git a/.github/workflows/test-erigon-is-library.yml b/.github/workflows/test-erigon-is-library.yml
index c715696f531..f14e324955d 100644
--- a/.github/workflows/test-erigon-is-library.yml
+++ b/.github/workflows/test-erigon-is-library.yml
@@ -18,7 +18,7 @@ jobs:
    steps:
      - uses: actions/checkout@v4
      - run: git submodule update --init --recursive --force
-      - uses: actions/setup-go@v4
+      - uses: actions/setup-go@v5
        with:
          go-version: '1.21'
      - name: Install dependencies on Linux
diff --git a/.github/workflows/test-integration-caplin.yml b/.github/workflows/test-integration-caplin.yml
index 4660d15c080..c3ad6a11e28 100644
--- a/.github/workflows/test-integration-caplin.yml
+++ b/.github/workflows/test-integration-caplin.yml
@@ -24,7 +24,7 @@ jobs:
    steps:
      - uses: actions/checkout@v4
-      - uses: actions/setup-go@v4
+      - uses: actions/setup-go@v5
        with:
          go-version: '1.21'
      - name: Install dependencies on Linux
@@ -42,11 +42,11 @@ jobs:
    steps:
      - uses: actions/checkout@v4
-      - uses: actions/setup-go@v4
+      - uses: actions/setup-go@v5
        with:
          go-version: '1.21'
-      - uses: actions/cache@v3
+      - uses: actions/cache@v4
        with:
          path: |
            C:\ProgramData\chocolatey\lib\mingw
diff --git a/.github/workflows/test-integration.yml b/.github/workflows/test-integration.yml
index 4e95e57aad2..68719e5f364 100644
--- a/.github/workflows/test-integration.yml
+++ b/.github/workflows/test-integration.yml
@@ -29,7 +29,7 @@ jobs:
    steps:
      - uses: actions/checkout@v4
      - run: git submodule update --init --recursive --force
-      - uses: actions/setup-go@v4
+      - uses: actions/setup-go@v5
        with:
          go-version: '1.21'
      - name: Install dependencies on Linux
@@ -48,11 +48,11 @@ jobs:
    steps:
      - uses: actions/checkout@v4
      - run: git submodule update --init --recursive --force
-      - uses: actions/setup-go@v4
+      - uses: actions/setup-go@v5
        with:
          go-version: '1.21'
-      - uses: actions/cache@v3
+      - uses: actions/cache@v4
        with:
          path: |
            C:\ProgramData\chocolatey\lib\mingw
diff --git a/.golangci.yml b/.golangci.yml
index 18d05f596ba..fb9241a9e9a 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -26,16 +26,16 @@ linters:
    - errchkjson #TODO: enable me
    - unused #TODO: enable me
    - testifylint #TODO: enable me
-    - perfsprint #TODO: enable me
    - gocheckcompilerdirectives
    - protogetter
  enable:
    - unconvert
    # - predeclared #TODO: enable me
    # - thelper #TODO: enable me
-    # - wastedassign
+    - wastedassign
    - gofmt
    - gocritic
+    - perfsprint
    # - revive
    # - forcetypeassert
    # - stylecheck
diff --git a/Makefile b/Makefile
index 1e936b3b21b..acc2a7218e0 100644
--- a/Makefile
+++ b/Makefile
@@ -55,7 +55,7 @@ GOPRIVATE = github.com/erigontech/silkworm-go

 PACKAGE = github.com/node-real/bsc-erigon

-GO_FLAGS += -trimpath -tags $(BUILD_TAGS) -buildvcs=false
+GO_FLAGS += -trimpath -tags $(BUILD_TAGS) -buildvcs=false
 GO_FLAGS += -ldflags "-X ${PACKAGE}/params.GitCommit=${GIT_COMMIT} -X ${PACKAGE}/params.GitBranch=${GIT_BRANCH} -X ${PACKAGE}/params.GitTag=${GIT_TAG}"

 GOBUILD = CGO_CFLAGS="$(CGO_CFLAGS)" CGO_LDFLAGS="$(CGO_LDFLAGS)" GOPRIVATE="$(GOPRIVATE)" $(GO) build $(GO_FLAGS)
@@ -277,7 +277,7 @@ install:
	@ls -al "$(DIST)"

 PACKAGE_NAME          := github.com/node-real/bsc-erigon
-GOLANG_CROSS_VERSION  ?= v1.21.6
+GOLANG_CROSS_VERSION  ?= v1.21.5

 .PHONY: release-dry-run
 release-dry-run: git-submodules
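Editorial note: the `.golangci.yml` hunk above moves `perfsprint` from the disabled to the enabled linters, and that is what drives the large `fmt.Errorf` → `errors.New` sweep through the Go files later in this diff: for a constant message, `errors.New` skips the format-string parsing and allocation that `fmt.Errorf` performs. A minimal sketch of the rule (illustrative only, not code from this PR):

```go
package main

import (
	"errors"
	"fmt"
)

// Static message: no formatting needed, so errors.New is cheaper;
// this is exactly what perfsprint flags fmt.Errorf for.
var errEmptyRequest = errors.New("empty request")

// Dynamic message: fmt.Errorf stays, with %w keeping the cause unwrappable.
func wrap(op string, cause error) error {
	return fmt.Errorf("%s failed: %w", op, cause)
}

func main() {
	fmt.Println(errEmptyRequest)
	fmt.Println(wrap("decode", errEmptyRequest))
}
```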
diff --git a/README.md b/README.md
index 7a7eb003ef5..8ec21d08d0e 100644
--- a/README.md
+++ b/README.md
@@ -1,11 +1,11 @@
 # Erigon

+Documentation: **[erigon.gitbook.io](https://erigon.gitbook.io)**
+
 Erigon is an implementation of Ethereum (execution layer with embeddable consensus layer), on the efficiency
 frontier. [Archive Node](https://ethereum.org/en/developers/docs/nodes-and-clients/archive-nodes/#what-is-an-archive-node)
 by default.

-An accessible and complete version of the documentation is available at **[erigon.gitbook.io](https://erigon.gitbook.io)
-**.

 ![Build status](https://github.com/erigontech/erigon/actions/workflows/ci.yml/badge.svg) [![Coverage](https://sonarcloud.io/api/project_badges/measure?project=erigontech_erigon&metric=coverage)](https://sonarcloud.io/summary/new_code?id=erigontech_erigon)

@@ -63,7 +63,6 @@ System Requirements
 * Polygon Mainnet Archive: 8.5TiB (December 2023). Polygon Mainnet Full node (see [Pruned Node][pruned_node]) with
   `--prune.*.older 15768000`: 5.1Tb (September 2023).
-  Polygon Mumbai Archive: 1TB. (April 2022).

 SSD or NVMe. Do not recommend HDD - on HDD Erigon will always stay N blocks behind chain tip, but not fall behind.
 Bear in mind that SSD performance deteriorates when close to capacity.
@@ -82,7 +81,6 @@ Usage

 ### Getting Started

-Building erigon requires both a Go (version 1.21 or later) and a C compiler (GCC 10+ or Clang).
 For building the latest release (this will be suitable for most users just wanting to run a node):

 ```sh
@@ -111,7 +109,7 @@ download speed by flag `--torrent.download.rate=20mb`. 🔬 See [Downloade

 Use `--datadir` to choose where to store data.

 Use `--chain=gnosis` for [Gnosis Chain](https://www.gnosis.io/), `--chain=bor-mainnet` for Polygon Mainnet,
-`--chain=mumbai` for Polygon Mumbai and `--chain=amoy` for Polygon Amoy.
+and `--chain=amoy` for Polygon Amoy.

 For Gnosis Chain you need a [Consensus Layer](#beacon-chain-consensus-layer) client alongside
 Erigon (https://docs.gnosischain.com/node/manual/beacon).
@@ -363,7 +361,7 @@ Key features
 ============

 🔬 See more
-detailed [overview of functionality and current limitations](https://erigontech.github.io/turbo_geth_release.html). It
+detailed [overview of functionality and current limitations](https://ledgerwatch.github.io/turbo_geth_release.html). It
 is being updated on recurring basis.

 ### More Efficient State Storage
@@ -375,7 +373,7 @@ is being updated on recurring basis.
 **Preprocessing**. For some operations, Erigon uses temporary files to preprocess data before inserting it into
 the main DB. That reduces write amplification and DB inserts are orders of magnitude quicker.

-🔬 See our detailed ETL explanation [here](https://github.com/erigontech/erigon-lib/blob/main/etl/README.md).
+🔬 See our detailed ETL explanation [here](https://github.com/erigontech/erigon/blob/main/erigon-lib/etl/README.md).

 **Plain state**.
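Editorial note: the **Preprocessing** paragraph in the README hunk above is the heart of Erigon's ETL pattern — buffer writes, sort them, then feed the DB in key order so B-tree inserts become near-appends. A minimal, self-contained sketch of the technique (this is not the `erigon-lib/etl` API, which also spills sorted runs to temp files and merge-sorts them; the sorting-before-loading idea is the point):

```go
package main

import (
	"fmt"
	"sort"
)

type kv struct{ k, v string }

// etlLoad buffers unsorted pairs, sorts them by key, then hands them
// to the loader in order - sorted insertion is what keeps write
// amplification low in a B-tree-backed store.
func etlLoad(pairs []kv, load func(kv)) {
	sort.Slice(pairs, func(i, j int) bool { return pairs[i].k < pairs[j].k })
	for _, p := range pairs {
		load(p)
	}
}

func main() {
	in := []kv{{"c", "3"}, {"a", "1"}, {"b", "2"}}
	etlLoad(in, func(p kv) { fmt.Println(p.k, p.v) })
}
```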
@@ -776,7 +774,7 @@ Golang 1.21

 Almost all RPC methods are implemented - if something doesn't work - just drop it on our head.

-Supported networks: all (except Mumbai).
+Supported networks: all.

 ### E3 changes from E2:
diff --git a/accounts/abi/argument.go b/accounts/abi/argument.go
index ac1fc94a540..6bdbe84a61a 100644
--- a/accounts/abi/argument.go
+++ b/accounts/abi/argument.go
@@ -21,6 +21,7 @@ package abi

 import (
	"encoding/json"
+	"errors"
	"fmt"
	"reflect"
	"strings"
@@ -82,7 +83,7 @@ func (arguments Arguments) isTuple() bool {
 func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) {
	if len(data) == 0 {
		if len(arguments) != 0 {
-			return nil, fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected")
+			return nil, errors.New("abi: attempting to unmarshall an empty string while arguments are expected")
		}
		// Nothing to unmarshal, return default variables
		nonIndexedArgs := arguments.NonIndexed()
@@ -99,11 +100,11 @@ func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) {
 func (arguments Arguments) UnpackIntoMap(v map[string]interface{}, data []byte) error {
	// Make sure map is not nil
	if v == nil {
-		return fmt.Errorf("abi: cannot unpack into a nil map")
+		return errors.New("abi: cannot unpack into a nil map")
	}
	if len(data) == 0 {
		if len(arguments) != 0 {
-			return fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected")
+			return errors.New("abi: attempting to unmarshall an empty string while arguments are expected")
		}
		return nil // Nothing to unmarshal, return
	}
diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go
index 2ad4c13bb2a..58939db2e91 100644
--- a/accounts/abi/bind/backends/simulated.go
+++ b/accounts/abi/bind/backends/simulated.go
@@ -99,6 +99,7 @@ func NewSimulatedBackendWithConfig(t *testing.T, alloc types.GenesisAlloc, confi
	engine := ethash.NewFaker()
	checkStateRoot := true
	m := mock.MockWithGenesisEngine(t, &genesis, engine, false, checkStateRoot)
+
	backend := &SimulatedBackend{
		m:            m,
		prependBlock: m.Genesis,
@@ -264,6 +265,7 @@ func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash libcom
		return nil, err
	}
	defer tx.Rollback()
+
	// Retrieve the context of the receipt based on the transaction hash
	blockNumber, err := rawdb.ReadTxLookupEntry(tx, txHash)
	if err != nil {
@@ -276,8 +278,12 @@ func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash libcom
	if err != nil {
		return nil, err
	}
+
	// Read all the receipts from the block and return the one with the matching hash
-	receipts := rawdb.ReadReceipts(tx, block, nil)
+	receipts, err := b.m.ReceiptsReader.GetReceipts(ctx, b.m.ChainConfig, tx, block)
+	if err != nil {
+		panic(err)
+	}
	for _, receipt := range receipts {
		if receipt.TxHash == txHash {
			return receipt, nil
diff --git a/accounts/abi/bind/backends/simulated_test.go b/accounts/abi/bind/backends/simulated_test.go
index 6b34001a0e0..50334446195 100644
--- a/accounts/abi/bind/backends/simulated_test.go
+++ b/accounts/abi/bind/backends/simulated_test.go
@@ -863,9 +863,6 @@ func TestSimulatedBackend_TransactionReceipt(t *testing.T) {
	}
	sim.Commit()

-	if sim.m.HistoryV3 {
-		return
-	}
	receipt, err := sim.TransactionReceipt(bgCtx, signedTx.Hash())
	if err != nil {
		t.Errorf("could not get transaction receipt: %v", err)
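Editorial note: as the `Arguments.Unpack` hunk above shows, empty return data is only an error when outputs are expected. A small illustration of that contract, assuming the package keeps the usual `abi.JSON` entry point and `Methods` map (the ABI definition here is hypothetical):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/erigontech/erigon/accounts/abi"
)

func main() {
	// Hypothetical one-method ABI whose output is a uint256.
	def := `[{"name":"get","type":"function","outputs":[{"name":"","type":"uint256"}]}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	// Empty returndata with one expected output hits the
	// errors.New path introduced in the hunk above.
	_, err = parsed.Methods["get"].Outputs.Unpack(nil)
	fmt.Println(err) // abi: attempting to unmarshall an empty string while arguments are expected
}
```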
diff --git a/accounts/abi/bind/base.go b/accounts/abi/bind/base.go
index cee0a0c59b6..6b1b39b28e4 100644
--- a/accounts/abi/bind/base.go
+++ b/accounts/abi/bind/base.go
@@ -218,7 +218,7 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *libcommon.Address
	if opts.Value != nil {
		overflow := value.SetFromBig(opts.Value)
		if overflow {
-			return nil, fmt.Errorf("opts.Value higher than 2^256-1")
+			return nil, errors.New("opts.Value higher than 2^256-1")
		}
	}
	var nonce uint64
@@ -240,7 +240,7 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *libcommon.Address
	}
	gasPrice, overflow := uint256.FromBig(gasPriceBig)
	if overflow {
-		return nil, fmt.Errorf("gasPriceBig higher than 2^256-1")
+		return nil, errors.New("gasPriceBig higher than 2^256-1")
	}
	gasLimit := opts.GasLimit
	if gasLimit == 0 {
diff --git a/accounts/abi/reflect.go b/accounts/abi/reflect.go
index 84d1b34195c..4e381f3847f 100644
--- a/accounts/abi/reflect.go
+++ b/accounts/abi/reflect.go
@@ -233,7 +233,7 @@ func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[stri
		structFieldName := ToCamelCase(argName)

		if structFieldName == "" {
-			return nil, fmt.Errorf("abi: purely underscored output cannot unpack to struct")
+			return nil, errors.New("abi: purely underscored output cannot unpack to struct")
		}

		// this abi has already been paired, skip it... unless there exists another, yet unassigned
diff --git a/accounts/abi/type.go b/accounts/abi/type.go
index 25c9f45c461..3ad141b1120 100644
--- a/accounts/abi/type.go
+++ b/accounts/abi/type.go
@@ -71,7 +71,7 @@ var (
 func NewType(t string, internalType string, components []ArgumentMarshaling) (typ Type, err error) {
	// check that array brackets are equal if they exist
	if strings.Count(t, "[") != strings.Count(t, "]") {
-		return Type{}, fmt.Errorf("invalid arg type in abi")
+		return Type{}, errors.New("invalid arg type in abi")
	}
	typ.stringKind = t
@@ -110,7 +110,7 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
		}
		typ.stringKind = embeddedType.stringKind + sliced
	} else {
-		return Type{}, fmt.Errorf("invalid formatting of array type")
+		return Type{}, errors.New("invalid formatting of array type")
	}
	return typ, err
 }
diff --git a/accounts/abi/unpack.go b/accounts/abi/unpack.go
index 3753c57bbdd..0d509bd48be 100644
--- a/accounts/abi/unpack.go
+++ b/accounts/abi/unpack.go
@@ -21,6 +21,7 @@ package abi

 import (
	"encoding/binary"
+	"errors"
	"fmt"
	"math/big"
	"reflect"
@@ -98,7 +99,7 @@ func readBool(word []byte) (bool, error) {
 // readFunctionType enforces that standard by always presenting it as a 24-array (address + sig = 24 bytes)
 func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) {
	if t.T != FunctionTy {
-		return [24]byte{}, fmt.Errorf("abi: invalid type in call to make function type byte array")
+		return [24]byte{}, errors.New("abi: invalid type in call to make function type byte array")
	}
	if garbage := binary.BigEndian.Uint64(word[24:32]); garbage != 0 {
		err = fmt.Errorf("abi: got improperly encoded function type, got %v", word)
@@ -111,7 +112,7 @@ func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) {
 // ReadFixedBytes uses reflection to create a fixed array to be read from.
 func ReadFixedBytes(t Type, word []byte) (interface{}, error) {
	if t.T != FixedBytesTy {
-		return nil, fmt.Errorf("abi: invalid type in call to make fixed byte array")
+		return nil, errors.New("abi: invalid type in call to make fixed byte array")
	}
	// convert
	array := reflect.New(t.GetType()).Elem()
@@ -140,7 +141,7 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error)
		// declare our array
		refSlice = reflect.New(t.GetType()).Elem()
	} else {
-		return nil, fmt.Errorf("abi: invalid type in array/slice unpacking stage")
+		return nil, errors.New("abi: invalid type in array/slice unpacking stage")
	}

	// Arrays have packed elements, resulting in longer unpack steps.
diff --git a/cl/abstract/beacon_state.go b/cl/abstract/beacon_state.go
index 09cbff5faab..d378cfc7afd 100644
--- a/cl/abstract/beacon_state.go
+++ b/cl/abstract/beacon_state.go
@@ -219,4 +219,5 @@ type BeaconStateReader interface {
	ValidatorForValidatorIndex(index int) (solid.Validator, error)
	Version() clparams.StateVersion
	GenesisValidatorsRoot() common.Hash
+	GetBeaconProposerIndexForSlot(slot uint64) (uint64, error)
 }
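Editorial note: the generated mock below follows gomock's typed call-wrapper pattern for the new `GetBeaconProposerIndexForSlot` interface method. In a test it would be driven roughly like this — only the mock's method names come from the diff; the `go.uber.org/mock` import path, the `NewMockBeaconStateReader` constructor name (gomock's convention), and the test body are assumptions:

```go
package mock_services_test

import (
	"testing"

	"go.uber.org/mock/gomock"

	// Generated mock from the diff below (import path assumed).
	"github.com/erigontech/erigon/cl/abstract/mock_services"
)

func TestProposerIndexMock(t *testing.T) {
	ctrl := gomock.NewController(t)
	reader := mock_services.NewMockBeaconStateReader(ctrl)

	// Stub the new interface method for slot 42.
	reader.EXPECT().GetBeaconProposerIndexForSlot(uint64(42)).Return(uint64(7), nil)

	idx, err := reader.GetBeaconProposerIndexForSlot(42)
	if err != nil || idx != 7 {
		t.Fatalf("got %d, %v", idx, err)
	}
}
```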
diff --git a/cl/abstract/mock_services/beacon_state_reader_mock.go b/cl/abstract/mock_services/beacon_state_reader_mock.go
index d5a996a6d74..8927b3b66f1 100644
--- a/cl/abstract/mock_services/beacon_state_reader_mock.go
+++ b/cl/abstract/mock_services/beacon_state_reader_mock.go
@@ -117,6 +117,45 @@ func (c *MockBeaconStateReaderGenesisValidatorsRootCall) DoAndReturn(f func() co
	return c
 }

+// GetBeaconProposerIndexForSlot mocks base method.
+func (m *MockBeaconStateReader) GetBeaconProposerIndexForSlot(arg0 uint64) (uint64, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetBeaconProposerIndexForSlot", arg0)
+	ret0, _ := ret[0].(uint64)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetBeaconProposerIndexForSlot indicates an expected call of GetBeaconProposerIndexForSlot.
+func (mr *MockBeaconStateReaderMockRecorder) GetBeaconProposerIndexForSlot(arg0 any) *MockBeaconStateReaderGetBeaconProposerIndexForSlotCall {
+	mr.mock.ctrl.T.Helper()
+	call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBeaconProposerIndexForSlot", reflect.TypeOf((*MockBeaconStateReader)(nil).GetBeaconProposerIndexForSlot), arg0)
+	return &MockBeaconStateReaderGetBeaconProposerIndexForSlotCall{Call: call}
+}
+
+// MockBeaconStateReaderGetBeaconProposerIndexForSlotCall wrap *gomock.Call
+type MockBeaconStateReaderGetBeaconProposerIndexForSlotCall struct {
+	*gomock.Call
+}
+
+// Return rewrite *gomock.Call.Return
+func (c *MockBeaconStateReaderGetBeaconProposerIndexForSlotCall) Return(arg0 uint64, arg1 error) *MockBeaconStateReaderGetBeaconProposerIndexForSlotCall {
+	c.Call = c.Call.Return(arg0, arg1)
+	return c
+}
+
+// Do rewrite *gomock.Call.Do
+func (c *MockBeaconStateReaderGetBeaconProposerIndexForSlotCall) Do(f func(uint64) (uint64, error)) *MockBeaconStateReaderGetBeaconProposerIndexForSlotCall {
+	c.Call = c.Call.Do(f)
+	return c
+}
+
+// DoAndReturn rewrite *gomock.Call.DoAndReturn
+func (c *MockBeaconStateReaderGetBeaconProposerIndexForSlotCall) DoAndReturn(f func(uint64) (uint64, error)) *MockBeaconStateReaderGetBeaconProposerIndexForSlotCall {
+	c.Call = c.Call.DoAndReturn(f)
+	return c
+}
+
 // GetDomain mocks base method.
 func (m *MockBeaconStateReader) GetDomain(arg0 [4]byte, arg1 uint64) ([]byte, error) {
	m.ctrl.T.Helper()
diff --git a/cl/aggregation/pool_impl.go b/cl/aggregation/pool_impl.go
index 5cdef889808..38afe528255 100644
--- a/cl/aggregation/pool_impl.go
+++ b/cl/aggregation/pool_impl.go
@@ -18,7 +18,7 @@ package aggregation

 import (
	"context"
-	"fmt"
+	"errors"
	"sync"
	"time"

@@ -30,7 +30,7 @@ import (
	"github.com/erigontech/erigon/cl/utils/eth_clock"
 )

-var ErrIsSuperset = fmt.Errorf("attestation is superset of existing attestation")
+var ErrIsSuperset = errors.New("attestation is superset of existing attestation")

 var (
	blsAggregate = bls.AggregateSignatures
@@ -89,7 +89,7 @@ func (p *aggregationPoolImpl) AddAttestation(inAtt *solid.Attestation) error {
		return err
	}
	if len(merged) != 96 {
-		return fmt.Errorf("merged signature is too long")
+		return errors.New("merged signature is too long")
	}
	var mergedSig [96]byte
	copy(mergedSig[:], merged)
diff --git a/cl/antiquary/antiquary.go b/cl/antiquary/antiquary.go
index 518c74a185f..df2ca5760b5 100644
--- a/cl/antiquary/antiquary.go
+++ b/cl/antiquary/antiquary.go
@@ -18,7 +18,9 @@ package antiquary

 import (
	"context"
+	"io/ioutil"
	"math"
+	"strings"
	"sync/atomic"
	"time"

@@ -88,6 +90,25 @@ func NewAntiquary(ctx context.Context, blobStorage blob_storage.BlobStorage, gen
	}
 }

+// Check if the snapshot directory has beacon blocks files aka "contains beaconblock" and has a ".seg" extension over its first layer
+func doesSnapshotDirHaveBeaconBlocksFiles(snapshotDir string) bool {
+	// Iterate over the files in the snapshot directory
+	files, err := ioutil.ReadDir(snapshotDir)
+	if err != nil {
+		return false
+	}
+	for _, file := range files {
+		// Check if the file has a ".seg" extension
+		if file.IsDir() {
+			continue
+		}
+		if strings.Contains(file.Name(), "beaconblock") && strings.HasSuffix(file.Name(), ".seg") {
+			return true
+		}
+	}
+	return false
+}
+
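Editorial note on the helper added above: `io/ioutil` has been deprecated since Go 1.16, and `ioutil.ReadDir` stats every entry. The same check can be written against `os.ReadDir`, which returns lightweight `DirEntry` values — a sketch of the equivalent, not part of this PR:

```go
package antiquary

import (
	"os"
	"strings"
)

// Equivalent to doesSnapshotDirHaveBeaconBlocksFiles above, but using
// os.ReadDir: DirEntry avoids the per-file Stat that ioutil.ReadDir performs.
func hasBeaconBlockSegFiles(snapshotDir string) bool {
	entries, err := os.ReadDir(snapshotDir)
	if err != nil {
		return false
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		if strings.Contains(e.Name(), "beaconblock") && strings.HasSuffix(e.Name(), ".seg") {
			return true
		}
	}
	return false
}
```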
 // Antiquate is the function that starts transactions seeding and shit, very cool but very shit too as a name.
 func (a *Antiquary) Loop() error {
	if a.downloader == nil || !a.blocks {
@@ -103,8 +124,9 @@ func (a *Antiquary) Loop() error {
	}
	reCheckTicker := time.NewTicker(3 * time.Second)
	defer reCheckTicker.Stop()
+
	// Fist part of the antiquate is to download caplin snapshots
-	for !statsReply.Completed {
+	for (!statsReply.Completed || !doesSnapshotDirHaveBeaconBlocksFiles(a.dirs.Snap)) && !a.backfilled.Load() {
		select {
		case <-reCheckTicker.C:
			statsReply, err = a.downloader.Stats(a.ctx, &proto_downloader.StatsRequest{})
@@ -208,6 +230,7 @@ func (a *Antiquary) Loop() error {
		if !a.backfilled.Load() {
			continue
		}
+
		var (
			from uint64
			to   uint64
diff --git a/cl/antiquary/tests/tests.go b/cl/antiquary/tests/tests.go
index feacc4c31c1..74681e445b0 100644
--- a/cl/antiquary/tests/tests.go
+++ b/cl/antiquary/tests/tests.go
@@ -181,7 +181,7 @@ func GetBellatrixRandom() ([]*cltypes.SignedBeaconBlock, *state.CachingBeaconSta
	for i := 0; i < 96; i++ {
		block := cltypes.NewSignedBeaconBlock(&clparams.MainnetBeaconConfig)
		// Lets do te
-		b, err := bellatrixFS.ReadFile("test_data/bellatrix/blocks_" + strconv.FormatInt(int64(i), 10) + ".ssz_snappy")
+		b, err := bellatrixFS.ReadFile("test_data/bellatrix/blocks_" + strconv.Itoa(i) + ".ssz_snappy")
		if err != nil {
			panic(err)
		}
diff --git a/cl/beacon/beaconevents/emitter.go b/cl/beacon/beaconevents/emitter.go
deleted file mode 100644
index 17eded951a9..00000000000
--- a/cl/beacon/beaconevents/emitter.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2024 The Erigon Authors
-// This file is part of Erigon.
-//
-// Erigon is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// Erigon is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with Erigon. If not, see <http://www.gnu.org/licenses/>.
-
-package beaconevents
-
-import (
-	"sync"
-
-	"github.com/google/uuid"
-	"golang.org/x/sync/errgroup"
-)
-
-type Subscription struct {
-	id     string
-	topics map[string]struct{}
-	cb     func(topic string, item any)
-}
-
-type EventName string
-
-// Emitters creates pub/sub connection
-type Emitters struct {
-	cbs map[string]*Subscription
-	mu  sync.RWMutex
-}
-
-func NewEmitters() *Emitters {
-	return &Emitters{
-		cbs: map[string]*Subscription{},
-	}
-}
-
-// publish to all subscribers. each callback is run in a separate goroutine
-func (e *Emitters) Publish(s string, a any) {
-	// forward gossip object
-	e.mu.Lock()
-	values := make([]*Subscription, 0, len(e.cbs))
-	for _, v := range e.cbs {
-		values = append(values, v)
-	}
-	e.mu.Unlock()
-
-	egg := errgroup.Group{}
-	for idx := range values {
-		v := values[idx]
-		exec := func() error { v.cb(s, a); return nil }
-		if _, ok := v.topics["*"]; ok {
-			egg.Go(exec)
-		} else if _, ok := v.topics[s]; ok {
-			egg.Go(exec)
-		}
-	}
-	egg.Wait()
-}
-
-// subscribe with callback. call the returned cancelfunc to unregister the callback
-// publish will block until all callbacks for the message are resolved
-func (e *Emitters) Subscribe(topics []string, cb func(topic string, item any)) (func(), error) {
-	subid := uuid.New().String()
-	sub := &Subscription{
-		id:     subid,
-		topics: map[string]struct{}{},
-		cb:     cb,
-	}
-	for _, v := range topics {
-		sub.topics[v] = struct{}{}
-	}
-	e.cbs[subid] = sub
-	return func() {
-		e.mu.Lock()
-		defer e.mu.Unlock()
-		delete(e.cbs, subid)
-	}, nil
-}
diff --git a/cl/beacon/beaconevents/emitter_test.go b/cl/beacon/beaconevents/emitter_test.go
index 32a4848c9c9..33ea1c84a18 100644
--- a/cl/beacon/beaconevents/emitter_test.go
+++ b/cl/beacon/beaconevents/emitter_test.go
@@ -14,57 +14,27 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with Erigon. If not, see <http://www.gnu.org/licenses/>.

-package beaconevents_test
+package beaconevents

 import (
-	"sync/atomic"
	"testing"
-
-	"github.com/erigontech/erigon/cl/beacon/beaconevents"
-	"github.com/stretchr/testify/require"
+	"time"
 )

 func TestEmitterSet(t *testing.T) {
-	e := beaconevents.NewEmitters()
-	var called int
-	e.Subscribe([]string{"set"}, func(topic string, item any) {
-		require.EqualValues(t, "set", topic)
-		require.EqualValues(t, "hello", item.(string))
-		called = called + 1
-	})
-	e.Publish("set", "hello")
-	require.EqualValues(t, 1, called)
-}
-
-func TestEmitterFilters(t *testing.T) {
-	e := beaconevents.NewEmitters()
-	var a atomic.Int64
-	var b atomic.Int64
-	var ab atomic.Int64
-	var wild atomic.Int64
-	e.Subscribe([]string{"a"}, func(topic string, item any) {
-		require.EqualValues(t, topic, item.(string))
-		a.Add(1)
-	})
-	e.Subscribe([]string{"b"}, func(topic string, item any) {
-		require.EqualValues(t, topic, item.(string))
-		b.Add(1)
-	})
-	e.Subscribe([]string{"a", "b"}, func(topic string, item any) {
-		require.EqualValues(t, topic, item.(string))
-		ab.Add(1)
-	})
-	e.Subscribe([]string{"*"}, func(topic string, item any) {
-		require.EqualValues(t, topic, item.(string))
-		wild.Add(1)
-	})
-
-	e.Publish("a", "a")
-	e.Publish("b", "b")
-	e.Publish("b", "b")
-	e.Publish("c", "c")
-
-	require.EqualValues(t, 1, a.Load())
-	require.EqualValues(t, 2, b.Load())
-	require.EqualValues(t, 3, ab.Load())
-	require.EqualValues(t, 4, wild.Load())
+	emitter := NewEventEmitter()
+	done := make(chan struct{})
+	go func() {
+		ch := make(chan *EventStream, 10)
+		t.Logf("Subscribing to emitter")
+		sub := emitter.Operation().Subscribe(ch)
+		defer sub.Unsubscribe()
+		t.Logf("Subscribed to emitter")
+		event := <-ch
+		t.Logf("Received event: %v", event)
+		close(done)
+	}()
+	time.Sleep(100 * time.Millisecond)
+	emitter.Operation().SendAttestation(&AttestationData{})
+	<-done
 }
diff --git a/cl/beacon/beaconevents/event_notifier.go b/cl/beacon/beaconevents/event_notifier.go
new file mode 100644
index 00000000000..411116bb335
--- /dev/null
+++ b/cl/beacon/beaconevents/event_notifier.go
@@ -0,0 +1,21 @@
+package beaconevents
+
+type EventEmitter struct {
+	stateFeed     *stateFeed     // block state feed
+	operationFeed *operationFeed // block operation feed
+}
+
+func NewEventEmitter() *EventEmitter {
+	return &EventEmitter{
+		operationFeed: newOpFeed(),
+		stateFeed:     newStateFeed(),
+	}
+}
+
+func (e *EventEmitter) State() *stateFeed {
+	return e.stateFeed
+}
+
+func (e *EventEmitter) Operation() *operationFeed {
+	return e.operationFeed
+}
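Editorial note: the rewrite replaces the callback-registry `Emitters` (deleted above) with typed feeds behind `EventEmitter` — consumers now own a channel and a subscription instead of registering callbacks. A sketch of the consumer side, outside the test shown above (channel size and error handling are the caller's choice; only the names come from the diff):

```go
package main

import (
	"fmt"

	"github.com/erigontech/erigon/cl/beacon/beaconevents"
)

func main() {
	emitter := beaconevents.NewEventEmitter()

	// Subscribers receive every operation event on their own channel.
	ch := make(chan *beaconevents.EventStream, 16)
	sub := emitter.Operation().Subscribe(ch)
	defer sub.Unsubscribe()

	go emitter.Operation().SendAttestation(&beaconevents.AttestationData{})

	ev := <-ch
	fmt.Println(ev.Event) // "attestation"
}
```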
diff --git a/cl/beacon/beaconevents/model.go b/cl/beacon/beaconevents/model.go
new file mode 100644
index 00000000000..feb9b2c609d
--- /dev/null
+++ b/cl/beacon/beaconevents/model.go
@@ -0,0 +1,119 @@
+package beaconevents
+
+import (
+	"github.com/erigontech/erigon-lib/common"
+	"github.com/erigontech/erigon/cl/cltypes"
+	"github.com/erigontech/erigon/cl/cltypes/solid"
+)
+
+type EventStream struct {
+	Event EventTopic  `json:"event"`
+	Data  interface{} `json:"data"`
+}
+
+type EventTopic string
+
+// Operation event topics
+const (
+	OpAttestation       EventTopic = "attestation"
+	OpVoluntaryExit     EventTopic = "voluntary_exit"
+	OpProposerSlashing  EventTopic = "proposer_slashing"
+	OpAttesterSlashing  EventTopic = "attester_slashing"
+	OpBlsToExecution    EventTopic = "bls_to_execution_change"
+	OpContributionProof EventTopic = "contribution_and_proof"
+	OpBlobSidecar       EventTopic = "blob_sidecar"
+)
+
+type (
+	// Operation event data types
+	AttestationData           = solid.Attestation
+	VoluntaryExitData         = cltypes.SignedVoluntaryExit
+	ProposerSlashingData      = cltypes.ProposerSlashing
+	AttesterSlashingData      = cltypes.AttesterSlashing
+	BlsToExecutionChangesData = cltypes.SignedBLSToExecutionChange
+	ContributionAndProofData  = cltypes.SignedContributionAndProof
+	BlobSidecarData           = cltypes.BlobSidecar
+)
+
+// State event topics
+const (
+	StateHead                EventTopic = "head"
+	StateBlock               EventTopic = "block"
+	StateBlockGossip         EventTopic = "block_gossip"
+	StateFinalizedCheckpoint EventTopic = "finalized_checkpoint"
+	StateChainReorg          EventTopic = "chain_reorg"
+	StateFinalityUpdate      EventTopic = "light_client_finality_update"
+	StateOptimisticUpdate    EventTopic = "light_client_optimistic_update"
+	StatePayloadAttributes   EventTopic = "payload_attributes"
+)
+
+// State event data types
+type HeadData struct {
+	Slot                      uint64      `json:"slot,string"`
+	Block                     common.Hash `json:"block"`
+	State                     common.Hash `json:"state"`
+	EpochTransition           bool        `json:"epoch_transition"`
+	PreviousDutyDependentRoot common.Hash `json:"previous_duty_dependent_root"`
+	CurrentDutyDependentRoot  common.Hash `json:"current_duty_dependent_root"`
+	ExecutionOptimistic       bool        `json:"execution_optimistic"`
+}
+
+type BlockData struct {
+	Slot                uint64      `json:"slot,string"`
+	Block               common.Hash `json:"block"`
+	ExecutionOptimistic bool        `json:"execution_optimistic"`
+}
+
+type BlockGossipData struct {
+	Slot  uint64      `json:"slot,string"`
+	Block common.Hash `json:"block"`
+}
+
+type FinalizedCheckpointData struct {
+	Block               common.Hash `json:"block"`
+	State               common.Hash `json:"state"`
+	Epoch               uint64      `json:"epoch,string"`
+	ExecutionOptimistic bool        `json:"execution_optimistic"`
+}
+
+type ChainReorgData struct {
+	Slot                uint64      `json:"slot,string"`
+	Depth               uint64      `json:"depth,string"`
+	OldHeadBlock        common.Hash `json:"old_head_block"`
+	NewHeadBlock        common.Hash `json:"new_head_block"`
+	OldHeadState        common.Hash `json:"old_head_state"`
+	NewHeadState        common.Hash `json:"new_head_state"`
+	Epoch               uint64      `json:"epoch,string"`
+	ExecutionOptimistic bool        `json:"execution_optimistic"`
+}
+
+type LightClientFinalityUpdateData struct {
+	Version string                            `json:"version"`
+	Data    cltypes.LightClientFinalityUpdate `json:"data"`
+}
+
+type LightClientOptimisticUpdateData struct {
+	Version string                              `json:"version"`
+	Data    cltypes.LightClientOptimisticUpdate `json:"data"`
+}
+
+type PayloadAttributesData struct {
+	Version string                   `json:"version"`
+	Data    PayloadAttributesContent `json:"data"`
+}
+
+type PayloadAttributesContent struct {
+	ProposerIndex     uint64            `json:"proposer_index,string"`
+	ProposalSlot      uint64            `json:"proposal_slot,string"`
+	ParentBlockNumber uint64            `json:"parent_block_number,string"`
+	ParentBlockRoot   common.Hash       `json:"parent_block_root"`
+	ParentBlockHash   common.Hash       `json:"parent_block_hash"`
+	PayloadAttributes PayloadAttributes `json:"payload_attributes"`
+}
+
+type PayloadAttributes struct {
+	Timestamp             uint64                              `json:"timestamp,string"`
+	PrevRandao            common.Hash                         `json:"prev_randao"`
+	SuggestedFeeRecipient common.Address                      `json:"suggested_fee_recipient"`
+	Withdrawals           *solid.ListSSZ[*cltypes.Withdrawal] `json:"withdrawals,omitempty"`
+}
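Editorial note: the numeric fields above all carry `json:"...,string"` tags, so slots and epochs serialize as quoted decimal strings, which is how the beacon API represents 64-bit integers. A quick standalone illustration of that encoding behavior (struct name is hypothetical):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type headData struct {
	Slot            uint64 `json:"slot,string"`
	EpochTransition bool   `json:"epoch_transition"`
}

func main() {
	out, _ := json.Marshal(headData{Slot: 9123456, EpochTransition: true})
	// The ",string" option quotes the number:
	// {"slot":"9123456","epoch_transition":true}
	fmt.Println(string(out))
}
```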
diff --git a/cl/beacon/beaconevents/operation_feed.go b/cl/beacon/beaconevents/operation_feed.go
new file mode 100644
index 00000000000..db07622aaba
--- /dev/null
+++ b/cl/beacon/beaconevents/operation_feed.go
@@ -0,0 +1,67 @@
+package beaconevents
+
+import ethevent "github.com/erigontech/erigon/event"
+
+type operationFeed struct {
+	feed *ethevent.Feed
+}
+
+func newOpFeed() *operationFeed {
+	return &operationFeed{
+		feed: &ethevent.Feed{},
+	}
+}
+
+func (f *operationFeed) Subscribe(channel chan *EventStream) ethevent.Subscription {
+	return f.feed.Subscribe(channel)
+}
+
+func (f *operationFeed) SendAttestation(value *AttestationData) int {
+	return f.feed.Send(&EventStream{
+		Event: OpAttestation,
+		Data:  value,
+	})
+}
+
+func (f *operationFeed) SendVoluntaryExit(value *VoluntaryExitData) int {
+	return f.feed.Send(&EventStream{
+		Event: OpVoluntaryExit,
+		Data:  value,
+	})
+}
+
+func (f *operationFeed) SendProposerSlashing(value *ProposerSlashingData) int {
+	return f.feed.Send(&EventStream{
+		Event: OpProposerSlashing,
+		Data:  value,
+	})
+
+}
+
+func (f *operationFeed) SendAttesterSlashing(value *AttesterSlashingData) int {
+	return f.feed.Send(&EventStream{
+		Event: OpAttesterSlashing,
+		Data:  value,
+	})
+}
+
+func (f *operationFeed) SendBlsToExecution(value *BlsToExecutionChangesData) int {
+	return f.feed.Send(&EventStream{
+		Event: OpBlsToExecution,
+		Data:  value,
+	})
+}
+
+func (f *operationFeed) SendContributionProof(value *ContributionAndProofData) int {
+	return f.feed.Send(&EventStream{
+		Event: OpContributionProof,
+		Data:  value,
+	})
+}
+
+func (f *operationFeed) SendBlobSidecar(value *BlobSidecarData) int {
+	return f.feed.Send(&EventStream{
+		Event: OpBlobSidecar,
+		Data:  value,
+	})
+}
diff --git a/cl/beacon/beaconevents/state_feed.go b/cl/beacon/beaconevents/state_feed.go
new file mode 100644
index 00000000000..424f3352a95
--- /dev/null
+++ b/cl/beacon/beaconevents/state_feed.go
@@ -0,0 +1,40 @@
+package beaconevents
+
+import (
+	ethevent "github.com/erigontech/erigon/event"
+)
+
+type stateFeed struct {
+	feed *ethevent.Feed
+}
+
+func newStateFeed() *stateFeed {
+	return &stateFeed{
+		feed: &ethevent.Feed{},
+	}
+}
+
+func (f *stateFeed) Subscribe(channel chan *EventStream) ethevent.Subscription {
+	return f.feed.Subscribe(channel)
+}
+
+func (f *stateFeed) SendHead(value *HeadData) int {
+	return f.feed.Send(&EventStream{
+		Event: StateHead,
+		Data:  value,
+	})
+}
+
+func (f *stateFeed) SendBlock(value *BlockData) int {
+	return f.feed.Send(&EventStream{
+		Event: StateBlock,
+		Data:  value,
+	})
+}
+
+func (f *stateFeed) SendFinalizedCheckpoint(value *FinalizedCheckpointData) int {
+	return f.feed.Send(&EventStream{
+		Event: StateFinalizedCheckpoint,
+		Data:  value,
+	})
+}
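Editorial note: both feeds wrap erigon's `event.Feed` (imported above as `ethevent`), which is understood to mirror go-ethereum's: `Send` blocks until every subscribed channel has accepted the value and returns the number of subscribers delivered to — hence the `int` return on the `Send*` methods above. A compact illustration of those semantics, assuming the import path from the diff:

```go
package main

import (
	"fmt"

	"github.com/erigontech/erigon/event"
)

func main() {
	var feed event.Feed

	ch1 := make(chan int, 1)
	ch2 := make(chan int, 1)
	sub1 := feed.Subscribe(ch1)
	sub2 := feed.Subscribe(ch2)
	defer sub1.Unsubscribe()
	defer sub2.Unsubscribe()

	// Send blocks until both (buffered) channels accept,
	// then reports how many subscribers received the value.
	n := feed.Send(42)
	fmt.Println(n, <-ch1, <-ch2) // 2 42 42
}
```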
diff --git a/cl/beacon/beaconhttp/api.go b/cl/beacon/beaconhttp/api.go
index 262f66feb7d..8b8f0bf2292 100644
--- a/cl/beacon/beaconhttp/api.go
+++ b/cl/beacon/beaconhttp/api.go
@@ -147,7 +147,7 @@ func HandleEndpoint[T any](h EndpointHandler[T]) http.HandlerFunc {
		case strings.Contains(contentType, "text/event-stream"):
			return
		default:
-			http.Error(w, fmt.Sprintf("content type must include application/json, application/octet-stream, or text/event-stream, got %s", contentType), http.StatusBadRequest)
+			http.Error(w, "content type must include application/json, application/octet-stream, or text/event-stream, got "+contentType, http.StatusBadRequest)
		}
	})
 }
diff --git a/cl/beacon/beaconhttp/args.go b/cl/beacon/beaconhttp/args.go
index d4ff2c04171..72572601756 100644
--- a/cl/beacon/beaconhttp/args.go
+++ b/cl/beacon/beaconhttp/args.go
@@ -17,7 +17,7 @@
 package beaconhttp

 import (
-	"fmt"
+	"errors"
	"net/http"
	"regexp"
	"strconv"
@@ -73,7 +73,7 @@ func EpochFromRequest(r *http.Request) (uint64, error) {
	regex := regexp.MustCompile(`^\d+$`)
	epoch := chi.URLParam(r, "epoch")
	if !regex.MatchString(epoch) {
-		return 0, fmt.Errorf("invalid path variable: {epoch}")
+		return 0, errors.New("invalid path variable: {epoch}")
	}
	epochMaybe, err := strconv.ParseUint(epoch, 10, 64)
	if err != nil {
@@ -95,7 +95,7 @@ func BlockIdFromRequest(r *http.Request) (*SegmentID, error) {
	blockId := chi.URLParam(r, "block_id")
	if !regex.MatchString(blockId) {
-		return nil, fmt.Errorf("invalid path variable: {block_id}")
+		return nil, errors.New("invalid path variable: {block_id}")
	}

	if blockId == "head" {
@@ -122,7 +122,7 @@ func StateIdFromRequest(r *http.Request) (*SegmentID, error) {
	stateId := chi.URLParam(r, "state_id")
	if !regex.MatchString(stateId) {
-		return nil, fmt.Errorf("invalid path variable: {state_id}")
+		return nil, errors.New("invalid path variable: {state_id}")
	}

	if stateId == "head" {
@@ -154,17 +154,17 @@ func HashFromQueryParams(r *http.Request, name string) (*common.Hash, error) {
	}
	// check if hashstr is an hex string
	if len(hashStr) != 2+2*32 {
-		return nil, fmt.Errorf("invalid hash length")
+		return nil, errors.New("invalid hash length")
	}
	if hashStr[:2] != "0x" {
-		return nil, fmt.Errorf("invalid hash prefix")
+		return nil, errors.New("invalid hash prefix")
	}
	notHex, err := regexp.MatchString("[^0-9A-Fa-f]", hashStr[2:])
	if err != nil {
		return nil, err
	}
	if notHex {
-		return nil, fmt.Errorf("invalid hash characters")
+		return nil, errors.New("invalid hash characters")
	}
	hash := common.HexToHash(hashStr)
diff --git a/cl/beacon/builder/client.go b/cl/beacon/builder/client.go
index 8a64d426995..510d502bdb5 100644
--- a/cl/beacon/builder/client.go
+++ b/cl/beacon/builder/client.go
@@ -36,7 +36,7 @@ import (
 var _ BuilderClient = &builderClient{}

 var (
-	ErrNoContent = fmt.Errorf("no http content")
+	ErrNoContent = errors.New("no http content")
 )

 type builderClient struct {
diff --git a/cl/beacon/handler/attestation_rewards.go b/cl/beacon/handler/attestation_rewards.go
index a218adf0d74..a555eb28593 100644
--- a/cl/beacon/handler/attestation_rewards.go
+++ b/cl/beacon/handler/attestation_rewards.go
@@ -18,7 +18,7 @@ package handler

 import (
	"encoding/json"
-	"fmt"
+	"errors"
	"io"
	"net/http"

@@ -94,7 +94,7 @@ func (a *ApiHandler) PostEthV1BeaconRewardsAttestations(w http.ResponseWriter, r
	}
	headEpoch := headSlot / a.beaconChainCfg.SlotsPerEpoch
	if epoch > headEpoch {
-		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("epoch is in the future"))
+		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("epoch is in the future"))
	}
	// Few cases to handle:
	// 1) finalized data
@@ -115,14 +115,14 @@ func (a *ApiHandler) PostEthV1BeaconRewardsAttestations(w http.ResponseWriter, r
			continue
		}
		if version == clparams.Phase0Version {
-			return nil, beaconhttp.NewEndpointError(http.StatusHTTPVersionNotSupported, fmt.Errorf("phase0 state is not supported when there is no antiquation"))
+			return nil, beaconhttp.NewEndpointError(http.StatusHTTPVersionNotSupported, errors.New("phase0 state is not supported when there is no antiquation"))
		}
		inactivityScores, err := a.forkchoiceStore.GetInactivitiesScores(blockRoot)
		if err != nil {
			return nil, err
		}
		if inactivityScores == nil {
-			return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("no inactivity scores found for this epoch"))
+			return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("no inactivity scores found for this epoch"))
		}

		prevPartecipation, err := a.forkchoiceStore.GetPreviousPartecipationIndicies(blockRoot)
@@ -130,24 +130,24 @@ func (a *ApiHandler) PostEthV1BeaconRewardsAttestations(w http.ResponseWriter, r
			return nil, err
		}
		if prevPartecipation == nil {
-			return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("no previous partecipation found for this epoch"))
+			return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("no previous partecipation found for this epoch"))
		}
		validatorSet, err := a.forkchoiceStore.GetValidatorSet(blockRoot)
		if err != nil {
			return nil, err
		}
		if validatorSet == nil {
-			return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("no validator set found for this epoch"))
+			return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("no validator set found for this epoch"))
		}

		ok, finalizedCheckpoint, _, _ := a.forkchoiceStore.GetFinalityCheckpoints(blockRoot)
		if !ok {
-			return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("no finalized checkpoint found for this epoch"))
+			return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("no finalized checkpoint found for this epoch"))
		}
		return a.computeAttestationsRewardsForAltair(validatorSet, inactivityScores, prevPartecipation, a.isInactivityLeaking(epoch, finalizedCheckpoint), filterIndicies, epoch)
		}
-		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("no block found for this epoch"))
+		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("no block found for this epoch"))
	}

	root, err := a.findEpochRoot(tx, epoch)
@@ -159,7 +159,7 @@ func (a *ApiHandler) PostEthV1BeaconRewardsAttestations(w http.ResponseWriter, r
		return nil, err
	}
	if lastSlotPtr == nil {
-		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("no block found for this epoch"))
+		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("no block found for this epoch"))
	}
	lastSlot := *lastSlotPtr

@@ -168,7 +168,7 @@ func (a *ApiHandler) PostEthV1BeaconRewardsAttestations(w http.ResponseWriter, r
		return nil, err
	}
	if lastSlot > stateProgress {
-		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("requested range is not yet processed or the node is not archivial"))
+		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("requested range is not yet processed or the node is not archivial"))
	}

	epochData, err := state_accessors.ReadEpochData(tx, a.beaconChainCfg.RoundSlotToEpoch(lastSlot))
@@ -181,7 +181,7 @@ func (a *ApiHandler) PostEthV1BeaconRewardsAttestations(w http.ResponseWriter, r
		return nil, err
	}
	if validatorSet == nil {
-		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("no validator set found for this epoch"))
+		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("no validator set found for this epoch"))
	}

	_, previousIdx, err := a.stateReader.ReadPartecipations(tx, lastSlot)
diff --git a/cl/beacon/handler/blobs.go b/cl/beacon/handler/blobs.go
index fef1bf82660..718cb4643ac 100644
--- a/cl/beacon/handler/blobs.go
+++ b/cl/beacon/handler/blobs.go
@@ -17,7 +17,7 @@
 package handler

 import (
-	"fmt"
+	"errors"
	"net/http"
	"strconv"

@@ -50,7 +50,7 @@ func (a *ApiHandler) GetEthV1BeaconBlobSidecars(w http.ResponseWriter, r *http.R
		return nil, err
	}
	if slot == nil {
-		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("block not found"))
+		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("block not found"))
	}
	if a.caplinSnapshots != nil && *slot <= a.caplinSnapshots.FrozenBlobs() {
		out, err := a.caplinSnapshots.ReadBlobSidecars(*slot)
diff --git a/cl/beacon/handler/block_production.go b/cl/beacon/handler/block_production.go
index c6b607f8927..6641c8d4638 100644
--- a/cl/beacon/handler/block_production.go
+++ b/cl/beacon/handler/block_production.go
@@ -68,7 +68,7 @@ const (
 )

 var (
-	errBuilderNotEnabled = fmt.Errorf("builder is not enabled")
+	errBuilderNotEnabled = errors.New("builder is not enabled")
 )

 var defaultGraffitiString = "Caplin"
@@ -88,14 +88,14 @@ func (a *ApiHandler) GetEthV1ValidatorAttestationData(
	if slot == nil || committeeIndex == nil {
		return nil, beaconhttp.NewEndpointError(
			http.StatusBadRequest,
-			fmt.Errorf("slot and committee_index url params are required"),
+			errors.New("slot and committee_index url params are required"),
		)
	}
	headState := a.syncedData.HeadState()
	if headState == nil {
		return nil, beaconhttp.NewEndpointError(
			http.StatusServiceUnavailable,
-			fmt.Errorf("beacon node is still syncing"),
+			errors.New("beacon node is still syncing"),
		)
	}

@@ -164,7 +164,7 @@ func (a *ApiHandler) GetEthV3ValidatorBlock(
	if s == nil {
		return nil, beaconhttp.NewEndpointError(
			http.StatusServiceUnavailable,
-			fmt.Errorf("node is syncing"),
+			errors.New("node is syncing"),
		)
	}

@@ -400,7 +400,7 @@ func (a *ApiHandler) getBuilderPayload(
	if err != nil {
		return nil, err
	} else if header == nil {
-		return nil, fmt.Errorf("no error but nil header")
+		return nil, errors.New("no error but nil header")
	}

	// check the version
@@ -419,10 +419,10 @@ func (a *ApiHandler) getBuilderPayload(
		for i := 0; i < header.Data.Message.BlobKzgCommitments.Len(); i++ {
			c := header.Data.Message.BlobKzgCommitments.Get(i)
			if c == nil {
-				return nil, fmt.Errorf("nil blob kzg commitment")
+				return nil, errors.New("nil blob kzg commitment")
			}
			if len(c) != length.Bytes48 {
-				return nil, fmt.Errorf("invalid blob kzg commitment length")
+				return nil, errors.New("invalid blob kzg commitment length")
			}
		}
	}
@@ -626,7 +626,7 @@ func (a *ApiHandler) produceBeaconBody(
	wg.Wait()
	if executionPayload == nil {
-		return nil, 0, fmt.Errorf("failed to produce execution payload")
+		return nil, 0, errors.New("failed to produce execution payload")
	}
	beaconBody.ExecutionPayload = executionPayload
	return beaconBody, executionValue, nil
@@ -859,7 +859,7 @@ func (a *ApiHandler) publishBlindedBlocks(w http.ResponseWriter, r *http.Request
	// check commitments
	blockCommitments := signedBlindedBlock.Block.Body.BlobKzgCommitments
	if len(blobsBundle.Commitments) != blockCommitments.Len() {
-		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("commitments length mismatch"))
+		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, errors.New("commitments length mismatch"))
	}
	for i := range blobsBundle.Commitments {
		// add the bundle to recently produced blobs
@@ -885,7 +885,7 @@ func (a *ApiHandler) parseEthConsensusVersion(
	apiVersion int,
 ) (clparams.StateVersion, error) {
	if str == "" && apiVersion == 2 {
-		return 0, fmt.Errorf("Eth-Consensus-Version header is required")
+		return 0, errors.New("Eth-Consensus-Version header is required")
	}
	if str == "" && apiVersion == 1 {
		currentEpoch := a.ethClock.GetCurrentEpoch()
@@ -931,7 +931,7 @@ func (a *ApiHandler) parseRequestBeaconBlock(
		block.SignedBlock.Block.SetVersion(version)
		return block, nil
	}
-	return nil, fmt.Errorf("invalid content type")
+	return nil, errors.New("invalid content type")
 }

 func (a *ApiHandler) broadcastBlock(ctx context.Context, blk *cltypes.SignedBeaconBlock) error {
@@ -1040,6 +1040,7 @@ func (a *ApiHandler) storeBlockAndBlobs(
	if _, err := a.engine.ForkChoiceUpdate(ctx, a.forkchoiceStore.GetEth1Hash(finalizedBlockRoot), a.forkchoiceStore.GetEth1Hash(blockRoot), nil); err != nil {
		return err
	}
+	a.validatorsMonitor.OnNewBlock(block.Block)
	return nil
 }

@@ -1080,9 +1081,10 @@ func (a *ApiHandler) findBestAttestationsForBlockProduction(
	sort.Slice(attestationCandidates, func(i, j int) bool {
		return attestationCandidates[i].reward > attestationCandidates[j].reward
	})
+
	// Some aggregates can be supersets of existing ones so let's filter out the supersets
	// this MAP is HashTreeRoot(AttestationData) => AggregationBits
-	aggregationBitsByAttestationData := make(map[libcommon.Hash][]byte)
+	hashToMergedAtt := make(map[libcommon.Hash]*solid.Attestation)
	for _, candidate := range attestationCandidates {
		// Check if it is a superset of a pre-included attestation with higher reward
		attestationDataRoot, err := candidate.attestation.AttestantionData().HashSSZ()
@@ -1090,26 +1092,28 @@ func (a *ApiHandler) findBestAttestationsForBlockProduction(
			log.Warn("[Block Production] Cannot compute attestation data root", "err", err)
			continue
		}
-		currAggregationBits, exists := aggregationBitsByAttestationData[attestationDataRoot]
-		if exists {
-			if utils.IsNonStrictSupersetBitlist(
+		if curAtt, exists := hashToMergedAtt[attestationDataRoot]; exists {
+			currAggregationBits := curAtt.AggregationBits()
+			if !utils.IsNonStrictSupersetBitlist(
				currAggregationBits,
				candidate.attestation.AggregationBits(),
			) {
-				continue
+				// merge if not a superset
+				utils.MergeBitlists(currAggregationBits, candidate.attestation.AggregationBits())
+				curAtt.SetAggregationBits(currAggregationBits)
			}
-			utils.MergeBitlists(currAggregationBits, candidate.attestation.AggregationBits())
		} else {
-			currAggregationBits = candidate.attestation.AggregationBits()
+			// Update the currently built superset
+			hashToMergedAtt[attestationDataRoot] = candidate.attestation.Copy()
		}
-		// Update the currently built superset
-		aggregationBitsByAttestationData[attestationDataRoot] = currAggregationBits

-		ret.Append(candidate.attestation)
-		if ret.Len() >= int(a.beaconChainCfg.MaxAttestations) {
+		if len(hashToMergedAtt) >= int(a.beaconChainCfg.MaxAttestations) {
			break
		}
	}
+	for _, att := range hashToMergedAtt {
+		ret.Append(att)
+	}
	return ret
 }
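Editorial note: the deduplication rewrite above rests on two bitlist primitives — a superset test and an in-place OR-merge. A standalone model of what the erigon `utils` helpers are understood to do (simplified: real aggregation bitlists carry a trailing length bit, and the actual helpers live in `cl/utils`):

```go
package main

import "fmt"

// isNonStrictSuperset reports whether every bit set in b is also set in a.
func isNonStrictSuperset(a, b []byte) bool {
	if len(a) < len(b) {
		return false
	}
	for i := range b {
		if a[i]&b[i] != b[i] {
			return false
		}
	}
	return true
}

// mergeBitlists ORs b into a, pooling the validators covered by both aggregates.
func mergeBitlists(a, b []byte) {
	for i := range b {
		a[i] |= b[i]
	}
}

func main() {
	have := []byte{0b0110}
	cand := []byte{0b0011}
	if !isNonStrictSuperset(have, cand) {
		mergeBitlists(have, cand) // have now covers both aggregates: 0b0111
	}
	fmt.Printf("%04b\n", have[0])
}
```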
diff --git a/cl/beacon/handler/blocks.go b/cl/beacon/handler/blocks.go
index 32c1e99d84f..59926604d73 100644
--- a/cl/beacon/handler/blocks.go
+++ b/cl/beacon/handler/blocks.go
@@ -18,6 +18,7 @@ package handler

 import (
	"context"
+	"errors"
	"fmt"
	"net/http"

@@ -56,7 +57,7 @@ func (a *ApiHandler) rootFromBlockId(ctx context.Context, tx kv.Tx, blockId *bea
			return libcommon.Hash{}, err
		}
		if root == (libcommon.Hash{}) {
-			return libcommon.Hash{}, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("genesis block not found"))
+			return libcommon.Hash{}, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("genesis block not found"))
		}
	case blockId.GetSlot() != nil:
		root, err = beacon_indicies.ReadCanonicalBlockRoot(tx, *blockId.GetSlot())
@@ -70,7 +71,7 @@ func (a *ApiHandler) rootFromBlockId(ctx context.Context, tx kv.Tx, blockId *bea
		// first check if it exists
		root = *blockId.GetRoot()
	default:
-		return libcommon.Hash{}, beaconhttp.NewEndpointError(http.StatusInternalServerError, fmt.Errorf("cannot parse block id"))
+		return libcommon.Hash{}, beaconhttp.NewEndpointError(http.StatusInternalServerError, errors.New("cannot parse block id"))
	}
	return
 }
diff --git a/cl/beacon/handler/builder.go b/cl/beacon/handler/builder.go
index 9f7a6e0e1da..649f58dbd4c 100644
--- a/cl/beacon/handler/builder.go
+++ b/cl/beacon/handler/builder.go
@@ -18,7 +18,7 @@ package handler

 import (
	"encoding/json"
-	"fmt"
+	"errors"
	"net/http"

	libcommon "github.com/erigontech/erigon-lib/common"
@@ -53,17 +53,17 @@ func (a *ApiHandler) GetEth1V1BuilderStatesExpectedWithdrawals(w http.ResponseWr
		return nil, err
	}
	if slot == nil {
-		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("state not found"))
+		return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("state not found"))
	}
	if a.beaconChainCfg.GetCurrentStateVersion(*slot/a.beaconChainCfg.SlotsPerEpoch) < clparams.CapellaVersion {
-		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("the specified state is not a capella state"))
+		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, errors.New("the specified state is not a capella state"))
	}
	headRoot, _, err := a.forkchoiceStore.GetHead()
	if err != nil {
		return nil, err
	}
	if a.syncedData.Syncing() {
-		return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, fmt.Errorf("beacon node is syncing"))
+		return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, errors.New("beacon node is syncing"))
	}
	if root == headRoot {
		return newBeaconResponse(state.ExpectedWithdrawals(a.syncedData.HeadState(), state.Epoch(a.syncedData.HeadState()))).WithFinalized(false), nil
@@ -71,7 +71,7 @@ func (a *ApiHandler) GetEth1V1BuilderStatesExpectedWithdrawals(w http.ResponseWr
	lookAhead := 1024
	for currSlot := *slot + 1; currSlot < *slot+uint64(lookAhead); currSlot++ {
		if currSlot > a.syncedData.HeadSlot() {
-			return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("state not found"))
+			return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("state not found"))
		}
		blockRoot, err := beacon_indicies.ReadCanonicalBlockRoot(tx, currSlot)
		if err != nil {
@@ -87,7 +87,7 @@ func (a *ApiHandler) GetEth1V1BuilderStatesExpectedWithdrawals(w http.ResponseWr
		return newBeaconResponse(blk.Block.Body.ExecutionPayload.Withdrawals).WithFinalized(false).WithOptimistic(isOptimistic), nil
	}

-	return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("state not found"))
+	return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("state not found"))
 }

 func (a *ApiHandler) PostEthV1BuilderRegisterValidator(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) {
@@ -96,7 +96,7 @@ func (a *ApiHandler) PostEthV1BuilderRegisterValidator(w http.ResponseWriter, r
		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err)
	}
	if len(registerReq) == 0 {
-		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("empty request"))
+		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, errors.New("empty request"))
	}
	if err := a.builderClient.RegisterValidator(r.Context(), registerReq); err != nil {
		return nil, beaconhttp.NewEndpointError(http.StatusInternalServerError, err)
diff --git a/cl/beacon/handler/committees.go b/cl/beacon/handler/committees.go
index 7eb38c20a6e..3866d31e9a3 100644
--- a/cl/beacon/handler/committees.go
+++ b/cl/beacon/handler/committees.go
@@ -17,6 +17,7 @@
 package handler

 import (
+	"errors"
	"fmt"
	"net/http"
	"strconv"
@@ -89,7 +90,7 @@ func (a *ApiHandler) getCommittees(w http.ResponseWriter, r *http.Request) (*bea
	// non-finality case
	s := a.syncedData.HeadState()
	if s == nil {
-		return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, fmt.Errorf("node is syncing"))
+		return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, errors.New("node is syncing"))
	}
	if epoch > state.Epoch(s)+1 {
		return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("epoch %d is too far in the future", epoch))
diff --git a/cl/beacon/handler/duties_attester.go b/cl/beacon/handler/duties_attester.go
index 89aac15efbb..b6804596f3c 100644
--- a/cl/beacon/handler/duties_attester.go
+++ b/cl/beacon/handler/duties_attester.go
@@ -18,6 +18,7 @@ package handler

 import (
	"encoding/json"
+	"errors"
	"fmt"
	"net/http"
	"strconv"
@@ -63,7 +64,7 @@ func (a *ApiHandler) getAttesterDuties(w http.ResponseWriter, r *http.Request) (
	}
	s := a.syncedData.HeadState()
	if s == nil {
-		return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, fmt.Errorf("node is syncing"))
+		return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, errors.New("node is syncing"))
	}
	dependentRoot := a.getDependentRoot(s, epoch)

@@ -101,7 +102,7 @@ func (a *ApiHandler) getAttesterDuties(w http.ResponseWriter, r *http.Request) (

	// non-finality case
	if s == nil {
-		return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, fmt.Errorf("node is syncing"))
+		return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, errors.New("node is syncing"))
	}

	if epoch > state.Epoch(s)+3 {
diff --git a/cl/beacon/handler/duties_proposer.go b/cl/beacon/handler/duties_proposer.go
index e71a5c1fd2b..6c513ff551b 100644
--- a/cl/beacon/handler/duties_proposer.go
+++ b/cl/beacon/handler/duties_proposer.go
@@ -19,7 +19,7 @@ package handler
 import (
	"crypto/sha256"
	"encoding/binary"
-	"fmt"
+	"errors"
	"net/http"
	"sync"

@@ -45,7 +45,7 @@ func (a *ApiHandler) getDutiesProposer(w http.ResponseWriter, r *http.Request) (
	}
	s := a.syncedData.HeadState()
	if s == nil {
-		return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, fmt.Errorf("node is syncing"))
+		return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, errors.New("node is syncing"))
	}
	dependentRoot := a.getDependentRoot(s, epoch)
	if epoch < a.forkchoiceStore.FinalizedCheckpoint().Epoch() {
@@ -60,7 +60,7 @@ func (a *ApiHandler) getDutiesProposer(w http.ResponseWriter, r *http.Request) (
			return nil, err
		}
		if len(indiciesBytes) != int(a.beaconChainCfg.SlotsPerEpoch*4) {
-			return nil, beaconhttp.NewEndpointError(http.StatusInternalServerError, fmt.Errorf("proposer duties is corrupted"))
+			return nil, beaconhttp.NewEndpointError(http.StatusInternalServerError, errors.New("proposer duties is corrupted"))
		}
		duties := make([]proposerDuties, a.beaconChainCfg.SlotsPerEpoch)
		for i := uint64(0); i < a.beaconChainCfg.SlotsPerEpoch; i++ {
@@ -86,7 +86,7 @@ func (a *ApiHandler) getDutiesProposer(w http.ResponseWriter, r *http.Request) (
	// We need to compute our duties
	state := a.syncedData.HeadState()
	if state == nil {
-		return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, fmt.Errorf("beacon node is syncing"))
+		return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, errors.New("beacon node is syncing"))
	}

diff --git a/cl/beacon/handler/events.go b/cl/beacon/handler/events.go
index 6ca1448ac6b..9eecd2c1d32 100644
--- a/cl/beacon/handler/events.go
+++ b/cl/beacon/handler/events.go
@@ -17,67 +17,103 @@
 package handler

 import (
-	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
-	"sync"
-
-	"github.com/gfx-labs/sse"
+	"time"

+	mapset "github.com/deckarep/golang-set/v2"
	"github.com/erigontech/erigon-lib/log/v3"
+	event "github.com/erigontech/erigon/cl/beacon/beaconevents"
 )

-var validTopics = map[string]struct{}{
-	"head":                           {},
-	"block":                          {},
-	"attestation":                    {},
-	"voluntary_exit":                 {},
-	"bls_to_execution_change":        {},
-	"finalized_checkpoint":           {},
-	"chain_reorg":                    {},
-	"contribution_and_proof":         {},
-	"light_client_finality_update":   {},
-	"light_client_optimistic_update": {},
-	"payload_attributes":             {},
-	"*":                              {},
+var validTopics = map[event.EventTopic]struct{}{
+	// operation events
+	event.OpAttestation:       {},
+	event.OpAttesterSlashing:  {},
+	event.OpBlobSidecar:       {},
+	event.OpBlsToExecution:    {},
+	event.OpContributionProof: {},
+	event.OpProposerSlashing:  {},
+	event.OpVoluntaryExit:     {},
+	// state events
+	event.StateBlock:               {},
+	event.StateBlockGossip:         {},
+	event.StateChainReorg:          {},
+	event.StateFinalityUpdate:      {},
+	event.StateFinalizedCheckpoint: {},
+	event.StateHead:                {},
+	event.StateOptimisticUpdate:    {},
+	event.StatePayloadAttributes:   {},
 }

 func (a *ApiHandler) EventSourceGetV1Events(w http.ResponseWriter, r *http.Request) {
-	sink, err := sse.DefaultUpgrader.Upgrade(w, r)
-	if err != nil {
-		http.Error(w, "failed to upgrade", http.StatusInternalServerError)
+	if _, ok := w.(http.Flusher); !ok {
+		http.Error(w, "streaming unsupported", http.StatusBadRequest)
+		return
	}
+	w.Header().Set("Content-Type", "text/event-stream")
+	w.Header().Set("Connection", "keep-alive")
+
	topics := r.URL.Query()["topics"]
+	subscribeTopics := mapset.NewSet[event.EventTopic]()
	for _, v := range topics {
-		if _, ok := validTopics[v]; !ok {
-			http.Error(w, fmt.Sprintf("invalid Topic: %s", v), http.StatusBadRequest)
+		topic := event.EventTopic(v)
+		if _, ok := validTopics[topic]; !ok {
+			http.Error(w, "invalid Topic: "+v, http.StatusBadRequest)
+			return
		}
+		subscribeTopics.Add(topic)
	}
-	var mu sync.Mutex
-	closer, err := a.emitters.Subscribe(topics, func(topic string, item any) {
-		buf := &bytes.Buffer{}
-		err := json.NewEncoder(buf).Encode(item)
-		if err != nil {
-			// return early
+	log.Info("Subscribed to topics", "topics", subscribeTopics)
+
+	eventCh := make(chan *event.EventStream, 128)
+	opSub := a.emitters.Operation().Subscribe(eventCh)
+	stateSub := a.emitters.State().Subscribe(eventCh)
+	defer opSub.Unsubscribe()
+	defer stateSub.Unsubscribe()
+
+	ticker := time.NewTicker(time.Duration(a.beaconChainCfg.SecondsPerSlot) * time.Second)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case event := <-eventCh:
+			if !subscribeTopics.Contains(event.Event) {
+				continue
+			}
+			if event.Data == nil {
+				log.Warn("event data is nil", "event", event)
+				continue
+			}
+			// marshal and send
+			buf, err := json.Marshal(event.Data)
+			if err != nil {
+				log.Warn("failed to encode data", "err", err, "topic", event.Event)
+				continue
+			}
+			if _, err := fmt.Fprintf(w, "event: %s\ndata: %s\n\n", event.Event, string(buf)); err != nil {
+				log.Warn("failed to write event", "err", err)
+				continue
+			}
+			w.(http.Flusher).Flush()
+		case <-ticker.C:
+			// keep connection alive
+			if _, err := w.Write([]byte(":\n\n")); err != nil {
+				log.Warn("failed to write keep alive", "err", err)
+				continue
+			}
+			w.(http.Flusher).Flush()
+		case err := <-stateSub.Err():
+			log.Warn("event error", "err", err)
+			http.Error(w, fmt.Sprintf("event error %v", err), http.StatusInternalServerError)
+		case err := <-opSub.Err():
+			log.Warn("event error", "err", err)
+			http.Error(w, fmt.Sprintf("event error %v", err), http.StatusInternalServerError)
+			return
+		case <-r.Context().Done():
+			log.Info("Client disconnected")
			return
		}
-		mu.Lock()
-		err = sink.Encode(&sse.Event{
-			Event: []byte(topic),
-			Data:  buf,
-		})
-		mu.Unlock()
-		if err != nil {
-			log.Error("failed to encode data", "topic", topic, "err", err)
-		}
-		// OK to ignore this error. maybe should log it later?
-	})
-	if err != nil {
-		http.Error(w, "failed to subscribe", http.StatusInternalServerError)
-		return
	}
-	defer closer()
-	<-r.Context().Done()
-
 }
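Editorial note: the handler above emits raw Server-Sent Events: each record is an `event:` line, a `data:` line, and a blank line, while a bare `:` line is a comment that only keeps the connection alive. A minimal standalone SSE endpoint showing the same wire format (illustrative, not Erigon code):

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

func sse(w http.ResponseWriter, r *http.Request) {
	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "streaming unsupported", http.StatusBadRequest)
		return
	}
	w.Header().Set("Content-Type", "text/event-stream")

	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for i := 0; ; i++ {
		select {
		case <-r.Context().Done():
			return
		case <-ticker.C:
			// "event:" names the topic, "data:" carries the payload,
			// and the blank line terminates the record.
			fmt.Fprintf(w, "event: head\ndata: {\"n\":%d}\n\n", i)
			flusher.Flush()
		}
	}
}

func main() {
	http.HandleFunc("/events", sse)
	http.ListenAndServe(":8080", nil)
}
```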
string(buf)); err != nil { + log.Warn("failed to write event", "err", err) + continue + } + w.(http.Flusher).Flush() + case <-ticker.C: + // keep connection alive + if _, err := w.Write([]byte(":\n\n")); err != nil { + log.Warn("failed to write keep alive", "err", err) + continue + } + w.(http.Flusher).Flush() + case err := <-stateSub.Err(): + log.Warn("event error", "err", err) + http.Error(w, fmt.Sprintf("event error %v", err), http.StatusInternalServerError) + return + case err := <-opSub.Err(): + log.Warn("event error", "err", err) + http.Error(w, fmt.Sprintf("event error %v", err), http.StatusInternalServerError) + return + case <-r.Context().Done(): + log.Info("Client disconnected") return } - mu.Lock() - err = sink.Encode(&sse.Event{ - Event: []byte(topic), - Data: buf, - }) - mu.Unlock() - if err != nil { - log.Error("failed to encode data", "topic", topic, "err", err) - } - // OK to ignore this error. maybe should log it later? - }) - if err != nil { - http.Error(w, "failed to subscribe", http.StatusInternalServerError) - return } - defer closer() - <-r.Context().Done() - } diff --git a/cl/beacon/handler/forkchoice.go b/cl/beacon/handler/forkchoice.go index e5cbfe34687..2563f79d52b 100644 --- a/cl/beacon/handler/forkchoice.go +++ b/cl/beacon/handler/forkchoice.go @@ -18,7 +18,7 @@ package handler import ( "encoding/json" - "fmt" + "errors" "net/http" "strconv" @@ -27,7 +27,7 @@ import ( func (a *ApiHandler) GetEthV2DebugBeaconHeads(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { if a.syncedData.Syncing() { - return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, fmt.Errorf("beacon node is syncing")) + return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, errors.New("beacon node is syncing")) } hash, slotNumber, err := a.forkchoiceStore.GetHead() if err != nil { diff --git a/cl/beacon/handler/handler.go b/cl/beacon/handler/handler.go index ef6cc8fc34f..2bca0cdfff3 100644 --- a/cl/beacon/handler/handler.go +++ b/cl/beacon/handler/handler.go @@ -35,6 +35,7 @@ import ( "github.com/erigontech/erigon/cl/clparams" "github.com/erigontech/erigon/cl/cltypes" "github.com/erigontech/erigon/cl/cltypes/solid" + "github.com/erigontech/erigon/cl/monitor" "github.com/erigontech/erigon/cl/persistence/blob_storage" "github.com/erigontech/erigon/cl/persistence/state/historical_states_reader" "github.com/erigontech/erigon/cl/phase1/core/state/lru" @@ -82,7 +83,7 @@ type ApiHandler struct { // caches lighthouseInclusionCache sync.Map - emitters *beaconevents.Emitters + emitters *beaconevents.EventEmitter routerCfg *beacon_router_configuration.RouterConfiguration logger log.Logger @@ -105,6 +106,7 @@ type ApiHandler struct { blsToExecutionChangeService services.BLSToExecutionChangeService proposerSlashingService services.ProposerSlashingService builderClient builder.BuilderClient + validatorsMonitor monitor.ValidatorMonitor } func NewApiHandler( @@ -121,7 +123,7 @@ func NewApiHandler( sentinel sentinel.SentinelClient, version string, routerCfg *beacon_router_configuration.RouterConfiguration, - emitters *beaconevents.Emitters, + emitters *beaconevents.EventEmitter, blobStoage blob_storage.BlobStorage, caplinSnapshots *freezeblocks.CaplinSnapshots, validatorParams *validator_params.ValidatorParams, @@ -138,6 +140,7 @@ func NewApiHandler( blsToExecutionChangeService services.BLSToExecutionChangeService, proposerSlashingService services.ProposerSlashingService, builderClient builder.BuilderClient, + validatorMonitor monitor.ValidatorMonitor, )
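An aside on the events.go rewrite above: it drops the github.com/gfx-labs/sse dependency and streams Server-Sent Events by hand. The handler checks that the ResponseWriter supports http.Flusher, sets the text/event-stream headers, fans both emitter subscriptions into one channel, frames each payload as "event: <topic>\ndata: <json>\n\n", and writes a ":" comment line every slot as a keep-alive. A minimal, self-contained sketch of that framing pattern (the handler shape and names here are illustrative, not the Erigon API):

    package sseexample

    import (
        "encoding/json"
        "fmt"
        "net/http"
        "time"
    )

    // streamSSE pushes items from a channel to the client as Server-Sent Events
    // until the client goes away. Topic validation and subscriptions are elided.
    func streamSSE(w http.ResponseWriter, r *http.Request, events <-chan any) {
        flusher, ok := w.(http.Flusher) // buffered writers cannot stream incrementally
        if !ok {
            http.Error(w, "streaming unsupported", http.StatusInternalServerError)
            return
        }
        w.Header().Set("Content-Type", "text/event-stream")
        w.Header().Set("Cache-Control", "no-cache")
        w.Header().Set("Connection", "keep-alive")

        keepAlive := time.NewTicker(12 * time.Second) // one mainnet slot
        defer keepAlive.Stop()

        for {
            select {
            case item := <-events:
                data, err := json.Marshal(item)
                if err != nil {
                    continue // drop items that do not encode
                }
                // one frame per event: "event: <topic>\ndata: <json>\n\n"
                fmt.Fprintf(w, "event: %s\ndata: %s\n\n", "head", data)
                flusher.Flush()
            case <-keepAlive.C:
                w.Write([]byte(":\n\n")) // SSE comment line; keeps idle proxies from cutting the stream
                flusher.Flush()
            case <-r.Context().Done():
                return // client disconnected
            }
        }
    }

The Flush after every write matters: without it the net/http buffering would batch frames and clients would see events arrive late or all at once.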
*ApiHandler { blobBundles, err := lru.New[common.Bytes48, BlobBundle]("blobs", maxBlobBundleCacheSize) if err != nil { @@ -179,6 +182,7 @@ func NewApiHandler( blsToExecutionChangeService: blsToExecutionChangeService, proposerSlashingService: proposerSlashingService, builderClient: builderClient, + validatorsMonitor: validatorMonitor, } } @@ -332,7 +336,7 @@ func (a *ApiHandler) init() { } if a.routerCfg.Validator { r.Route("/validator", func(r chi.Router) { - r.Post("/blocks/{slot}", http.NotFound) + r.Get("/blocks/{slot}", beaconhttp.HandleEndpointFunc(a.GetEthV3ValidatorBlock)) // deprecate }) } }) diff --git a/cl/beacon/handler/lightclient.go b/cl/beacon/handler/lightclient.go index f14a58814b3..82c36931632 100644 --- a/cl/beacon/handler/lightclient.go +++ b/cl/beacon/handler/lightclient.go @@ -18,7 +18,7 @@ package handler import ( "encoding/json" - "fmt" + "errors" "net/http" "github.com/erigontech/erigon/cl/beacon/beaconhttp" @@ -45,7 +45,7 @@ func (a *ApiHandler) GetEthV1BeaconLightClientBootstrap(w http.ResponseWriter, r bootstrap, ok := a.forkchoiceStore.GetLightClientBootstrap(root) if !ok { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("bootstrap object evicted")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("bootstrap object evicted")) } return newBeaconResponse(bootstrap).WithVersion(bootstrap.Header.Version()), nil } @@ -53,7 +53,7 @@ func (a *ApiHandler) GetEthV1BeaconLightClientBootstrap(w http.ResponseWriter, r func (a *ApiHandler) GetEthV1BeaconLightClientOptimisticUpdate(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { update := a.forkchoiceStore.NewestLightClientUpdate() if update == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("no optimistic update loaded yet, try again later. it may take a few minutes for it to load.")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("no optimistic update loaded yet, try again later. it may take a few minutes for it to load.")) } version := update.AttestedHeader.Version() return newBeaconResponse(&cltypes.LightClientOptimisticUpdate{ @@ -66,7 +66,7 @@ func (a *ApiHandler) GetEthV1BeaconLightClientOptimisticUpdate(w http.ResponseWr func (a *ApiHandler) GetEthV1BeaconLightClientFinalityUpdate(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { update := a.forkchoiceStore.NewestLightClientUpdate() if update == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("no finility update loaded yet, try again later. it may take a few minutes for it to load.")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("no finality update loaded yet, try again later. 
it may take a few minutes for it to load.")) } version := update.AttestedHeader.Version() return newBeaconResponse(&cltypes.LightClientFinalityUpdate{ diff --git a/cl/beacon/handler/lighthouse.go b/cl/beacon/handler/lighthouse.go index c120fcd4f48..201c98c1c6d 100644 --- a/cl/beacon/handler/lighthouse.go +++ b/cl/beacon/handler/lighthouse.go @@ -17,7 +17,7 @@ package handler import ( - "fmt" + "errors" "net/http" "github.com/erigontech/erigon-lib/common" @@ -84,32 +84,32 @@ func (a *ApiHandler) GetLighthouseValidatorInclusionGlobal(w http.ResponseWriter } activeBalance, ok := a.forkchoiceStore.TotalActiveBalance(root) if !ok { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("active balance not found for current epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("active balance not found for current epoch")) } prevActiveBalance, ok := a.forkchoiceStore.TotalActiveBalance(prevRoot) if !ok { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("active balance not found for previous epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("active balance not found for previous epoch")) } validatorSet, err := a.forkchoiceStore.GetValidatorSet(root) if err != nil { return nil, err } if validatorSet == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("validator set not found for current epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("validator set not found for current epoch")) } currentEpochPartecipation, err := a.forkchoiceStore.GetCurrentPartecipationIndicies(root) if err != nil { return nil, err } if currentEpochPartecipation == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("partecipation not found for current epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("participation not found for current epoch")) } previousEpochPartecipation, err := a.forkchoiceStore.GetPreviousPartecipationIndicies(root) if err != nil { return nil, err } if previousEpochPartecipation == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("partecipation not found for previous epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("participation not found for previous epoch")) } return newBeaconResponse(a.computeLighthouseValidatorInclusionGlobal(epoch, activeBalance, prevActiveBalance, validatorSet, currentEpochPartecipation, previousEpochPartecipation)), nil } @@ -120,14 +120,14 @@ func (a *ApiHandler) GetLighthouseValidatorInclusionGlobal(w http.ResponseWriter return nil, err } if epochData == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("epoch data not found for current epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("epoch data not found for current epoch")) } prevEpochData, err := state_accessors.ReadEpochData(tx, prevEpoch*a.beaconChainCfg.SlotsPerEpoch) if err != nil { return nil, err } if prevEpochData == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("epoch data not found for previous epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("epoch data not found for previous epoch")) } // read the validator set validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, slot) @@ -135,17 +135,17 @@ func (a *ApiHandler) GetLighthouseValidatorInclusionGlobal(w http.ResponseWriter
return nil, err } if validatorSet == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("validator set not found for current epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("validator set not found for current epoch")) } currentEpochPartecipation, previousEpochPartecipation, err := a.stateReader.ReadPartecipations(tx, slot+(a.beaconChainCfg.SlotsPerEpoch-1)) if err != nil { return nil, err } if currentEpochPartecipation == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("partecipation not found for current epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("participation not found for current epoch")) } if previousEpochPartecipation == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("partecipation not found for previous epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("participation not found for previous epoch")) } return newBeaconResponse(a.computeLighthouseValidatorInclusionGlobal(epoch, epochData.TotalActiveBalance, prevEpochData.TotalActiveBalance, validatorSet, currentEpochPartecipation, previousEpochPartecipation)), nil } @@ -242,32 +242,32 @@ func (a *ApiHandler) GetLighthouseValidatorInclusion(w http.ResponseWriter, r *h } activeBalance, ok := a.forkchoiceStore.TotalActiveBalance(root) if !ok { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("active balance not found for current epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("active balance not found for current epoch")) } prevActiveBalance, ok := a.forkchoiceStore.TotalActiveBalance(prevRoot) if !ok { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("active balance not found for previous epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("active balance not found for previous epoch")) } validatorSet, err := a.forkchoiceStore.GetValidatorSet(root) if err != nil { return nil, err } if validatorSet == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("validator set not found for current epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("validator set not found for current epoch")) } currentEpochPartecipation, err := a.forkchoiceStore.GetCurrentPartecipationIndicies(root) if err != nil { return nil, err } if currentEpochPartecipation == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("partecipation not found for current epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("participation not found for current epoch")) } previousEpochPartecipation, err := a.forkchoiceStore.GetPreviousPartecipationIndicies(root) if err != nil { return nil, err } if previousEpochPartecipation == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("partecipation not found for previous epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("participation not found for previous epoch")) } return newBeaconResponse(a.computeLighthouseValidatorInclusion(int(validatorIndex), prevEpoch, epoch, activeBalance, prevActiveBalance, validatorSet, currentEpochPartecipation, previousEpochPartecipation)), nil } @@ -278,14 +278,14 @@ func (a *ApiHandler) GetLighthouseValidatorInclusion(w http.ResponseWriter, r *h return nil, err } if epochData == nil { - return nil,
beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("epoch data not found for current epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("epoch data not found for current epoch")) } prevEpochData, err := state_accessors.ReadEpochData(tx, prevEpoch*a.beaconChainCfg.SlotsPerEpoch) if err != nil { return nil, err } if prevEpochData == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("epoch data not found for previous epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("epoch data not found for previous epoch")) } // read the validator set validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, slot) @@ -293,17 +293,17 @@ func (a *ApiHandler) GetLighthouseValidatorInclusion(w http.ResponseWriter, r *h return nil, err } if validatorSet == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("validator set not found for current epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("validator set not found for current epoch")) } currentEpochPartecipation, previousEpochPartecipation, err := a.stateReader.ReadPartecipations(tx, slot+(a.beaconChainCfg.SlotsPerEpoch-1)) if err != nil { return nil, err } if currentEpochPartecipation == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("partecipation not found for current epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("participation not found for current epoch")) } if previousEpochPartecipation == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("partecipation not found for previous epoch")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("participation not found for previous epoch")) } return newBeaconResponse(a.computeLighthouseValidatorInclusion(int(validatorIndex), prevEpoch, epoch, epochData.TotalActiveBalance, prevEpochData.TotalActiveBalance, validatorSet, currentEpochPartecipation, previousEpochPartecipation)), nil } diff --git a/cl/beacon/handler/rewards.go b/cl/beacon/handler/rewards.go index 01c904d9be0..398a4565f46 100644 --- a/cl/beacon/handler/rewards.go +++ b/cl/beacon/handler/rewards.go @@ -18,7 +18,7 @@ package handler import ( "encoding/json" - "fmt" + "errors" "io" "net/http" "sort" @@ -62,7 +62,7 @@ func (a *ApiHandler) GetEthV1BeaconRewardsBlocks(w http.ResponseWriter, r *http. return nil, err } if blk == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("block not found")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("block not found")) } slot := blk.Header.Slot isFinalized := slot <= a.forkchoiceStore.FinalizedSlot() @@ -70,7 +70,7 @@ func (a *ApiHandler) GetEthV1BeaconRewardsBlocks(w http.ResponseWriter, r *http. // finalized case blkRewards, ok := a.forkchoiceStore.BlockRewards(root) if !ok { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("block not found")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("block not found")) } return newBeaconResponse(blockRewardsResponse{ ProposerIndex: blk.Header.ProposerIndex, @@ -86,7 +86,7 @@ func (a *ApiHandler) GetEthV1BeaconRewardsBlocks(w http.ResponseWriter, r *http.
return nil, err } if slotData == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("could not read historical block rewards, node may not be archive or it still processing historical states")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("could not read historical block rewards, node may not be an archive node or may still be processing historical states")) } return newBeaconResponse(blockRewardsResponse{ ProposerIndex: blk.Header.ProposerIndex, @@ -142,12 +142,12 @@ func (a *ApiHandler) PostEthV1BeaconRewardsSyncCommittees(w http.ResponseWriter, return nil, err } if blk == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("block not found")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("block not found")) } slot := blk.Block.Slot version := a.beaconChainCfg.GetCurrentStateVersion(blk.Block.Slot / a.beaconChainCfg.SlotsPerEpoch) if version < clparams.AltairVersion { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("sync committee rewards not available before Altair fork")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("sync committee rewards not available before Altair fork")) } // retrieve the state we need ----------------------------------------------- // We need: @@ -167,14 +167,14 @@ func (a *ApiHandler) PostEthV1BeaconRewardsSyncCommittees(w http.ResponseWriter, ) if isFinalized { if !isCanonical { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("non-canonical finalized block not found")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("non-canonical finalized block not found")) } epochData, err := state_accessors.ReadEpochData(tx, blk.Block.Slot) if err != nil { return nil, err } if epochData == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("could not read historical sync committee rewards, node may not be archive or it still processing historical states")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("could not read historical sync committee rewards, node may not be an archive node or may still be processing historical states")) } totalActiveBalance = epochData.TotalActiveBalance syncCommittee, err = state_accessors.ReadCurrentSyncCommittee(tx, a.beaconChainCfg.RoundSlotToSyncCommitteePeriod(blk.Block.Slot)) @@ -182,17 +182,17 @@ func (a *ApiHandler) PostEthV1BeaconRewardsSyncCommittees(w http.ResponseWriter, return nil, err } if syncCommittee == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("could not read historical sync committee, node may not be archive or it still processing historical states")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("could not read historical sync committee, node may not be an archive node or may still be processing historical states")) } } else { var ok bool syncCommittee, _, ok = a.forkchoiceStore.GetSyncCommittees(a.beaconChainCfg.SyncCommitteePeriod(slot)) if !ok { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("non-finalized sync committee not found")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("non-finalized sync committee not found")) } totalActiveBalance, ok = a.forkchoiceStore.TotalActiveBalance(root) if !ok { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("non-finalized total active balance not found")) + return nil,
beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("non-finalized total active balance not found")) } } committee := syncCommittee.GetCommittee() @@ -217,7 +217,7 @@ func (a *ApiHandler) PostEthV1BeaconRewardsSyncCommittees(w http.ResponseWriter, return nil, err } if !ok { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("sync committee public key not found")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("sync committee public key not found")) } if len(filterIndiciesSet) > 0 { if _, ok := filterIndiciesSet[idx]; !ok { diff --git a/cl/beacon/handler/states.go b/cl/beacon/handler/states.go index 2654fb4ee80..17c0d4662c8 100644 --- a/cl/beacon/handler/states.go +++ b/cl/beacon/handler/states.go @@ -18,6 +18,7 @@ package handler import ( "context" + "errors" "fmt" "net/http" "strconv" @@ -53,7 +54,7 @@ func (a *ApiHandler) blockRootFromStateId(ctx context.Context, tx kv.Tx, stateId return libcommon.Hash{}, http.StatusInternalServerError, err } if root == (libcommon.Hash{}) { - return libcommon.Hash{}, http.StatusNotFound, fmt.Errorf("genesis block not found") + return libcommon.Hash{}, http.StatusNotFound, errors.New("genesis block not found") } return case stateId.GetSlot() != nil: @@ -72,7 +73,7 @@ func (a *ApiHandler) blockRootFromStateId(ctx context.Context, tx kv.Tx, stateId } return default: - return libcommon.Hash{}, http.StatusInternalServerError, fmt.Errorf("cannot parse state id") + return libcommon.Hash{}, http.StatusInternalServerError, errors.New("cannot parse state id") } } @@ -345,7 +346,7 @@ func (a *ApiHandler) getSyncCommittees(w http.ResponseWriter, r *http.Request) ( if requestPeriod == statePeriod+1 { committee = nextSyncCommittee.GetCommittee() } else if requestPeriod != statePeriod { - return nil, fmt.Errorf("epoch is outside the sync committee period of the state") + return nil, errors.New("epoch is outside the sync committee period of the state") } } // Lastly construct the response diff --git a/cl/beacon/handler/subscription.go b/cl/beacon/handler/subscription.go index 9eb1e5e4fa2..d8f9e652aa0 100644 --- a/cl/beacon/handler/subscription.go +++ b/cl/beacon/handler/subscription.go @@ -19,6 +19,7 @@ package handler import ( "context" "encoding/json" + "errors" "fmt" "net/http" "strconv" @@ -118,15 +119,15 @@ func parseSyncCommitteeContribution(r *http.Request) (slot, subcommitteeIndex ui blockRootStr := r.URL.Query().Get("beacon_block_root") // check if they required fields are present if slotStr == "" { - err = fmt.Errorf("slot as query param is required") + err = errors.New("slot as query param is required") return } if subCommitteeIndexStr == "" { - err = fmt.Errorf("subcommittee_index as query param is required") + err = errors.New("subcommittee_index as query param is required") return } if blockRootStr == "" { - err = fmt.Errorf("beacon_block_root as query param is required") + err = errors.New("beacon_block_root as query param is required") return } slot, err = strconv.ParseUint(slotStr, 10, 64) diff --git a/cl/beacon/handler/utils_test.go b/cl/beacon/handler/utils_test.go index 8b2636ea70c..e33799e7d08 100644 --- a/cl/beacon/handler/utils_test.go +++ b/cl/beacon/handler/utils_test.go @@ -38,6 +38,7 @@ import ( "github.com/erigontech/erigon/cl/clparams/initial_state" "github.com/erigontech/erigon/cl/cltypes" "github.com/erigontech/erigon/cl/cltypes/solid" + mockMonitor "github.com/erigontech/erigon/cl/monitor/mock_services" "github.com/erigontech/erigon/cl/persistence/blob_storage" state_accessors 
"github.com/erigontech/erigon/cl/persistence/state" "github.com/erigontech/erigon/cl/persistence/state/historical_states_reader" @@ -109,6 +110,7 @@ func setupTestingHandler(t *testing.T, v clparams.StateVersion, logger log.Logge voluntaryExitService := mock_services.NewMockVoluntaryExitService(ctrl) blsToExecutionChangeService := mock_services.NewMockBLSToExecutionChangeService(ctrl) proposerSlashingService := mock_services.NewMockProposerSlashingService(ctrl) + mockValidatorMonitor := mockMonitor.NewMockValidatorMonitor(ctrl) // ctx context.Context, subnetID *uint64, msg *cltypes.SyncCommitteeMessage) error syncCommitteeMessagesService.EXPECT().ProcessMessage(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, subnetID *uint64, msg *cltypes.SyncCommitteeMessage) error { @@ -134,6 +136,7 @@ func setupTestingHandler(t *testing.T, v clparams.StateVersion, logger log.Logge opPool.ProposerSlashingsPool.Insert(pool.ComputeKeyForProposerSlashing(msg), msg) return nil }).AnyTimes() + mockValidatorMonitor.EXPECT().ObserveValidator(gomock.Any()).AnyTimes() vp = validator_params.NewValidatorParams() h = NewApiHandler( @@ -166,6 +169,7 @@ func setupTestingHandler(t *testing.T, v clparams.StateVersion, logger log.Logge blsToExecutionChangeService, proposerSlashingService, nil, + mockValidatorMonitor, ) // TODO: add tests h.Init() return diff --git a/cl/beacon/handler/validator_registration.go b/cl/beacon/handler/validator_registration.go index 89291b8d8ab..8e235c54707 100644 --- a/cl/beacon/handler/validator_registration.go +++ b/cl/beacon/handler/validator_registration.go @@ -38,6 +38,7 @@ func (a *ApiHandler) PostEthV1ValidatorPrepareBeaconProposal(w http.ResponseWrit for _, v := range req { a.logger.Debug("[Caplin] Registered new validator", "index", v.ValidatorIndex, "fee_recipient", v.FeeRecipient.String()) a.validatorParams.SetFeeRecipient(v.ValidatorIndex, v.FeeRecipient) + a.validatorsMonitor.ObserveValidator(v.ValidatorIndex) } w.WriteHeader(http.StatusOK) } diff --git a/cl/beacon/handler/validator_test.go b/cl/beacon/handler/validator_test.go index 5a892090e08..9cbd11e20d7 100644 --- a/cl/beacon/handler/validator_test.go +++ b/cl/beacon/handler/validator_test.go @@ -74,6 +74,7 @@ func (t *validatorTestSuite) SetupTest() { nil, nil, nil, + nil, ) t.gomockCtrl = gomockCtrl } diff --git a/cl/beacon/handler/validators.go b/cl/beacon/handler/validators.go index a1f2b51d149..0c2ba41d6a5 100644 --- a/cl/beacon/handler/validators.go +++ b/cl/beacon/handler/validators.go @@ -180,7 +180,7 @@ func parseStatuses(s []string) ([]validatorStatus, error) { statuses := make([]validatorStatus, 0, len(s)) if len(s) > maxValidatorsLookupFilter { - return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("too many statuses requested")) + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, errors.New("too many statuses requested")) } for _, status := range s { @@ -208,7 +208,7 @@ func checkValidValidatorId(s string) (bool, error) { } // If it is not 0x prefixed, then it must be a number, check if it is a base-10 number if _, err := strconv.ParseUint(s, 10, 64); err != nil { - return false, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("invalid validator id")) + return false, beaconhttp.NewEndpointError(http.StatusBadRequest, errors.New("invalid validator id")) } return false, nil } @@ -248,7 +248,7 @@ func (a *ApiHandler) GetEthV1BeaconStatesValidators(w http.ResponseWriter, r *ht } if len(validatorIds) > maxValidatorsLookupFilter { - 
http.Error(w, fmt.Errorf("too many validators requested").Error(), http.StatusBadRequest) + http.Error(w, errors.New("too many validators requested").Error(), http.StatusBadRequest) return } a.writeValidatorsResponse(w, r, tx, blockId, blockRoot, validatorIds, queryFilters) @@ -288,7 +288,7 @@ func (a *ApiHandler) PostEthV1BeaconStatesValidators(w http.ResponseWriter, r *h } if len(req.Ids) > maxValidatorsLookupFilter { - http.Error(w, fmt.Errorf("too many validators requested").Error(), http.StatusBadRequest) + http.Error(w, errors.New("too many validators requested").Error(), http.StatusBadRequest) return } @@ -320,7 +320,7 @@ func (a *ApiHandler) writeValidatorsResponse( if blockId.Head() { // Lets see if we point to head, if yes then we need to look at the head state we always keep. s := a.syncedData.HeadState() if s == nil { - http.Error(w, fmt.Errorf("node is not synced").Error(), http.StatusServiceUnavailable) + http.Error(w, errors.New("node is not synced").Error(), http.StatusServiceUnavailable) return } responseValidators(w, filterIndicies, statusFilters, state.Epoch(s), s.Balances(), s.Validators(), false, isOptimistic) @@ -333,7 +333,7 @@ func (a *ApiHandler) writeValidatorsResponse( } if slot == nil { - http.Error(w, fmt.Errorf("state not found").Error(), http.StatusNotFound) + http.Error(w, errors.New("state not found").Error(), http.StatusNotFound) return } stateEpoch := *slot / a.beaconChainCfg.SlotsPerEpoch @@ -361,7 +361,7 @@ func (a *ApiHandler) writeValidatorsResponse( return } if balances == nil { - http.Error(w, fmt.Errorf("balances not found").Error(), http.StatusNotFound) + http.Error(w, errors.New("balances not found").Error(), http.StatusNotFound) return } validators, err := a.forkchoiceStore.GetValidatorSet(blockRoot) @@ -370,7 +370,7 @@ func (a *ApiHandler) writeValidatorsResponse( return } if validators == nil { - http.Error(w, fmt.Errorf("validators not found").Error(), http.StatusNotFound) + http.Error(w, errors.New("validators not found").Error(), http.StatusNotFound) return } responseValidators(w, filterIndicies, statusFilters, stateEpoch, balances, validators, *slot <= a.forkchoiceStore.FinalizedSlot(), isOptimistic) @@ -398,7 +398,7 @@ func parseQueryValidatorIndex(tx kv.Tx, id string) (uint64, error) { return 0, err } if !ok { - return 0, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("validator not found")) + return 0, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("validator not found")) } return idx, nil } @@ -457,14 +457,11 @@ func (a *ApiHandler) GetEthV1BeaconStatesValidator(w http.ResponseWriter, r *htt if blockId.Head() { // Lets see if we point to head, if yes then we need to look at the head state we always keep. 
s := a.syncedData.HeadState() if s == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("node is not synced")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("node is not synced")) } if s.ValidatorLength() <= int(validatorIndex) { return newBeaconResponse([]int{}).WithFinalized(false), nil } - if s == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("node is not synced")) - } return responseValidator(validatorIndex, state.Epoch(s), s.Balances(), s.Validators(), false, isOptimistic) } slot, err := beacon_indicies.ReadBlockSlotByBlockRoot(tx, blockRoot) @@ -473,7 +470,7 @@ func (a *ApiHandler) GetEthV1BeaconStatesValidator(w http.ResponseWriter, r *htt } if slot == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("state not found")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("state not found")) } stateEpoch := *slot / a.beaconChainCfg.SlotsPerEpoch @@ -482,25 +479,32 @@ func (a *ApiHandler) GetEthV1BeaconStatesValidator(w http.ResponseWriter, r *htt if err != nil { return nil, err } + if validatorSet == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("validators not found")) + } balances, err := a.stateReader.ReadValidatorsBalances(tx, *slot) if err != nil { return nil, err } + if balances == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("balances not found")) + } return responseValidator(validatorIndex, stateEpoch, balances, validatorSet, true, isOptimistic) } + balances, err := a.forkchoiceStore.GetBalances(blockRoot) if err != nil { return nil, err } if balances == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("balances not found")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("balances not found")) } validators, err := a.forkchoiceStore.GetValidatorSet(blockRoot) if err != nil { return nil, err } if validators == nil { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("validators not found")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("validators not found")) } return responseValidator(validatorIndex, stateEpoch, balances, validators, *slot <= a.forkchoiceStore.FinalizedSlot(), isOptimistic) } @@ -521,7 +525,7 @@ func (a *ApiHandler) PostEthV1BeaconValidatorsBalances(w http.ResponseWriter, r } if len(validatorIds) > maxValidatorsLookupFilter { - http.Error(w, fmt.Errorf("too many validators requested").Error(), http.StatusBadRequest) + http.Error(w, errors.New("too many validators requested").Error(), http.StatusBadRequest) return } @@ -543,7 +547,7 @@ func (a *ApiHandler) GetEthV1BeaconValidatorsBalances(w http.ResponseWriter, r * } if len(validatorIds) > maxValidatorsLookupFilter { - http.Error(w, fmt.Errorf("too many validators requested").Error(), http.StatusBadRequest) + http.Error(w, errors.New("too many validators requested").Error(), http.StatusBadRequest) return } a.getValidatorBalances(r.Context(), w, blockId, validatorIds) @@ -574,7 +578,7 @@ func (a *ApiHandler) getValidatorBalances(ctx context.Context, w http.ResponseWr if blockId.Head() { // Lets see if we point to head, if yes then we need to look at the head state we always keep. 
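Also worth noting around these validator endpoints: http.Error only writes the status code and message, it does not terminate the handler, so every branch that reports an error must still return explicitly (the plain-http endpoints above pair almost every http.Error with a return for exactly this reason). A hypothetical handler showing the pitfall:

    package handlerexample

    import "net/http"

    // lookup is a hypothetical stand-in for a state read that can miss.
    func lookup(id string) []byte {
        if id == "" {
            return nil
        }
        return []byte(id)
    }

    func getThing(w http.ResponseWriter, r *http.Request) {
        thing := lookup(r.URL.Query().Get("id"))
        if thing == nil {
            http.Error(w, "thing not found", http.StatusNotFound)
            return // without this, execution falls through and writes a second body
        }
        w.Write(thing)
    }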
s := a.syncedData.HeadState() if s == nil { - http.Error(w, fmt.Errorf("node is not synced").Error(), http.StatusServiceUnavailable) + http.Error(w, errors.New("node is not synced").Error(), http.StatusServiceUnavailable) return } responseValidatorsBalances(w, filterIndicies, s.Balances(), false, isOptimistic) @@ -587,7 +591,7 @@ func (a *ApiHandler) getValidatorBalances(ctx context.Context, w http.ResponseWr } if slot == nil { - http.Error(w, fmt.Errorf("state not found").Error(), http.StatusNotFound) + http.Error(w, errors.New("state not found").Error(), http.StatusNotFound) return } @@ -599,7 +603,7 @@ func (a *ApiHandler) getValidatorBalances(ctx context.Context, w http.ResponseWr } if balances == nil { - http.Error(w, fmt.Errorf("validators not found, node may node be running in archivial node").Error(), http.StatusNotFound) + http.Error(w, errors.New("validators not found, node may not be running in archival mode").Error(), http.StatusNotFound) } responseValidatorsBalances(w, filterIndicies, balances, true, isOptimistic) return @@ -610,7 +614,7 @@ func (a *ApiHandler) getValidatorBalances(ctx context.Context, w http.ResponseWr return } if balances == nil { - http.Error(w, fmt.Errorf("balances not found").Error(), http.StatusNotFound) + http.Error(w, errors.New("balances not found").Error(), http.StatusNotFound) return } responseValidatorsBalances(w, filterIndicies, balances, *slot <= a.forkchoiceStore.FinalizedSlot(), isOptimistic) @@ -692,7 +696,7 @@ func responseValidator(idx uint64, stateEpoch uint64, balances solid.Uint64ListS return newBeaconResponse([]int{}).WithFinalized(finalized), nil } if idx >= uint64(validators.Length()) { - return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("validator not found")) + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("validator not found")) } v := validators.Get(int(idx)) @@ -772,11 +776,11 @@ func shouldStatusBeFiltered(status validatorStatus, statuses []validatorStatus) func (a *ApiHandler) GetEthV1ValidatorAggregateAttestation(w http.ResponseWriter, r *http.Request) (*beaconhttp.BeaconResponse, error) { attDataRoot := r.URL.Query().Get("attestation_data_root") if attDataRoot == "" { - return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("attestation_data_root is required")) + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, errors.New("attestation_data_root is required")) } slot := r.URL.Query().Get("slot") if slot == "" { - return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("slot is required")) + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, errors.New("slot is required")) } slotNum, err := strconv.ParseUint(slot, 10, 64) if err != nil { @@ -790,7 +794,7 @@ func (a *ApiHandler) GetEthV1ValidatorAggregateAttestation(w http.ResponseWriter } if slotNum != att.AttestantionData().Slot() { log.Debug("attestation slot does not match", "attestation_data_root", attDataRoot, "slot_inquire", slot) - return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("attestation slot mismatch")) + return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, errors.New("attestation slot mismatch")) } return newBeaconResponse(att), nil diff --git a/cl/clparams/config.go b/cl/clparams/config.go index 12a4a8e49b5..179d6457e68 100644 --- a/cl/clparams/config.go +++ b/cl/clparams/config.go @@ -18,12 +18,14 @@ package clparams import ( "crypto/rand" + "errors" "fmt" "math" "math/big" mathrand "math/rand" "os" "path" + "strconv"
"time" "gopkg.in/yaml.v2" @@ -34,14 +36,21 @@ import ( "github.com/erigontech/erigon/cl/utils" ) +var LatestStateFileName = "latest.ssz_snappy" + type CaplinConfig struct { Backfilling bool BlobBackfilling bool BlobPruningDisabled bool Archive bool + NetworkId NetworkType + // DisableCheckpointSync is optional and is used to disable checkpoint sync used by default in the node + DisabledCheckpointSync bool // CaplinMeVRelayUrl is optional and is used to connect to the external builder service. // If it's set, the node will start in builder mode MevRelayUrl string + // EnableValidatorMonitor is used to enable the validator monitor metrics and corresponding logs + EnableValidatorMonitor bool } func (c CaplinConfig) RelayUrlExist() bool { @@ -431,6 +440,8 @@ type BeaconChainConfig struct { CapellaForkEpoch uint64 `yaml:"CAPELLA_FORK_EPOCH" spec:"true" json:"CAPELLA_FORK_EPOCH,string"` // CapellaForkEpoch is used to represent the assigned fork epoch for Capella. DenebForkVersion ConfigForkVersion `yaml:"DENEB_FORK_VERSION" spec:"true" json:"DENEB_FORK_VERSION"` // DenebForkVersion is used to represent the fork version for Deneb. DenebForkEpoch uint64 `yaml:"DENEB_FORK_EPOCH" spec:"true" json:"DENEB_FORK_EPOCH,string"` // DenebForkEpoch is used to represent the assigned fork epoch for Deneb. + ElectraForkVersion ConfigForkVersion `yaml:"ELECTRA_FORK_VERSION" spec:"true" json:"ELECTRA_FORK_VERSION"` // ElectraForkVersion is used to represent the fork version for Electra. + ElectraForkEpoch uint64 `yaml:"ELECTRA_FORK_EPOCH" spec:"true" json:"ELECTRA_FORK_EPOCH,string"` // ElectraForkEpoch is used to represent the assigned fork epoch for Electra. ForkVersionSchedule map[libcommon.Bytes4]uint64 `json:"-"` // Schedule of fork epochs by version. ForkVersionNames map[libcommon.Bytes4]string `json:"-"` // Human-readable names of fork versions. @@ -484,6 +495,22 @@ type BeaconChainConfig struct { MaxBlobGasPerBlock uint64 `yaml:"MAX_BLOB_GAS_PER_BLOCK" json:"MAX_BLOB_GAS_PER_BLOCK,string"` // MaxBlobGasPerBlock defines the maximum gas limit for blob sidecar per block. MaxBlobsPerBlock uint64 `yaml:"MAX_BLOBS_PER_BLOCK" json:"MAX_BLOBS_PER_BLOCK,string"` // MaxBlobsPerBlock defines the maximum number of blobs per block. + // Whisk + WhiskEpochsPerShufflingPhase uint64 `yaml:"WHISK_EPOCHS_PER_SHUFFLING_PHASE" spec:"true" json:"WHISK_EPOCHS_PER_SHUFFLING_PHASE,string"` // WhiskEpochsPerShufflingPhase defines the number of epochs per shuffling phase. + WhiskProposerSelectionGap uint64 `yaml:"WHISK_PROPOSER_SELECTION_GAP" spec:"true" json:"WHISK_PROPOSER_SELECTION_GAP,string"` // WhiskProposerSelectionGap defines the proposer selection gap. + + // EIP7594 + NumberOfColumns uint64 `yaml:"NUMBER_OF_COLUMNS" spec:"true" json:"NUMBER_OF_COLUMNS,string"` // NumberOfColumns defines the number of columns in the extended matrix. + MaxCellsInExtendedMatrix uint64 `yaml:"MAX_CELLS_IN_EXTENDED_MATRIX" spec:"true" json:"MAX_CELLS_IN_EXTENDED_MATRIX,string"` // MaxCellsInExtendedMatrix defines the maximum number of cells in the extended matrix. + DataColumnSidecarSubnetCount uint64 `yaml:"DATA_COLUMN_SIDECAR_SUBNET_COUNT" spec:"true" json:"DATA_COLUMN_SIDECAR_SUBNET_COUNT,string"` // DataColumnSidecarSubnetCount defines the number of sidecars in the data column subnet. + MaxRequestDataColumnSidecars uint64 `yaml:"MAX_REQUEST_DATA_COLUMN_SIDECARS" spec:"true" json:"MAX_REQUEST_DATA_COLUMN_SIDECARS,string"` // MaxRequestDataColumnSidecars defines the maximum number of data column sidecars that can be requested. 
+ SamplesPerSlot uint64 `yaml:"SAMPLES_PER_SLOT" spec:"true" json:"SAMPLES_PER_SLOT,string"` // SamplesPerSlot defines the number of samples per slot. + CustodyRequirement uint64 `yaml:"CUSTODY_REQUIREMENT" spec:"true" json:"CUSTODY_REQUIREMENT,string"` // CustodyRequirement defines the custody requirement. + TargetNumberOfPeers uint64 `yaml:"TARGET_NUMBER_OF_PEERS" spec:"true" json:"TARGET_NUMBER_OF_PEERS,string"` // TargetNumberOfPeers defines the target number of peers. + + // Electra + MinPerEpochChurnLimitElectra uint64 `yaml:"MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA" spec:"true" json:"MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA,string"` // MinPerEpochChurnLimitElectra defines the minimum per epoch churn limit for Electra. + MaxPerEpochActivationExitChurnLimit uint64 `yaml:"MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT" spec:"true" json:"MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT,string"` // MaxPerEpochActivationExitChurnLimit defines the maximum per epoch activation exit churn limit for Electra. } func (b *BeaconChainConfig) RoundSlotToEpoch(slot uint64) uint64 { @@ -529,6 +556,7 @@ func configForkSchedule(b *BeaconChainConfig) map[libcommon.Bytes4]uint64 { fvs[utils.Uint32ToBytes4(uint32(b.BellatrixForkVersion))] = b.BellatrixForkEpoch fvs[utils.Uint32ToBytes4(uint32(b.CapellaForkVersion))] = b.CapellaForkEpoch fvs[utils.Uint32ToBytes4(uint32(b.DenebForkVersion))] = b.DenebForkEpoch + fvs[utils.Uint32ToBytes4(uint32(b.ElectraForkVersion))] = b.ElectraForkEpoch return fvs } @@ -539,6 +567,7 @@ func configForkNames(b *BeaconChainConfig) map[libcommon.Bytes4]string { fvn[utils.Uint32ToBytes4(uint32(b.BellatrixForkVersion))] = "bellatrix" fvn[utils.Uint32ToBytes4(uint32(b.CapellaForkVersion))] = "capella" fvn[utils.Uint32ToBytes4(uint32(b.DenebForkVersion))] = "deneb" + fvn[utils.Uint32ToBytes4(uint32(b.ElectraForkVersion))] = "electra" return fvn } @@ -676,6 +705,8 @@ var MainnetBeaconConfig BeaconChainConfig = BeaconChainConfig{ CapellaForkEpoch: 194048, DenebForkVersion: 0x04000000, DenebForkEpoch: 269568, + // ElectraForkVersion: Not Set, + ElectraForkEpoch: math.MaxUint64, // New values introduced in Altair hard fork 1. // Participation flag indices. 
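The clparams changes above wire a new fork (Electra) through the standard recipe: a ForkVersion/ForkEpoch field pair, entries in configForkSchedule and configForkNames, new cases in the version/epoch switches, and a mainnet default of math.MaxUint64 so the fork stays unscheduled until a real activation epoch is assigned. A simplified sketch of how such a schedule resolves the active fork for a given epoch (toy types, not Erigon's API):

    package forkexample

    import "math"

    type fork struct {
        name    string
        epoch   uint64 // activation epoch; math.MaxUint64 means "not scheduled yet"
        version uint32
    }

    // activeFork returns the newest fork whose activation epoch is <= epoch.
    // forks must be ordered oldest to newest, genesis fork first.
    func activeFork(forks []fork, epoch uint64) fork {
        active := forks[0]
        for _, f := range forks[1:] {
            if f.epoch != math.MaxUint64 && f.epoch <= epoch {
                active = f
            }
        }
        return active
    }

With Electra's epoch left at math.MaxUint64, a lookup like this never selects it, which is exactly the effect of the mainnet default above.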
@@ -724,6 +755,20 @@ var MainnetBeaconConfig BeaconChainConfig = BeaconChainConfig{ MaxBlobGasPerBlock: 786432, MaxBlobsPerBlock: 6, + + WhiskEpochsPerShufflingPhase: 256, + WhiskProposerSelectionGap: 2, + + NumberOfColumns: 128, + MaxCellsInExtendedMatrix: 768, + DataColumnSidecarSubnetCount: 32, + MaxRequestDataColumnSidecars: 16384, + SamplesPerSlot: 8, + CustodyRequirement: 1, + TargetNumberOfPeers: 70, + + MinPerEpochChurnLimitElectra: 128000000000, + MaxPerEpochActivationExitChurnLimit: 256000000000, } func mainnetConfig() BeaconChainConfig { @@ -948,6 +993,8 @@ func (b *BeaconChainConfig) GetForkVersionByVersion(v StateVersion) uint32 { return uint32(b.CapellaForkVersion) case DenebVersion: return uint32(b.DenebForkVersion) + case ElectraVersion: + return uint32(b.ElectraForkVersion) } panic("invalid version") } @@ -964,6 +1011,8 @@ func (b *BeaconChainConfig) GetForkEpochByVersion(v StateVersion) uint64 { return b.CapellaForkEpoch case DenebVersion: return b.DenebForkEpoch + case ElectraVersion: + return b.ElectraForkEpoch } panic("invalid version") } @@ -992,7 +1041,7 @@ func GetConfigsByNetworkName(net string) (*NetworkConfig, *BeaconChainConfig, Ne networkCfg, beaconCfg := GetConfigsByNetwork(HoleskyNetwork) return networkCfg, beaconCfg, HoleskyNetwork, nil default: - return nil, nil, MainnetNetwork, fmt.Errorf("chain not found") + return nil, nil, MainnetNetwork, errors.New("chain not found") } } @@ -1072,6 +1121,6 @@ func SupportBackfilling(networkId uint64) bool { } func EpochToPaths(slot uint64, config *BeaconChainConfig, suffix string) (string, string) { - folderPath := path.Clean(fmt.Sprintf("%d", slot/SubDivisionFolderSize)) + folderPath := path.Clean(strconv.FormatUint(slot/SubDivisionFolderSize, 10)) return folderPath, path.Clean(fmt.Sprintf("%s/%d.%s.sz", folderPath, slot, suffix)) } diff --git a/cl/cltypes/beacon_block.go b/cl/cltypes/beacon_block.go index aa8a9f8f063..1c8cd4aed4c 100644 --- a/cl/cltypes/beacon_block.go +++ b/cl/cltypes/beacon_block.go @@ -18,6 +18,7 @@ package cltypes import ( "encoding/json" + "errors" "fmt" libcommon "github.com/erigontech/erigon-lib/common" @@ -369,7 +370,7 @@ func (b *BeaconBody) ExecutionPayloadMerkleProof() ([][32]byte, error) { func (b *BeaconBody) KzgCommitmentMerkleProof(index int) ([][32]byte, error) { if index >= b.BlobKzgCommitments.Len() { - return nil, fmt.Errorf("index out of range") + return nil, errors.New("index out of range") } kzgCommitmentsProof, err := merkle_tree.MerkleProof(4, 11, b.getSchema(false)...) 
if err != nil { diff --git a/cl/cltypes/beacon_block_blinded.go b/cl/cltypes/beacon_block_blinded.go index c9adcbad6e2..af610d90684 100644 --- a/cl/cltypes/beacon_block_blinded.go +++ b/cl/cltypes/beacon_block_blinded.go @@ -17,6 +17,7 @@ package cltypes import ( + "errors" "fmt" libcommon "github.com/erigontech/erigon-lib/common" @@ -76,7 +77,7 @@ func (b *SignedBlindedBeaconBlock) Clone() clonable.Clonable { func (b *SignedBlindedBeaconBlock) Unblind(blockPayload *Eth1Block) (*SignedBeaconBlock, error) { if b == nil { - return nil, fmt.Errorf("nil block") + return nil, errors.New("nil block") } // check root blindedRoot := b.Block.Body.ExecutionPayload.StateRoot diff --git a/cl/cltypes/solid/attestation.go b/cl/cltypes/solid/attestation.go index a21a64f8db5..38a4c871921 100644 --- a/cl/cltypes/solid/attestation.go +++ b/cl/cltypes/solid/attestation.go @@ -104,12 +104,16 @@ func (a *Attestation) UnmarshalJSON(buf []byte) error { // AggregationBits returns the aggregation bits buffer of the Attestation instance. func (a *Attestation) AggregationBits() []byte { - return a.aggregationBitsBuffer + buf := make([]byte, len(a.aggregationBitsBuffer)) + copy(buf, a.aggregationBitsBuffer) + return buf } // SetAggregationBits sets the aggregation bits buffer of the Attestation instance. func (a *Attestation) SetAggregationBits(bits []byte) { - a.aggregationBitsBuffer = bits + buf := make([]byte, len(bits)) + copy(buf, bits) + a.aggregationBitsBuffer = buf } // AttestantionData returns the attestation data of the Attestation instance. diff --git a/cl/cltypes/solid/hash_list.go b/cl/cltypes/solid/hash_list.go index cfca7e9bcb0..5980c73083d 100644 --- a/cl/cltypes/solid/hash_list.go +++ b/cl/cltypes/solid/hash_list.go @@ -19,7 +19,6 @@ package solid import ( "encoding/json" - "github.com/erigontech/erigon-lib/common" libcommon "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/common/length" "github.com/erigontech/erigon-lib/types/clonable" @@ -32,7 +31,7 @@ type hashList struct { u []byte l, c int - hashBuf + *merkle_tree.MerkleTree } func NewHashList(c int) HashListSSZ { @@ -68,6 +67,9 @@ func (arr *hashList) UnmarshalJSON(buf []byte) error { func (h *hashList) Append(val libcommon.Hash) { offset := h.l * length.Hash + if h.MerkleTree != nil { + h.MerkleTree.AppendLeaf() + } if offset == len(h.u) { h.u = append(h.u, val[:]...) 
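The attestation.go hunk above is a defensive-copy fix: the old accessor returned the internal aggregation-bits slice and the setter stored the caller's slice directly, so both sides aliased the same backing array, and a later mutation on either side silently corrupted the attestation. The new code copies in both directions; the general shape of the pattern:

    package copyexample

    // Attestation owns its aggregation bits outright; getter and setter never
    // share a backing array with the caller.
    type Attestation struct {
        bits []byte
    }

    func (a *Attestation) AggregationBits() []byte {
        buf := make([]byte, len(a.bits))
        copy(buf, a.bits)
        return buf // the caller may mutate this freely
    }

    func (a *Attestation) SetAggregationBits(bits []byte) {
        buf := make([]byte, len(bits))
        copy(buf, bits)
        a.bits = buf // detached from the caller's slice
    }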
h.l++ @@ -87,6 +89,7 @@ func (h *hashList) Length() int { func (h *hashList) Clear() { h.l = 0 + h.MerkleTree = nil } func (h *hashList) Clone() clonable.Clonable { @@ -100,6 +103,12 @@ func (h *hashList) CopyTo(t IterableSSZ[libcommon.Hash]) { if len(h.u) > len(tu.u) { tu.u = make([]byte, len(h.u)) } + if h.MerkleTree != nil { + if tu.MerkleTree == nil { + tu.MerkleTree = &merkle_tree.MerkleTree{} + } + h.MerkleTree.CopyInto(tu.MerkleTree) + } copy(tu.u, h.u) } @@ -111,6 +120,7 @@ func (h *hashList) DecodeSSZ(buf []byte, _ int) error { if len(buf)%length.Hash > 0 { return ssz.ErrBadDynamicLength } + h.MerkleTree = nil h.u = libcommon.Copy(buf) h.l = len(h.u) / length.Hash return nil @@ -136,43 +146,30 @@ func (h *hashList) Set(index int, newValue libcommon.Hash) { if index >= h.l { panic("too big bruh") } + if h.MerkleTree != nil { + h.MerkleTree.MarkLeafAsDirty(index) + } copy(h.u[index*length.Hash:], newValue[:]) } func (h *hashList) hashVectorSSZ() ([32]byte, error) { - depth := GetDepth(uint64(h.c)) - offset := length.Hash * h.l - elements := common.Copy(h.u[:offset]) - for i := uint8(0); i < depth; i++ { - // Sequential - if len(elements)%64 != 0 { - elements = append(elements, merkle_tree.ZeroHashes[i][:]...) - } - outputLen := len(elements) / 2 - h.makeBuf(outputLen) - if err := merkle_tree.HashByteSlice(h.buf, elements); err != nil { - return [32]byte{}, err - } - elements = h.buf + if h.MerkleTree == nil { + cap := uint64(h.c) + h.MerkleTree = &merkle_tree.MerkleTree{} + h.MerkleTree.Initialize(h.l, merkle_tree.OptimalMaxTreeCacheDepth, func(idx int, out []byte) { + copy(out, h.u[idx*length.Hash:(idx+1)*length.Hash]) + }, /*limit=*/ &cap) } - - return common.BytesToHash(elements[:32]), nil + return h.MerkleTree.ComputeRoot(), nil } func (h *hashList) HashSSZ() ([32]byte, error) { - depth := GetDepth(uint64(h.c)) - baseRoot := [32]byte{} - var err error - if h.l == 0 { - copy(baseRoot[:], merkle_tree.ZeroHashes[depth][:]) - } else { - baseRoot, err = h.hashVectorSSZ() - if err != nil { - return [32]byte{}, err - } - } lengthRoot := merkle_tree.Uint64Root(uint64(h.l)) - return utils.Sha256(baseRoot[:], lengthRoot[:]), nil + coreRoot, err := h.hashVectorSSZ() + if err != nil { + return [32]byte{}, err + } + return utils.Sha256(coreRoot[:], lengthRoot[:]), nil } func (h *hashList) Range(fn func(int, libcommon.Hash, int) bool) { diff --git a/cl/cltypes/solid/uint64_raw_list.go b/cl/cltypes/solid/uint64_raw_list.go index 368a8d684d5..c25a7d7a193 100644 --- a/cl/cltypes/solid/uint64_raw_list.go +++ b/cl/cltypes/solid/uint64_raw_list.go @@ -126,7 +126,7 @@ func (arr *RawUint64List) SetReusableHashBuffer(buf []byte) { } func (arr *RawUint64List) hashBufLength() int { - return (((len(arr.u) * 4) + 3) / 4) * length.Hash + return ((len(arr.u) + 3) / 4) * length.Hash } func (arr *RawUint64List) HashSSZ() ([32]byte, error) { diff --git a/cl/cltypes/solid/uint64_vector.go b/cl/cltypes/solid/uint64_vector.go index 9422b6ed9a3..60e133c72f8 100644 --- a/cl/cltypes/solid/uint64_vector.go +++ b/cl/cltypes/solid/uint64_vector.go @@ -19,7 +19,6 @@ package solid import ( "encoding/json" - "github.com/erigontech/erigon-lib/common/length" "github.com/erigontech/erigon-lib/types/clonable" ) @@ -29,10 +28,9 @@ type uint64VectorSSZ struct { func NewUint64VectorSSZ(size int) Uint64VectorSSZ { o := &byteBasedUint64Slice{ - c: size, - l: size, - u: make([]byte, size*8), - treeCacheBuffer: make([]byte, getTreeCacheSize((size+3)/4, treeCacheDepthUint64Slice)*length.Hash), + c: size, + l: size, + u: make([]byte, 
size*8), } return &uint64VectorSSZ{ u: o, diff --git a/cl/cltypes/solid/uint64slice_byte.go b/cl/cltypes/solid/uint64slice_byte.go index f471e480cbb..f908759e2c7 100644 --- a/cl/cltypes/solid/uint64slice_byte.go +++ b/cl/cltypes/solid/uint64slice_byte.go @@ -17,20 +17,16 @@ package solid import ( - "bytes" "encoding/binary" "encoding/json" "strconv" - "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/common/length" "github.com/erigontech/erigon-lib/types/ssz" "github.com/erigontech/erigon/cl/merkle_tree" "github.com/erigontech/erigon/cl/utils" ) -const treeCacheDepthUint64Slice = 0 - func convertDepthToChunkSize(d int) int { return (1 << d) // just power of 2 } @@ -46,8 +42,7 @@ func getTreeCacheSize(listLen int, cacheDepth int) int { // memory usage, especially when dealing with large slices. type byteBasedUint64Slice struct { // The bytes that back the slice - u []byte - treeCacheBuffer []byte + u []byte // Length of the slice l int @@ -55,7 +50,7 @@ type byteBasedUint64Slice struct { // Capacity of the slice c int - hashBuf + *merkle_tree.MerkleTree } // NewUint64Slice creates a new instance of byteBasedUint64Slice with a specified capacity limit. @@ -70,25 +65,27 @@ func NewUint64Slice(limit int) *byteBasedUint64Slice { func (arr *byteBasedUint64Slice) Clear() { arr.l = 0 clear(arr.u) - clear(arr.treeCacheBuffer) + arr.MerkleTree = nil } // CopyTo copies the slice to a target slice. func (arr *byteBasedUint64Slice) CopyTo(target *byteBasedUint64Slice) { target.Clear() - + // TODO: implement CopyTo for MPT target.c = arr.c target.l = arr.l if len(target.u) < len(arr.u) { target.u = make([]byte, len(arr.u)) } - if len(target.treeCacheBuffer) < len(arr.treeCacheBuffer) { - target.treeCacheBuffer = make([]byte, len(arr.treeCacheBuffer)) + if arr.MerkleTree != nil { + if target.MerkleTree == nil { + target.MerkleTree = &merkle_tree.MerkleTree{} + } + arr.MerkleTree.CopyInto(target.MerkleTree) } - target.treeCacheBuffer = target.treeCacheBuffer[:len(arr.treeCacheBuffer)] + target.u = target.u[:len(arr.u)] copy(target.u, arr.u) - copy(target.treeCacheBuffer, arr.treeCacheBuffer) } func (arr *byteBasedUint64Slice) MarshalJSON() ([]byte, error) { @@ -110,6 +107,7 @@ func (arr *byteBasedUint64Slice) UnmarshalJSON(buf []byte) error { for _, elem := range list { arr.Append(elem) } + arr.MerkleTree = nil return nil } @@ -129,27 +127,24 @@ func (arr *byteBasedUint64Slice) Pop() uint64 { val := binary.LittleEndian.Uint64(arr.u[offset : offset+8]) binary.LittleEndian.PutUint64(arr.u[offset:offset+8], 0) arr.l = arr.l - 1 - arr.treeCacheBuffer = arr.treeCacheBuffer[:getTreeCacheSize((arr.l+3)/4, treeCacheDepthUint64Slice)*length.Hash] + arr.MerkleTree = nil return val } // Append adds a new element to the end of the slice. func (arr *byteBasedUint64Slice) Append(v uint64) { if len(arr.u) <= arr.l*8 { - arr.u = append(arr.u, make([]byte, 32)...) + arr.u = append(arr.u, merkle_tree.ZeroHashes[0][:]...) + if arr.MerkleTree != nil { + arr.MerkleTree.AppendLeaf() + } } - offset := arr.l * 8 binary.LittleEndian.PutUint64(arr.u[offset:offset+8], v) - arr.l = arr.l + 1 - treeBufferExpectCache := getTreeCacheSize((arr.l+3)/4, treeCacheDepthUint64Slice) * length.Hash - if len(arr.treeCacheBuffer) < treeBufferExpectCache { - arr.treeCacheBuffer = append(arr.treeCacheBuffer, make([]byte, treeBufferExpectCache-len(arr.treeCacheBuffer))...) 
- } - ihIdx := (((arr.l - 1) / 4) / convertDepthToChunkSize(treeCacheDepthUint64Slice)) * length.Hash - for i := ihIdx; i < ihIdx+length.Hash; i++ { - arr.treeCacheBuffer[i] = 0 + if arr.MerkleTree != nil { + arr.MerkleTree.MarkLeafAsDirty(arr.l / 4) } + arr.l++ } // Get returns the element at the given index. @@ -163,11 +158,10 @@ func (arr *byteBasedUint64Slice) Get(index int) uint64 { // Set replaces the element at the given index with a new value. func (arr *byteBasedUint64Slice) Set(index int, v uint64) { - offset := index * 8 - ihIdx := ((index / 4) / convertDepthToChunkSize(treeCacheDepthUint64Slice)) * length.Hash - for i := ihIdx; i < ihIdx+length.Hash; i++ { - arr.treeCacheBuffer[i] = 0 + if arr.MerkleTree != nil { + arr.MerkleTree.MarkLeafAsDirty(index / 4) } + offset := index * 8 binary.LittleEndian.PutUint64(arr.u[offset:offset+8], v) } @@ -183,66 +177,30 @@ func (arr *byteBasedUint64Slice) Cap() int { // HashListSSZ computes the SSZ hash of the slice as a list. It returns the hash and any error encountered. func (arr *byteBasedUint64Slice) HashListSSZ() ([32]byte, error) { - depth := GetDepth((uint64(arr.c)*8 + 31) / 32) - baseRoot := [32]byte{} - var err error - if arr.l == 0 { - copy(baseRoot[:], merkle_tree.ZeroHashes[depth][:]) - } else { - baseRoot, err = arr.HashVectorSSZ() - if err != nil { - return [32]byte{}, err - } + if arr.MerkleTree == nil { + arr.MerkleTree = &merkle_tree.MerkleTree{} + cap := uint64((arr.c*8 + length.Hash - 1) / length.Hash) + + arr.MerkleTree.Initialize((arr.l+3)/4, merkle_tree.OptimalMaxTreeCacheDepth, func(idx int, out []byte) { + copy(out, arr.u[idx*length.Hash:]) + }, &cap) } + + coreRoot := arr.ComputeRoot() lengthRoot := merkle_tree.Uint64Root(uint64(arr.l)) - return utils.Sha256(baseRoot[:], lengthRoot[:]), nil + return utils.Sha256(coreRoot[:], lengthRoot[:]), nil } // HashVectorSSZ computes the SSZ hash of the slice as a vector. It returns the hash and any error encountered. func (arr *byteBasedUint64Slice) HashVectorSSZ() ([32]byte, error) { - chunkSize := convertDepthToChunkSize(treeCacheDepthUint64Slice) * length.Hash - depth := GetDepth((uint64(arr.c)*8 + length.Hash - 1) / length.Hash) - emptyHashBytes := make([]byte, length.Hash) - - layerBuffer := make([]byte, chunkSize) - maxTo := length.Hash*((arr.l-1)/4) + length.Hash - - offset := 0 - for i := 0; i < maxTo; i += chunkSize { - offset = (i / chunkSize) * length.Hash - from := i - to := min(from+chunkSize, maxTo) - - if !bytes.Equal(arr.treeCacheBuffer[offset:offset+length.Hash], emptyHashBytes) { - continue - } - layerBuffer = layerBuffer[:to-from] - copy(layerBuffer, arr.u[from:to]) - if err := computeFlatRootsToBuffer(uint8(min(treeCacheDepthUint64Slice, uint64(depth))), layerBuffer, arr.treeCacheBuffer[offset:]); err != nil { - return [32]byte{}, err - } - } - if treeCacheDepthUint64Slice >= depth { - return common.BytesToHash(arr.treeCacheBuffer[:32]), nil - } - - arr.makeBuf(offset + length.Hash) - copy(arr.buf, arr.treeCacheBuffer[:offset+length.Hash]) - elements := arr.buf - for i := uint8(treeCacheDepthUint64Slice); i < depth; i++ { - layerLen := len(elements) - if layerLen%64 == 32 { - elements = append(elements, merkle_tree.ZeroHashes[i][:]...) 
- } - outputLen := len(elements) / 2 - arr.makeBuf(outputLen) - if err := merkle_tree.HashByteSlice(arr.buf, elements); err != nil { - return [32]byte{}, err - } - elements = arr.buf + if arr.MerkleTree == nil { + arr.MerkleTree = &merkle_tree.MerkleTree{} + arr.MerkleTree.Initialize((arr.l+3)/4, merkle_tree.OptimalMaxTreeCacheDepth, func(idx int, out []byte) { + copy(out, arr.u[idx*length.Hash:]) + }, nil) } - return common.BytesToHash(elements[:32]), nil + return arr.ComputeRoot(), nil } // EncodeSSZ encodes the slice in SSZ format. It appends the encoded data to the provided buffer and returns the result. @@ -259,7 +217,7 @@ func (arr *byteBasedUint64Slice) DecodeSSZ(buf []byte, _ int) error { bufferLength := length.Hash*((arr.l-1)/4) + length.Hash arr.u = make([]byte, bufferLength) copy(arr.u, buf) - arr.treeCacheBuffer = make([]byte, getTreeCacheSize((arr.l+3)/4, treeCacheDepthUint64Slice)*length.Hash) + arr.MerkleTree = nil return nil } diff --git a/cl/cltypes/solid/uint64slice_byte_test.go b/cl/cltypes/solid/uint64slice_byte_test.go index 868a88ba560..9c63d82c753 100644 --- a/cl/cltypes/solid/uint64slice_byte_test.go +++ b/cl/cltypes/solid/uint64slice_byte_test.go @@ -38,6 +38,7 @@ func TestUint64SliceBasic(t *testing.T) { out, err := slice.HashListSSZ() require.NoError(t, err) + require.EqualValues(t, common.HexToHash("eb8cec5eaec74a32e8b9b56cc42f7627cef722f81081ead786c97a4df1c8be5d"), out) } diff --git a/cl/cltypes/solid/validator_set.go b/cl/cltypes/solid/validator_set.go index 3a5d5cd3de4..1bdf231ac03 100644 --- a/cl/cltypes/solid/validator_set.go +++ b/cl/cltypes/solid/validator_set.go @@ -17,7 +17,6 @@ package solid import ( - "bytes" "encoding/json" libcommon "github.com/erigontech/erigon-lib/common" @@ -37,10 +36,7 @@ const ( IsPreviousMatchingHeadAttesterBit = 0x5 ) -const ( - validatorSetCapacityMultiplier = 1.01 // allocate 20% to the validator set when re-allocation is needed. - validatorTreeCacheGroupLayer = 3 // It will cache group validatorTreeCacheGroupLayer^2 accordingly -) +const validatorSetCapacityMultiplier = 1.01 // allocate 1% extra to the validator set when re-allocation is needed. // This is all stuff used by phase0 state transition. It makes many operations faster.
type Phase0Data struct { @@ -50,8 +46,8 @@ type Phase0Data struct { } type ValidatorSet struct { - buffer []byte - treeCacheBuffer []byte + *merkle_tree.MerkleTree + buffer []byte l, c int @@ -70,12 +66,11 @@ func NewValidatorSet(c int) *ValidatorSet { func NewValidatorSetWithLength(c int, l int) *ValidatorSet { return &ValidatorSet{ - c: c, - l: l, - buffer: make([]byte, l*validatorSize), - treeCacheBuffer: make([]byte, getTreeCacheSize(l, validatorTreeCacheGroupLayer)*length.Hash), - phase0Data: make([]Phase0Data, l), - attesterBits: make([]byte, l), + c: c, + l: l, + buffer: make([]byte, l*validatorSize), + phase0Data: make([]Phase0Data, l), + attesterBits: make([]byte, l), } } @@ -85,19 +80,14 @@ func (v *ValidatorSet) Bytes() []byte { func (v *ValidatorSet) expandBuffer(newValidatorSetLength int) { size := newValidatorSetLength * validatorSize - treeCacheSize := getTreeCacheSize(newValidatorSetLength, validatorTreeCacheGroupLayer) * length.Hash if size <= cap(v.buffer) { - v.treeCacheBuffer = v.treeCacheBuffer[:treeCacheSize] v.buffer = v.buffer[:size] return } increasedValidatorsCapacity := uint64(float64(newValidatorSetLength)*validatorSetCapacityMultiplier) + 1 buffer := make([]byte, size, increasedValidatorsCapacity*validatorSize) - cacheBuffer := make([]byte, treeCacheSize, increasedValidatorsCapacity*length.Hash) copy(buffer, v.buffer) - copy(cacheBuffer, v.treeCacheBuffer) - v.treeCacheBuffer = cacheBuffer v.buffer = buffer } @@ -113,6 +103,9 @@ func (v *ValidatorSet) Append(val Validator) { v.phase0Data[v.l] = Phase0Data{} // initialize to empty. v.attesterBits = append(v.attesterBits, 0x0) v.l++ + if v.MerkleTree != nil { + v.MerkleTree.AppendLeaf() + } } func (v *ValidatorSet) Cap() int { @@ -130,6 +123,7 @@ func (v *ValidatorSet) Pop() Validator { func (v *ValidatorSet) Clear() { v.l = 0 v.attesterBits = v.attesterBits[:0] + v.MerkleTree = nil } func (v *ValidatorSet) Clone() clonable.Clonable { @@ -144,10 +138,15 @@ func (v *ValidatorSet) CopyTo(t *ValidatorSet) { t.expandBuffer(v.l) t.attesterBits = make([]byte, len(v.attesterBits)) } + if v.MerkleTree != nil { + if t.MerkleTree == nil { + t.MerkleTree = &merkle_tree.MerkleTree{} + } + v.MerkleTree.CopyInto(t.MerkleTree) + } // skip copying (unsupported for phase0) t.phase0Data = make([]Phase0Data, t.l) copy(t.buffer, v.buffer) - copy(t.treeCacheBuffer, v.treeCacheBuffer) copy(t.attesterBits, v.attesterBits) t.attesterBits = t.attesterBits[:v.l] } @@ -189,60 +188,24 @@ func (v *ValidatorSet) Get(idx int) Validator { func (v *ValidatorSet) HashSSZ() ([32]byte, error) { // generate root list - validatorsLeafChunkSize := convertDepthToChunkSize(validatorTreeCacheGroupLayer) - hashBuffer := make([]byte, 8*32) - depth := GetDepth(uint64(v.c)) - lengthRoot := merkle_tree.Uint64Root(uint64(v.l)) - - if v.l == 0 { - return utils.Sha256(merkle_tree.ZeroHashes[depth][:], lengthRoot[:]), nil - } - - emptyHashBytes := make([]byte, length.Hash) - - layerBuffer := make([]byte, validatorsLeafChunkSize*length.Hash) - for i := 0; i < v.l; i += validatorsLeafChunkSize { - from := uint64(i) - to := min(from+uint64(validatorsLeafChunkSize), uint64(v.l)) - offset := (i / validatorsLeafChunkSize) * length.Hash - - if !bytes.Equal(v.treeCacheBuffer[offset:offset+length.Hash], emptyHashBytes) { - continue - } - for i := from; i < to; i++ { - validator := v.Get(int(i)) + if v.MerkleTree == nil { + v.MerkleTree = &merkle_tree.MerkleTree{} + cap := uint64(v.c) + hashBuffer := make([]byte, 8*32) + v.MerkleTree.Initialize(v.l, 
merkle_tree.OptimalMaxTreeCacheDepth, func(idx int, out []byte) { + validator := v.Get(idx) if err := validator.CopyHashBufferTo(hashBuffer); err != nil { - return [32]byte{}, err + panic(err) } hashBuffer = hashBuffer[:(8 * 32)] - if err := merkle_tree.MerkleRootFromFlatLeaves(hashBuffer, layerBuffer[(i-from)*length.Hash:]); err != nil { - return [32]byte{}, err + if err := merkle_tree.MerkleRootFromFlatLeaves(hashBuffer, out); err != nil { + panic(err) } - } - endOffset := (to - from) * length.Hash - if err := computeFlatRootsToBuffer(validatorTreeCacheGroupLayer, layerBuffer[:endOffset], v.treeCacheBuffer[offset:]); err != nil { - return [32]byte{}, err - } - + }, &cap) } - - offset := length.Hash * ((v.l + validatorsLeafChunkSize - 1) / validatorsLeafChunkSize) - v.makeBuf(offset) - copy(v.buf, v.treeCacheBuffer[:offset]) - elements := v.buf - for i := uint8(validatorTreeCacheGroupLayer); i < depth; i++ { - // Sequential - if len(elements)%64 != 0 { - elements = append(elements, merkle_tree.ZeroHashes[i][:]...) - } - outputLen := len(elements) / 2 - if err := merkle_tree.HashByteSlice(elements, elements); err != nil { - return [32]byte{}, err - } - elements = elements[:outputLen] - } - - return utils.Sha256(elements[:length.Hash], lengthRoot[:]), nil + lengthRoot := merkle_tree.Uint64Root(uint64(v.l)) + coreRoot := v.MerkleTree.ComputeRoot() + return utils.Sha256(coreRoot[:], lengthRoot[:]), nil } func computeFlatRootsToBuffer(depth uint8, layerBuffer, output []byte) error { @@ -302,9 +265,8 @@ func (v *ValidatorSet) Range(fn func(int, Validator, int) bool) { } func (v *ValidatorSet) zeroTreeHash(idx int) { - iNodeIdx := (idx / (1 << validatorTreeCacheGroupLayer)) * length.Hash - for i := iNodeIdx; i < iNodeIdx+length.Hash; i++ { - v.treeCacheBuffer[i] = 0 + if v.MerkleTree != nil { + v.MerkleTree.MarkLeafAsDirty(idx) } } diff --git a/cl/gossip/gossip.go b/cl/gossip/gossip.go index 773c5e69d42..cd1fd0e2b39 100644 --- a/cl/gossip/gossip.go +++ b/cl/gossip/gossip.go @@ -17,6 +17,7 @@ package gossip import ( + "errors" "fmt" "strings" ) @@ -63,7 +64,7 @@ func IsTopicBeaconAttestation(d string) bool { func SubnetIdFromTopicBeaconAttestation(d string) (uint64, error) { if !IsTopicBeaconAttestation(d) { - return 0, fmt.Errorf("not a beacon attestation topic") + return 0, errors.New("not a beacon attestation topic") } var id uint64 _, err := fmt.Sscanf(d, TopicNamePrefixBeaconAttestation, &id) diff --git a/cl/merkle_tree/hasher.go b/cl/merkle_tree/hasher.go index c95cc488191..067ff323ad8 100644 --- a/cl/merkle_tree/hasher.go +++ b/cl/merkle_tree/hasher.go @@ -46,10 +46,14 @@ func newMerkleHasher() *merkleHasher { // merkleizeTrieLeaves returns intermediate roots of given leaves. 
func (m *merkleHasher) merkleizeTrieLeavesFlat(leaves []byte, out []byte, limit uint64) (err error) { + return m.merkleizeTrieLeavesFlatWithStart(leaves, out, limit, 0) +} + +func (m *merkleHasher) merkleizeTrieLeavesFlatWithStart(leaves []byte, out []byte, limit, start uint64) (err error) { m.mu.Lock() defer m.mu.Unlock() layer := m.getBufferFromFlat(leaves) - for i := uint8(0); i < GetDepth(limit); i++ { + for i := uint8(start); i < GetDepth(limit); i++ { layerLen := len(layer) if layerLen%2 != 0 { layer = append(layer, ZeroHashes[i]) diff --git a/cl/merkle_tree/merkle_root.go b/cl/merkle_tree/merkle_root.go index 137f841415b..d3afcea1d4f 100644 --- a/cl/merkle_tree/merkle_root.go +++ b/cl/merkle_tree/merkle_root.go @@ -130,6 +130,18 @@ func MerkleRootFromFlatLeaves(leaves []byte, out []byte) (err error) { return globalHasher.merkleizeTrieLeavesFlat(leaves, out, NextPowerOfTwo(uint64((len(leaves)+31)/32))) } +func MerkleRootFromFlatFromIntermediateLevel(nodes []byte, out []byte, leavesLen, intermediateLevel int) (err error) { + if len(nodes) <= 32 { + copy(out, nodes) + return + } + return globalHasher.merkleizeTrieLeavesFlatWithStart(nodes, out, NextPowerOfTwo(uint64((leavesLen+31)/32)), uint64(intermediateLevel)) +} + +func MerkleRootFromFlatFromIntermediateLevelWithLimit(nodes []byte, out []byte, limit, intermediateLevel int) (err error) { + return globalHasher.merkleizeTrieLeavesFlatWithStart(nodes, out, uint64(limit), uint64(intermediateLevel)) +} + func MerkleRootFromFlatLeavesWithLimit(leaves []byte, out []byte, limit uint64) (err error) { return globalHasher.merkleizeTrieLeavesFlat(leaves, out, limit) } diff --git a/cl/merkle_tree/merkle_tree.go b/cl/merkle_tree/merkle_tree.go new file mode 100644 index 00000000000..292d5339624 --- /dev/null +++ b/cl/merkle_tree/merkle_tree.go @@ -0,0 +1,251 @@ +package merkle_tree + +import ( + "bytes" + + libcommon "github.com/erigontech/erigon-lib/common" + "github.com/erigontech/erigon-lib/common/length" +) + +func ceil(num, divisor int) int { + return (num + (divisor - 1)) / divisor +} + +const OptimalMaxTreeCacheDepth = 12 + +type MerkleTree struct { + computeLeaf func(idx int, out []byte) + layers [][]byte // Flat hash-layers + leavesCount int + + hashBuf [64]byte // buffer to store the input for hash(hash1, hash2) + limit *uint64 // Optional limit for the number of leaves (this will enable limit-oriented hashing) +} + +// Layout of the layers: + +// 0-n: intermediate layers +// Root is not stored in the layers, root is recomputed on demand +// The first layer is not the leaf layer, but the first intermediate layer, the leaf layer is not stored in the layers. + +// Initialize initializes the Merkle tree with the given number of leaves and the maximum depth of the tree cache. +func (m *MerkleTree) Initialize(leavesCount, maxTreeCacheDepth int, computeLeaf func(idx int, out []byte), limitOptional *uint64) { + m.computeLeaf = computeLeaf + m.layers = make([][]byte, maxTreeCacheDepth) + m.leavesCount = leavesCount + firstLayerSize := ((leavesCount + 1) / 2) * length.Hash + capacity := (firstLayerSize / 2) * 3 + m.layers[0] = make([]byte, firstLayerSize, capacity) + if limitOptional != nil { + m.limit = new(uint64) + *m.limit = *limitOptional + } +} + +// MarkLeafAsDirty resets the leaf at the given index, so that it will be recomputed on the next call to ComputeRoot. 
+func (m *MerkleTree) MarkLeafAsDirty(idx int) { + for i := 0; i < len(m.layers); i++ { + currDivisor := 1 << (i + 1) // i+1 because the first layer is not the leaf layer + layerSize := ((m.leavesCount + (currDivisor - 1)) / currDivisor) * length.Hash + if layerSize == 0 { + break + } + if m.layers[i] == nil { + capacity := (layerSize / 2) * 3 + if capacity == 0 { + capacity = 1024 + } + m.layers[i] = make([]byte, layerSize, capacity) + } + copy(m.layers[i][(idx/currDivisor)*length.Hash:], ZeroHashes[0][:]) + if layerSize == length.Hash { + break + } + } +} + +func (m *MerkleTree) AppendLeaf() { + /* + Step 1: Append a new dirty leaf + Step 2: Extend each layer with the new leaf when needed (1.5x extension) + */ + for i := 0; i < len(m.layers); i++ { + m.extendLayer(i) + } + m.leavesCount++ +} + +// extendLayer grows the layer at the given index to cover the newly appended leaf (growing capacity by 1.5x when needed) and zeroes the new tail node so it is recomputed. +func (m *MerkleTree) extendLayer(layerIdx int) { + var prevLayerNodeCount int + if layerIdx == 0 { + prevLayerNodeCount = m.leavesCount + 1 + } else { + prevLayerNodeCount = len(m.layers[layerIdx-1]) / length.Hash + } + // halve the previous layer's node count, rounding up when it is odd + newExpectedLayerNodeCount := prevLayerNodeCount / 2 + if newExpectedLayerNodeCount == 0 { + m.layers[layerIdx] = m.layers[layerIdx][:0] + return + } + if prevLayerNodeCount%2 != 0 { + newExpectedLayerNodeCount++ + } + + newLayerSize := newExpectedLayerNodeCount * length.Hash + + if m.layers[layerIdx] == nil { + capacity := (newLayerSize / 2) * 3 + m.layers[layerIdx] = make([]byte, newLayerSize, capacity) + } else { + if newLayerSize > cap(m.layers[layerIdx]) { + capacity := (newLayerSize / 2) * 3 + tmp := m.layers[layerIdx] + m.layers[layerIdx] = make([]byte, newLayerSize, capacity) + copy(m.layers[layerIdx], tmp) + } + m.layers[layerIdx] = m.layers[layerIdx][:newLayerSize] + copy(m.layers[layerIdx][newLayerSize-length.Hash:], ZeroHashes[0][:]) + } +} + +// ComputeRoot computes the root of the Merkle tree. +func (m *MerkleTree) ComputeRoot() libcommon.Hash { + var root libcommon.Hash + if len(m.layers) == 0 { + return ZeroHashes[0] + } + + if m.leavesCount == 0 { + if m.limit == nil { + return ZeroHashes[0] + } + return ZeroHashes[GetDepth(*m.limit)] + } + + if m.leavesCount <= 3 { + buf := make([]byte, 0, 3*length.Hash) + for i := 0; i < m.leavesCount; i++ { + m.computeLeaf(i, m.hashBuf[:length.Hash]) + buf = append(buf, m.hashBuf[:length.Hash]...)
+ } + if m.limit != nil { + if err := MerkleRootFromFlatFromIntermediateLevelWithLimit(buf, root[:], int(*m.limit), 0); err != nil { + panic(err) + } + return root + } + if err := MerkleRootFromFlatFromIntermediateLevel(buf, root[:], m.leavesCount*length.Hash, 0); err != nil { + panic(err) + } + return root + } + + if len(m.layers[0]) == length.Hash { + var node libcommon.Hash + m.computeLeaf(0, node[:]) + if m.limit != nil { + if err := MerkleRootFromFlatFromIntermediateLevelWithLimit(node[:], root[:], int(*m.limit), 0); err != nil { + panic(err) + } + return root + } + return node + } + + // Compute the root + for i := 0; i < len(m.layers); i++ { + m.computeLayer(i) + } + // Find last layer with more than 0 elements + for i := 0; i < len(m.layers); i++ { + if len(m.layers[i]) == 0 { + m.finishHashing(i-1, root[:]) + return root + } + } + m.finishHashing(len(m.layers)-1, root[:]) + return root +} + +func (m *MerkleTree) CopyInto(other *MerkleTree) { + other.computeLeaf = m.computeLeaf + other.layers = make([][]byte, len(m.layers)) + for i := 0; i < len(m.layers); i++ { + other.layers[i] = make([]byte, len(m.layers[i])) + copy(other.layers[i], m.layers[i]) + } + other.leavesCount = m.leavesCount + other.limit = m.limit +} + +func (m *MerkleTree) finishHashing(lastLayerIdx int, root []byte) { + if m.limit == nil { + if err := MerkleRootFromFlatFromIntermediateLevel(m.layers[lastLayerIdx], root, m.leavesCount*length.Hash, lastLayerIdx+1); err != nil { + panic(err) + } + return + } + + if err := MerkleRootFromFlatFromIntermediateLevelWithLimit(m.layers[lastLayerIdx], root, int(*m.limit), lastLayerIdx+1); err != nil { + panic(err) + } +} + +func (m *MerkleTree) computeLayer(layerIdx int) { + currentDivisor := 1 << uint(layerIdx+1) + if m.layers[layerIdx] == nil { + // derive this layer's node count from the previous layer, rounding up when the previous layer has an odd node count + prevLayerNodeCount := len(m.layers[layerIdx-1]) / length.Hash + newExpectedLayerNodeCount := prevLayerNodeCount / 2 + if newExpectedLayerNodeCount == 0 { + m.layers[layerIdx] = m.layers[layerIdx][:0] + return + } + if prevLayerNodeCount%2 != 0 { + newExpectedLayerNodeCount++ + } + newLayerSize := newExpectedLayerNodeCount * length.Hash + capacity := (newLayerSize / 2) * 3 + m.layers[layerIdx] = make([]byte, newLayerSize, capacity) + } + if len(m.layers[layerIdx]) == 0 { + return + } + + iterations := ceil(m.leavesCount, currentDivisor) + + for i := 0; i < iterations; i++ { + fromOffset := i * length.Hash + toOffset := (i + 1) * length.Hash + if !bytes.Equal(m.layers[layerIdx][fromOffset:toOffset], ZeroHashes[0][:]) { + continue + } + if layerIdx == 0 { + // children of layer 0 are raw leaves, which are never cached and must be recomputed + leafIndexBegin := i * 2 + m.computeLeaf(leafIndexBegin, m.hashBuf[:length.Hash]) + if leafIndexBegin == m.leavesCount-1 { + copy(m.hashBuf[length.Hash:], ZeroHashes[0][:]) + } else { + m.computeLeaf(leafIndexBegin+1, m.hashBuf[length.Hash:]) + } + if err := HashByteSlice(m.layers[layerIdx][fromOffset:toOffset], m.hashBuf[:]); err != nil { + panic(err) + } + continue + } + childFromOffset := (i * 2) * length.Hash + childToOffset := (i*2 + 2) * length.Hash + if childToOffset > len(m.layers[layerIdx-1]) { + copy(m.hashBuf[:length.Hash], m.layers[layerIdx-1][childFromOffset:]) + copy(m.hashBuf[length.Hash:], ZeroHashes[layerIdx][:]) + } else { + copy(m.hashBuf[:], m.layers[layerIdx-1][childFromOffset:childToOffset]) + } + if err := HashByteSlice(m.layers[layerIdx][fromOffset:toOffset], m.hashBuf[:]); err != nil { + panic(err) + } + } +}
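Putting the new type together: callers hand `Initialize` a leaf count, a cache depth and a `computeLeaf` callback, then invalidate leaves and recompute roots incrementally. A compact usage sketch consistent with the tests that follow (illustrative only, error handling elided):

```go
package main

import (
	"fmt"

	"github.com/erigontech/erigon-lib/common/length"
	"github.com/erigontech/erigon/cl/merkle_tree"
)

func main() {
	leaves := make([]byte, 4*length.Hash) // four 32-byte leaves
	mt := merkle_tree.MerkleTree{}
	mt.Initialize(4, merkle_tree.OptimalMaxTreeCacheDepth, func(idx int, out []byte) {
		copy(out, leaves[idx*length.Hash:(idx+1)*length.Hash])
	}, nil) // nil limit: hash as a plain vector

	fmt.Println(mt.ComputeRoot()) // first call hashes every node

	leaves[0] = 1
	mt.MarkLeafAsDirty(0)         // zero the cached nodes on leaf 0's path
	fmt.Println(mt.ComputeRoot()) // only the dirtied path is rehashed

	mt.AppendLeaf() // grow by one (zero) leaf
	fmt.Println(mt.ComputeRoot())
}
```
diff --git 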
a/cl/merkle_tree/merkle_tree_test.go b/cl/merkle_tree/merkle_tree_test.go new file mode 100644 index 00000000000..8bcc1f11f14 --- /dev/null +++ b/cl/merkle_tree/merkle_tree_test.go @@ -0,0 +1,149 @@ +package merkle_tree_test + +import ( + "testing" + + "github.com/erigontech/erigon-lib/common" + "github.com/erigontech/erigon-lib/common/length" + "github.com/erigontech/erigon/cl/merkle_tree" + "github.com/stretchr/testify/require" +) + +func getExpectedRoot(testBuffer []byte) common.Hash { + var root common.Hash + merkle_tree.MerkleRootFromFlatLeaves(testBuffer, root[:]) + return root +} + +func getExpectedRootWithLimit(testBuffer []byte, limit int) common.Hash { + var root common.Hash + merkle_tree.MerkleRootFromFlatLeavesWithLimit(testBuffer, root[:], uint64(limit)) + return root +} + +func TestPowerOf2MerkleTree(t *testing.T) { + mt := merkle_tree.MerkleTree{} + testBuffer := make([]byte, 4*length.Hash) + testBuffer[0] = 1 + testBuffer[32] = 2 + testBuffer[64] = 3 + testBuffer[96] = 9 + mt.Initialize(4, 6, func(idx int, out []byte) { + copy(out, testBuffer[idx*length.Hash:(idx+1)*length.Hash]) + }, nil) + expectedRoot1 := getExpectedRoot(testBuffer) + require.Equal(t, mt.ComputeRoot(), expectedRoot1) + testBuffer[64] = 4 + require.Equal(t, mt.ComputeRoot(), expectedRoot1) + mt.MarkLeafAsDirty(2) + expectedRoot2 := getExpectedRoot(testBuffer) + require.Equal(t, mt.ComputeRoot(), expectedRoot2) + testBuffer[64] = 3 + mt.MarkLeafAsDirty(2) + require.Equal(t, mt.ComputeRoot(), expectedRoot1) + +} + +func TestMerkleTreeAppendLeaf(t *testing.T) { + mt := merkle_tree.MerkleTree{} + testBuffer := make([]byte, 4*length.Hash) + testBuffer[0] = 1 + testBuffer[32] = 2 + testBuffer[64] = 3 + testBuffer[96] = 9 + mt.Initialize(4, 6, func(idx int, out []byte) { + copy(out, testBuffer[idx*length.Hash:(idx+1)*length.Hash]) + }, nil) + // Test AppendLeaf + mt.AppendLeaf() + testBuffer = append(testBuffer, make([]byte, 4*length.Hash)...) + testBuffer[128] = 5 + expectedRoot1 := getExpectedRoot(testBuffer) + require.Equal(t, mt.ComputeRoot(), expectedRoot1) + // adding 3 more empty leaves should not change the root + mt.AppendLeaf() + mt.AppendLeaf() + mt.AppendLeaf() + require.Equal(t, mt.ComputeRoot(), expectedRoot1) +} + +func TestMerkleTreeRootEmpty(t *testing.T) { + mt := merkle_tree.MerkleTree{} + mt.Initialize(0, 6, func(idx int, out []byte) { + return + }, nil) + require.Equal(t, mt.ComputeRoot().String(), "0x0000000000000000000000000000000000000000000000000000000000000000") +} + +func TestMerkleTreeRootSingleElement(t *testing.T) { + mt := merkle_tree.MerkleTree{} + testBuffer := make([]byte, length.Hash) + testBuffer[0] = 1 + mt.Initialize(1, 6, func(idx int, out []byte) { + copy(out, testBuffer) + }, nil) + require.Equal(t, mt.ComputeRoot().String(), "0x0100000000000000000000000000000000000000000000000000000000000000") +} + +func TestMerkleTreeAppendLeafWithLowMaxDepth(t *testing.T) { + mt := merkle_tree.MerkleTree{} + testBuffer := make([]byte, 4*length.Hash) + testBuffer[0] = 1 + testBuffer[32] = 2 + testBuffer[64] = 3 + testBuffer[96] = 9 + mt.Initialize(4, 2, func(idx int, out []byte) { + copy(out, testBuffer[idx*length.Hash:(idx+1)*length.Hash]) + }, nil) + // Test AppendLeaf + mt.AppendLeaf() + testBuffer = append(testBuffer, make([]byte, 4*length.Hash)...) 
+ testBuffer[128] = 5 + expectedRoot := getExpectedRoot(testBuffer) + require.Equal(t, mt.ComputeRoot(), expectedRoot) + // adding 3 more empty leaves should not change the root + mt.AppendLeaf() + mt.AppendLeaf() + mt.AppendLeaf() + require.Equal(t, mt.ComputeRoot(), expectedRoot) +} + +func TestMerkleTree17Elements(t *testing.T) { + mt := merkle_tree.MerkleTree{} + testBuffer := make([]byte, 17*length.Hash) + testBuffer[0] = 1 + testBuffer[32] = 2 + testBuffer[64] = 3 + testBuffer[96] = 9 + testBuffer[128] = 5 + mt.Initialize(17, 2, func(idx int, out []byte) { + copy(out, testBuffer[idx*length.Hash:(idx+1)*length.Hash]) + }, nil) + // Test AppendLeaf + expectedRoot := getExpectedRoot(testBuffer) + require.Equal(t, mt.ComputeRoot(), expectedRoot) +} + +func TestMerkleTreeAppendLeafWithLowMaxDepthAndLimit(t *testing.T) { + mt := merkle_tree.MerkleTree{} + testBuffer := make([]byte, 4*length.Hash) + testBuffer[0] = 1 + testBuffer[32] = 2 + testBuffer[64] = 3 + testBuffer[96] = 9 + lm := uint64(1 << 12) + mt.Initialize(4, 2, func(idx int, out []byte) { + copy(out, testBuffer[idx*length.Hash:(idx+1)*length.Hash]) + }, &lm) + // Test AppendLeaf + mt.AppendLeaf() + testBuffer = append(testBuffer, make([]byte, 4*length.Hash)...) + testBuffer[128] = 5 + expectedRoot := getExpectedRootWithLimit(testBuffer, int(lm)) + require.Equal(t, mt.ComputeRoot(), expectedRoot) + // adding 3 more empty leaves should not change the root + mt.AppendLeaf() + mt.AppendLeaf() + mt.AppendLeaf() + require.Equal(t, mt.ComputeRoot(), expectedRoot) +} diff --git a/cl/monitor/interface.go b/cl/monitor/interface.go new file mode 100644 index 00000000000..d1198316b9b --- /dev/null +++ b/cl/monitor/interface.go @@ -0,0 +1,22 @@ +package monitor + +import ( + "github.com/erigontech/erigon/cl/cltypes" +) + +//go:generate mockgen -typed=true -destination=mock_services/validator_monitor_mock.go -package=mock_services . 
ValidatorMonitor +type ValidatorMonitor interface { + ObserveValidator(vid uint64) + RemoveValidator(vid uint64) + OnNewBlock(block *cltypes.BeaconBlock) error +} + +type dummyValidatorMonitor struct{} + +func (d *dummyValidatorMonitor) ObserveValidator(vid uint64) {} + +func (d *dummyValidatorMonitor) RemoveValidator(vid uint64) {} + +func (d *dummyValidatorMonitor) OnNewBlock(block *cltypes.BeaconBlock) error { + return nil +} diff --git a/cl/monitor/metrics.go b/cl/monitor/metrics.go new file mode 100644 index 00000000000..b7f07976c39 --- /dev/null +++ b/cl/monitor/metrics.go @@ -0,0 +1,14 @@ +package monitor + +import "github.com/erigontech/erigon-lib/metrics" + +var ( + // metricAttestHit is the number of attestations that hit for those validators we observe within current_epoch-2 + metricAttestHit = metrics.GetOrCreateCounter("validator_attestation_hit") + // metricAttestMiss is the number of attestations that miss for those validators we observe within current_epoch-2 + metricAttestMiss = metrics.GetOrCreateCounter("validator_attestation_miss") + // metricProposerHit is the number of proposals that hit for those validators we observe in previous slot + metricProposerHit = metrics.GetOrCreateCounter("validator_proposal_hit") + // metricProposerMiss is the number of proposals that miss for those validators we observe in previous slot + metricProposerMiss = metrics.GetOrCreateCounter("validator_proposal_miss") +) diff --git a/cl/monitor/mock_services/validator_monitor_mock.go b/cl/monitor/mock_services/validator_monitor_mock.go new file mode 100644 index 00000000000..27c99819b72 --- /dev/null +++ b/cl/monitor/mock_services/validator_monitor_mock.go @@ -0,0 +1,150 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/erigontech/erigon/cl/monitor (interfaces: ValidatorMonitor) +// +// Generated by this command: +// +// mockgen -typed=true -destination=mock_services/validator_monitor_mock.go -package=mock_services . ValidatorMonitor +// + +// Package mock_services is a generated GoMock package. +package mock_services + +import ( + reflect "reflect" + + cltypes "github.com/erigontech/erigon/cl/cltypes" + gomock "go.uber.org/mock/gomock" +) + +// MockValidatorMonitor is a mock of ValidatorMonitor interface. +type MockValidatorMonitor struct { + ctrl *gomock.Controller + recorder *MockValidatorMonitorMockRecorder +} + +// MockValidatorMonitorMockRecorder is the mock recorder for MockValidatorMonitor. +type MockValidatorMonitorMockRecorder struct { + mock *MockValidatorMonitor +} + +// NewMockValidatorMonitor creates a new mock instance. +func NewMockValidatorMonitor(ctrl *gomock.Controller) *MockValidatorMonitor { + mock := &MockValidatorMonitor{ctrl: ctrl} + mock.recorder = &MockValidatorMonitorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockValidatorMonitor) EXPECT() *MockValidatorMonitorMockRecorder { + return m.recorder +} + +// ObserveValidator mocks base method. +func (m *MockValidatorMonitor) ObserveValidator(arg0 uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "ObserveValidator", arg0) +} + +// ObserveValidator indicates an expected call of ObserveValidator. 
+func (mr *MockValidatorMonitorMockRecorder) ObserveValidator(arg0 any) *MockValidatorMonitorObserveValidatorCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ObserveValidator", reflect.TypeOf((*MockValidatorMonitor)(nil).ObserveValidator), arg0) + return &MockValidatorMonitorObserveValidatorCall{Call: call} +} + +// MockValidatorMonitorObserveValidatorCall wrap *gomock.Call +type MockValidatorMonitorObserveValidatorCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockValidatorMonitorObserveValidatorCall) Return() *MockValidatorMonitorObserveValidatorCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockValidatorMonitorObserveValidatorCall) Do(f func(uint64)) *MockValidatorMonitorObserveValidatorCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockValidatorMonitorObserveValidatorCall) DoAndReturn(f func(uint64)) *MockValidatorMonitorObserveValidatorCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// OnNewBlock mocks base method. +func (m *MockValidatorMonitor) OnNewBlock(arg0 *cltypes.BeaconBlock) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "OnNewBlock", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// OnNewBlock indicates an expected call of OnNewBlock. +func (mr *MockValidatorMonitorMockRecorder) OnNewBlock(arg0 any) *MockValidatorMonitorOnNewBlockCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnNewBlock", reflect.TypeOf((*MockValidatorMonitor)(nil).OnNewBlock), arg0) + return &MockValidatorMonitorOnNewBlockCall{Call: call} +} + +// MockValidatorMonitorOnNewBlockCall wrap *gomock.Call +type MockValidatorMonitorOnNewBlockCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockValidatorMonitorOnNewBlockCall) Return(arg0 error) *MockValidatorMonitorOnNewBlockCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockValidatorMonitorOnNewBlockCall) Do(f func(*cltypes.BeaconBlock) error) *MockValidatorMonitorOnNewBlockCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockValidatorMonitorOnNewBlockCall) DoAndReturn(f func(*cltypes.BeaconBlock) error) *MockValidatorMonitorOnNewBlockCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// RemoveValidator mocks base method. +func (m *MockValidatorMonitor) RemoveValidator(arg0 uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RemoveValidator", arg0) +} + +// RemoveValidator indicates an expected call of RemoveValidator. 
+func (mr *MockValidatorMonitorMockRecorder) RemoveValidator(arg0 any) *MockValidatorMonitorRemoveValidatorCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveValidator", reflect.TypeOf((*MockValidatorMonitor)(nil).RemoveValidator), arg0) + return &MockValidatorMonitorRemoveValidatorCall{Call: call} +} + +// MockValidatorMonitorRemoveValidatorCall wrap *gomock.Call +type MockValidatorMonitorRemoveValidatorCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockValidatorMonitorRemoveValidatorCall) Return() *MockValidatorMonitorRemoveValidatorCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockValidatorMonitorRemoveValidatorCall) Do(f func(uint64)) *MockValidatorMonitorRemoveValidatorCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockValidatorMonitorRemoveValidatorCall) DoAndReturn(f func(uint64)) *MockValidatorMonitorRemoveValidatorCall { + c.Call = c.Call.DoAndReturn(f) + return c +} diff --git a/cl/monitor/validator.go b/cl/monitor/validator.go new file mode 100644 index 00000000000..cfd3bb25d07 --- /dev/null +++ b/cl/monitor/validator.go @@ -0,0 +1,233 @@ +package monitor + +import ( + "sync" + "time" + + mapset "github.com/deckarep/golang-set/v2" + "github.com/erigontech/erigon-lib/common" + "github.com/erigontech/erigon-lib/log/v3" + "github.com/erigontech/erigon/cl/beacon/synced_data" + "github.com/erigontech/erigon/cl/clparams" + "github.com/erigontech/erigon/cl/cltypes" + "github.com/erigontech/erigon/cl/cltypes/solid" + "github.com/erigontech/erigon/cl/phase1/forkchoice" + "github.com/erigontech/erigon/cl/utils/eth_clock" +) + +type ValidatorMonitorImpl struct { + fc forkchoice.ForkChoiceStorageReader + syncedData *synced_data.SyncedDataManager + ethClock eth_clock.EthereumClock + beaconCfg *clparams.BeaconChainConfig + validatorStatuses *validatorStatuses // map validatorID -> epoch -> validatorStatus +} + +func NewValidatorMonitor( + enableMonitor bool, + fc forkchoice.ForkChoiceStorageReader, + ethClock eth_clock.EthereumClock, + beaconConfig *clparams.BeaconChainConfig, + syncedData *synced_data.SyncedDataManager, +) ValidatorMonitor { + if !enableMonitor { + return &dummyValidatorMonitor{} + } + + m := &ValidatorMonitorImpl{ + fc: fc, + ethClock: ethClock, + beaconCfg: beaconConfig, + syncedData: syncedData, + validatorStatuses: newValidatorStatuses(), + } + go m.runReportAttesterStatus() + go m.runReportProposerStatus() + return m +} + +func (m *ValidatorMonitorImpl) ObserveValidator(vid uint64) { + m.validatorStatuses.addValidator(vid) +} + +func (m *ValidatorMonitorImpl) RemoveValidator(vid uint64) { + m.validatorStatuses.removeValidator(vid) +} + +func (m *ValidatorMonitorImpl) OnNewBlock(block *cltypes.BeaconBlock) error { + var ( + atts = block.Body.Attestations + blockEpoch = m.ethClock.GetEpochAtSlot(block.Slot) + currentEpoch = m.ethClock.GetCurrentEpoch() + ) + if blockEpoch+2 < currentEpoch { + // skip old blocks + return nil + } + + blockRoot, err := block.HashSSZ() + if err != nil { + log.Warn("failed to hash block", "err", err, "slot", block.Slot) + return err + } + + state, err := m.fc.GetStateAtBlockRoot(blockRoot, false) + if err != nil { + log.Warn("failed to get state at block root", "err", err, "slot", block.Slot, "blockRoot", blockRoot) + return err + } else if state == nil { + log.Info("state is nil. syncing", "slot", block.Slot, "blockRoot", blockRoot) + return nil + } + + // todo: maybe launch a goroutine to update attester status + // update attester status + atts.Range(func(i int, att *solid.Attestation, length int) bool { + indices, err := state.GetAttestingIndicies(att.AttestantionData(), att.AggregationBits(), true) + if err != nil { + log.Warn("failed to get attesting indices", "err", err, "slot", block.Slot, "stateRoot", block.StateRoot) + return false + } + slot := att.AttestantionData().Slot() + attEpoch := m.ethClock.GetEpochAtSlot(slot) + for _, vidx := range indices { + status := m.validatorStatuses.getValidatorStatus(vidx, attEpoch) + if status == nil { + continue + } + status.updateAttesterStatus(att) + } + return true + }) + // update proposer status + pIndex := block.ProposerIndex + if status := m.validatorStatuses.getValidatorStatus(pIndex, blockEpoch); status != nil { + status.proposeSlots.Add(block.Slot) + } + + return nil +}
+func (m *ValidatorMonitorImpl) runReportAttesterStatus() { + // report once per epoch duration + epochDuration := time.Duration(m.beaconCfg.SlotsPerEpoch) * time.Duration(m.beaconCfg.SecondsPerSlot) * time.Second + ticker := time.NewTicker(epochDuration) + for range ticker.C { + currentEpoch := m.ethClock.GetCurrentEpoch() + // report attester status for current_epoch - 2 + epoch := currentEpoch - 2 + hitCount := 0 + missCount := 0 + m.validatorStatuses.iterate(func(vindex uint64, epochStatuses map[uint64]*validatorStatus) { + if status, ok := epochStatuses[epoch]; ok { + successAtt := status.attestedBlockRoots.Cardinality() + metricAttestHit.AddInt(successAtt) + hitCount += successAtt + delete(epochStatuses, epoch) + log.Debug("[monitor] report attester status hit", "epoch", epoch, "vindex", vindex, "countAttestedBlock", status.attestedBlockRoots.Cardinality()) + } else { + metricAttestMiss.AddInt(1) + missCount++ + log.Debug("[monitor] report attester status miss", "epoch", epoch, "vindex", vindex, "countAttestedBlock", 0) + } + }) + log.Info("[monitor] report attester hit/miss", "epoch", epoch, "hitCount", hitCount, "missCount", missCount, "cur_epoch", currentEpoch) + } + +} + +func (m *ValidatorMonitorImpl) runReportProposerStatus() { + // check proposer in previous slot every slot duration + ticker := time.NewTicker(time.Duration(m.beaconCfg.SecondsPerSlot) * time.Second) + defer ticker.Stop() + for range ticker.C { + headState := m.syncedData.HeadStateReader() + if headState == nil { + continue + } + // check proposer in previous slot + prevSlot := m.ethClock.GetCurrentSlot() - 1 + proposerIndex, err := headState.GetBeaconProposerIndexForSlot(prevSlot) + if err != nil { + log.Warn("failed to get proposer index", "slot", prevSlot, "err", err) + continue + } + if status := m.validatorStatuses.getValidatorStatus(proposerIndex, prevSlot/m.beaconCfg.SlotsPerEpoch); status != nil { + if status.proposeSlots.Contains(prevSlot) { + metricProposerHit.AddInt(1) + log.Info("[monitor] proposer hit", "slot", prevSlot, "proposerIndex", proposerIndex) + } else { + metricProposerMiss.AddInt(1) + log.Info("[monitor] proposer miss", "slot", prevSlot, "proposerIndex", proposerIndex) + } + } + } +} + +type validatorStatus struct { + // attestedBlockRoots is the set of block roots the validator successfully attested to during one epoch. + attestedBlockRoots mapset.Set[common.Hash] + // proposeSlots is the set of slots in which the validator successfully proposed blocks during one epoch. 
+ proposeSlots mapset.Set[uint64] +} + +func (s *validatorStatus) updateAttesterStatus(att *solid.Attestation) { + data := att.AttestantionData() + s.attestedBlockRoots.Add(data.BeaconBlockRoot()) +} + +type validatorStatuses struct { + statuses map[uint64]map[uint64]*validatorStatus + vStatusMutex sync.RWMutex +} + +func newValidatorStatuses() *validatorStatuses { + return &validatorStatuses{ + statuses: make(map[uint64]map[uint64]*validatorStatus), + } +} + +// getValidatorStatus returns the validator status for the given validator index and epoch. +// returns nil if validator is not observed. +func (s *validatorStatuses) getValidatorStatus(vid uint64, epoch uint64) *validatorStatus { + s.vStatusMutex.Lock() + defer s.vStatusMutex.Unlock() + statusByEpoch, ok := s.statuses[vid] + if !ok { + return nil + } + if _, ok := statusByEpoch[epoch]; !ok { + statusByEpoch[epoch] = &validatorStatus{ + attestedBlockRoots: mapset.NewSet[common.Hash](), + proposeSlots: mapset.NewSet[uint64](), + } + } + + return statusByEpoch[epoch] +} + +func (s *validatorStatuses) addValidator(vid uint64) { + s.vStatusMutex.Lock() + defer s.vStatusMutex.Unlock() + if _, ok := s.statuses[vid]; !ok { + s.statuses[vid] = make(map[uint64]*validatorStatus) + log.Info("[monitor] add validator", "vid", vid) + } +} + +func (s *validatorStatuses) removeValidator(vid uint64) { + s.vStatusMutex.Lock() + defer s.vStatusMutex.Unlock() + if _, ok := s.statuses[vid]; ok { + delete(s.statuses, vid) + log.Info("[monitor] remove validator", "vid", vid) + } +} + +func (s *validatorStatuses) iterate(run func(vid uint64, statuses map[uint64]*validatorStatus)) { + s.vStatusMutex.Lock() + defer s.vStatusMutex.Unlock() + for vid, statuses := range s.statuses { + run(vid, statuses) + } +} diff --git a/cl/persistence/base_encoding/uint64_diff.go b/cl/persistence/base_encoding/uint64_diff.go index a3aaec64efe..bc920f7cd70 100644 --- a/cl/persistence/base_encoding/uint64_diff.go +++ b/cl/persistence/base_encoding/uint64_diff.go @@ -72,7 +72,7 @@ type repeatedPatternEntry struct { func ComputeCompressedSerializedUint64ListDiff(w io.Writer, old, new []byte) error { if len(old) > len(new) { - return fmt.Errorf("old list is longer than new list") + return errors.New("old list is longer than new list") } compressor := compressorPool.Get().(*zstd.Encoder) @@ -136,7 +136,7 @@ func ComputeCompressedSerializedUint64ListDiff(w io.Writer, old, new []byte) err func ComputeCompressedSerializedEffectiveBalancesDiff(w io.Writer, old, new []byte) error { if len(old) > len(new) { - return fmt.Errorf("old list is longer than new list") + return errors.New("old list is longer than new list") } compressor := compressorPool.Get().(*zstd.Encoder) @@ -264,7 +264,7 @@ func ApplyCompressedSerializedUint64ListDiff(in, out []byte, diff []byte, revers func ComputeCompressedSerializedValidatorSetListDiff(w io.Writer, old, new []byte) error { if len(old) > len(new) { - return fmt.Errorf("old list is longer than new list") + return errors.New("old list is longer than new list") } validatorLength := 121 diff --git a/cl/persistence/blob_storage/blob_db.go b/cl/persistence/blob_storage/blob_db.go index c3cc2593909..74ef592df38 100644 --- a/cl/persistence/blob_storage/blob_db.go +++ b/cl/persistence/blob_storage/blob_db.go @@ -236,7 +236,7 @@ func VerifyAgainstIdentifiersAndInsertIntoTheBlobStore(ctx context.Context, stor return 0, 0, nil } if len(sidecars) > identifiers.Len() { - return 0, 0, fmt.Errorf("sidecars length is greater than identifiers length") + return 0, 0, 
errors.New("sidecars length is greater than identifiers length") } prevBlockRoot := identifiers.Get(0).BlockRoot totalProcessed := 0 @@ -261,7 +261,7 @@ func VerifyAgainstIdentifiersAndInsertIntoTheBlobStore(ctx context.Context, stor } if !cltypes.VerifyCommitmentInclusionProof(sidecar.KzgCommitment, sidecar.CommitmentInclusionProof, sidecar.Index, clparams.DenebVersion, sidecar.SignedBlockHeader.Header.BodyRoot) { - return 0, 0, fmt.Errorf("could not verify blob's inclusion proof") + return 0, 0, errors.New("could not verify blob's inclusion proof") } if verifySignatureFn != nil { // verify the signature of the sidecar head, we leave this step up to the caller to define @@ -305,7 +305,7 @@ func VerifyAgainstIdentifiersAndInsertIntoTheBlobStore(ctx context.Context, stor kzgProofs[i] = gokzg4844.KZGProof(sidecar.KzgProof) } if err := kzgCtx.VerifyBlobKZGProofBatch(blobs, kzgCommitments, kzgProofs); err != nil { - errAtomic.Store(fmt.Errorf("sidecar is wrong")) + errAtomic.Store(errors.New("sidecar is wrong")) return } if err := storage.WriteBlobSidecars(ctx, sds.blockRoot, sds.sidecars); err != nil { diff --git a/cl/persistence/state/historical_states_reader/attesting_indicies.go b/cl/persistence/state/historical_states_reader/attesting_indicies.go index 1a63f09e121..4ac8ded610a 100644 --- a/cl/persistence/state/historical_states_reader/attesting_indicies.go +++ b/cl/persistence/state/historical_states_reader/attesting_indicies.go @@ -17,6 +17,7 @@ package historical_states_reader import ( + "errors" "fmt" libcommon "github.com/erigontech/erigon-lib/common" @@ -50,7 +51,7 @@ func (r *HistoricalStatesReader) attestingIndicies(attestation solid.Attestation bitIndex := i % 8 sliceIndex := i / 8 if sliceIndex >= len(aggregationBits) { - return nil, fmt.Errorf("GetAttestingIndicies: committee is too big") + return nil, errors.New("GetAttestingIndicies: committee is too big") } if (aggregationBits[sliceIndex] & (1 << bitIndex)) > 0 { attestingIndices = append(attestingIndices, member) diff --git a/cl/persistence/state/static_validator_table.go b/cl/persistence/state/static_validator_table.go index 4bc10abe985..2cdbbfdfc9a 100644 --- a/cl/persistence/state/static_validator_table.go +++ b/cl/persistence/state/static_validator_table.go @@ -17,7 +17,7 @@ package state_accessors import ( - "fmt" + "errors" "io" "sync" @@ -292,7 +292,7 @@ func (s *StaticValidatorTable) AddValidator(v solid.Validator, validatorIndex, s } s.validatorTable = append(s.validatorTable, NewStaticValidatorFromValidator(v, slot)) if validatorIndex != uint64(len(s.validatorTable))-1 { - return fmt.Errorf("validator index mismatch") + return errors.New("validator index mismatch") } return nil } @@ -304,7 +304,7 @@ func (s *StaticValidatorTable) AddWithdrawalCredentials(validatorIndex, slot uin return nil } if validatorIndex >= uint64(len(s.validatorTable)) { - return fmt.Errorf("validator index mismatch") + return errors.New("validator index mismatch") } s.validatorTable[validatorIndex].AddWithdrawalCredentials(slot, withdrawalCredentials) return nil @@ -317,7 +317,7 @@ func (s *StaticValidatorTable) AddSlashed(validatorIndex, slot uint64, slashed b return nil } if validatorIndex >= uint64(len(s.validatorTable)) { - return fmt.Errorf("validator index mismatch") + return errors.New("validator index mismatch") } s.validatorTable[validatorIndex].AddSlashed(slot, slashed) return nil @@ -330,7 +330,7 @@ func (s *StaticValidatorTable) AddActivationEligibility(validatorIndex, slot uin return nil } if validatorIndex >= 
uint64(len(s.validatorTable)) { - return fmt.Errorf("validator index mismatch") + return errors.New("validator index mismatch") } s.validatorTable[validatorIndex].AddActivationEligibility(slot, activationEligibility) return nil @@ -343,7 +343,7 @@ func (s *StaticValidatorTable) AddActivationEpoch(validatorIndex, slot uint64, a return nil } if validatorIndex >= uint64(len(s.validatorTable)) { - return fmt.Errorf("validator index mismatch") + return errors.New("validator index mismatch") } s.validatorTable[validatorIndex].AddActivationEpoch(slot, activationEpoch) return nil @@ -356,7 +356,7 @@ func (s *StaticValidatorTable) AddExitEpoch(validatorIndex, slot uint64, exitEpo return nil } if validatorIndex >= uint64(len(s.validatorTable)) { - return fmt.Errorf("validator index mismatch") + return errors.New("validator index mismatch") } s.validatorTable[validatorIndex].AddExitEpoch(slot, exitEpoch) return nil @@ -369,7 +369,7 @@ func (s *StaticValidatorTable) AddWithdrawableEpoch(validatorIndex, slot uint64, return nil } if validatorIndex >= uint64(len(s.validatorTable)) { - return fmt.Errorf("validator index mismatch") + return errors.New("validator index mismatch") } s.validatorTable[validatorIndex].AddWithdrawableEpoch(slot, withdrawableEpoch) return nil diff --git a/cl/phase1/core/checkpoint.go b/cl/phase1/core/checkpoint.go deleted file mode 100644 index 1a52a6cd932..00000000000 --- a/cl/phase1/core/checkpoint.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2024 The Erigon Authors -// This file is part of Erigon. -// -// Erigon is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// Erigon is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with Erigon. If not, see . 
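For context on the `beaconState[40:48]` read in `extractSlotFromSerializedBeaconState` (in the listing below, and kept unchanged in the new `checkpoint_sync` package): the leading `BeaconState` fields are fixed-size under SSZ, so the slot sits at a constant offset across forks. A standalone sketch under that assumption (`slotOf` is a hypothetical name):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// SSZ lays out the first BeaconState fields at fixed offsets:
//   genesis_time            uint64  bytes [0:8]
//   genesis_validators_root Root    bytes [8:40]
//   slot                    uint64  bytes [40:48]
func slotOf(beaconState []byte) (uint64, error) {
	if len(beaconState) < 48 {
		return 0, fmt.Errorf("state too short: %d bytes", len(beaconState))
	}
	return binary.LittleEndian.Uint64(beaconState[40:48]), nil
}

func main() {
	state := make([]byte, 48)
	binary.LittleEndian.PutUint64(state[40:], 123456) // pretend slot
	slot, _ := slotOf(state)
	fmt.Println(slot) // 123456
}
```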
- -package core - -import ( - "context" - "encoding/binary" - "fmt" - "io" - "net/http" - - "github.com/erigontech/erigon/cl/cltypes" - "github.com/erigontech/erigon/cl/phase1/core/state" - - libcommon "github.com/erigontech/erigon-lib/common" - "github.com/erigontech/erigon-lib/log/v3" - "github.com/erigontech/erigon/cl/clparams" -) - -func extractSlotFromSerializedBeaconState(beaconState []byte) (uint64, error) { - if len(beaconState) < 48 { - return 0, fmt.Errorf("checkpoint sync read failed, too short") - } - return binary.LittleEndian.Uint64(beaconState[40:48]), nil -} - -func RetrieveBeaconState(ctx context.Context, beaconConfig *clparams.BeaconChainConfig, net clparams.NetworkType) (*state.CachingBeaconState, error) { - uris := clparams.GetAllCheckpointSyncEndpoints(net) - if len(uris) == 0 { - return nil, fmt.Errorf("no uris for checkpoint sync") - } - - fetchBeaconState := func(uri string) (*state.CachingBeaconState, error) { - log.Info("[Checkpoint Sync] Requesting beacon state", "uri", uri) - req, err := http.NewRequestWithContext(ctx, http.MethodGet, uri, nil) - if err != nil { - return nil, err - } - - req.Header.Set("Accept", "application/octet-stream") - if err != nil { - return nil, fmt.Errorf("checkpoint sync request failed %s", err) - } - r, err := http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - defer func() { - err = r.Body.Close() - }() - if r.StatusCode != http.StatusOK { - return nil, fmt.Errorf("checkpoint sync failed, bad status code %d", r.StatusCode) - } - marshaled, err := io.ReadAll(r.Body) - if err != nil { - return nil, fmt.Errorf("checkpoint sync read failed %s", err) - } - - epoch, err := extractSlotFromSerializedBeaconState(marshaled) - if err != nil { - return nil, fmt.Errorf("checkpoint sync read failed %s", err) - } - - beaconState := state.New(beaconConfig) - err = beaconState.DecodeSSZ(marshaled, int(beaconConfig.GetCurrentStateVersion(epoch))) - if err != nil { - return nil, fmt.Errorf("checkpoint sync decode failed %s", err) - } - return beaconState, nil - } - - // Try all uris until one succeeds - var err error - var beaconState *state.CachingBeaconState - for _, uri := range uris { - beaconState, err = fetchBeaconState(uri) - if err == nil { - return beaconState, nil - } - log.Warn("[Checkpoint Sync] Failed to fetch beacon state", "uri", uri, "err", err) - } - return nil, err -} - -func RetrieveBlock(ctx context.Context, beaconConfig *clparams.BeaconChainConfig, uri string, expectedBlockRoot *libcommon.Hash) (*cltypes.SignedBeaconBlock, error) { - log.Debug("[Checkpoint Sync] Requesting beacon block", "uri", uri) - req, err := http.NewRequestWithContext(ctx, http.MethodGet, uri, nil) - if err != nil { - return nil, err - } - - req.Header.Set("Accept", "application/octet-stream") - if err != nil { - return nil, fmt.Errorf("checkpoint sync request failed %s", err) - } - r, err := http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - defer func() { - err = r.Body.Close() - }() - if r.StatusCode != http.StatusOK { - return nil, fmt.Errorf("checkpoint sync failed, bad status code %d", r.StatusCode) - } - marshaled, err := io.ReadAll(r.Body) - if err != nil { - return nil, fmt.Errorf("checkpoint sync read failed %s", err) - } - if len(marshaled) < 108 { - return nil, fmt.Errorf("checkpoint sync read failed, too short") - } - currentSlot := binary.LittleEndian.Uint64(marshaled[100:108]) - v := beaconConfig.GetCurrentStateVersion(currentSlot / beaconConfig.SlotsPerEpoch) - - block := 
cltypes.NewSignedBeaconBlock(beaconConfig) - err = block.DecodeSSZ(marshaled, int(v)) - if err != nil { - return nil, fmt.Errorf("checkpoint sync decode failed %s", err) - } - if expectedBlockRoot != nil { - has, err := block.Block.HashSSZ() - if err != nil { - return nil, fmt.Errorf("checkpoint sync decode failed %s", err) - } - if has != *expectedBlockRoot { - return nil, fmt.Errorf("checkpoint sync decode failed, unexpected block root %s", has) - } - } - return block, nil -} diff --git a/cl/phase1/core/checkpoint_sync/checkpoint_sync_test.go b/cl/phase1/core/checkpoint_sync/checkpoint_sync_test.go new file mode 100644 index 00000000000..3341f7cafc7 --- /dev/null +++ b/cl/phase1/core/checkpoint_sync/checkpoint_sync_test.go @@ -0,0 +1,89 @@ +package checkpoint_sync + +import ( + "context" + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/erigontech/erigon/cl/antiquary/tests" + "github.com/erigontech/erigon/cl/clparams" + "github.com/erigontech/erigon/cl/cltypes" + "github.com/erigontech/erigon/cl/utils" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRemoteCheckpointSync(t *testing.T) { + _, st, _ := tests.GetPhase0Random() + rec := false + // Create a mock HTTP server + mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + enc, err := st.EncodeSSZ(nil) + if err != nil { + http.Error(w, fmt.Sprintf("could not encode state: %s", err), http.StatusInternalServerError) + return + } + w.Write(enc) + rec = true + })) + defer mockServer.Close() + + clparams.ConfigurableCheckpointsURLs = []string{mockServer.URL} + syncer := NewRemoteCheckpointSync(&clparams.MainnetBeaconConfig, clparams.MainnetNetwork) + state, err := syncer.GetLatestBeaconState(context.Background()) + assert.True(t, rec) + require.NoError(t, err) + require.NotNil(t, state) + // Compare the roots of the states + haveRoot, err := st.HashSSZ() + require.NoError(t, err) + wantRoot, err := state.HashSSZ() + require.NoError(t, err) + + assert.Equal(t, haveRoot, wantRoot) +} + +func TestLocalCheckpointSyncFromFile(t *testing.T) { + _, st, _ := tests.GetPhase0Random() + f := afero.NewMemMapFs() + enc, err := st.EncodeSSZ(nil) + enc = utils.CompressSnappy(enc) + require.NoError(t, err) + require.NoError(t, afero.WriteFile(f, clparams.LatestStateFileName, enc, 0644)) + + genesisState, err := st.Copy() + require.NoError(t, err) + genesisState.AddEth1DataVote(cltypes.NewEth1Data()) // Add some data to the genesis state so that it is different from the state read from the file + + syncer := NewLocalCheckpointSyncer(genesisState, f) + state, err := syncer.GetLatestBeaconState(context.Background()) + require.NoError(t, err) + require.NotNil(t, state) + // Compare the roots of the states + haveRoot, err := st.HashSSZ() + require.NoError(t, err) + wantRoot, err := state.HashSSZ() + require.NoError(t, err) + + assert.Equal(t, haveRoot, wantRoot) +} + +func TestLocalCheckpointSyncFromGenesis(t *testing.T) { + _, st, _ := tests.GetPhase0Random() + f := afero.NewMemMapFs() + + syncer := NewLocalCheckpointSyncer(st, f) + state, err := syncer.GetLatestBeaconState(context.Background()) + require.NoError(t, err) + require.NotNil(t, state) + // Compare the roots of the states + haveRoot, err := st.HashSSZ() + require.NoError(t, err) + wantRoot, err := state.HashSSZ() + require.NoError(t, err) + + assert.Equal(t, haveRoot, wantRoot) +} diff --git a/cl/phase1/core/checkpoint_sync/interface.go 
b/cl/phase1/core/checkpoint_sync/interface.go new file mode 100644 index 00000000000..1422d0c23ea --- /dev/null +++ b/cl/phase1/core/checkpoint_sync/interface.go @@ -0,0 +1,11 @@ +package checkpoint_sync + +import ( + "context" + + "github.com/erigontech/erigon/cl/phase1/core/state" +) + +type CheckpointSyncer interface { + GetLatestBeaconState(ctx context.Context) (*state.CachingBeaconState, error) +} diff --git a/cl/phase1/core/checkpoint_sync/local_checkpoint_syncer.go b/cl/phase1/core/checkpoint_sync/local_checkpoint_syncer.go new file mode 100644 index 00000000000..c264fb7cd79 --- /dev/null +++ b/cl/phase1/core/checkpoint_sync/local_checkpoint_syncer.go @@ -0,0 +1,50 @@ +package checkpoint_sync + +import ( + "context" + "fmt" + + "github.com/erigontech/erigon-lib/log/v3" + "github.com/erigontech/erigon/cl/clparams" + "github.com/erigontech/erigon/cl/phase1/core/state" + "github.com/erigontech/erigon/cl/utils" + "github.com/spf13/afero" +) + +type LocalCheckpointSyncer struct { + genesisState *state.CachingBeaconState + dir afero.Fs +} + +// The local checkpoint syncer, loads a checkpoint from the local disk or uses the genesis state. +func NewLocalCheckpointSyncer(genesisState *state.CachingBeaconState, dir afero.Fs) CheckpointSyncer { + return &LocalCheckpointSyncer{ + genesisState: genesisState, + dir: dir, + } + +} + +func (l *LocalCheckpointSyncer) GetLatestBeaconState(ctx context.Context) (*state.CachingBeaconState, error) { + // Open file {latestStateSubDir}/{fileName} + snappyEncoded, err := afero.ReadFile(l.dir, clparams.LatestStateFileName) + if err != nil { + log.Warn("Could not read local state, starting sync from genesis.") + return l.genesisState.Copy() + } + decompressedSnappy, err := utils.DecompressSnappy(snappyEncoded) + if err != nil { + return nil, fmt.Errorf("local state is corrupt: %s", err) + } + + beaconCfg := l.genesisState.BeaconConfig() + bs := state.New(beaconCfg) + slot, err := extractSlotFromSerializedBeaconState(decompressedSnappy) + if err != nil { + return nil, fmt.Errorf("could not deserialize state slot: %s", err) + } + if err := bs.DecodeSSZ(decompressedSnappy, int(beaconCfg.GetCurrentStateVersion(slot/beaconCfg.SlotsPerEpoch))); err != nil { + return nil, fmt.Errorf("could not deserialize state: %s", err) + } + return bs, nil +} diff --git a/cl/phase1/core/checkpoint_sync/remote_checkpoint_sync.go b/cl/phase1/core/checkpoint_sync/remote_checkpoint_sync.go new file mode 100644 index 00000000000..cbb54c81c9f --- /dev/null +++ b/cl/phase1/core/checkpoint_sync/remote_checkpoint_sync.go @@ -0,0 +1,93 @@ +package checkpoint_sync + +import ( + "context" + "encoding/binary" + "errors" + "fmt" + "io" + "net/http" + + "github.com/erigontech/erigon-lib/log/v3" + "github.com/erigontech/erigon/cl/clparams" + "github.com/erigontech/erigon/cl/phase1/core/state" +) + +// RemoteCheckpointSync is a CheckpointSyncer that fetches the checkpoint state from a remote endpoint. 
+type RemoteCheckpointSync struct { + beaconConfig *clparams.BeaconChainConfig + net clparams.NetworkType +} + +func NewRemoteCheckpointSync(beaconConfig *clparams.BeaconChainConfig, net clparams.NetworkType) CheckpointSyncer { + return &RemoteCheckpointSync{ + beaconConfig: beaconConfig, + net: net, + } +} + +func extractSlotFromSerializedBeaconState(beaconState []byte) (uint64, error) { + if len(beaconState) < 48 { + return 0, errors.New("checkpoint sync read failed, too short") + } + return binary.LittleEndian.Uint64(beaconState[40:48]), nil +} + +func (r *RemoteCheckpointSync) GetLatestBeaconState(ctx context.Context) (*state.CachingBeaconState, error) { + uris := clparams.GetAllCheckpointSyncEndpoints(r.net) + if len(uris) == 0 { + return nil, errors.New("no uris for checkpoint sync") + } + + fetchBeaconState := func(uri string) (*state.CachingBeaconState, error) { + log.Info("[Checkpoint Sync] Requesting beacon state", "uri", uri) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + + req.Header.Set("Accept", "application/octet-stream") + if err != nil { + return nil, fmt.Errorf("checkpoint sync request failed %s", err) + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer func() { + err = resp.Body.Close() + }() + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("checkpoint sync failed, bad status code %d", resp.StatusCode) + } + marshaled, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("checkpoint sync read failed %s", err) + } + + epoch, err := extractSlotFromSerializedBeaconState(marshaled) + if err != nil { + return nil, fmt.Errorf("checkpoint sync read failed %s", err) + } + + beaconState := state.New(r.beaconConfig) + err = beaconState.DecodeSSZ(marshaled, int(r.beaconConfig.GetCurrentStateVersion(epoch))) + if err != nil { + return nil, fmt.Errorf("checkpoint sync decode failed %s", err) + } + return beaconState, nil + } + + // Try all uris until one succeeds + var err error + var beaconState *state.CachingBeaconState + for _, uri := range uris { + beaconState, err = fetchBeaconState(uri) + if err == nil { + return beaconState, nil + } + log.Warn("[Checkpoint Sync] Failed to fetch beacon state", "uri", uri, "err", err) + } + return nil, err + +} diff --git a/cl/phase1/core/checkpoint_sync/util.go b/cl/phase1/core/checkpoint_sync/util.go new file mode 100644 index 00000000000..c23f8424133 --- /dev/null +++ b/cl/phase1/core/checkpoint_sync/util.go @@ -0,0 +1,35 @@ +package checkpoint_sync + +import ( + "context" + + "github.com/erigontech/erigon-lib/common/datadir" + "github.com/erigontech/erigon-lib/log/v3" + "github.com/erigontech/erigon/cl/clparams" + "github.com/erigontech/erigon/cl/clparams/initial_state" + "github.com/erigontech/erigon/cl/phase1/core/state" + "github.com/spf13/afero" +) + +// ReadOrFetchLatestBeaconState reads the latest beacon state from disk or fetches it from the network. 
+func ReadOrFetchLatestBeaconState(ctx context.Context, dirs datadir.Dirs, beaconCfg *clparams.BeaconChainConfig, caplinConfig clparams.CaplinConfig) (*state.CachingBeaconState, error) { + var syncer CheckpointSyncer + remoteSync := !caplinConfig.DisabledCheckpointSync + + if !initial_state.IsGenesisStateSupported(caplinConfig.NetworkId) && !remoteSync { + log.Warn("Local checkpoint sync is not supported for this network, falling back to remote sync") + remoteSync = true + } + if remoteSync { + syncer = NewRemoteCheckpointSync(beaconCfg, caplinConfig.NetworkId) + } else { + aferoFs := afero.NewOsFs() + + genesisState, err := initial_state.GetGenesisState(caplinConfig.NetworkId) + if err != nil { + return nil, err + } + syncer = NewLocalCheckpointSyncer(genesisState, afero.NewBasePathFs(aferoFs, dirs.CaplinLatest)) + } + return syncer.GetLatestBeaconState(ctx) +} diff --git a/cl/phase1/core/rawdb/config.go b/cl/phase1/core/rawdb/config.go deleted file mode 100644 index 59634e0e51a..00000000000 --- a/cl/phase1/core/rawdb/config.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2024 The Erigon Authors -// This file is part of Erigon. -// -// Erigon is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// Erigon is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with Erigon. If not, see . - -package rawdb - -import ( - "encoding/json" - "math" - - "github.com/erigontech/erigon-lib/kv" -) - -type BeaconDataConfig struct { - BackFillingAmount uint64 `json:"backFillingAmount"` // it is string to handle all/minimal. 
- SlotPerRestorePoint uint64 `json:"sprp"` // TODO -} - -var beaconDataKey = []byte("beaconData") - -// Configurations for beacon database config -var BeaconDataConfigurations map[string]*BeaconDataConfig = map[string]*BeaconDataConfig{ - "full": { - BackFillingAmount: math.MaxUint64, - SlotPerRestorePoint: 0, - }, - "minimal": { - BackFillingAmount: 500_000, - SlotPerRestorePoint: 0, - }, - "light": { - BackFillingAmount: 0, - SlotPerRestorePoint: 0, - }, -} - -func WriteBeaconDataConfig(tx kv.Putter, cfg *BeaconDataConfig) error { - var ( - data []byte - err error - ) - if data, err = json.Marshal(cfg); err != nil { - return err - } - return tx.Put(kv.DatabaseInfo, beaconDataKey, data) -} - -func ReadBeaconDataConfig(tx kv.Getter) (*BeaconDataConfig, error) { - var ( - data []byte - err error - cfg = &BeaconDataConfig{} - ) - if data, err = tx.GetOne(kv.DatabaseInfo, beaconDataKey); err != nil { - return nil, err - } - if len(data) == 0 { - return nil, nil - } - - if err = json.Unmarshal(data, &cfg); err != nil { - return nil, err - } - return cfg, nil -} diff --git a/cl/phase1/core/state/accessors.go b/cl/phase1/core/state/accessors.go index 47198f38fe4..95021de1484 100644 --- a/cl/phase1/core/state/accessors.go +++ b/cl/phase1/core/state/accessors.go @@ -18,6 +18,7 @@ package state import ( "encoding/binary" + "errors" "fmt" "github.com/Giulio2002/bls" @@ -128,7 +129,7 @@ func EligibleValidatorsIndicies(b abstract.BeaconState) (eligibleValidators []ui func IsValidIndexedAttestation(b abstract.BeaconStateBasic, att *cltypes.IndexedAttestation) (bool, error) { inds := att.AttestingIndices if inds.Length() == 0 || !solid.IsUint64SortedSet(inds) { - return false, fmt.Errorf("isValidIndexedAttestation: attesting indices are not sorted or are null") + return false, errors.New("isValidIndexedAttestation: attesting indices are not sorted or are null") } pks := make([][]byte, 0, inds.Length()) @@ -159,7 +160,7 @@ func IsValidIndexedAttestation(b abstract.BeaconStateBasic, att *cltypes.Indexed return false, fmt.Errorf("error while validating signature: %v", err) } if !valid { - return false, fmt.Errorf("invalid aggregate signature") + return false, errors.New("invalid aggregate signature") } return true, nil } @@ -174,7 +175,7 @@ func GetUnslashedParticipatingIndices(b abstract.BeaconState, flagIndex int, epo case PreviousEpoch(b): participation = b.EpochParticipation(false) default: - return nil, fmt.Errorf("getUnslashedParticipatingIndices: only epoch and previous epoch can be used") + return nil, errors.New("getUnslashedParticipatingIndices: only epoch and previous epoch can be used") } // Iterate over all validators and include the active ones that have flag_index enabled and are not slashed. 
b.ForEachValidator(func(validator solid.Validator, i, total int) bool {
diff --git a/cl/phase1/core/state/cache_accessors.go b/cl/phase1/core/state/cache_accessors.go
index 41fea3a699a..c152954292d 100644
--- a/cl/phase1/core/state/cache_accessors.go
+++ b/cl/phase1/core/state/cache_accessors.go
@@ -19,6 +19,7 @@ package state
 import (
 	"crypto/sha256"
 	"encoding/binary"
+	"errors"
 	"fmt"
 	"math"
@@ -198,7 +199,7 @@ func (b *CachingBeaconState) GetAttestationParticipationFlagIndicies(
 	}
 	// Matching roots
 	if !data.Source().Equal(justifiedCheckpoint) && !skipAssert {
-		return nil, fmt.Errorf("GetAttestationParticipationFlagIndicies: source does not match")
+		return nil, errors.New("GetAttestationParticipationFlagIndicies: source does not match")
 	}
 	targetRoot, err := GetBlockRoot(b, data.Target().Epoch())
 	if err != nil {
@@ -344,7 +345,7 @@ func (b *CachingBeaconState) GetAttestingIndicies(
 		bitIndex := i % 8
 		sliceIndex := i / 8
 		if sliceIndex >= len(aggregationBits) {
-			return nil, fmt.Errorf("GetAttestingIndicies: committee is too big")
+			return nil, errors.New("GetAttestingIndicies: committee is too big")
 		}
 		if (aggregationBits[sliceIndex] & (1 << bitIndex)) > 0 {
 			attestingIndices = append(attestingIndices, member)
diff --git a/cl/phase1/core/state/cache_mutators.go b/cl/phase1/core/state/cache_mutators.go
index 70bc41fbba2..351567a4315 100644
--- a/cl/phase1/core/state/cache_mutators.go
+++ b/cl/phase1/core/state/cache_mutators.go
@@ -17,6 +17,7 @@
 package state
 
 import (
+	"errors"
 	"fmt"
 
 	"github.com/erigontech/erigon-lib/common/math"
@@ -120,7 +121,7 @@ func (b *CachingBeaconState) InitiateValidatorExit(index uint64) error {
 	var overflow bool
 	var newWithdrawableEpoch uint64
 	if newWithdrawableEpoch, overflow = math.SafeAdd(exitQueueEpoch, b.BeaconConfig().MinValidatorWithdrawabilityDelay); overflow {
-		return fmt.Errorf("withdrawable epoch is too big")
+		return errors.New("withdrawable epoch is too big")
 	}
 	b.SetExitEpochForValidatorAtIndex(int(index), exitQueueEpoch)
 	b.SetWithdrawableEpochForValidatorAtIndex(int(index), newWithdrawableEpoch)
diff --git a/cl/phase1/core/state/lru/lru.go b/cl/phase1/core/state/lru/lru.go
index 8668f51c9d3..8110a3150b5 100644
--- a/cl/phase1/core/state/lru/lru.go
+++ b/cl/phase1/core/state/lru/lru.go
@@ -29,8 +29,9 @@ import (
 // Cache is a wrapper around hashicorp lru but with metric for Get
 type Cache[K comparable, V any] struct {
 	*lru.Cache[K, V]
-	metricName string
+	// metrics
+	metricHit, metricMiss metrics.Counter
 }
 
 func NewWithEvict[K comparable, V any](metricName string, size int, fn func(K, V)) (*Cache[K, V], error) {
@@ -38,7 +39,11 @@ func NewWithEvict[K comparable, V any](metricName string, size int, fn func(K, V
 	if err != nil {
 		return nil, err
 	}
-	return &Cache[K, V]{Cache: v, metricName: metricName}, nil
+	return &Cache[K, V]{
+		Cache:      v,
+		metricHit:  metrics.GetOrCreateCounter(fmt.Sprintf(`golang_lru_cache_hit{%s="%s"}`, "cache", metricName)),
+		metricMiss: metrics.GetOrCreateCounter(fmt.Sprintf(`golang_lru_cache_miss{%s="%s"}`, "cache", metricName)),
+	}, nil
 }
 
 func New[K comparable, V any](metricName string, size int) (*Cache[K, V], error) {
@@ -46,15 +51,19 @@ func New[K comparable, V any](metricName string, size int) (*Cache[K, V], error)
 	if err != nil {
 		return nil, err
 	}
-	return &Cache[K, V]{Cache: v, metricName: metricName}, nil
+	return &Cache[K, V]{
+		Cache:      v,
+		metricHit:  metrics.GetOrCreateCounter(fmt.Sprintf(`golang_lru_cache_hit{%s="%s"}`, "cache", metricName)),
+		metricMiss:
metrics.GetOrCreateCounter(fmt.Sprintf(`golang_lru_cache_miss{%s="%s"}`, "cache", metricName)), + }, nil } func (c *Cache[K, V]) Get(k K) (V, bool) { v, ok := c.Cache.Get(k) if ok { - metrics.GetOrCreateCounter(fmt.Sprintf(`golang_lru_cache_hit{%s="%s"}`, "cache", c.metricName)).Inc() + c.metricHit.Inc() } else { - metrics.GetOrCreateCounter(fmt.Sprintf(`golang_lru_cache_miss{%s="%s"}`, "cache", c.metricName)).Inc() + c.metricMiss.Inc() } return v, ok } @@ -62,19 +73,26 @@ func (c *Cache[K, V]) Get(k K) (V, bool) { type CacheWithTTL[K comparable, V any] struct { *expirable.LRU[K, V] metric string + // metrics + metricTTLHit, metricTTLMiss metrics.Counter } func NewWithTTL[K comparable, V any](metricName string, size int, ttl time.Duration) *CacheWithTTL[K, V] { cache := expirable.NewLRU[K, V](size, nil, ttl) - return &CacheWithTTL[K, V]{LRU: cache, metric: metricName} + return &CacheWithTTL[K, V]{ + LRU: cache, + metric: metricName, + metricTTLHit: metrics.GetOrCreateCounter(fmt.Sprintf(`golang_ttl_lru_cache_hit{%s="%s"}`, "cache", metricName)), + metricTTLMiss: metrics.GetOrCreateCounter(fmt.Sprintf(`golang_ttl_lru_cache_miss{%s="%s"}`, "cache", metricName)), + } } func (c *CacheWithTTL[K, V]) Get(k K) (V, bool) { v, ok := c.LRU.Get(k) if ok { - metrics.GetOrCreateCounter(fmt.Sprintf(`golang_ttl_lru_cache_hit{%s="%s"}`, "cache", c.metric)).Inc() + c.metricTTLHit.Inc() } else { - metrics.GetOrCreateCounter(fmt.Sprintf(`golang_ttl_lru_cache_miss{%s="%s"}`, "cache", c.metric)).Inc() + c.metricTTLMiss.Inc() } return v, ok } diff --git a/cl/phase1/core/state/raw/getters.go b/cl/phase1/core/state/raw/getters.go index d2d858e8a06..34df2b6a5de 100644 --- a/cl/phase1/core/state/raw/getters.go +++ b/cl/phase1/core/state/raw/getters.go @@ -331,7 +331,7 @@ func (b *BeaconState) GetBlockRootAtSlot(slot uint64) (libcommon.Hash, error) { return libcommon.Hash{}, ErrGetBlockRootAtSlotFuture } if b.Slot() > slot+b.BeaconConfig().SlotsPerHistoricalRoot { - return libcommon.Hash{}, fmt.Errorf("GetBlockRootAtSlot: slot too much far behind") + return libcommon.Hash{}, errors.New("GetBlockRootAtSlot: slot too much far behind") } return b.blockRoots.Get(int(slot % b.BeaconConfig().SlotsPerHistoricalRoot)), nil } diff --git a/cl/phase1/core/state/raw/hashing.go b/cl/phase1/core/state/raw/hashing.go index 0728cfff67b..746fa957ff2 100644 --- a/cl/phase1/core/state/raw/hashing.go +++ b/cl/phase1/core/state/raw/hashing.go @@ -17,11 +17,11 @@ package raw import ( - "time" + "sync" "github.com/erigontech/erigon-lib/common" libcommon "github.com/erigontech/erigon-lib/common" - "github.com/erigontech/erigon-lib/log/v3" + "github.com/erigontech/erigon-lib/types/ssz" "github.com/erigontech/erigon/cl/clparams" "github.com/erigontech/erigon/cl/merkle_tree" ) @@ -32,7 +32,6 @@ func (b *BeaconState) HashSSZ() (out [32]byte, err error) { if err = b.computeDirtyLeaves(); err != nil { return [32]byte{}, err } - // for i := 0; i < len(b.leaves); i += 32 { // fmt.Println(i/32, libcommon.BytesToHash(b.leaves[i:i+32])) // } @@ -88,278 +87,115 @@ func preparateRootsForHashing(roots []common.Hash) [][32]byte { return ret } -func (b *BeaconState) computeDirtyLeaves() error { - // Update all dirty leafs - // ---- - // Field(0): GenesisTime - if b.isLeafDirty(GenesisTimeLeafIndex) { - b.updateLeaf(GenesisTimeLeafIndex, merkle_tree.Uint64Root(b.genesisTime)) - } - - // Field(1): GenesisValidatorsRoot - if b.isLeafDirty(GenesisValidatorsRootLeafIndex) { - b.updateLeaf(GenesisValidatorsRootLeafIndex, b.genesisValidatorsRoot) - } - - // 
Field(2): Slot - if b.isLeafDirty(SlotLeafIndex) { - b.updateLeaf(SlotLeafIndex, merkle_tree.Uint64Root(b.slot)) - } - - // Field(3): Fork - if b.isLeafDirty(ForkLeafIndex) { - forkRoot, err := b.fork.HashSSZ() - if err != nil { - return err - } - b.updateLeaf(ForkLeafIndex, forkRoot) - } - - // Field(4): LatestBlockHeader - if b.isLeafDirty(LatestBlockHeaderLeafIndex) { - headerRoot, err := b.latestBlockHeader.HashSSZ() - if err != nil { - return err - } - b.updateLeaf(LatestBlockHeaderLeafIndex, headerRoot) - } - - // Field(5): BlockRoots - if b.isLeafDirty(BlockRootsLeafIndex) { - root, err := b.blockRoots.HashSSZ() - if err != nil { - return err - } - b.updateLeaf(BlockRootsLeafIndex, root) - } - - // Field(6): StateRoots - if b.isLeafDirty(StateRootsLeafIndex) { - root, err := b.stateRoots.HashSSZ() - if err != nil { - return err - } - b.updateLeaf(StateRootsLeafIndex, root) - } - - begin := time.Now() - - // Field(7): HistoricalRoots - if b.isLeafDirty(HistoricalRootsLeafIndex) { - root, err := b.historicalRoots.HashSSZ() - if err != nil { - return err - } - b.updateLeaf(HistoricalRootsLeafIndex, root) - } - log.Trace("HistoricalRoots hashing", "elapsed", time.Since(begin)) - - // Field(8): Eth1Data - if b.isLeafDirty(Eth1DataLeafIndex) { - dataRoot, err := b.eth1Data.HashSSZ() - if err != nil { - return err - } - b.updateLeaf(Eth1DataLeafIndex, dataRoot) - } - - // Field(9): Eth1DataVotes - if b.isLeafDirty(Eth1DataVotesLeafIndex) { - root, err := b.eth1DataVotes.HashSSZ() - if err != nil { - return err - } - b.updateLeaf(Eth1DataVotesLeafIndex, root) - } - - // Field(10): Eth1DepositIndex - if b.isLeafDirty(Eth1DepositIndexLeafIndex) { - b.updateLeaf(Eth1DepositIndexLeafIndex, merkle_tree.Uint64Root(b.eth1DepositIndex)) - } - - begin = time.Now() - - // Field(11): Validators - if b.isLeafDirty(ValidatorsLeafIndex) { - root, err := b.validators.HashSSZ() - if err != nil { - return err - } - b.updateLeaf(ValidatorsLeafIndex, root) - - } - log.Trace("ValidatorSet hashing", "elapsed", time.Since(begin)) - - begin = time.Now() - // Field(12): Balances - if b.isLeafDirty(BalancesLeafIndex) { - root, err := b.balances.HashSSZ() - if err != nil { - return err - } - b.updateLeaf(BalancesLeafIndex, root) - } - log.Trace("Balances hashing", "elapsed", time.Since(begin)) +type beaconStateHasher struct { + b *BeaconState + jobs map[StateLeafIndex]any + results sync.Map +} - begin = time.Now() - // Field(13): RandaoMixes - if b.isLeafDirty(RandaoMixesLeafIndex) { - root, err := b.randaoMixes.HashSSZ() - if err != nil { - return err - } - b.updateLeaf(RandaoMixesLeafIndex, root) - } - log.Trace("RandaoMixes hashing", "elapsed", time.Since(begin)) +func (p *beaconStateHasher) run() { + wg := sync.WaitGroup{} + if p.jobs == nil { + p.jobs = make(map[StateLeafIndex]any) + } + + for idx, job := range p.jobs { + wg.Add(1) + go func(idx StateLeafIndex, job any) { + defer wg.Done() + switch obj := job.(type) { + case ssz.HashableSSZ: + root, err := obj.HashSSZ() + if err != nil { + panic(err) + } + p.results.Store(idx, root) + case uint64: + p.results.Store(idx, [32]byte(merkle_tree.Uint64Root(obj))) + case libcommon.Hash: + p.results.Store(idx, [32]byte(obj)) + } + + }(idx, job) + } + wg.Wait() + p.results.Range(func(key, value any) bool { + idx := key.(StateLeafIndex) + root := value.([32]byte) + p.b.updateLeaf(idx, root) + return true + }) +} - begin = time.Now() - // Field(14): Slashings - if b.isLeafDirty(SlashingsLeafIndex) { - root, err := b.slashings.HashSSZ() - if err != nil { - return err - } 
- b.updateLeaf(SlashingsLeafIndex, root) +func (p *beaconStateHasher) add(idx StateLeafIndex, job any) { + if !p.b.isLeafDirty(idx) { + return } - log.Trace("Slashings hashing", "elapsed", time.Since(begin)) - // Field(15) and Field(16) are special due to the fact that they have different format in Phase0. - begin = time.Now() - // Field(15): PreviousEpochParticipation - if b.isLeafDirty(PreviousEpochParticipationLeafIndex) { - var root libcommon.Hash - var err error - if b.version == clparams.Phase0Version { - root, err = b.previousEpochAttestations.HashSSZ() - } else { - root, err = b.previousEpochParticipation.HashSSZ() - } - if err != nil { - return err - } - - b.updateLeaf(PreviousEpochParticipationLeafIndex, root) + if p.jobs == nil { + p.jobs = make(map[StateLeafIndex]any) } - log.Trace("PreviousEpochParticipation hashing", "elapsed", time.Since(begin)) - - begin = time.Now() + p.jobs[idx] = job +} - // Field(16): CurrentEpochParticipation - if b.isLeafDirty(CurrentEpochParticipationLeafIndex) { - var root libcommon.Hash - var err error - if b.version == clparams.Phase0Version { - root, err = b.currentEpochAttestations.HashSSZ() - } else { - root, err = b.currentEpochParticipation.HashSSZ() - } - if err != nil { - return err - } - b.updateLeaf(CurrentEpochParticipationLeafIndex, root) +func (b *BeaconState) computeDirtyLeaves() error { + beaconStateHasher := &beaconStateHasher{b: b} + // Update all dirty leafs. + beaconStateHasher.add(GenesisTimeLeafIndex, b.genesisTime) + beaconStateHasher.add(GenesisValidatorsRootLeafIndex, b.genesisValidatorsRoot) + beaconStateHasher.add(SlotLeafIndex, b.slot) + beaconStateHasher.add(ForkLeafIndex, b.fork) + beaconStateHasher.add(LatestBlockHeaderLeafIndex, b.latestBlockHeader) + beaconStateHasher.add(BlockRootsLeafIndex, b.blockRoots) + beaconStateHasher.add(StateRootsLeafIndex, b.stateRoots) + beaconStateHasher.add(HistoricalRootsLeafIndex, b.historicalRoots) + beaconStateHasher.add(Eth1DataLeafIndex, b.eth1Data) + beaconStateHasher.add(Eth1DataVotesLeafIndex, b.eth1DataVotes) + beaconStateHasher.add(Eth1DepositIndexLeafIndex, b.eth1DepositIndex) + beaconStateHasher.add(ValidatorsLeafIndex, b.validators) + beaconStateHasher.add(BalancesLeafIndex, b.balances) + beaconStateHasher.add(RandaoMixesLeafIndex, b.randaoMixes) + beaconStateHasher.add(SlashingsLeafIndex, b.slashings) + // Special case for Participation, if phase0 use attestation format, otherwise use bitlist format. 
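+	// (Phase0 tracks pending attestation objects, while Altair and later track
+	// per-validator participation bitlists, so the two encodings hash differently.)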
+ if b.version == clparams.Phase0Version { + beaconStateHasher.add(PreviousEpochParticipationLeafIndex, b.previousEpochAttestations) + beaconStateHasher.add(CurrentEpochParticipationLeafIndex, b.currentEpochAttestations) + } else { + beaconStateHasher.add(PreviousEpochParticipationLeafIndex, b.previousEpochParticipation) + beaconStateHasher.add(CurrentEpochParticipationLeafIndex, b.currentEpochParticipation) } - log.Trace("CurrentEpochParticipation hashing", "elapsed", time.Since(begin)) // Field(17): JustificationBits - if b.isLeafDirty(JustificationBitsLeafIndex) { - root, _ := b.justificationBits.HashSSZ() - b.updateLeaf(JustificationBitsLeafIndex, root) - } - - // Field(18): PreviousJustifiedCheckpoint - if b.isLeafDirty(PreviousJustifiedCheckpointLeafIndex) { - checkpointRoot, err := b.previousJustifiedCheckpoint.HashSSZ() - if err != nil { - return err - } - b.updateLeaf(PreviousJustifiedCheckpointLeafIndex, checkpointRoot) - } - - // Field(19): CurrentJustifiedCheckpoint - if b.isLeafDirty(CurrentJustifiedCheckpointLeafIndex) { - checkpointRoot, err := b.currentJustifiedCheckpoint.HashSSZ() - if err != nil { - return err - } - b.updateLeaf(CurrentJustifiedCheckpointLeafIndex, checkpointRoot) - } + root, _ := b.justificationBits.HashSSZ() + b.updateLeaf(JustificationBitsLeafIndex, root) - // Field(20): FinalizedCheckpoint - if b.isLeafDirty(FinalizedCheckpointLeafIndex) { - checkpointRoot, err := b.finalizedCheckpoint.HashSSZ() - if err != nil { - return err - } - b.updateLeaf(FinalizedCheckpointLeafIndex, checkpointRoot) - } + beaconStateHasher.add(PreviousJustifiedCheckpointLeafIndex, b.previousJustifiedCheckpoint) + beaconStateHasher.add(CurrentJustifiedCheckpointLeafIndex, b.currentJustifiedCheckpoint) + beaconStateHasher.add(FinalizedCheckpointLeafIndex, b.finalizedCheckpoint) if b.version == clparams.Phase0Version { + beaconStateHasher.run() return nil } - begin = time.Now() - // Field(21): Inactivity Scores - if b.isLeafDirty(InactivityScoresLeafIndex) { - root, err := b.inactivityScores.HashSSZ() - if err != nil { - return err - } - b.updateLeaf(InactivityScoresLeafIndex, root) - } - log.Trace("InactivityScores hashing", "elapsed", time.Since(begin)) - - // Field(22): CurrentSyncCommitte - if b.isLeafDirty(CurrentSyncCommitteeLeafIndex) { - committeeRoot, err := b.currentSyncCommittee.HashSSZ() - if err != nil { - return err - } - b.updateLeaf(CurrentSyncCommitteeLeafIndex, committeeRoot) - } - - // Field(23): NextSyncCommitte - if b.isLeafDirty(NextSyncCommitteeLeafIndex) { - committeeRoot, err := b.nextSyncCommittee.HashSSZ() - if err != nil { - return err - } - b.updateLeaf(NextSyncCommitteeLeafIndex, committeeRoot) - } - + // Altair fields + beaconStateHasher.add(InactivityScoresLeafIndex, b.inactivityScores) + beaconStateHasher.add(CurrentSyncCommitteeLeafIndex, b.currentSyncCommittee) + beaconStateHasher.add(NextSyncCommitteeLeafIndex, b.nextSyncCommittee) if b.version < clparams.BellatrixVersion { + beaconStateHasher.run() return nil } - // Field(24): LatestExecutionPayloadHeader - if b.isLeafDirty(LatestExecutionPayloadHeaderLeafIndex) { - headerRoot, err := b.latestExecutionPayloadHeader.HashSSZ() - if err != nil { - return err - } - b.updateLeaf(LatestExecutionPayloadHeaderLeafIndex, headerRoot) - } - + // Bellatrix fields + beaconStateHasher.add(LatestExecutionPayloadHeaderLeafIndex, b.latestExecutionPayloadHeader) if b.version < clparams.CapellaVersion { + beaconStateHasher.run() return nil } + // Capella fields + 
beaconStateHasher.add(NextWithdrawalIndexLeafIndex, b.nextWithdrawalIndex) + beaconStateHasher.add(NextWithdrawalValidatorIndexLeafIndex, b.nextWithdrawalValidatorIndex) + beaconStateHasher.add(HistoricalSummariesLeafIndex, b.historicalSummaries) - // Field(25): NextWithdrawalIndex - if b.isLeafDirty(NextWithdrawalIndexLeafIndex) { - b.updateLeaf(NextWithdrawalIndexLeafIndex, merkle_tree.Uint64Root(b.nextWithdrawalIndex)) - } - - // Field(26): NextWithdrawalValidatorIndex - if b.isLeafDirty(NextWithdrawalValidatorIndexLeafIndex) { - b.updateLeaf(NextWithdrawalValidatorIndexLeafIndex, merkle_tree.Uint64Root(b.nextWithdrawalValidatorIndex)) - } - - begin = time.Now() - // Field(27): HistoricalSummaries - if b.isLeafDirty(HistoricalSummariesLeafIndex) { - root, err := b.historicalSummaries.HashSSZ() - if err != nil { - return err - } - b.updateLeaf(HistoricalSummariesLeafIndex, root) - } - log.Trace("HistoricalSummaries hashing", "elapsed", time.Since(begin)) + beaconStateHasher.run() return nil } diff --git a/cl/phase1/execution_client/execution_client_direct.go b/cl/phase1/execution_client/execution_client_direct.go index 58eab549fd0..0881d14c89e 100644 --- a/cl/phase1/execution_client/execution_client_direct.go +++ b/cl/phase1/execution_client/execution_client_direct.go @@ -19,6 +19,7 @@ package execution_client import ( "context" "encoding/binary" + "errors" "fmt" "math/big" @@ -75,13 +76,13 @@ func (cc *ExecutionClientDirect) NewPayload(ctx context.Context, payload *cltype // check status switch status { case execution.ExecutionStatus_BadBlock, execution.ExecutionStatus_InvalidForkchoice: - return PayloadStatusInvalidated, fmt.Errorf("bad block") + return PayloadStatusInvalidated, errors.New("bad block") case execution.ExecutionStatus_Busy, execution.ExecutionStatus_MissingSegment, execution.ExecutionStatus_TooFarAway: return PayloadStatusNotValidated, nil case execution.ExecutionStatus_Success: return PayloadStatusValidated, nil } - return PayloadStatusNone, fmt.Errorf("unexpected status") + return PayloadStatusNone, errors.New("unexpected status") } func (cc *ExecutionClientDirect) ForkChoiceUpdate(ctx context.Context, finalized libcommon.Hash, head libcommon.Hash, attr *engine_types.PayloadAttributes) ([]byte, error) { @@ -90,10 +91,10 @@ func (cc *ExecutionClientDirect) ForkChoiceUpdate(ctx context.Context, finalized return nil, fmt.Errorf("execution Client RPC failed to retrieve ForkChoiceUpdate response, err: %w", err) } if status == execution.ExecutionStatus_InvalidForkchoice { - return nil, fmt.Errorf("forkchoice was invalid") + return nil, errors.New("forkchoice was invalid") } if status == execution.ExecutionStatus_BadBlock { - return nil, fmt.Errorf("bad block as forkchoice") + return nil, errors.New("bad block as forkchoice") } if attr == nil { return nil, nil diff --git a/cl/phase1/execution_client/execution_client_rpc.go b/cl/phase1/execution_client/execution_client_rpc.go index 154b3b9ce0a..4983e7704ff 100644 --- a/cl/phase1/execution_client/execution_client_rpc.go +++ b/cl/phase1/execution_client/execution_client_rpc.go @@ -18,6 +18,7 @@ package execution_client import ( "context" + "errors" "fmt" "math/big" "net/http" @@ -89,7 +90,7 @@ func (cc *ExecutionClientRpc) NewPayload(ctx context.Context, payload *cltypes.E case clparams.DenebVersion: engineMethod = rpc_helper.EngineNewPayloadV3 default: - return PayloadStatusNone, fmt.Errorf("invalid payload version") + return PayloadStatusNone, errors.New("invalid payload version") } request := engine_types.ExecutionPayload{ @@ 
-174,7 +175,7 @@ func (cc *ExecutionClientRpc) ForkChoiceUpdate(ctx context.Context, finalized li func checkPayloadStatus(payloadStatus *engine_types.PayloadStatus) error { if payloadStatus == nil { - return fmt.Errorf("empty payloadStatus") + return errors.New("empty payloadStatus") } validationError := payloadStatus.ValidationError diff --git a/cl/phase1/forkchoice/checkpoint_state.go b/cl/phase1/forkchoice/checkpoint_state.go index ecf9f147a61..f1c095d8bf0 100644 --- a/cl/phase1/forkchoice/checkpoint_state.go +++ b/cl/phase1/forkchoice/checkpoint_state.go @@ -17,6 +17,7 @@ package forkchoice import ( + "errors" "fmt" "github.com/erigontech/erigon/cl/cltypes/solid" @@ -135,7 +136,7 @@ func (c *checkpointState) getAttestingIndicies(attestation *solid.AttestationDat bitIndex := i % 8 sliceIndex := i / 8 if sliceIndex >= len(aggregationBits) { - return nil, fmt.Errorf("GetAttestingIndicies: committee is too big") + return nil, errors.New("GetAttestingIndicies: committee is too big") } if (aggregationBits[sliceIndex] & (1 << bitIndex)) > 0 { attestingIndices = append(attestingIndices, member) @@ -177,7 +178,7 @@ func (c *checkpointState) getDomain(domainType [4]byte, epoch uint64) ([]byte, e func (c *checkpointState) isValidIndexedAttestation(att *cltypes.IndexedAttestation) (bool, error) { inds := att.AttestingIndices if inds.Length() == 0 || !solid.IsUint64SortedSet(inds) { - return false, fmt.Errorf("isValidIndexedAttestation: attesting indices are not sorted or are null") + return false, errors.New("isValidIndexedAttestation: attesting indices are not sorted or are null") } pks := [][]byte{} @@ -206,7 +207,7 @@ func (c *checkpointState) isValidIndexedAttestation(att *cltypes.IndexedAttestat return false, fmt.Errorf("error while validating signature: %v", err) } if !valid { - return false, fmt.Errorf("invalid aggregate signature") + return false, errors.New("invalid aggregate signature") } return true, nil } diff --git a/cl/phase1/forkchoice/fork_choice_test.go b/cl/phase1/forkchoice/fork_choice_test.go index 0782d360222..e502063680a 100644 --- a/cl/phase1/forkchoice/fork_choice_test.go +++ b/cl/phase1/forkchoice/fork_choice_test.go @@ -77,7 +77,7 @@ func TestForkChoiceBasic(t *testing.T) { anchorState := state.New(&clparams.MainnetBeaconConfig) require.NoError(t, utils.DecodeSSZSnappy(anchorState, anchorStateEncoded, int(clparams.AltairVersion))) pool := pool.NewOperationsPool(&clparams.MainnetBeaconConfig) - emitters := beaconevents.NewEmitters() + emitters := beaconevents.NewEventEmitter() store, err := forkchoice.NewForkChoiceStore(nil, anchorState, nil, pool, fork_graph.NewForkGraphDisk(anchorState, afero.NewMemMapFs(), beacon_router_configuration.RouterConfiguration{}), emitters, sd, nil) require.NoError(t, err) // first steps @@ -142,7 +142,7 @@ func TestForkChoiceChainBellatrix(t *testing.T) { } // Initialize forkchoice store pool := pool.NewOperationsPool(&clparams.MainnetBeaconConfig) - emitters := beaconevents.NewEmitters() + emitters := beaconevents.NewEventEmitter() sd := synced_data.NewSyncedDataManager(true, &clparams.MainnetBeaconConfig) store, err := forkchoice.NewForkChoiceStore(nil, anchorState, nil, pool, fork_graph.NewForkGraphDisk(anchorState, afero.NewMemMapFs(), beacon_router_configuration.RouterConfiguration{ Beacon: true, diff --git a/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go b/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go index 00ce755a447..2899deab2a6 100644 --- a/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go +++ 
b/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go @@ -417,28 +417,24 @@ func (f *forkGraphDisk) MarkHeaderAsInvalid(blockRoot libcommon.Hash) { } func (f *forkGraphDisk) hasBeaconState(blockRoot libcommon.Hash) bool { - _, err := f.fs.Stat(getBeaconStateFilename(blockRoot)) - return err == nil + exists, err := afero.Exists(f.fs, getBeaconStateFilename(blockRoot)) + return err == nil && exists } func (f *forkGraphDisk) Prune(pruneSlot uint64) (err error) { - pruneSlot -= f.beaconCfg.SlotsPerEpoch * 2 oldRoots := make([]libcommon.Hash, 0, f.beaconCfg.SlotsPerEpoch) highestStoredBeaconStateSlot := uint64(0) f.blocks.Range(func(key, value interface{}) bool { hash := key.(libcommon.Hash) signedBlock := value.(*cltypes.SignedBeaconBlock) - if signedBlock.Block.Slot < highestStoredBeaconStateSlot { - return true - } - if f.hasBeaconState(hash) { + if f.hasBeaconState(hash) && highestStoredBeaconStateSlot < signedBlock.Block.Slot { highestStoredBeaconStateSlot = signedBlock.Block.Slot } if signedBlock.Block.Slot >= pruneSlot { return true } - oldRoots = append(oldRoots, hash) + oldRoots = append(oldRoots, hash) return true }) if pruneSlot >= highestStoredBeaconStateSlot { diff --git a/cl/phase1/forkchoice/fork_graph/fork_graph_disk_fs.go b/cl/phase1/forkchoice/fork_graph/fork_graph_disk_fs.go index f700f1d5cdd..902426d7801 100644 --- a/cl/phase1/forkchoice/fork_graph/fork_graph_disk_fs.go +++ b/cl/phase1/forkchoice/fork_graph/fork_graph_disk_fs.go @@ -84,7 +84,9 @@ func (f *forkGraphDisk) readBeaconStateFromDisk(blockRoot libcommon.Hash) (bs *s return nil, fmt.Errorf("failed to decode snappy buffer: %w, root: %x, len: %d, decLen: %d", err, blockRoot, n, decLen) } bs = state.New(f.beaconCfg) - err = bs.DecodeSSZ(sszBuffer, int(v[0])) + if err = bs.DecodeSSZ(sszBuffer, int(v[0])); err != nil { + return nil, fmt.Errorf("failed to decode beacon state: %w, root: %x, len: %d, decLen: %d, bs: %+v", err, blockRoot, n, decLen, bs) + } // decode the cache file cacheFile, err := f.fs.Open(getBeaconStateCacheFilename(blockRoot)) if err != nil { diff --git a/cl/phase1/forkchoice/forkchoice.go b/cl/phase1/forkchoice/forkchoice.go index 1dfd35949ca..d8460c1767e 100644 --- a/cl/phase1/forkchoice/forkchoice.go +++ b/cl/phase1/forkchoice/forkchoice.go @@ -131,7 +131,7 @@ type ForkChoiceStore struct { operationsPool pool.OperationsPool beaconCfg *clparams.BeaconChainConfig - emitters *beaconevents.Emitters + emitters *beaconevents.EventEmitter synced atomic.Bool ethClock eth_clock.EthereumClock @@ -155,7 +155,7 @@ func NewForkChoiceStore( engine execution_client.ExecutionEngine, operationsPool pool.OperationsPool, forkGraph fork_graph.ForkGraph, - emitters *beaconevents.Emitters, + emitters *beaconevents.EventEmitter, syncedDataManager *synced_data.SyncedDataManager, blobStorage blob_storage.BlobStorage, ) (*ForkChoiceStore, error) { diff --git a/cl/phase1/forkchoice/get_head.go b/cl/phase1/forkchoice/get_head.go index 793aacd239b..e002098a238 100644 --- a/cl/phase1/forkchoice/get_head.go +++ b/cl/phase1/forkchoice/get_head.go @@ -18,7 +18,7 @@ package forkchoice import ( "bytes" - "fmt" + "errors" "sort" libcommon "github.com/erigontech/erigon-lib/common" @@ -104,7 +104,7 @@ func (f *ForkChoiceStore) GetHead() (libcommon.Hash, uint64, error) { if len(children) == 0 { header, hasHeader := f.forkGraph.GetHeader(f.headHash) if !hasHeader { - return libcommon.Hash{}, 0, fmt.Errorf("no slot for head is stored") + return libcommon.Hash{}, 0, errors.New("no slot for head is stored") } f.headSlot = header.Slot return 
f.headHash, f.headSlot, nil diff --git a/cl/phase1/forkchoice/on_attestation.go b/cl/phase1/forkchoice/on_attestation.go index 036c5445bb3..d959200b7d3 100644 --- a/cl/phase1/forkchoice/on_attestation.go +++ b/cl/phase1/forkchoice/on_attestation.go @@ -17,7 +17,7 @@ package forkchoice import ( - "fmt" + "errors" "github.com/erigontech/erigon/cl/cltypes/solid" "github.com/erigontech/erigon/cl/phase1/core/state" @@ -26,7 +26,7 @@ import ( ) var ( - ErrIgnore = fmt.Errorf("ignore") + ErrIgnore = errors.New("ignore") ) // OnAttestation processes incoming attestations. @@ -101,7 +101,7 @@ func (f *ForkChoiceStore) verifyAttestationWithCheckpointState( } // Verify attestation signature. if targetState == nil { - return nil, fmt.Errorf("target state does not exist") + return nil, errors.New("target state does not exist") } // Now we need to find the attesting indicies. attestationIndicies, err = targetState.getAttestingIndicies( @@ -122,7 +122,7 @@ func (f *ForkChoiceStore) verifyAttestationWithCheckpointState( return nil, err } if !valid { - return nil, fmt.Errorf("invalid attestation") + return nil, errors.New("invalid attestation") } } return attestationIndicies, nil @@ -152,7 +152,7 @@ func (f *ForkChoiceStore) verifyAttestationWithState( return nil, err } if !valid { - return nil, fmt.Errorf("invalid attestation") + return nil, errors.New("invalid attestation") } } return attestationIndicies, nil @@ -227,23 +227,23 @@ func (f *ForkChoiceStore) ValidateOnAttestation(attestation *solid.Attestation) target := attestation.AttestantionData().Target() if target.Epoch() != f.computeEpochAtSlot(attestation.AttestantionData().Slot()) { - return fmt.Errorf("mismatching target epoch with slot data") + return errors.New("mismatching target epoch with slot data") } if _, has := f.forkGraph.GetHeader(target.BlockRoot()); !has { - return fmt.Errorf("target root is missing") + return errors.New("target root is missing") } if blockHeader, has := f.forkGraph.GetHeader(attestation.AttestantionData().BeaconBlockRoot()); !has || blockHeader.Slot > attestation.AttestantionData().Slot() { - return fmt.Errorf("bad attestation data") + return errors.New("bad attestation data") } // LMD vote must be consistent with FFG vote target targetSlot := f.computeStartSlotAtEpoch(target.Epoch()) ancestorRoot := f.Ancestor(attestation.AttestantionData().BeaconBlockRoot(), targetSlot) if ancestorRoot == (libcommon.Hash{}) { - return fmt.Errorf("could not retrieve ancestor") + return errors.New("could not retrieve ancestor") } if ancestorRoot != target.BlockRoot() { - return fmt.Errorf("ancestor root mismatches with target") + return errors.New("ancestor root mismatches with target") } return nil @@ -263,5 +263,5 @@ func (f *ForkChoiceStore) validateTargetEpochAgainstCurrentTime( if target.Epoch() == currentEpoch || target.Epoch() == previousEpoch { return nil } - return fmt.Errorf("verification of attestation against current time failed") + return errors.New("verification of attestation against current time failed") } diff --git a/cl/phase1/forkchoice/on_attester_slashing.go b/cl/phase1/forkchoice/on_attester_slashing.go index 5eb499aeee5..0959a36dbb1 100644 --- a/cl/phase1/forkchoice/on_attester_slashing.go +++ b/cl/phase1/forkchoice/on_attester_slashing.go @@ -17,6 +17,7 @@ package forkchoice import ( + "errors" "fmt" "github.com/Giulio2002/bls" @@ -38,7 +39,7 @@ func (f *ForkChoiceStore) OnAttesterSlashing(attesterSlashing *cltypes.AttesterS attestation1 := attesterSlashing.Attestation_1 attestation2 := 
attesterSlashing.Attestation_2 if !cltypes.IsSlashableAttestationData(attestation1.Data, attestation2.Data) { - return fmt.Errorf("attestation data is not slashable") + return errors.New("attestation data is not slashable") } var err error s := f.syncedDataManager.HeadState() @@ -50,7 +51,7 @@ func (f *ForkChoiceStore) OnAttesterSlashing(attesterSlashing *cltypes.AttesterS } } if s == nil { - return fmt.Errorf("no state accessible") + return errors.New("no state accessible") } attestation1PublicKeys, err := getIndexedAttestationPublicKeys(s, attestation1) if err != nil { @@ -81,7 +82,7 @@ func (f *ForkChoiceStore) OnAttesterSlashing(attesterSlashing *cltypes.AttesterS return fmt.Errorf("error while validating signature: %v", err) } if !valid { - return fmt.Errorf("invalid aggregate signature") + return errors.New("invalid aggregate signature") } // Verify validity of slashings (2) signingRoot, err = fork.ComputeSigningRoot(attestation2.Data, domain2) @@ -94,7 +95,7 @@ func (f *ForkChoiceStore) OnAttesterSlashing(attesterSlashing *cltypes.AttesterS return fmt.Errorf("error while validating signature: %v", err) } if !valid { - return fmt.Errorf("invalid aggregate signature") + return errors.New("invalid aggregate signature") } } @@ -113,6 +114,7 @@ func (f *ForkChoiceStore) OnAttesterSlashing(attesterSlashing *cltypes.AttesterS } if anySlashed { f.operationsPool.AttesterSlashingsPool.Insert(pool.ComputeKeyForAttesterSlashing(attesterSlashing), attesterSlashing) + f.emitters.Operation().SendAttesterSlashing(attesterSlashing) } return nil } @@ -120,7 +122,7 @@ func (f *ForkChoiceStore) OnAttesterSlashing(attesterSlashing *cltypes.AttesterS func getIndexedAttestationPublicKeys(b *state.CachingBeaconState, att *cltypes.IndexedAttestation) ([][]byte, error) { inds := att.AttestingIndices if inds.Length() == 0 || !solid.IsUint64SortedSet(inds) { - return nil, fmt.Errorf("isValidIndexedAttestation: attesting indices are not sorted or are null") + return nil, errors.New("isValidIndexedAttestation: attesting indices are not sorted or are null") } pks := make([][]byte, 0, inds.Length()) if err := solid.RangeErr[uint64](inds, func(_ int, v uint64, _ int) error { diff --git a/cl/phase1/forkchoice/on_block.go b/cl/phase1/forkchoice/on_block.go index b845e8c5522..881f1d5f000 100644 --- a/cl/phase1/forkchoice/on_block.go +++ b/cl/phase1/forkchoice/on_block.go @@ -18,6 +18,7 @@ package forkchoice import ( "context" + "errors" "fmt" "sort" "time" @@ -41,7 +42,7 @@ import ( const foreseenProposers = 16 -var ErrEIP4844DataNotAvailable = fmt.Errorf("EIP-4844 blob data is not available") +var ErrEIP4844DataNotAvailable = errors.New("EIP-4844 blob data is not available") func verifyKzgCommitmentsAgainstTransactions(cfg *clparams.BeaconChainConfig, block *cltypes.Eth1Block, kzgCommitments *solid.ListSSZ[*cltypes.KZGCommitment]) error { expectedBlobHashes := []common.Hash{} @@ -75,7 +76,7 @@ func (f *ForkChoiceStore) OnBlock(ctx context.Context, block *cltypes.SignedBeac return err } if f.Slot() < block.Block.Slot { - return fmt.Errorf("block is too early compared to current_slot") + return errors.New("block is too early compared to current_slot") } // Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor) finalizedSlot := f.computeStartSlotAtEpoch(f.finalizedCheckpoint.Load().(solid.Checkpoint).Epoch()) @@ -129,7 +130,7 @@ func (f *ForkChoiceStore) OnBlock(ctx context.Context, block *cltypes.SignedBeac if err := f.optimisticStore.InvalidateBlock(block.Block); err != 
nil { return fmt.Errorf("failed to remove block from optimistic store: %v", err) } - return fmt.Errorf("block is invalid") + return errors.New("block is invalid") case execution_client.PayloadStatusValidated: log.Trace("OnBlock: block is validated", "block", libcommon.Hash(blockRoot)) // remove from optimistic candidate diff --git a/cl/phase1/forkchoice/utils.go b/cl/phase1/forkchoice/utils.go index 468d30b3df2..1836542ec38 100644 --- a/cl/phase1/forkchoice/utils.go +++ b/cl/phase1/forkchoice/utils.go @@ -17,8 +17,9 @@ package forkchoice import ( - "fmt" + "errors" + "github.com/erigontech/erigon/cl/beacon/beaconevents" "github.com/erigontech/erigon/cl/transition" libcommon "github.com/erigontech/erigon-lib/common" @@ -38,9 +39,22 @@ func (f *ForkChoiceStore) updateCheckpoints(justifiedCheckpoint, finalizedCheckp f.justifiedCheckpoint.Store(justifiedCheckpoint) } if finalizedCheckpoint.Epoch() > f.finalizedCheckpoint.Load().(solid.Checkpoint).Epoch() { - f.emitters.Publish("finalized_checkpoint", finalizedCheckpoint) f.onNewFinalized(finalizedCheckpoint) f.finalizedCheckpoint.Store(finalizedCheckpoint) + + // prepare and send the finalized checkpoint event + blockRoot := finalizedCheckpoint.BlockRoot() + blockHeader, ok := f.forkGraph.GetHeader(blockRoot) + if !ok { + log.Warn("Finalized block header not found", "blockRoot", blockRoot) + return + } + f.emitters.State().SendFinalizedCheckpoint(&beaconevents.FinalizedCheckpointData{ + Block: finalizedCheckpoint.BlockRoot(), + Epoch: finalizedCheckpoint.Epoch(), + State: blockHeader.Root, + ExecutionOptimistic: false, + }) } } @@ -62,8 +76,8 @@ func (f *ForkChoiceStore) onNewFinalized(newFinalized solid.Checkpoint) { } return true }) - - f.forkGraph.Prune(newFinalized.Epoch() * f.beaconCfg.SlotsPerEpoch) + slotToPrune := ((newFinalized.Epoch() - 1) * f.beaconCfg.SlotsPerEpoch) - 1 + f.forkGraph.Prune(slotToPrune) } // updateCheckpoints updates the justified and finalized checkpoints if new checkpoints have higher epochs. @@ -124,7 +138,7 @@ func (f *ForkChoiceStore) getCheckpointState(checkpoint solid.Checkpoint) (*chec return nil, err } if baseState == nil { - return nil, fmt.Errorf("getCheckpointState: baseState not found in graph") + return nil, errors.New("getCheckpointState: baseState not found in graph") } // By default use the no change encoding to signal that there is no future epoch here. if baseState.Slot() < f.computeStartSlotAtEpoch(checkpoint.Epoch()) { diff --git a/cl/phase1/network/backward_beacon_downloader.go b/cl/phase1/network/backward_beacon_downloader.go index 81c18369d50..c990fc5a698 100644 --- a/cl/phase1/network/backward_beacon_downloader.go +++ b/cl/phase1/network/backward_beacon_downloader.go @@ -187,6 +187,10 @@ Loop: } // set expected root to the segment parent root b.expectedRoot = segment.Block.ParentRoot + if segment.Block.Slot == 0 { + b.finished.Store(true) + return nil + } b.slotToDownload.Store(segment.Block.Slot - 1) // update slot (might be inexact but whatever) } if !b.neverSkip { diff --git a/cl/phase1/network/beacon_downloader.go b/cl/phase1/network/beacon_downloader.go index 927b645305f..ce7b951cc89 100644 --- a/cl/phase1/network/beacon_downloader.go +++ b/cl/phase1/network/beacon_downloader.go @@ -21,7 +21,6 @@ import ( "sync/atomic" "time" - libcommon "github.com/erigontech/erigon-lib/common" "golang.org/x/net/context" "github.com/erigontech/erigon/cl/cltypes" @@ -32,18 +31,15 @@ import ( // Output: the new last new highest slot processed and an error possibly? 
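+// (That is, the callback returns the new highest slot it managed to process, or an error.)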
type ProcessFn func( highestSlotProcessed uint64, - highestBlockRootProcessed libcommon.Hash, blocks []*cltypes.SignedBeaconBlock) ( newHighestSlotProcessed uint64, - newHighestBlockRootProcessed libcommon.Hash, err error) type ForwardBeaconDownloader struct { - ctx context.Context - highestSlotProcessed uint64 - highestBlockRootProcessed libcommon.Hash - rpc *rpc.BeaconRpcP2P - process ProcessFn + ctx context.Context + highestSlotProcessed uint64 + rpc *rpc.BeaconRpcP2P + process ProcessFn mu sync.Mutex } @@ -69,20 +65,6 @@ func (f *ForwardBeaconDownloader) SetHighestProcessedSlot(highestSlotProcessed u f.highestSlotProcessed = highestSlotProcessed } -// SetHighestProcessedRoot sets the highest processed block root so far. -func (f *ForwardBeaconDownloader) SetHighestProcessedRoot(root libcommon.Hash) { - f.mu.Lock() - defer f.mu.Unlock() - f.highestBlockRootProcessed = root -} - -// HighestProcessedRoot returns the highest processed block root so far. -func (f *ForwardBeaconDownloader) HighestProcessedRoot() libcommon.Hash { - f.mu.Lock() - defer f.mu.Unlock() - return f.highestBlockRootProcessed -} - type peerAndBlocks struct { peerId string blocks []*cltypes.SignedBeaconBlock @@ -133,17 +115,15 @@ Loop: f.mu.Lock() defer f.mu.Unlock() - var highestBlockRootProcessed libcommon.Hash var highestSlotProcessed uint64 var err error blocks := atomicResp.Load().(peerAndBlocks).blocks pid := atomicResp.Load().(peerAndBlocks).peerId - if highestSlotProcessed, highestBlockRootProcessed, err = f.process(f.highestSlotProcessed, f.highestBlockRootProcessed, blocks); err != nil { + if highestSlotProcessed, err = f.process(f.highestSlotProcessed, blocks); err != nil { f.rpc.BanPeer(pid) return } f.highestSlotProcessed = highestSlotProcessed - f.highestBlockRootProcessed = highestBlockRootProcessed } // GetHighestProcessedSlot retrieve the highest processed slot we accumulated. diff --git a/cl/phase1/network/gossip_manager.go b/cl/phase1/network/gossip_manager.go index 920e6ac93e6..ec69d41c2e3 100644 --- a/cl/phase1/network/gossip_manager.go +++ b/cl/phase1/network/gossip_manager.go @@ -50,7 +50,7 @@ type GossipManager struct { beaconConfig *clparams.BeaconChainConfig ethClock eth_clock.EthereumClock - emitters *beaconevents.Emitters + emitters *beaconevents.EventEmitter committeeSub *committee_subscription.CommitteeSubscribeMgmt // Services for processing messages from the network @@ -70,7 +70,7 @@ func NewGossipReceiver( forkChoice *forkchoice.ForkChoiceStore, beaconConfig *clparams.BeaconChainConfig, ethClock eth_clock.EthereumClock, - emitters *beaconevents.Emitters, + emitters *beaconevents.EventEmitter, comitteeSub *committee_subscription.CommitteeSubscribeMgmt, blockService services.BlockService, blobService services.BlobSidecarsService, diff --git a/cl/phase1/network/services/aggregate_and_proof_service.go b/cl/phase1/network/services/aggregate_and_proof_service.go index adaa543fbe4..74dde2aeee8 100644 --- a/cl/phase1/network/services/aggregate_and_proof_service.go +++ b/cl/phase1/network/services/aggregate_and_proof_service.go @@ -18,7 +18,7 @@ package services import ( "context" - "fmt" + "errors" "slices" "sync" "time" @@ -115,11 +115,11 @@ func (a *aggregateAndProofServiceImpl) ProcessMessage( // [REJECT] The committee index is within the expected range -- i.e. index < get_committee_count_per_slot(state, aggregate.data.target.epoch). 
committeeCountPerSlot := headState.CommitteeCount(target.Epoch()) if aggregateData.CommitteeIndex() >= committeeCountPerSlot { - return fmt.Errorf("invalid committee index in aggregate and proof") + return errors.New("invalid committee index in aggregate and proof") } // [REJECT] The aggregate attestation's epoch matches its target -- i.e. aggregate.data.target.epoch == compute_epoch_at_slot(aggregate.data.slot) if aggregateData.Target().Epoch() != epoch { - return fmt.Errorf("invalid target epoch in aggregate and proof") + return errors.New("invalid target epoch in aggregate and proof") } committee, err := headState.GetBeaconCommitee(slot, committeeIndex) if err != nil { @@ -128,14 +128,14 @@ func (a *aggregateAndProofServiceImpl) ProcessMessage( // [REJECT] The aggregator's validator index is within the committee -- i.e. aggregate_and_proof.aggregator_index in get_beacon_committee(state, aggregate.data.slot, index). if !slices.Contains(committee, aggregateAndProof.Message.AggregatorIndex) { - return fmt.Errorf("committee index not in committee") + return errors.New("committee index not in committee") } // [REJECT] The aggregate attestation's target block is an ancestor of the block named in the LMD vote -- i.e. get_checkpoint_block(store, aggregate.data.beacon_block_root, aggregate.data.target.epoch) == aggregate.data.target.root if a.forkchoiceStore.Ancestor( aggregateData.BeaconBlockRoot(), epoch*a.beaconCfg.SlotsPerEpoch, ) != target.BlockRoot() { - return fmt.Errorf("invalid target block") + return errors.New("invalid target block") } if a.test { return nil @@ -144,7 +144,7 @@ func (a *aggregateAndProofServiceImpl) ProcessMessage( // [REJECT] aggregate_and_proof.selection_proof selects the validator as an aggregator for the slot -- i.e. is_aggregator(state, aggregate.data.slot, index, aggregate_and_proof.selection_proof) returns True. if !state.IsAggregator(a.beaconCfg, uint64(len(committee)), committeeIndex, selectionProof) { log.Warn("receveived aggregate and proof from invalid aggregator") - return fmt.Errorf("invalid aggregate and proof") + return errors.New("invalid aggregate and proof") } attestingIndicies, err := headState.GetAttestingIndicies( aggregateAndProof.Message.Aggregate.AttestantionData(), @@ -183,7 +183,7 @@ func verifySignaturesOnAggregate( return err } if len(attestingIndicies) == 0 { - return fmt.Errorf("no attesting indicies") + return errors.New("no attesting indicies") } // [REJECT] The aggregate_and_proof.selection_proof is a valid signature of the aggregate.data.slot by the validator with index aggregate_and_proof.aggregator_index. 
if err := verifyAggregateAndProofSignature(s, aggregateAndProof.Message); err != nil { @@ -220,7 +220,7 @@ func verifyAggregateAndProofSignature( return err } if !valid { - return fmt.Errorf("invalid bls signature on aggregate and proof") + return errors.New("invalid bls signature on aggregate and proof") } return nil } @@ -246,7 +246,7 @@ func verifyAggregatorSignature( return err } if !valid { - return fmt.Errorf("invalid bls signature on aggregate and proof") + return errors.New("invalid bls signature on aggregate and proof") } return nil } @@ -266,7 +266,7 @@ func verifyAggregateMessageSignature( return err } if !valid { - return fmt.Errorf("invalid aggregate signature") + return errors.New("invalid aggregate signature") } return nil } diff --git a/cl/phase1/network/services/attestation_service.go b/cl/phase1/network/services/attestation_service.go index ad673507c51..5cdc5d56abb 100644 --- a/cl/phase1/network/services/attestation_service.go +++ b/cl/phase1/network/services/attestation_service.go @@ -26,6 +26,7 @@ import ( "github.com/Giulio2002/bls" "github.com/erigontech/erigon-lib/log/v3" "github.com/erigontech/erigon/cl/aggregation" + "github.com/erigontech/erigon/cl/beacon/beaconevents" "github.com/erigontech/erigon/cl/beacon/synced_data" "github.com/erigontech/erigon/cl/clparams" "github.com/erigontech/erigon/cl/cltypes/solid" @@ -53,6 +54,7 @@ type attestationService struct { syncedDataManager synced_data.SyncedData beaconCfg *clparams.BeaconChainConfig netCfg *clparams.NetworkConfig + emitters *beaconevents.EventEmitter // validatorAttestationSeen maps from epoch to validator index. This is used to ignore duplicate validator attestations in the same epoch. validatorAttestationSeen *lru.CacheWithTTL[uint64, uint64] // validator index -> epoch attestationsToBeLaterProcessed sync.Map @@ -66,6 +68,7 @@ func NewAttestationService( syncedDataManager synced_data.SyncedData, beaconCfg *clparams.BeaconChainConfig, netCfg *clparams.NetworkConfig, + emitters *beaconevents.EventEmitter, ) AttestationService { epochDuration := time.Duration(beaconCfg.SlotsPerEpoch*beaconCfg.SecondsPerSlot) * time.Second a := &attestationService{ @@ -75,6 +78,7 @@ func NewAttestationService( syncedDataManager: syncedDataManager, beaconCfg: beaconCfg, netCfg: netCfg, + emitters: emitters, validatorAttestationSeen: lru.NewWithTTL[uint64, uint64]("validator_attestation_seen", validatorAttestationCacheSize, epochDuration), } go a.loop(ctx) @@ -101,7 +105,7 @@ func (s *attestationService) ProcessMessage(ctx context.Context, subnet *uint64, // [REJECT] The attestation is for the correct subnet -- i.e. compute_subnet_for_attestation(committees_per_slot, attestation.data.slot, index) == subnet_id subnetId := computeSubnetForAttestation(committeeCount, slot, committeeIndex, s.beaconCfg.SlotsPerEpoch, s.netCfg.AttestationSubnetCount) if subnet == nil || subnetId != *subnet { - return fmt.Errorf("wrong subnet") + return errors.New("wrong subnet") } // [IGNORE] attestation.data.slot is within the last ATTESTATION_PROPAGATION_SLOT_RANGE slots (within a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- // i.e. attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= attestation.data.slot (a client MAY queue future attestations for processing at the appropriate slot). @@ -111,7 +115,7 @@ func (s *attestationService) ProcessMessage(ctx context.Context, subnet *uint64, } // [REJECT] The attestation's epoch matches its target -- i.e. 
attestation.data.target.epoch == compute_epoch_at_slot(attestation.data.slot) if targetEpoch != slot/s.beaconCfg.SlotsPerEpoch { - return fmt.Errorf("epoch mismatch") + return errors.New("epoch mismatch") } // [REJECT] The number of aggregation bits matches the committee size -- i.e. len(aggregation_bits) == len(get_beacon_committee(state, attestation.data.slot, index)). beaconCommittee, err := s.forkchoiceStore.GetBeaconCommitee(slot, committeeIndex) @@ -144,14 +148,14 @@ func (s *attestationService) ProcessMessage(ctx context.Context, subnet *uint64, return ErrIgnore // Ignore if it is just an empty bitlist } if setBits != 1 { - return fmt.Errorf("attestation does not have exactly one participating validator") + return errors.New("attestation does not have exactly one participating validator") } // [IGNORE] There has been no other valid attestation seen on an attestation subnet that has an identical attestation.data.target.epoch and participating validator index. if err != nil { return err } if onBitIndex >= len(beaconCommittee) { - return fmt.Errorf("on bit index out of committee range") + return errors.New("on bit index out of committee range") } // mark the validator as seen vIndex := beaconCommittee[onBitIndex] @@ -179,7 +183,7 @@ func (s *attestationService) ProcessMessage(ctx context.Context, subnet *uint64, return err } else if !valid { log.Warn("lodestar: invalid signature", "signature", common.Bytes2Hex(signature[:]), "signningRoot", common.Bytes2Hex(signingRoot[:]), "pubKey", common.Bytes2Hex(pubKey[:])) - return fmt.Errorf("invalid signature") + return errors.New("invalid signature") } // [IGNORE] The block being voted for (attestation.data.beacon_block_root) has been seen (via both gossip and non-gossip sources) @@ -193,7 +197,7 @@ func (s *attestationService) ProcessMessage(ctx context.Context, subnet *uint64, // get_checkpoint_block(store, attestation.data.beacon_block_root, attestation.data.target.epoch) == attestation.data.target.root startSlotAtEpoch := targetEpoch * s.beaconCfg.SlotsPerEpoch if s.forkchoiceStore.Ancestor(root, startSlotAtEpoch) != att.AttestantionData().Target().BlockRoot() { - return fmt.Errorf("invalid target block") + return errors.New("invalid target block") } // [IGNORE] The current finalized_checkpoint is an ancestor of the block defined by attestation.data.beacon_block_root -- // i.e. 
get_checkpoint_block(store, attestation.data.beacon_block_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root @@ -206,7 +210,11 @@ func (s *attestationService) ProcessMessage(ctx context.Context, subnet *uint64, if errors.Is(err, aggregation.ErrIsSuperset) { return ErrIgnore } - return err + if err != nil { + return err + } + s.emitters.Operation().SendAttestation(att) + return nil } type attestationJob struct { diff --git a/cl/phase1/network/services/attestation_service_test.go b/cl/phase1/network/services/attestation_service_test.go index ec66058b109..fc169a15c1e 100644 --- a/cl/phase1/network/services/attestation_service_test.go +++ b/cl/phase1/network/services/attestation_service_test.go @@ -28,6 +28,7 @@ import ( "github.com/erigontech/erigon-lib/types/ssz" "github.com/erigontech/erigon/cl/abstract" mockState "github.com/erigontech/erigon/cl/abstract/mock_services" + "github.com/erigontech/erigon/cl/beacon/beaconevents" mockSync "github.com/erigontech/erigon/cl/beacon/synced_data/mock_services" "github.com/erigontech/erigon/cl/clparams" "github.com/erigontech/erigon/cl/cltypes" @@ -73,11 +74,12 @@ func (t *attestationTestSuite) SetupTest() { t.ethClock = eth_clock.NewMockEthereumClock(t.gomockCtrl) t.beaconConfig = &clparams.BeaconChainConfig{SlotsPerEpoch: mockSlotsPerEpoch} netConfig := &clparams.NetworkConfig{} + emitters := beaconevents.NewEventEmitter() computeSigningRoot = func(obj ssz.HashableSSZ, domain []byte) ([32]byte, error) { return [32]byte{}, nil } blsVerify = func(sig []byte, msg []byte, pubKeys []byte) (bool, error) { return true, nil } ctx, cn := context.WithCancel(context.Background()) cn() - t.attService = NewAttestationService(ctx, t.mockForkChoice, t.committeeSubscibe, t.ethClock, t.syncedData, t.beaconConfig, netConfig) + t.attService = NewAttestationService(ctx, t.mockForkChoice, t.committeeSubscibe, t.ethClock, t.syncedData, t.beaconConfig, netConfig, emitters) } func (t *attestationTestSuite) TearDownTest() { diff --git a/cl/phase1/network/services/blob_sidecar_service.go b/cl/phase1/network/services/blob_sidecar_service.go index 56f14a434a8..c0647badb2b 100644 --- a/cl/phase1/network/services/blob_sidecar_service.go +++ b/cl/phase1/network/services/blob_sidecar_service.go @@ -18,6 +18,7 @@ package services import ( "context" + "errors" "fmt" "sync" "time" @@ -27,6 +28,7 @@ import ( "github.com/erigontech/erigon-lib/crypto/kzg" "github.com/erigontech/erigon-lib/log/v3" + "github.com/erigontech/erigon/cl/beacon/beaconevents" "github.com/erigontech/erigon/cl/beacon/synced_data" "github.com/erigontech/erigon/cl/clparams" "github.com/erigontech/erigon/cl/cltypes" @@ -41,9 +43,10 @@ type blobSidecarService struct { forkchoiceStore forkchoice.ForkChoiceStorage beaconCfg *clparams.BeaconChainConfig syncedDataManager *synced_data.SyncedDataManager + ethClock eth_clock.EthereumClock + emitters *beaconevents.EventEmitter blobSidecarsScheduledForLaterExecution sync.Map - ethClock eth_clock.EthereumClock test bool } @@ -59,6 +62,7 @@ func NewBlobSidecarService( forkchoiceStore forkchoice.ForkChoiceStorage, syncedDataManager *synced_data.SyncedDataManager, ethClock eth_clock.EthereumClock, + emitters *beaconevents.EventEmitter, test bool, ) BlobSidecarsService { b := &blobSidecarService{ @@ -67,6 +71,7 @@ func NewBlobSidecarService( syncedDataManager: syncedDataManager, test: test, ethClock: ethClock, + emitters: emitters, } go b.loop(ctx) return b @@ -86,7 +91,7 @@ func (b *blobSidecarService) ProcessMessage(ctx context.Context, subnetId *uint6 // 
[REJECT] The sidecar's index is consistent with MAX_BLOBS_PER_BLOCK -- i.e. blob_sidecar.index < MAX_BLOBS_PER_BLOCK. if msg.Index >= b.beaconCfg.MaxBlobsPerBlock { - return fmt.Errorf("blob index out of range") + return errors.New("blob index out of range") } sidecarSubnetIndex := msg.Index % b.beaconCfg.MaxBlobsPerBlock if sidecarSubnetIndex != *subnetId { @@ -122,7 +127,11 @@ func (b *blobSidecarService) ProcessMessage(ctx context.Context, subnetId *uint6 return ErrInvalidSidecarSlot } - return b.verifyAndStoreBlobSidecar(headState, msg) + if err := b.verifyAndStoreBlobSidecar(headState, msg); err != nil { + return err + } + b.emitters.Operation().SendBlobSidecar(msg) + return nil } func (b *blobSidecarService) verifyAndStoreBlobSidecar(headState *state.CachingBeaconState, msg *cltypes.BlobSidecar) error { @@ -148,7 +157,7 @@ func (b *blobSidecarService) verifyAndStoreBlobSidecar(headState *state.CachingB func (b *blobSidecarService) verifySidecarsSignature(headState *state.CachingBeaconState, header *cltypes.SignedBeaconBlockHeader) error { parentHeader, ok := b.forkchoiceStore.GetHeader(header.Header.ParentRoot) if !ok { - return fmt.Errorf("parent header not found") + return errors.New("parent header not found") } currentVersion := b.beaconCfg.GetCurrentStateVersion(parentHeader.Slot / b.beaconCfg.SlotsPerEpoch) forkVersion := b.beaconCfg.GetForkVersionByVersion(currentVersion) @@ -168,7 +177,7 @@ func (b *blobSidecarService) verifySidecarsSignature(headState *state.CachingBea return err } if !ok { - return fmt.Errorf("blob signature validation: signature not valid") + return errors.New("blob signature validation: signature not valid") } return nil } diff --git a/cl/phase1/network/services/blob_sidecar_service_test.go b/cl/phase1/network/services/blob_sidecar_service_test.go index 03090941857..9bee325fc13 100644 --- a/cl/phase1/network/services/blob_sidecar_service_test.go +++ b/cl/phase1/network/services/blob_sidecar_service_test.go @@ -26,6 +26,7 @@ import ( "github.com/erigontech/erigon-lib/common" libcommon "github.com/erigontech/erigon-lib/common" + "github.com/erigontech/erigon/cl/beacon/beaconevents" "github.com/erigontech/erigon/cl/beacon/synced_data" "github.com/erigontech/erigon/cl/clparams" "github.com/erigontech/erigon/cl/cltypes" @@ -75,7 +76,8 @@ func setupBlobSidecarService(t *testing.T, ctrl *gomock.Controller, test bool) ( syncedDataManager := synced_data.NewSyncedDataManager(true, cfg) ethClock := eth_clock.NewMockEthereumClock(ctrl) forkchoiceMock := mock_services.NewForkChoiceStorageMock(t) - blockService := NewBlobSidecarService(ctx2, cfg, forkchoiceMock, syncedDataManager, ethClock, test) + emitters := beaconevents.NewEventEmitter() + blockService := NewBlobSidecarService(ctx2, cfg, forkchoiceMock, syncedDataManager, ethClock, emitters, test) return blockService, syncedDataManager, ethClock, forkchoiceMock } diff --git a/cl/phase1/network/services/block_service.go b/cl/phase1/network/services/block_service.go index e3c031e5929..9465c4d6705 100644 --- a/cl/phase1/network/services/block_service.go +++ b/cl/phase1/network/services/block_service.go @@ -18,10 +18,10 @@ package services import ( "context" - "strconv" "sync" "time" + libcommon "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon-lib/log/v3" "github.com/erigontech/erigon/cl/beacon/beaconevents" @@ -29,12 +29,11 @@ import ( "github.com/erigontech/erigon/cl/clparams" "github.com/erigontech/erigon/cl/cltypes" 
"github.com/erigontech/erigon/cl/cltypes/solid" + "github.com/erigontech/erigon/cl/monitor" "github.com/erigontech/erigon/cl/persistence/beacon_indicies" "github.com/erigontech/erigon/cl/phase1/core/state/lru" "github.com/erigontech/erigon/cl/phase1/forkchoice" "github.com/erigontech/erigon/cl/utils/eth_clock" - - libcommon "github.com/erigontech/erigon-lib/common" ) type proposerIndexAndSlot struct { @@ -57,10 +56,11 @@ type blockService struct { seenBlocksCache *lru.Cache[proposerIndexAndSlot, struct{}] // blocks that should be scheduled for later execution (e.g missing blobs). - emitter *beaconevents.Emitters + emitter *beaconevents.EventEmitter blocksScheduledForLaterExecution sync.Map // store the block in db - db kv.RwDB + db kv.RwDB + validatorMonitor monitor.ValidatorMonitor } // NewBlockService creates a new block service @@ -71,20 +71,22 @@ func NewBlockService( syncedData *synced_data.SyncedDataManager, ethClock eth_clock.EthereumClock, beaconCfg *clparams.BeaconChainConfig, - emitter *beaconevents.Emitters, + emitter *beaconevents.EventEmitter, + validatorMonitor monitor.ValidatorMonitor, ) Service[*cltypes.SignedBeaconBlock] { seenBlocksCache, err := lru.New[proposerIndexAndSlot, struct{}]("seenblocks", seenBlockCacheSize) if err != nil { panic(err) } b := &blockService{ - forkchoiceStore: forkchoiceStore, - syncedData: syncedData, - ethClock: ethClock, - beaconCfg: beaconCfg, - seenBlocksCache: seenBlocksCache, - emitter: emitter, - db: db, + forkchoiceStore: forkchoiceStore, + syncedData: syncedData, + ethClock: ethClock, + beaconCfg: beaconCfg, + seenBlocksCache: seenBlocksCache, + emitter: emitter, + db: db, + validatorMonitor: validatorMonitor, } go b.loop(ctx) return b @@ -160,10 +162,10 @@ func (b *blockService) publishBlockEvent(block *cltypes.SignedBeaconBlock) { return } // publish block to event handler - b.emitter.Publish("block", map[string]any{ - "slot": strconv.Itoa(int(block.Block.Slot)), - "block": libcommon.Hash(blockRoot), - "execution_optimistic": false, + b.emitter.State().SendBlock(&beaconevents.BlockData{ + Slot: block.Block.Slot, + Block: libcommon.Hash(blockRoot), + ExecutionOptimistic: false, }) } @@ -193,10 +195,13 @@ func (b *blockService) processAndStoreBlock(ctx context.Context, block *cltypes. 
return err } go b.importBlockOperations(block) - return b.db.Update(ctx, func(tx kv.RwTx) error { + if err := b.db.Update(ctx, func(tx kv.RwTx) error { return beacon_indicies.WriteHighestFinalized(tx, b.forkchoiceStore.FinalizedSlot()) - }) - + }); err != nil { + return err + } + b.validatorMonitor.OnNewBlock(block.Block) + return nil } // importBlockOperations imports block operations in parallel diff --git a/cl/phase1/network/services/block_service_test.go b/cl/phase1/network/services/block_service_test.go index 772e8a85a57..0bfab13954a 100644 --- a/cl/phase1/network/services/block_service_test.go +++ b/cl/phase1/network/services/block_service_test.go @@ -29,18 +29,20 @@ import ( "github.com/erigontech/erigon/cl/clparams" "github.com/erigontech/erigon/cl/cltypes" "github.com/erigontech/erigon/cl/cltypes/solid" + mockMonitor "github.com/erigontech/erigon/cl/monitor/mock_services" "github.com/erigontech/erigon/cl/phase1/forkchoice/mock_services" "github.com/erigontech/erigon/cl/utils/eth_clock" ) -func setupBlockService(t *testing.T, ctrl *gomock.Controller) (BlockService, *synced_data.SyncedDataManager, *eth_clock.MockEthereumClock, *mock_services.ForkChoiceStorageMock) { +func setupBlockService(t *testing.T, ctrl *gomock.Controller) (BlockService, *synced_data.SyncedDataManager, *eth_clock.MockEthereumClock, *mock_services.ForkChoiceStorageMock, *mockMonitor.MockValidatorMonitor) { db := memdb.NewTestDB(t) cfg := &clparams.MainnetBeaconConfig syncedDataManager := synced_data.NewSyncedDataManager(true, cfg) ethClock := eth_clock.NewMockEthereumClock(ctrl) forkchoiceMock := mock_services.NewForkChoiceStorageMock(t) - blockService := NewBlockService(context.Background(), db, forkchoiceMock, syncedDataManager, ethClock, cfg, nil) - return blockService, syncedDataManager, ethClock, forkchoiceMock + validatorMonitor := mockMonitor.NewMockValidatorMonitor(ctrl) + blockService := NewBlockService(context.Background(), db, forkchoiceMock, syncedDataManager, ethClock, cfg, nil, validatorMonitor) + return blockService, syncedDataManager, ethClock, forkchoiceMock, validatorMonitor } func TestBlockServiceUnsynced(t *testing.T) { @@ -49,7 +51,7 @@ func TestBlockServiceUnsynced(t *testing.T) { blocks, _, _ := tests.GetBellatrixRandom() - blockService, _, _, _ := setupBlockService(t, ctrl) + blockService, _, _, _, _ := setupBlockService(t, ctrl) require.Error(t, blockService.ProcessMessage(context.Background(), nil, blocks[0])) } @@ -59,7 +61,7 @@ func TestBlockServiceIgnoreSlot(t *testing.T) { blocks, _, post := tests.GetBellatrixRandom() - blockService, syncedData, ethClock, _ := setupBlockService(t, ctrl) + blockService, syncedData, ethClock, _, _ := setupBlockService(t, ctrl) syncedData.OnHeadState(post) ethClock.EXPECT().GetCurrentSlot().Return(uint64(0)).AnyTimes() ethClock.EXPECT().IsSlotCurrentSlotWithMaximumClockDisparity(gomock.Any()).Return(false).AnyTimes() @@ -73,7 +75,7 @@ func TestBlockServiceLowerThanFinalizedCheckpoint(t *testing.T) { blocks, _, post := tests.GetBellatrixRandom() - blockService, syncedData, ethClock, fcu := setupBlockService(t, ctrl) + blockService, syncedData, ethClock, fcu, _ := setupBlockService(t, ctrl) syncedData.OnHeadState(post) ethClock.EXPECT().GetCurrentSlot().Return(uint64(0)).AnyTimes() ethClock.EXPECT().IsSlotCurrentSlotWithMaximumClockDisparity(gomock.Any()).Return(true).AnyTimes() @@ -89,7 +91,7 @@ func TestBlockServiceUnseenParentRoot(t *testing.T) { blocks, _, post := tests.GetBellatrixRandom() - blockService, syncedData, ethClock, fcu := 
setupBlockService(t, ctrl) + blockService, syncedData, ethClock, fcu, _ := setupBlockService(t, ctrl) syncedData.OnHeadState(post) ethClock.EXPECT().GetCurrentSlot().Return(uint64(0)).AnyTimes() ethClock.EXPECT().IsSlotCurrentSlotWithMaximumClockDisparity(gomock.Any()).Return(true).AnyTimes() @@ -104,7 +106,7 @@ func TestBlockServiceYoungerThanParent(t *testing.T) { blocks, _, post := tests.GetBellatrixRandom() - blockService, syncedData, ethClock, fcu := setupBlockService(t, ctrl) + blockService, syncedData, ethClock, fcu, _ := setupBlockService(t, ctrl) syncedData.OnHeadState(post) ethClock.EXPECT().GetCurrentSlot().Return(uint64(0)).AnyTimes() ethClock.EXPECT().IsSlotCurrentSlotWithMaximumClockDisparity(gomock.Any()).Return(true).AnyTimes() @@ -121,7 +123,7 @@ func TestBlockServiceInvalidCommitmentsPerBlock(t *testing.T) { blocks, _, post := tests.GetBellatrixRandom() - blockService, syncedData, ethClock, fcu := setupBlockService(t, ctrl) + blockService, syncedData, ethClock, fcu, _ := setupBlockService(t, ctrl) syncedData.OnHeadState(post) ethClock.EXPECT().GetCurrentSlot().Return(uint64(0)).AnyTimes() ethClock.EXPECT().IsSlotCurrentSlotWithMaximumClockDisparity(gomock.Any()).Return(true).AnyTimes() @@ -141,13 +143,14 @@ func TestBlockServiceSuccess(t *testing.T) { blocks, _, post := tests.GetBellatrixRandom() - blockService, syncedData, ethClock, fcu := setupBlockService(t, ctrl) + blockService, syncedData, ethClock, fcu, validatorMonitor := setupBlockService(t, ctrl) syncedData.OnHeadState(post) ethClock.EXPECT().GetCurrentSlot().Return(uint64(0)).AnyTimes() ethClock.EXPECT().IsSlotCurrentSlotWithMaximumClockDisparity(gomock.Any()).Return(true).AnyTimes() fcu.FinalizedCheckpointVal = post.FinalizedCheckpoint() fcu.Headers[blocks[1].Block.ParentRoot] = blocks[0].SignedBeaconBlockHeader().Header.Copy() blocks[1].Block.Body.BlobKzgCommitments = solid.NewStaticListSSZ[*cltypes.KZGCommitment](100, 48) + validatorMonitor.EXPECT().OnNewBlock(blocks[1].Block).Return(nil).Times(1) require.NoError(t, blockService.ProcessMessage(context.Background(), nil, blocks[1])) } diff --git a/cl/phase1/network/services/bls_to_execution_change_service.go b/cl/phase1/network/services/bls_to_execution_change_service.go index b6170f11fd8..4b8e20e442a 100644 --- a/cl/phase1/network/services/bls_to_execution_change_service.go +++ b/cl/phase1/network/services/bls_to_execution_change_service.go @@ -19,6 +19,7 @@ package services import ( "bytes" "context" + "errors" "fmt" libcommon "github.com/erigontech/erigon-lib/common" @@ -33,14 +34,14 @@ import ( type blsToExecutionChangeService struct { operationsPool pool.OperationsPool - emitters *beaconevents.Emitters + emitters *beaconevents.EventEmitter syncedDataManager synced_data.SyncedData beaconCfg *clparams.BeaconChainConfig } func NewBLSToExecutionChangeService( operationsPool pool.OperationsPool, - emitters *beaconevents.Emitters, + emitters *beaconevents.EventEmitter, syncedDataManager synced_data.SyncedData, beaconCfg *clparams.BeaconChainConfig, ) BLSToExecutionChangeService { @@ -54,7 +55,6 @@ func NewBLSToExecutionChangeService( func (s *blsToExecutionChangeService) ProcessMessage(ctx context.Context, subnet *uint64, msg *cltypes.SignedBLSToExecutionChange) error { // https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/p2p-interface.md#bls_to_execution_change - defer s.emitters.Publish("bls_to_execution_change", msg) // [IGNORE] The signed_bls_to_execution_change is the first valid signed bls to execution change received // for the validator 
with index signed_bls_to_execution_change.message.validator_index. if s.operationsPool.BLSToExecutionChangesPool.Has(msg.Signature) { @@ -84,7 +84,7 @@ func (s *blsToExecutionChangeService) ProcessMessage(ctx context.Context, subnet // assert validator.withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX if wc[0] != byte(s.beaconCfg.BLSWithdrawalPrefixByte) { - return fmt.Errorf("invalid withdrawal credentials prefix") + return errors.New("invalid withdrawal credentials prefix") } // assert validator.withdrawal_credentials[1:] == hash(address_change.from_bls_pubkey)[1:] @@ -92,7 +92,7 @@ func (s *blsToExecutionChangeService) ProcessMessage(ctx context.Context, subnet // Check the validator's withdrawal credentials against the provided message. hashedFrom := utils.Sha256(change.From[:]) if !bytes.Equal(hashedFrom[1:], wc[1:]) { - return fmt.Errorf("invalid withdrawal credentials hash") + return errors.New("invalid withdrawal credentials hash") } // assert bls.Verify(address_change.from_bls_pubkey, signing_root, signed_address_change.signature) @@ -110,7 +110,7 @@ func (s *blsToExecutionChangeService) ProcessMessage(ctx context.Context, subnet return err } if !valid { - return fmt.Errorf("invalid signature") + return errors.New("invalid signature") } // validator.withdrawal_credentials = ( @@ -124,6 +124,7 @@ func (s *blsToExecutionChangeService) ProcessMessage(ctx context.Context, subnet copy(newWc[12:], change.To[:]) stateMutator.SetWithdrawalCredentialForValidatorAtIndex(int(change.ValidatorIndex), newWc) + s.emitters.Operation().SendBlsToExecution(msg) s.operationsPool.BLSToExecutionChangesPool.Insert(msg.Signature, msg) return nil } diff --git a/cl/phase1/network/services/bls_to_execution_change_service_test.go b/cl/phase1/network/services/bls_to_execution_change_service_test.go index 2ef51b12d61..7d37978fe56 100644 --- a/cl/phase1/network/services/bls_to_execution_change_service_test.go +++ b/cl/phase1/network/services/bls_to_execution_change_service_test.go @@ -40,7 +40,7 @@ type blsToExecutionChangeTestSuite struct { suite.Suite gomockCtrl *gomock.Controller operationsPool *pool.OperationsPool - emitters *beaconevents.Emitters + emitters *beaconevents.EventEmitter syncedData *mockSync.MockSyncedData beaconCfg *clparams.BeaconChainConfig @@ -54,7 +54,7 @@ func (t *blsToExecutionChangeTestSuite) SetupTest() { BLSToExecutionChangesPool: pool.NewOperationPool[common.Bytes96, *cltypes.SignedBLSToExecutionChange](10, "blsToExecutionChangesPool"), } t.syncedData = mockSync.NewMockSyncedData(t.gomockCtrl) - t.emitters = beaconevents.NewEmitters() + t.emitters = beaconevents.NewEventEmitter() t.beaconCfg = &clparams.BeaconChainConfig{} t.service = NewBLSToExecutionChangeService(*t.operationsPool, t.emitters, t.syncedData, t.beaconCfg) // mock global functions diff --git a/cl/phase1/network/services/proposer_slashing_service.go b/cl/phase1/network/services/proposer_slashing_service.go index 3aa5fc1ed45..0fe96ef8791 100644 --- a/cl/phase1/network/services/proposer_slashing_service.go +++ b/cl/phase1/network/services/proposer_slashing_service.go @@ -18,8 +18,10 @@ package services import ( "context" + "errors" "fmt" + "github.com/erigontech/erigon/cl/beacon/beaconevents" "github.com/erigontech/erigon/cl/beacon/synced_data" "github.com/erigontech/erigon/cl/clparams" "github.com/erigontech/erigon/cl/cltypes" @@ -34,6 +36,7 @@ type proposerSlashingService struct { syncedDataManager synced_data.SyncedData beaconCfg *clparams.BeaconChainConfig ethClock eth_clock.EthereumClock + emitters 
*beaconevents.EventEmitter cache *lru.Cache[uint64, struct{}] } @@ -42,6 +45,7 @@ func NewProposerSlashingService( syncedDataManager synced_data.SyncedData, beaconCfg *clparams.BeaconChainConfig, ethClock eth_clock.EthereumClock, + emitters *beaconevents.EventEmitter, ) *proposerSlashingService { cache, err := lru.New[uint64, struct{}]("proposer_slashing", proposerSlashingCacheSize) if err != nil { @@ -53,6 +57,7 @@ func NewProposerSlashingService( beaconCfg: beaconCfg, ethClock: ethClock, cache: cache, + emitters: emitters, } } @@ -83,7 +88,7 @@ func (s *proposerSlashingService) ProcessMessage(ctx context.Context, subnet *ui // Verify the headers are different if *h1 == *h2 { - return fmt.Errorf("proposee slashing headers are the same") + return errors.New("proposer slashing headers are the same") } // Verify the proposer is slashable @@ -121,5 +126,6 @@ func (s *proposerSlashingService) ProcessMessage(ctx context.Context, subnet *ui s.operationsPool.ProposerSlashingsPool.Insert(pool.ComputeKeyForProposerSlashing(msg), msg) s.cache.Add(pIndex, struct{}{}) + s.emitters.Operation().SendProposerSlashing(msg) return nil } diff --git a/cl/phase1/network/services/proposer_slashing_service_test.go b/cl/phase1/network/services/proposer_slashing_service_test.go index 7b3a9f00d44..b7a66cb7cd7 100644 --- a/cl/phase1/network/services/proposer_slashing_service_test.go +++ b/cl/phase1/network/services/proposer_slashing_service_test.go @@ -24,6 +24,7 @@ import ( "github.com/erigontech/erigon-lib/common" mockState "github.com/erigontech/erigon/cl/abstract/mock_services" + "github.com/erigontech/erigon/cl/beacon/beaconevents" mockSync "github.com/erigontech/erigon/cl/beacon/synced_data/mock_services" "github.com/erigontech/erigon/cl/clparams" "github.com/erigontech/erigon/cl/cltypes" @@ -55,7 +56,8 @@ func (t *proposerSlashingTestSuite) SetupTest() { t.beaconCfg = &clparams.BeaconChainConfig{ SlotsPerEpoch: 2, } - t.proposerSlashingService = NewProposerSlashingService(*t.operationsPool, t.syncedData, t.beaconCfg, t.ethClock) + emitters := beaconevents.NewEventEmitter() + t.proposerSlashingService = NewProposerSlashingService(*t.operationsPool, t.syncedData, t.beaconCfg, t.ethClock, emitters) // mock global functions t.mockFuncs = &mockFuncs{ctrl: t.gomockCtrl} computeSigningRoot = t.mockFuncs.ComputeSigningRoot diff --git a/cl/phase1/network/services/sync_contribution_service.go b/cl/phase1/network/services/sync_contribution_service.go index ebf28547a0c..2aad8a3a7fb 100644 --- a/cl/phase1/network/services/sync_contribution_service.go +++ b/cl/phase1/network/services/sync_contribution_service.go @@ -21,7 +21,6 @@ import ( "context" "encoding/binary" "errors" - "fmt" "slices" "sync" @@ -52,7 +51,7 @@ type syncContributionService struct { beaconCfg *clparams.BeaconChainConfig syncContributionPool sync_contribution_pool.SyncContributionPool seenSyncCommitteeContributions map[seenSyncCommitteeContribution]struct{} - emitters *beaconevents.Emitters + emitters *beaconevents.EventEmitter ethClock eth_clock.EthereumClock test bool @@ -65,7 +64,7 @@ func NewSyncContributionService( syncedDataManager *synced_data.SyncedDataManager, beaconCfg *clparams.BeaconChainConfig, syncContributionPool sync_contribution_pool.SyncContributionPool, ethClock eth_clock.EthereumClock, - emitters *beaconevents.Emitters, + emitters *beaconevents.EventEmitter, test bool, ) SyncContributionService { return &syncContributionService{ @@ -95,7 +94,7 @@ func (s *syncContributionService) ProcessMessage(ctx context.Context, subnet *ui // [REJECT] The subcommittee index is in the allowed 
range, i.e. contribution.subcommittee_index < SYNC_COMMITTEE_SUBNET_COUNT. if contributionAndProof.Contribution.SubcommitteeIndex >= clparams.MainnetBeaconConfig.SyncCommitteeSubnetCount { - return fmt.Errorf("subcommittee index is out of range") + return errors.New("subcommittee index is out of range") } aggregatorPubKey, err := headState.ValidatorPublicKey(int(contributionAndProof.AggregatorIndex)) @@ -114,18 +113,18 @@ func (s *syncContributionService) ProcessMessage(ctx context.Context, subnet *ui // [REJECT] The contribution has participants -- that is, any(contribution.aggregation_bits). if bytes.Equal(aggregationBits, make([]byte, len(aggregationBits))) { // check if the aggregation bits are all zeros - return fmt.Errorf("contribution has no participants") + return errors.New("contribution has no participants") } modulo := max(1, s.beaconCfg.SyncCommitteeSize/s.beaconCfg.SyncCommitteeSubnetCount/s.beaconCfg.TargetAggregatorsPerSyncSubcommittee) hashSignature := utils.Sha256(selectionProof[:]) if !s.test && binary.LittleEndian.Uint64(hashSignature[:8])%modulo != 0 { - return fmt.Errorf("selects the validator as an aggregator") + return errors.New("selection proof does not select the validator as an aggregator") } // [REJECT] The aggregator's validator index is in the declared subcommittee of the current sync committee -- i.e. state.validators[contribution_and_proof.aggregator_index].pubkey in get_sync_subcommittee_pubkeys(state, contribution.subcommittee_index). if !slices.Contains(subcommiteePubsKeys, aggregatorPubKey) { - return fmt.Errorf("aggregator's validator index is not in subcommittee") + return errors.New("aggregator's validator index is not in subcommittee") } // [IGNORE] The sync committee contribution is the first valid contribution received for the aggregator with index contribution_and_proof.aggregator_index for the slot contribution.slot and subcommittee index contribution.subcommittee_index (this requires maintaining a cache of size SYNC_COMMITTEE_SIZE for this topic that can be flushed after each slot). 
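For reference, the modulo check above implements the spec's is_sync_committee_aggregator rule. A minimal standalone sketch of the same arithmetic, assuming mainnet constants (SyncCommitteeSize = 512, SyncCommitteeSubnetCount = 4, TargetAggregatorsPerSyncSubcommittee = 16, hence modulo = 8); the helper name is ours, not the service's:

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// isSyncCommitteeAggregator mirrors the check in ProcessMessage: hash the
// 96-byte BLS selection proof and test the first eight little-endian bytes
// of the digest against the aggregator modulo.
func isSyncCommitteeAggregator(selectionProof []byte) bool {
	const modulo = 512 / 4 / 16 // = 8; the max(1, ...) guard in the service only matters for tiny test configs
	digest := sha256.Sum256(selectionProof)
	return binary.LittleEndian.Uint64(digest[:8])%modulo == 0
}

func main() {
	proof := make([]byte, 96) // zeroed stand-in for a real selection proof
	fmt.Println(isSyncCommitteeAggregator(proof)) // deterministic for a fixed input
}

In the service, utils.Sha256 plays the role of sha256.Sum256 and the constants come from beaconCfg, so under the mainnet configuration the two computations agree for any given proof.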
@@ -150,7 +149,7 @@ func (s *syncContributionService) ProcessMessage(ctx context.Context, subnet *ui s.markContributionAsSeen(contributionAndProof) // emit contribution_and_proof - s.emitters.Publish("contribution_and_proof", signedContribution) + s.emitters.Operation().SendContributionProof(signedContribution) // add the contribution to the pool err = s.syncContributionPool.AddSyncContribution(headState, contributionAndProof.Contribution) if errors.Is(err, sync_contribution_pool.ErrIsSuperset) { @@ -236,7 +235,7 @@ func verifySyncContributionSelectionProof(st *state.CachingBeaconState, contribu return err } if !valid { - return fmt.Errorf("invalid selectionProof signature") + return errors.New("invalid selectionProof signature") } return nil } @@ -266,7 +265,7 @@ func verifySyncContributionProofAggregatedSignature(s *state.CachingBeaconState, } if !valid { - return fmt.Errorf("invalid signature for aggregate sync contribution") + return errors.New("invalid signature for aggregate sync contribution") } return nil } diff --git a/cl/phase1/network/services/sync_contribution_service_test.go b/cl/phase1/network/services/sync_contribution_service_test.go index 9a8d5e6d8e7..3b1ef16e96a 100644 --- a/cl/phase1/network/services/sync_contribution_service_test.go +++ b/cl/phase1/network/services/sync_contribution_service_test.go @@ -38,7 +38,7 @@ func setupSyncContributionServiceTest(t *testing.T, ctrl *gomock.Controller) (Sy syncedDataManager := synced_data.NewSyncedDataManager(true, cfg) ethClock := eth_clock.NewMockEthereumClock(ctrl) syncContributionPool := syncpoolmock.NewMockSyncContributionPool(ctrl) - s := NewSyncContributionService(syncedDataManager, cfg, syncContributionPool, ethClock, beaconevents.NewEmitters(), true) + s := NewSyncContributionService(syncedDataManager, cfg, syncContributionPool, ethClock, beaconevents.NewEventEmitter(), true) syncContributionPool.EXPECT().AddSyncContribution(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() return s, syncedDataManager, ethClock } diff --git a/cl/phase1/network/services/voluntary_exit_service.go b/cl/phase1/network/services/voluntary_exit_service.go index c425f678cd1..c067a0ad45e 100644 --- a/cl/phase1/network/services/voluntary_exit_service.go +++ b/cl/phase1/network/services/voluntary_exit_service.go @@ -33,7 +33,7 @@ import ( type voluntaryExitService struct { operationsPool pool.OperationsPool - emitters *beaconevents.Emitters + emitters *beaconevents.EventEmitter syncedDataManager synced_data.SyncedData beaconCfg *clparams.BeaconChainConfig ethClock eth_clock.EthereumClock @@ -41,7 +41,7 @@ type voluntaryExitService struct { func NewVoluntaryExitService( operationsPool pool.OperationsPool, - emitters *beaconevents.Emitters, + emitters *beaconevents.EventEmitter, syncedDataManager synced_data.SyncedData, beaconCfg *clparams.BeaconChainConfig, ethClock eth_clock.EthereumClock, @@ -58,7 +58,6 @@ func NewVoluntaryExitService( func (s *voluntaryExitService) ProcessMessage(ctx context.Context, subnet *uint64, msg *cltypes.SignedVoluntaryExit) error { // ref: https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#voluntary_exit voluntaryExit := msg.VoluntaryExit - defer s.emitters.Publish("voluntary_exit", voluntaryExit) // [IGNORE] The voluntary exit is the first valid voluntary exit received for the validator with index signed_voluntary_exit.message.validator_index. 
if s.operationsPool.VoluntaryExitsPool.Has(voluntaryExit.ValidatorIndex) { @@ -80,7 +79,7 @@ func (s *voluntaryExitService) ProcessMessage(ctx context.Context, subnet *uint6 // Verify the validator is active // assert is_active_validator(validator, get_current_epoch(state)) if !val.Active(curEpoch) { - return fmt.Errorf("validator is not active") + return errors.New("validator is not active") } // Verify exit has not been initiated @@ -92,13 +91,13 @@ func (s *voluntaryExitService) ProcessMessage(ctx context.Context, subnet *uint6 // Exits must specify an epoch when they become valid; they are not valid before then // assert get_current_epoch(state) >= voluntary_exit.epoch if !(curEpoch >= voluntaryExit.Epoch) { - return fmt.Errorf("exits must specify an epoch when they become valid; they are not valid before then") + return errors.New("exits must specify an epoch when they become valid; they are not valid before then") } // Verify the validator has been active long enough // assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD if !(curEpoch >= val.ActivationEpoch()+s.beaconCfg.ShardCommitteePeriod) { - return fmt.Errorf("verify the validator has been active long enough") + return errors.New("validator has not been active long enough") } // Verify signature @@ -127,6 +126,6 @@ func (s *voluntaryExitService) ProcessMessage(ctx context.Context, subnet *uint6 } s.operationsPool.VoluntaryExitsPool.Insert(voluntaryExit.ValidatorIndex, msg) - + s.emitters.Operation().SendVoluntaryExit(msg) return nil } diff --git a/cl/phase1/network/services/voluntary_exit_service_test.go b/cl/phase1/network/services/voluntary_exit_service_test.go index 76cc9617272..be539b62f5c 100644 --- a/cl/phase1/network/services/voluntary_exit_service_test.go +++ b/cl/phase1/network/services/voluntary_exit_service_test.go @@ -39,7 +39,7 @@ type voluntaryExitTestSuite struct { suite.Suite gomockCtrl *gomock.Controller operationsPool *pool.OperationsPool - emitters *beaconevents.Emitters + emitters *beaconevents.EventEmitter syncedData *mockSync.MockSyncedData ethClock *eth_clock.MockEthereumClock beaconCfg *clparams.BeaconChainConfig @@ -53,7 +53,7 @@ func (t *voluntaryExitTestSuite) SetupTest() { return [32]byte{}, nil } t.gomockCtrl = gomock.NewController(t.T()) - t.emitters = beaconevents.NewEmitters() + t.emitters = beaconevents.NewEventEmitter() t.operationsPool = &pool.OperationsPool{ VoluntaryExitsPool: pool.NewOperationPool[uint64, *cltypes.SignedVoluntaryExit](10, "voluntaryExitsPool"), } diff --git a/cl/phase1/stages/chain_tip_sync.go b/cl/phase1/stages/chain_tip_sync.go new file mode 100644 index 00000000000..71da28f15a9 --- /dev/null +++ b/cl/phase1/stages/chain_tip_sync.go @@ -0,0 +1,268 @@ +package stages + +import ( + "context" + "time" + + "github.com/erigontech/erigon-lib/common" + "github.com/erigontech/erigon-lib/log/v3" + "github.com/erigontech/erigon/cl/beacon/beaconevents" + "github.com/erigontech/erigon/cl/cltypes" + "github.com/erigontech/erigon/cl/persistence/blob_storage" + network2 "github.com/erigontech/erigon/cl/phase1/network" + "github.com/erigontech/erigon/cl/sentinel/peers" +) + +// waitForExecutionEngineToBeFinished checks if the execution engine is ready within a specified timeout. +// It periodically checks the readiness of the execution client and returns true if the client is ready before +// the timeout occurs. If the context is canceled, it returns false with the context's error; if the internal timeout elapses first, it returns false with a nil error. 
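+// Readiness is polled on a short interval against the ten-second budget below; callers treat a (false, nil) result as "skip this round" rather than as a failure.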
+func waitForExecutionEngineToBeFinished(ctx context.Context, cfg *Cfg) (ready bool, err error) { + // If no execution client is set, then we can skip this step + if cfg.executionClient == nil { + return true, nil + } + + // Set up the overall timeout and the readiness polling ticker + readyTimeout := time.NewTimer(10 * time.Second) + readyInterval := time.NewTicker(50 * time.Millisecond) + + // Ensure the timer and ticker are stopped to release resources + defer readyTimeout.Stop() + defer readyInterval.Stop() + + // Loop to check the readiness status + for { + select { + case <-ctx.Done(): + // Context canceled or timed out + return false, ctx.Err() + case <-readyTimeout.C: + // Timeout reached without the execution engine being ready + return false, nil + case <-readyInterval.C: + // Check the readiness of the execution engine + ready, err := cfg.executionClient.Ready(ctx) + if err != nil { + return false, err + } + if !ready { + // If not ready, continue checking in the next interval + continue + } + // Execution engine is ready + return true, nil + } + } +} + +// fetchBlocksFromReqResp retrieves blocks starting from a specified block number and continues for a given count. +// It sends a request to fetch the blocks, verifies the associated blobs, and inserts them into the blob store. +// It returns a PeeredObject containing the blocks and the peer ID, or an error if something goes wrong. +func fetchBlocksFromReqResp(ctx context.Context, cfg *Cfg, from uint64, count uint64) (*peers.PeeredObject[[]*cltypes.SignedBeaconBlock], error) { + // retry the blocks-by-range request to peers until it succeeds or the context is canceled + + blocks, pid, err := cfg.rpc.SendBeaconBlocksByRangeReq(ctx, from, count) + for err != nil { + if ctx.Err() != nil { + return nil, ctx.Err() + } + blocks, pid, err = cfg.rpc.SendBeaconBlocksByRangeReq(ctx, from, count) + } + + // If no blocks are returned, return nil without error + if len(blocks) == 0 { + return nil, nil + } + + // Generate blob identifiers from the retrieved blocks + ids, err := network2.BlobsIdentifiersFromBlocks(blocks) + if err != nil { + return nil, err + } + + var inserted uint64 + + // Loop until all blobs are inserted into the blob store + for inserted != uint64(ids.Len()) { + select { + case <-ctx.Done(): + // Context canceled or timed out + return nil, ctx.Err() + default: + } + + // Request blobs repeatedly from peers + blobs, err := network2.RequestBlobsFrantically(ctx, cfg.rpc, ids) + if err != nil { + return nil, err + } + + // Verify the blobs against identifiers and insert them into the blob store + if _, inserted, err = blob_storage.VerifyAgainstIdentifiersAndInsertIntoTheBlobStore(ctx, cfg.blobStore, ids, blobs.Responses, nil); err != nil { + return nil, err + } + } + + // Return the blocks and the peer ID wrapped in a PeeredObject + return &peers.PeeredObject[[]*cltypes.SignedBeaconBlock]{ + Data: blocks, + Peer: pid, + }, nil +} + +// startFetchingBlocksMissedByGossipAfterSomeTime starts fetching blocks that might have been missed by gossip after a delay. +// It periodically fetches blocks from the highest seen block up to the current slot and sends the results or errors to the provided channels. 
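+// The fetch window is deliberately generous: it starts two slots behind the highest seen block, so a possibly missed parent is re-requested, and extends four slots past the current wall-clock slot.
+// For example, with HighestSeen() = 99 and a current slot of 105, the request covers slots 97 through 108 (from = 97, count = 12).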
+func startFetchingBlocksMissedByGossipAfterSomeTime(ctx context.Context, cfg *Cfg, args Args, respCh chan<- *peers.PeeredObject[[]*cltypes.SignedBeaconBlock], errCh chan error) { + // Wait for half the duration of SecondsPerSlot or until the context is done + select { + case <-time.After((time.Duration(cfg.beaconCfg.SecondsPerSlot) * time.Second) / 2): + case <-ctx.Done(): + return + } + + // Continuously fetch and process blocks + for { + // Calculate the range of blocks to fetch + from := cfg.forkChoice.HighestSeen() - 2 + currentSlot := cfg.ethClock.GetCurrentSlot() + count := (currentSlot - from) + 4 + + // Stop fetching if the highest seen block is greater than or equal to the target slot + if cfg.forkChoice.HighestSeen() >= args.targetSlot { + return + } + + // Fetch blocks from the specified range + blocks, err := fetchBlocksFromReqResp(ctx, cfg, from, count) + if err != nil { + // Send error to the error channel and return + errCh <- err + return + } + + // Send fetched blocks to the response channel or handle context cancellation + select { + case respCh <- blocks: + case <-ctx.Done(): + return + case <-time.After(time.Second): // Take a short pause before the next iteration + } + } +} + +// listenToIncomingBlocksUntilANewBlockIsReceived listens for incoming blocks until a new block with a slot greater than or equal to the target slot is received. +// It processes blocks, checks their validity, and publishes them. It also handles context cancellation and logs progress periodically. +func listenToIncomingBlocksUntilANewBlockIsReceived(ctx context.Context, logger log.Logger, cfg *Cfg, args Args, respCh <-chan *peers.PeeredObject[[]*cltypes.SignedBeaconBlock], errCh chan error) error { + // Timer to log progress every 30 seconds + logTimer := time.NewTicker(30 * time.Second) + defer logTimer.Stop() + + // Timer to check block presence every 20 milliseconds + presenceTicker := time.NewTicker(20 * time.Millisecond) + defer presenceTicker.Stop() + + // Map to keep track of seen block roots + seenBlockRoots := make(map[common.Hash]struct{}) + +MainLoop: + for { + select { + case <-presenceTicker.C: + // Check if the highest seen block is greater than or equal to the target slot + if cfg.forkChoice.HighestSeen() >= args.targetSlot { + break MainLoop + } + case <-ctx.Done(): + // Handle context cancellation + return ctx.Err() + case err := <-errCh: + // Handle errors received on the error channel + return err + case blocks := <-respCh: + // Handle blocks received on the response channel + for _, block := range blocks.Data { + // Check if the parent block is known + if _, ok := cfg.forkChoice.GetHeader(block.Block.ParentRoot); !ok { + time.Sleep(time.Millisecond) + continue + } + + // Calculate the block root and check if the block is already known + blockRoot, _ := block.Block.HashSSZ() // Ignoring error as block would not process if HashSSZ failed + if _, ok := cfg.forkChoice.GetHeader(blockRoot); ok { + // Check if the block slot is greater than or equal to the target slot + if block.Block.Slot >= args.targetSlot { + break MainLoop + } + continue + } + + // Check if the block root has already been seen + if _, ok := seenBlockRoots[blockRoot]; ok { + continue + } + + // Mark the block root as seen + seenBlockRoots[blockRoot] = struct{}{} + + // Process the block + if err := processBlock(ctx, cfg, cfg.indiciesDB, block, true, true, true); err != nil { + log.Debug("bad blocks segment received", "err", err) + continue + } + + // Publish the block to the event handler + 
cfg.emitter.State().SendBlock(&beaconevents.BlockData{ + Slot: block.Block.Slot, + Block: blockRoot, + ExecutionOptimistic: false, // todo: fix this + }) + + // Notify the validator monitor of the new block + cfg.validatorMonitor.OnNewBlock(block.Block) + + // Check if the block slot is greater than or equal to the target slot + if block.Block.Slot >= args.targetSlot { + break MainLoop + } + } + case <-logTimer.C: + // Log progress periodically + logger.Info("[Caplin] Progress", "progress", cfg.forkChoice.HighestSeen(), "from", args.seenSlot, "to", args.targetSlot) + } + } + return nil +} + +// chainTipSync synchronizes the chain tip by fetching blocks from the highest seen block up to the target slot, either by listening to incoming blocks +// or by fetching blocks that might have been missed by gossip after a delay. +func chainTipSync(ctx context.Context, logger log.Logger, cfg *Cfg, args Args) error { + totalRequest := args.targetSlot - args.seenSlot + // Give the execution engine a short window to become ready; if it is not ready in time, skip this round. + ready, err := waitForExecutionEngineToBeFinished(ctx, cfg) + if err != nil { + return err + } + if !ready { + return nil + } + + if cfg.executionClient != nil && cfg.executionClient.SupportInsertion() { + if err := cfg.blockCollector.Flush(context.Background()); err != nil { + return err + } + } + + logger.Debug("waiting for blocks...", + "seenSlot", args.seenSlot, + "targetSlot", args.targetSlot, + "requestedSlots", totalRequest, + ) + respCh := make(chan *peers.PeeredObject[[]*cltypes.SignedBeaconBlock], 1024) + errCh := make(chan error) + + // 25 seconds is a good timeout for this + ctx, cn := context.WithTimeout(ctx, 25*time.Second) + defer cn() + + go startFetchingBlocksMissedByGossipAfterSomeTime(ctx, cfg, args, respCh, errCh) + + return listenToIncomingBlocksUntilANewBlockIsReceived(ctx, logger, cfg, args, respCh, errCh) +} diff --git a/cl/phase1/stages/cleanup_and_pruning.go b/cl/phase1/stages/cleanup_and_pruning.go new file mode 100644 index 00000000000..cdd14cb7f55 --- /dev/null +++ b/cl/phase1/stages/cleanup_and_pruning.go @@ -0,0 +1,29 @@ +package stages + +import ( + "context" + + "github.com/erigontech/erigon-lib/log/v3" + "github.com/erigontech/erigon/cl/persistence/beacon_indicies" +) + +// cleanupAndPruning cleans up the database and prunes old data. 
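+// Block pruning keeps roughly the most recent pruneDistance (1,000,000) slots and is skipped entirely when backfilling is enabled; the blob store is pruned in either case. Note that the unsigned subtraction below assumes seenSlot exceeds pruneDistance.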
+func cleanupAndPruning(ctx context.Context, logger log.Logger, cfg *Cfg, args Args) error { + tx, err := cfg.indiciesDB.BeginRw(ctx) + if err != nil { + return err + } + defer tx.Rollback() + pruneDistance := uint64(1_000_000) + + if !cfg.backfilling { + if err := beacon_indicies.PruneBlocks(ctx, tx, args.seenSlot-pruneDistance); err != nil { + return err + } + } + + if err := tx.Commit(); err != nil { + return err + } + return cfg.blobStore.Prune() +} diff --git a/cl/phase1/stages/clstages.go b/cl/phase1/stages/clstages.go index b8e05cdc756..1ddb16acefb 100644 --- a/cl/phase1/stages/clstages.go +++ b/cl/phase1/stages/clstages.go @@ -18,28 +18,19 @@ package stages import ( "context" - "errors" - "fmt" - "runtime" - "sort" - "strconv" - "sync/atomic" "time" - "github.com/erigontech/erigon-lib/common" - "github.com/erigontech/erigon-lib/common/dbg" + "github.com/erigontech/erigon-lib/common/datadir" "github.com/erigontech/erigon-lib/kv" - "github.com/erigontech/erigon-lib/kv/memdb" "github.com/erigontech/erigon/cl/antiquary" "github.com/erigontech/erigon/cl/beacon/beaconevents" "github.com/erigontech/erigon/cl/beacon/synced_data" "github.com/erigontech/erigon/cl/clparams" "github.com/erigontech/erigon/cl/clstages" "github.com/erigontech/erigon/cl/cltypes" - "github.com/erigontech/erigon/cl/persistence" + "github.com/erigontech/erigon/cl/monitor" "github.com/erigontech/erigon/cl/persistence/beacon_indicies" "github.com/erigontech/erigon/cl/persistence/blob_storage" - state_accessors "github.com/erigontech/erigon/cl/persistence/state" "github.com/erigontech/erigon/cl/phase1/core/state" "github.com/erigontech/erigon/cl/phase1/execution_client" "github.com/erigontech/erigon/cl/phase1/execution_client/block_collector" @@ -52,7 +43,6 @@ import ( network2 "github.com/erigontech/erigon/cl/phase1/network" "github.com/erigontech/erigon/cl/rpc" - "github.com/erigontech/erigon/cl/sentinel/peers" ) type Cfg struct { @@ -64,15 +54,16 @@ type Cfg struct { gossipManager *network2.GossipManager forkChoice *forkchoice.ForkChoiceStore indiciesDB kv.RwDB - tmpdir string + dirs datadir.Dirs blockReader freezeblocks.BeaconSnapshotReader antiquary *antiquary.Antiquary syncedData *synced_data.SyncedDataManager - emitter *beaconevents.Emitters + emitter *beaconevents.EventEmitter blockCollector block_collector.BlockCollector sn *freezeblocks.CaplinSnapshots blobStore blob_storage.BlobStorage attestationDataProducer attestation_producer.AttestationDataProducer + validatorMonitor monitor.ValidatorMonitor hasDownloaded, backfilling, blobBackfilling bool } @@ -98,14 +89,15 @@ func ClStagesCfg( indiciesDB kv.RwDB, sn *freezeblocks.CaplinSnapshots, blockReader freezeblocks.BeaconSnapshotReader, - tmpdir string, + dirs datadir.Dirs, syncBackLoopLimit uint64, backfilling bool, blobBackfilling bool, syncedData *synced_data.SyncedDataManager, - emitters *beaconevents.Emitters, + emitters *beaconevents.EventEmitter, blobStore blob_storage.BlobStorage, attestationDataProducer attestation_producer.AttestationDataProducer, + validatorMonitor monitor.ValidatorMonitor, ) *Cfg { return &Cfg{ rpc: rpc, @@ -116,7 +108,7 @@ func ClStagesCfg( executionClient: executionClient, gossipManager: gossipManager, forkChoice: forkChoice, - tmpdir: tmpdir, + dirs: dirs, indiciesDB: indiciesDB, sn: sn, blockReader: blockReader, @@ -124,9 +116,10 @@ func ClStagesCfg( syncedData: syncedData, emitter: emitters, blobStore: blobStore, - blockCollector: block_collector.NewBlockCollector(log.Root(), executionClient, beaconCfg, syncBackLoopLimit, 
tmpdir), + blockCollector: block_collector.NewBlockCollector(log.Root(), executionClient, beaconCfg, syncBackLoopLimit, dirs.Tmp), blobBackfilling: blobBackfilling, attestationDataProducer: attestationDataProducer, + validatorMonitor: validatorMonitor, } } @@ -134,18 +127,13 @@ type StageName = string const ( ForwardSync StageName = "ForwardSync" - CatchUpBlocks StageName = "CatchUpBlocks" + ChainTipSync StageName = "ChainTipSync" ForkChoice StageName = "ForkChoice" - ListenForForks StageName = "ListenForForks" CleanupAndPruning StageName = "CleanupAndPruning" SleepForSlot StageName = "SleepForSlot" DownloadHistoricalBlocks StageName = "DownloadHistoricalBlocks" ) -const ( - minPeersForDownload = uint64(4) -) - func MetaCatchingUp(args Args) StageName { if !args.hasDownloaded { return DownloadHistoricalBlocks @@ -154,12 +142,25 @@ func MetaCatchingUp(args Args) StageName { return ForwardSync } if args.seenSlot < args.targetSlot { - return CatchUpBlocks + return ChainTipSync } return "" } +func processBlock(ctx context.Context, cfg *Cfg, db kv.RwDB, block *cltypes.SignedBeaconBlock, newPayload, fullValidation, checkDataAvaiability bool) error { + if err := db.Update(ctx, func(tx kv.RwTx) error { + if err := beacon_indicies.WriteHighestFinalized(tx, cfg.forkChoice.FinalizedSlot()); err != nil { + return err + } + return beacon_indicies.WriteBeaconBlockAndIndicies(ctx, tx, block, false) + }); err != nil { + return err + } + + return cfg.forkChoice.OnBlock(ctx, block, newPayload, fullValidation, checkDataAvaiability) +} + /* this graph describes the state transitions for cl @@ -168,42 +169,29 @@ digraph { compound=true; subgraph cluster_0 { label="syncing"; - WaitForPeers; - CatchUpBlocks; + DownloadHistoricalBlocks; ForwardSync; } - subgraph cluster_3 { - label="if behind (transition function)" - MetaCatchingUp; - } + subgraph cluster_1 { label="head"; - ForkChoice; CleanupAndPruning; ListenForForks; SleepForSlot; + ChainTipSync; ForkChoice; CleanupAndPruning; SleepForSlot; } - MetaCatchingUp -> WaitForPeers - MetaCatchingUp -> ForwardSync - MetaCatchingUp -> CatchUpBlocks - - WaitForPeers -> MetaCatchingUp[lhead=cluster_3] - ForwardSync -> MetaCatchingUp[lhead=cluster_3] - CatchUpBlocks -> MetaCatchingUp[lhead=cluster_3] - CleanupAndPruning -> MetaCatchingUp[lhead=cluster_3] - ListenForForks -> MetaCatchingUp[lhead=cluster_3] - ForkChoice -> MetaCatchingUp[lhead=cluster_3] - CatchUpBlocks -> ForkChoice - ForkChoice -> ListenForForks + DownloadHistoricalBlocks -> ForwardSync + ForwardSync -> ChainTipSync + ChainTipSync -> ForkChoice; + ForkChoice -> CleanupAndPruning; + ForkChoice -> NotInSync + NotInSync -> ForwardSync + SleepForSlot -> ChainTipSync - SleepForSlot -> WaitForPeers - - ListenForForks -> ForkChoice - ListenForForks -> SleepForSlot - ListenForForks -> CleanupAndPruning CleanupAndPruning -> SleepForSlot } + */ // ConsensusClStages creates a stage loop container to be used to run caplin @@ -211,25 +199,6 @@ func ConsensusClStages(ctx context.Context, cfg *Cfg, ) *clstages.StageGraph[*Cfg, Args] { - rpcSource := persistence.NewBeaconRpcSource(cfg.rpc) - processBlock := func(db kv.RwDB, block *cltypes.SignedBeaconBlock, newPayload, fullValidation, checkDataAvaiability bool) error { - if err := db.Update(ctx, func(tx kv.RwTx) error { - if err := beacon_indicies.WriteHighestFinalized(tx, cfg.forkChoice.FinalizedSlot()); err != nil { - return err - } - return beacon_indicies.WriteBeaconBlockAndIndicies(ctx, tx, block, false) - }); err != nil { - return err - } - - return 
cfg.forkChoice.OnBlock(ctx, block, newPayload, fullValidation, checkDataAvaiability) - } - - // TODO: this is an ugly hack, but it works! Basically, we want shared state in the clstages. - // Probably the correct long term solution is to create a third generic parameter that defines shared state - // but for now, all it would have are the two gossip sources and the forkChoicesSinceReorg, so i don't think its worth it (yet). - shouldForkChoiceSinceReorg := false - // clstages run in a single thread - so we don't need to worry about any synchronization. return &clstages.StageGraph[*Cfg, Args]{ // the ArgsFunc is run after every stage. It is passed into the transition function, and the same args are passed into the next stage. @@ -255,22 +224,27 @@ func ConsensusClStages(ctx context.Context, if x := MetaCatchingUp(args); x != "" { return x } - return CatchUpBlocks + return ChainTipSync }, ActionFunc: func(ctx context.Context, logger log.Logger, cfg *Cfg, args Args) error { + if err := saveHeadStateOnDiskIfNeeded(cfg, cfg.state); err != nil { + return err + } + // We only download historical blocks once cfg.hasDownloaded = true startingRoot, err := cfg.state.BlockRoot() if err != nil { return err } - // This stage is special so use context.Background() TODO(Giulio2002): make the context be passed in + startingSlot := cfg.state.LatestBlockHeader().Slot downloader := network2.NewBackwardBeaconDownloader(ctx, cfg.rpc, cfg.executionClient, cfg.indiciesDB) - if err := SpawnStageHistoryDownload(StageHistoryReconstruction(downloader, cfg.antiquary, cfg.sn, cfg.indiciesDB, cfg.executionClient, cfg.beaconCfg, cfg.backfilling, cfg.blobBackfilling, false, startingRoot, startingSlot, cfg.tmpdir, 600*time.Millisecond, cfg.blockCollector, cfg.blockReader, cfg.blobStore, logger), context.Background(), logger); err != nil { + if err := SpawnStageHistoryDownload(StageHistoryReconstruction(downloader, cfg.antiquary, cfg.sn, cfg.indiciesDB, cfg.executionClient, cfg.beaconCfg, cfg.backfilling, cfg.blobBackfilling, false, startingRoot, startingSlot, cfg.dirs.Tmp, 600*time.Millisecond, cfg.blockCollector, cfg.blockReader, cfg.blobStore, logger), context.Background(), logger); err != nil { cfg.hasDownloaded = false return err } + cfg.state = nil // Release the state return nil }, }, @@ -280,122 +254,11 @@ func ConsensusClStages(ctx context.Context, if x := MetaCatchingUp(args); x != "" { return x } - return CatchUpBlocks - }, - ActionFunc: func(ctx context.Context, logger log.Logger, cfg *Cfg, args Args) error { - shouldInsert := cfg.executionClient != nil && cfg.executionClient.SupportInsertion() - - downloader := network2.NewForwardBeaconDownloader(ctx, cfg.rpc) - finalizedCheckpoint := cfg.forkChoice.FinalizedCheckpoint() - var currentSlot atomic.Uint64 - currentSlot.Store(finalizedCheckpoint.Epoch() * cfg.beaconCfg.SlotsPerEpoch) - secsPerLog := 30 - logTicker := time.NewTicker(time.Duration(secsPerLog) * time.Second) - // Always start from the current finalized checkpoint - downloader.SetHighestProcessedRoot(finalizedCheckpoint.BlockRoot()) - downloader.SetHighestProcessedSlot(currentSlot.Load()) - downloader.SetProcessFunction(func(highestSlotProcessed uint64, highestBlockRootProcessed common.Hash, blocks []*cltypes.SignedBeaconBlock) (newHighestSlotProcessed uint64, newHighestBlockRootProcessed common.Hash, err error) { - initialHighestSlotProcessed := highestSlotProcessed - initialHighestBlockRootProcessed := highestBlockRootProcessed - sort.Slice(blocks, func(i, j int) bool { - return blocks[i].Block.Slot < 
blocks[j].Block.Slot - }) - - for i, block := range blocks { - blockRoot, err := block.Block.HashSSZ() - if err != nil { - logger.Warn("failed to hash block", "err", err) - blocks = blocks[i:] - break - } - - if err := processBlock(cfg.indiciesDB, block, false, true, false); err != nil { - log.Warn("bad blocks segment received", "err", err) - blocks = blocks[i:] - break - } - - st, err := cfg.forkChoice.GetStateAtBlockRoot(blockRoot, false) - if err == nil && block.Block.Slot%(cfg.beaconCfg.SlotsPerEpoch*2) == 0 && st != nil { - if err := cfg.forkChoice.DumpBeaconStateOnDisk(st); err != nil { - logger.Warn("failed to dump state", "err", err) - } - } - if shouldInsert && block.Version() >= clparams.BellatrixVersion { - if err := cfg.blockCollector.AddBlock(block.Block); err != nil { - logger.Warn("failed to add block to collector", "err", err) - blocks = blocks[i:] - break - } - } - - if highestSlotProcessed < block.Block.Slot { - currentSlot.Store(block.Block.Slot) - highestSlotProcessed = block.Block.Slot - highestBlockRootProcessed, err = block.Block.HashSSZ() - if err != nil { - blocks = blocks[i:] - logger.Warn("failed to hash block", "err", err) - break - } - } - } - // Do the DA now, first of all see what blobs to retrieve - ids, err := network2.BlobsIdentifiersFromBlocks(blocks) - if err != nil { - logger.Warn("failed to get blob identifiers", "err", err) - return initialHighestSlotProcessed, initialHighestBlockRootProcessed, err - } - if ids.Len() == 0 { // no blobs, no DA. - return highestSlotProcessed, highestBlockRootProcessed, nil - } - blobs, err := network2.RequestBlobsFrantically(ctx, cfg.rpc, ids) - if err != nil { - logger.Warn("failed to get blobs", "err", err) - return initialHighestSlotProcessed, initialHighestBlockRootProcessed, err - } - var highestProcessed, inserted uint64 - if highestProcessed, inserted, err = blob_storage.VerifyAgainstIdentifiersAndInsertIntoTheBlobStore(ctx, cfg.blobStore, ids, blobs.Responses, nil); err != nil { - logger.Warn("failed to get verify blobs", "err", err) - cfg.rpc.BanPeer(blobs.Peer) - return initialHighestSlotProcessed, initialHighestBlockRootProcessed, err - } - if inserted == uint64(ids.Len()) { - return highestSlotProcessed, highestBlockRootProcessed, nil - } - - if highestProcessed <= initialHighestSlotProcessed { - return initialHighestSlotProcessed, initialHighestBlockRootProcessed, nil - } - return highestProcessed - 1, highestBlockRootProcessed, err - }) - chainTipSlot := cfg.ethClock.GetCurrentSlot() - logger.Info("[Caplin] Forward Sync", "from", currentSlot.Load(), "to", chainTipSlot) - prevProgress := currentSlot.Load() - for downloader.GetHighestProcessedSlot() < chainTipSlot { - downloader.RequestMore(ctx) - - select { - case <-ctx.Done(): - return ctx.Err() - case <-logTicker.C: - progressMade := chainTipSlot - currentSlot.Load() - distFromChainTip := time.Duration(progressMade*cfg.beaconCfg.SecondsPerSlot) * time.Second - timeProgress := currentSlot.Load() - prevProgress - estimatedTimeRemaining := 999 * time.Hour - if timeProgress > 0 { - estimatedTimeRemaining = time.Duration(float64(progressMade)/(float64(currentSlot.Load()-prevProgress)/float64(secsPerLog))) * time.Second - } - prevProgress = currentSlot.Load() - logger.Info("[Caplin] Forward Sync", "progress", currentSlot.Load(), "distance-from-chain-tip", distFromChainTip, "estimated-time-remaining", estimatedTimeRemaining) - default: - } - } - - return nil + return ChainTipSync }, + ActionFunc: forwardSync, }, - CatchUpBlocks: { + ChainTipSync: { Description: 
`if we are within the epoch but not at head, we run catchupblocks`, TransitionFunc: func(cfg *Cfg, args Args, err error) string { if x := MetaCatchingUp(args); x != "" { @@ -403,177 +266,7 @@ func ConsensusClStages(ctx context.Context, } return ForkChoice }, - ActionFunc: func(ctx context.Context, logger log.Logger, cfg *Cfg, args Args) error { - totalRequest := args.targetSlot - args.seenSlot - readyTimeout := time.NewTimer(10 * time.Second) - readyInterval := time.NewTimer(50 * time.Millisecond) - defer readyTimeout.Stop() - defer readyInterval.Stop() - if cfg.executionClient != nil { - ReadyLoop: - for { // if the client does not support insertion, then skip - select { - case <-ctx.Done(): - return ctx.Err() - case <-readyTimeout.C: - return nil - case <-readyInterval.C: - ready, err := cfg.executionClient.Ready(ctx) - if err != nil { - return err - } - if ready { - break ReadyLoop - } - } - } - } - - tmpDB := memdb.New(cfg.tmpdir) - defer tmpDB.Close() - tx, err := tmpDB.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - - if cfg.executionClient != nil && cfg.executionClient.SupportInsertion() { - if err := cfg.blockCollector.Flush(context.Background()); err != nil { - return err - } - } - tx.Rollback() - - logger.Debug("waiting for blocks...", - "seenSlot", args.seenSlot, - "targetSlot", args.targetSlot, - "requestedSlots", totalRequest, - ) - respCh := make(chan *peers.PeeredObject[[]*cltypes.SignedBeaconBlock], 1024) - errCh := make(chan error) - - // 15 seconds is a good timeout for this - ctx, cn := context.WithTimeout(ctx, 25*time.Second) - defer cn() - - go func() { - select { - case <-time.After((time.Duration(cfg.beaconCfg.SecondsPerSlot) * time.Second) / 2): - case <-ctx.Done(): - return - } - - for { - var blocks *peers.PeeredObject[[]*cltypes.SignedBeaconBlock] - var err error - from := cfg.forkChoice.HighestSeen() - 2 - currentSlot := cfg.ethClock.GetCurrentSlot() - count := (currentSlot - from) + 4 - if cfg.forkChoice.HighestSeen() >= args.targetSlot { - return - } - blocks, err = rpcSource.GetRange(ctx, nil, from, count) - if err != nil { - errCh <- err - return - } - if len(blocks.Data) == 0 { - continue - } - ids, err := network2.BlobsIdentifiersFromBlocks(blocks.Data) - if err != nil { - errCh <- err - return - } - var inserted uint64 - - for inserted != uint64(ids.Len()) { - select { - case <-ctx.Done(): - return - default: - } - blobs, err := network2.RequestBlobsFrantically(ctx, cfg.rpc, ids) - if err != nil { - errCh <- err - return - } - if _, inserted, err = blob_storage.VerifyAgainstIdentifiersAndInsertIntoTheBlobStore(ctx, cfg.blobStore, ids, blobs.Responses, nil); err != nil { - errCh <- err - return - } - } - select { - case respCh <- blocks: - case <-ctx.Done(): - return - case <-time.After(time.Second): // take a smol pause - } - } - }() - - logTimer := time.NewTicker(30 * time.Second) - defer logTimer.Stop() - // blocks may be scheduled for later execution outside of the catch-up flow - presenceTicker := time.NewTicker(20 * time.Millisecond) - defer presenceTicker.Stop() - seenBlockRoots := make(map[common.Hash]struct{}) - MainLoop: - for { - select { - case <-presenceTicker.C: - if cfg.forkChoice.HighestSeen() >= args.targetSlot { - break MainLoop - } - case <-ctx.Done(): - return errors.New("timeout waiting for blocks") - case err := <-errCh: - return err - case blocks := <-respCh: - for _, block := range blocks.Data { - - if _, ok := cfg.forkChoice.GetHeader(block.Block.ParentRoot); !ok { - time.Sleep(time.Millisecond) - continue - 
} - // we can ignore this error because the block would not process if the hashssz failed - blockRoot, _ := block.Block.HashSSZ() - if _, ok := cfg.forkChoice.GetHeader(blockRoot); ok { - if block.Block.Slot >= args.targetSlot { - break MainLoop - } - continue - } - if _, ok := seenBlockRoots[blockRoot]; ok { - continue - } - seenBlockRoots[blockRoot] = struct{}{} - if err := processBlock(cfg.indiciesDB, block, true, true, true); err != nil { - log.Debug("bad blocks segment received", "err", err) - continue - } - - if err := tx.Commit(); err != nil { - return err - } - - // publish block to event handler - cfg.emitter.Publish("block", map[string]any{ - "slot": strconv.Itoa(int(block.Block.Slot)), - "block": common.Hash(blockRoot), - "execution_optimistic": false, // TODO: i don't know what to put here. i see other places doing false, leaving flase for now - }) - if block.Block.Slot >= args.targetSlot { - break MainLoop - } - } - case <-logTimer.C: - logger.Info("[Caplin] Progress", "progress", cfg.forkChoice.HighestSeen(), "from", args.seenSlot, "to", args.targetSlot) - } - } - - return nil - }, + ActionFunc: chainTipSync, }, ForkChoice: { Description: `fork choice stage. We will send all fork choise things here @@ -582,175 +275,9 @@ func ConsensusClStages(ctx context.Context, if x := MetaCatchingUp(args); x != "" { return x } - return ListenForForks - }, - ActionFunc: func(ctx context.Context, logger log.Logger, cfg *Cfg, args Args) error { - - // Now check the head - headRoot, headSlot, err := cfg.forkChoice.GetHead() - if err != nil { - return fmt.Errorf("failed to get head: %w", err) - } - - // Do forkchoice if possible - if cfg.forkChoice.Engine() != nil { - finalizedCheckpoint := cfg.forkChoice.FinalizedCheckpoint() - logger.Debug("Caplin is sending forkchoice") - // Run forkchoice - if _, err := cfg.forkChoice.Engine().ForkChoiceUpdate( - ctx, - cfg.forkChoice.GetEth1Hash(finalizedCheckpoint.BlockRoot()), - cfg.forkChoice.GetEth1Hash(headRoot), nil, - ); err != nil { - logger.Warn("Could not set forkchoice", "err", err) - return err - } - } - if err := cfg.rpc.SetStatus(cfg.forkChoice.FinalizedCheckpoint().BlockRoot(), - cfg.forkChoice.FinalizedCheckpoint().Epoch(), - headRoot, headSlot); err != nil { - logger.Warn("Could not set status", "err", err) - } - tx, err := cfg.indiciesDB.BeginRw(ctx) - if err != nil { - return fmt.Errorf("failed to begin transaction: %w", err) - } - defer tx.Rollback() - - type canonicalEntry struct { - slot uint64 - root common.Hash - } - - currentRoot := headRoot - currentSlot := headSlot - currentCanonical, err := beacon_indicies.ReadCanonicalBlockRoot(tx, currentSlot) - if err != nil { - return fmt.Errorf("failed to read canonical block root: %w", err) - } - reconnectionRoots := []canonicalEntry{{currentSlot, currentRoot}} - - for currentRoot != currentCanonical { - var newFoundSlot *uint64 - - if currentRoot, err = beacon_indicies.ReadParentBlockRoot(ctx, tx, currentRoot); err != nil { - return fmt.Errorf("failed to read parent block root: %w", err) - } - if newFoundSlot, err = beacon_indicies.ReadBlockSlotByBlockRoot(tx, currentRoot); err != nil { - return fmt.Errorf("failed to read block slot by block root: %w", err) - } - if newFoundSlot == nil { - break - } - currentSlot = *newFoundSlot - currentCanonical, err = beacon_indicies.ReadCanonicalBlockRoot(tx, currentSlot) - if err != nil { - return fmt.Errorf("failed to read canonical block root: %w", err) - } - reconnectionRoots = append(reconnectionRoots, canonicalEntry{currentSlot, currentRoot}) - 
} - if err := beacon_indicies.TruncateCanonicalChain(ctx, tx, currentSlot); err != nil { - return fmt.Errorf("failed to truncate canonical chain: %w", err) - } - for i := len(reconnectionRoots) - 1; i >= 0; i-- { - if err := beacon_indicies.MarkRootCanonical(ctx, tx, reconnectionRoots[i].slot, reconnectionRoots[i].root); err != nil { - return fmt.Errorf("failed to mark root canonical: %w", err) - } - } - if err := beacon_indicies.MarkRootCanonical(ctx, tx, headSlot, headRoot); err != nil { - return fmt.Errorf("failed to mark root canonical: %w", err) - } - - // Increment validator set - headState, err := cfg.forkChoice.GetStateAtBlockRoot(headRoot, false) - if err != nil { - return fmt.Errorf("failed to get state at block root: %w", err) - } - cfg.forkChoice.SetSynced(true) - if err := cfg.syncedData.OnHeadState(headState); err != nil { - return fmt.Errorf("failed to set head state: %w", err) - } - start := time.Now() - - copiedHeadState := cfg.syncedData.HeadState() // it is just copied, so we can use it without worrying about concurrency - - if _, err = cfg.attestationDataProducer.ProduceAndCacheAttestationData(copiedHeadState, copiedHeadState.Slot(), 0); err != nil { - logger.Warn("failed to produce and cache attestation data", "err", err) - } - - // Incement some stuff here - preverifiedValidators := cfg.forkChoice.PreverifiedValidator(headState.FinalizedCheckpoint().BlockRoot()) - preverifiedHistoricalSummary := cfg.forkChoice.PreverifiedHistoricalSummaries(headState.FinalizedCheckpoint().BlockRoot()) - preverifiedHistoricalRoots := cfg.forkChoice.PreverifiedHistoricalRoots(headState.FinalizedCheckpoint().BlockRoot()) - if err := state_accessors.IncrementPublicKeyTable(tx, headState, preverifiedValidators); err != nil { - return fmt.Errorf("failed to increment public key table: %w", err) - } - if err := state_accessors.IncrementHistoricalSummariesTable(tx, headState, preverifiedHistoricalSummary); err != nil { - return fmt.Errorf("failed to increment historical summaries table: %w", err) - } - if err := state_accessors.IncrementHistoricalRootsTable(tx, headState, preverifiedHistoricalRoots); err != nil { - return fmt.Errorf("failed to increment historical roots table: %w", err) - } - log.Debug("Incremented state history", "elapsed", time.Since(start), "preverifiedValidators", preverifiedValidators) - - stateRoot, err := headState.HashSSZ() - if err != nil { - return fmt.Errorf("failed to hash ssz: %w", err) - } - - if err := cfg.forkChoice.DumpBeaconStateOnDisk(headState); err != nil { - return fmt.Errorf("failed to dump beacon state on disk: %w", err) - } - - headEpoch := headSlot / cfg.beaconCfg.SlotsPerEpoch - previous_duty_dependent_root, err := headState.GetBlockRootAtSlot((headEpoch-1)*cfg.beaconCfg.SlotsPerEpoch - 1) - if err != nil { - return fmt.Errorf("failed to get block root at slot for previous_duty_dependent_root: %w", err) - } - current_duty_dependent_root, err := headState.GetBlockRootAtSlot(headEpoch*cfg.beaconCfg.SlotsPerEpoch - 1) - if err != nil { - return fmt.Errorf("failed to get block root at slot for current_duty_dependent_root: %w", err) - } - // emit the head event - cfg.emitter.Publish("head", map[string]any{ - "slot": strconv.Itoa(int(headSlot)), - "block": headRoot, - "state": common.Hash(stateRoot), - "epoch_transition": true, - "previous_duty_dependent_root": previous_duty_dependent_root, - "current_duty_dependent_root": current_duty_dependent_root, - "execution_optimistic": false, - }) - - var m runtime.MemStats - dbg.ReadMemStats(&m) - 
logger.Debug("Imported chain segment", - "hash", headRoot, "slot", headSlot, - "alloc", common.ByteCount(m.Alloc), - "sys", common.ByteCount(m.Sys)) - if err := tx.Commit(); err != nil { - return err - } - return nil - }, - }, - ListenForForks: { - TransitionFunc: func(cfg *Cfg, args Args, err error) string { - defer func() { - shouldForkChoiceSinceReorg = false - }() - if x := MetaCatchingUp(args); x != "" { - return x - } - if shouldForkChoiceSinceReorg { - return ForkChoice - } - return CleanupAndPruning - - }, - ActionFunc: func(ctx context.Context, logger log.Logger, cfg *Cfg, args Args) error { - return nil // Remove completely in a subsequent refactor + return SleepForSlot }, + ActionFunc: doForkchoiceRoutine, }, CleanupAndPruning: { Description: `cleanup and pruning is done here`, @@ -760,26 +287,7 @@ func ConsensusClStages(ctx context.Context, } return SleepForSlot }, - ActionFunc: func(ctx context.Context, logger log.Logger, cfg *Cfg, args Args) error { - tx, err := cfg.indiciesDB.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - pruneDistance := uint64(1_000_000) - - if !cfg.backfilling { - if err := beacon_indicies.PruneBlocks(ctx, tx, args.seenSlot-pruneDistance); err != nil { - return err - } - } - - if err := tx.Commit(); err != nil { - return err - } - return cfg.blobStore.Prune() - - }, + ActionFunc: cleanupAndPruning, }, SleepForSlot: { Description: `sleep until the next slot`, @@ -787,14 +295,12 @@ func ConsensusClStages(ctx context.Context, if x := MetaCatchingUp(args); x != "" { return x } - return ListenForForks + return ChainTipSync }, ActionFunc: func(ctx context.Context, logger log.Logger, cfg *Cfg, args Args) error { nextSlot := args.seenSlot + 1 nextSlotTime := cfg.ethClock.GetSlotTime(nextSlot) - nextSlotDur := nextSlotTime.Sub(time.Now()) - logger.Debug("sleeping until next slot", "slot", nextSlot, "time", nextSlotTime, "dur", nextSlotDur) - time.Sleep(nextSlotDur) + time.Sleep(time.Until(nextSlotTime)) return nil }, }, diff --git a/cl/phase1/stages/forkchoice.go b/cl/phase1/stages/forkchoice.go new file mode 100644 index 00000000000..1310c1da14f --- /dev/null +++ b/cl/phase1/stages/forkchoice.go @@ -0,0 +1,271 @@ +package stages + +import ( + "context" + "fmt" + "os" + "runtime" + + "github.com/erigontech/erigon-lib/common" + "github.com/erigontech/erigon-lib/common/dbg" + "github.com/erigontech/erigon-lib/kv" + "github.com/erigontech/erigon-lib/log/v3" + "github.com/erigontech/erigon/cl/beacon/beaconevents" + "github.com/erigontech/erigon/cl/clparams" + "github.com/erigontech/erigon/cl/persistence/beacon_indicies" + state_accessors "github.com/erigontech/erigon/cl/persistence/state" + "github.com/erigontech/erigon/cl/phase1/core/state" + "github.com/erigontech/erigon/cl/utils" +) + +// computeAndNotifyServicesOfNewForkChoice calculates the new head of the fork choice and notifies relevant services. +// It updates the fork choice if possible and sets the status in the RPC. It returns the head slot, head root, and any error encountered. 
+func computeAndNotifyServicesOfNewForkChoice(ctx context.Context, logger log.Logger, cfg *Cfg) (headSlot uint64, headRoot common.Hash, err error) { + // Get the current head of the fork choice + headRoot, headSlot, err = cfg.forkChoice.GetHead() + if err != nil { + err = fmt.Errorf("failed to get head: %w", err) + return + } + + // Perform fork choice update if the engine is available + if cfg.forkChoice.Engine() != nil { + finalizedCheckpoint := cfg.forkChoice.FinalizedCheckpoint() + logger.Debug("Caplin is sending forkchoice") + + // Run fork choice update with finalized checkpoint and head + if _, err = cfg.forkChoice.Engine().ForkChoiceUpdate( + ctx, + cfg.forkChoice.GetEth1Hash(finalizedCheckpoint.BlockRoot()), + cfg.forkChoice.GetEth1Hash(headRoot), nil, + ); err != nil { + err = fmt.Errorf("failed to run forkchoice: %w", err) + return + } + } + + // Set the status in the RPC + if err2 := cfg.rpc.SetStatus( + cfg.forkChoice.FinalizedCheckpoint().BlockRoot(), + cfg.forkChoice.FinalizedCheckpoint().Epoch(), + headRoot, headSlot); err2 != nil { + logger.Warn("Could not set status", "err", err2) + } + + return +} + +// updateCanonicalChainInTheDatabase updates the canonical chain in the database by marking the given head slot and root as canonical. +// It traces back through parent block roots to find the common ancestor with the existing canonical chain, truncates the chain, +// and then marks the new chain segments as canonical. +func updateCanonicalChainInTheDatabase(ctx context.Context, tx kv.RwTx, headSlot uint64, headRoot common.Hash) error { + type canonicalEntry struct { + slot uint64 + root common.Hash + } + + currentRoot := headRoot + currentSlot := headSlot + // Read the current canonical block root for the given slot + currentCanonical, err := beacon_indicies.ReadCanonicalBlockRoot(tx, currentSlot) + if err != nil { + return fmt.Errorf("failed to read canonical block root: %w", err) + } + + // List of new canonical chain entries + reconnectionRoots := []canonicalEntry{{currentSlot, currentRoot}} + + // Trace back through the parent block roots until the current root matches the canonical root + for currentRoot != currentCanonical { + var newFoundSlot *uint64 + + // Read the parent block root + if currentRoot, err = beacon_indicies.ReadParentBlockRoot(ctx, tx, currentRoot); err != nil { + return fmt.Errorf("failed to read parent block root: %w", err) + } + + // Read the slot for the current block root + if newFoundSlot, err = beacon_indicies.ReadBlockSlotByBlockRoot(tx, currentRoot); err != nil { + return fmt.Errorf("failed to read block slot by block root: %w", err) + } + if newFoundSlot == nil { + break + } + + currentSlot = *newFoundSlot + + // Read the canonical block root for the new slot + currentCanonical, err = beacon_indicies.ReadCanonicalBlockRoot(tx, currentSlot) + if err != nil { + return fmt.Errorf("failed to read canonical block root: %w", err) + } + + // Append the current slot and root to the list of reconnection roots + reconnectionRoots = append(reconnectionRoots, canonicalEntry{currentSlot, currentRoot}) + } + + // Truncate the canonical chain at the current slot + if err := beacon_indicies.TruncateCanonicalChain(ctx, tx, currentSlot); err != nil { + return fmt.Errorf("failed to truncate canonical chain: %w", err) + } + + // Mark the new canonical chain segments in reverse order + for i := len(reconnectionRoots) - 1; i >= 0; i-- { + if err := beacon_indicies.MarkRootCanonical(ctx, tx, reconnectionRoots[i].slot, reconnectionRoots[i].root); err != nil { + 
return fmt.Errorf("failed to mark root canonical: %w", err) + } + } + + // Mark the head slot and root as canonical + if err := beacon_indicies.MarkRootCanonical(ctx, tx, headSlot, headRoot); err != nil { + return fmt.Errorf("failed to mark root canonical: %w", err) + } + + return nil +} + +// runIndexingRoutines runs the indexing routines for the database. +func runIndexingRoutines(ctx context.Context, tx kv.RwTx, cfg *Cfg, headState *state.CachingBeaconState) error { + preverifiedValidators := cfg.forkChoice.PreverifiedValidator(headState.FinalizedCheckpoint().BlockRoot()) + preverifiedHistoricalSummary := cfg.forkChoice.PreverifiedHistoricalSummaries(headState.FinalizedCheckpoint().BlockRoot()) + preverifiedHistoricalRoots := cfg.forkChoice.PreverifiedHistoricalRoots(headState.FinalizedCheckpoint().BlockRoot()) + + if err := state_accessors.IncrementPublicKeyTable(tx, headState, preverifiedValidators); err != nil { + return fmt.Errorf("failed to increment public key table: %w", err) + } + if err := state_accessors.IncrementHistoricalSummariesTable(tx, headState, preverifiedHistoricalSummary); err != nil { + return fmt.Errorf("failed to increment historical summaries table: %w", err) + } + if err := state_accessors.IncrementHistoricalRootsTable(tx, headState, preverifiedHistoricalRoots); err != nil { + return fmt.Errorf("failed to increment historical roots table: %w", err) + } + return nil +} + +// emitHeadEvent emits the head event with the given head slot, head root, and head state. +func emitHeadEvent(cfg *Cfg, headSlot uint64, headRoot common.Hash, headState *state.CachingBeaconState) error { + headEpoch := headSlot / cfg.beaconCfg.SlotsPerEpoch + previous_duty_dependent_root, err := headState.GetBlockRootAtSlot((headEpoch-1)*cfg.beaconCfg.SlotsPerEpoch - 1) + if err != nil { + return fmt.Errorf("failed to get block root at slot for previous_duty_dependent_root: %w", err) + } + current_duty_dependent_root, err := headState.GetBlockRootAtSlot(headEpoch*cfg.beaconCfg.SlotsPerEpoch - 1) + if err != nil { + return fmt.Errorf("failed to get block root at slot for current_duty_dependent_root: %w", err) + } + + stateRoot, err := headState.HashSSZ() + if err != nil { + return fmt.Errorf("failed to hash ssz: %w", err) + } + // emit the head event + cfg.emitter.State().SendHead(&beaconevents.HeadData{ + Slot: headSlot, + Block: headRoot, + State: stateRoot, + EpochTransition: true, + PreviousDutyDependentRoot: previous_duty_dependent_root, + CurrentDutyDependentRoot: current_duty_dependent_root, + ExecutionOptimistic: false, + }) + return nil +} + +// saveHeadStateOnDiskIfNeeded saves the head state on disk for eventual node restarts without checkpoint sync. 
+func saveHeadStateOnDiskIfNeeded(cfg *Cfg, headState *state.CachingBeaconState) error { + epochFrequency := uint64(5) + if headState.Slot()%(cfg.beaconCfg.SlotsPerEpoch*epochFrequency) == 0 { + dat, err := utils.EncodeSSZSnappy(headState) + if err != nil { + return fmt.Errorf("failed to encode ssz snappy: %w", err) + } + // Write the head state to disk + fileToWriteTo := fmt.Sprintf("%s/%s", cfg.dirs.CaplinLatest, clparams.LatestStateFileName) + + // Create the directory if it doesn't exist + err = os.MkdirAll(cfg.dirs.CaplinLatest, 0755) + if err != nil { + return fmt.Errorf("failed to create directory: %w", err) + } + + // Write the data to the file + err = os.WriteFile(fileToWriteTo, dat, 0644) + if err != nil { + return fmt.Errorf("failed to write head state to disk: %w", err) + } + } + return nil +} + +// postForkchoiceOperations performs the post-fork-choice operations, such as updating the head state and producing and caching attestation data. +// These operations can take as long as they need to run, since by now we are already synced. +func postForkchoiceOperations(ctx context.Context, tx kv.RwTx, logger log.Logger, cfg *Cfg, headSlot uint64, headRoot common.Hash) error { + // Retrieve the head state + headState, err := cfg.forkChoice.GetStateAtBlockRoot(headRoot, false) + if err != nil { + return fmt.Errorf("failed to get state at block root: %w", err) + } + cfg.forkChoice.SetSynced(true) // Now we are synced + // Update the head state with the new head state + if err := cfg.syncedData.OnHeadState(headState); err != nil { + return fmt.Errorf("failed to set head state: %w", err) + } + headState = cfg.syncedData.HeadState() // headState is a copy of the head state here. + + // Produce and cache attestation data for validator node (this is not an expensive operation so we can do it for all nodes) + if _, err = cfg.attestationDataProducer.ProduceAndCacheAttestationData(headState, headState.Slot(), 0); err != nil { + logger.Warn("failed to produce and cache attestation data", "err", err) + } + + // Run indexing routines for the database + if err := runIndexingRoutines(ctx, tx, cfg, headState); err != nil { + return fmt.Errorf("failed to run indexing routines: %w", err) + } + + // Dump the head state on disk for ease of chain reorgs + if err := cfg.forkChoice.DumpBeaconStateOnDisk(headState); err != nil { + return fmt.Errorf("failed to dump beacon state on disk: %w", err) + } + + // Save the head state on disk for eventual node restarts without checkpoint sync + if err := saveHeadStateOnDiskIfNeeded(cfg, headState); err != nil { + return fmt.Errorf("failed to save head state on disk: %w", err) + } + // Lastly, emit the head event + return emitHeadEvent(cfg, headSlot, headRoot, headState) +} + +// doForkchoiceRoutine performs the fork choice routine: it computes the new fork choice, updates the canonical chain in the database, +// and runs the post-fork-choice operations. +func doForkchoiceRoutine(ctx context.Context, logger log.Logger, cfg *Cfg, args Args) error { + var ( + headSlot uint64 + headRoot common.Hash + err error + ) + if headSlot, headRoot, err = computeAndNotifyServicesOfNewForkChoice(ctx, logger, cfg); err != nil { + return fmt.Errorf("failed to compute and notify services of new fork choice: %w", err) + } + + tx, err := cfg.indiciesDB.BeginRw(ctx) + if err != nil { + return fmt.Errorf("failed to begin transaction: %w", err) + } + defer tx.Rollback() + + if err := updateCanonicalChainInTheDatabase(ctx, tx, headSlot, headRoot); err != nil { + return fmt.Errorf("failed to update canonical chain in the database: %w", err) + } + + if err := postForkchoiceOperations(ctx, tx, logger, cfg, headSlot, headRoot); err != nil { + return fmt.Errorf("failed to post forkchoice operations: %w", err) + } + + var m runtime.MemStats + dbg.ReadMemStats(&m) + logger.Debug("Imported chain segment", + "hash", headRoot, "slot", headSlot, + "alloc", common.ByteCount(m.Alloc), + "sys", common.ByteCount(m.Sys)) + + return tx.Commit() +} diff --git a/cl/phase1/stages/forward_sync.go b/cl/phase1/stages/forward_sync.go new file mode 100644 index 00000000000..163e97b4722 --- /dev/null +++ b/cl/phase1/stages/forward_sync.go @@ -0,0 +1,223 @@ +package stages + +import ( + "context" + "fmt" + "sort" + "sync/atomic" + "time" + + "github.com/erigontech/erigon-lib/common" + "github.com/erigontech/erigon-lib/log/v3" + "github.com/erigontech/erigon/cl/clparams" + "github.com/erigontech/erigon/cl/cltypes" + "github.com/erigontech/erigon/cl/cltypes/solid" + "github.com/erigontech/erigon/cl/persistence/blob_storage" + "github.com/erigontech/erigon/cl/phase1/core/state" + network2 "github.com/erigontech/erigon/cl/phase1/network" +) + +// shouldProcessBlobs checks if any block in the given list of blocks +// has a version greater than or equal to DenebVersion and contains BlobKzgCommitments. +func shouldProcessBlobs(blocks []*cltypes.SignedBeaconBlock) bool { + for _, block := range blocks { + // Check if block version is greater than or equal to DenebVersion and contains BlobKzgCommitments + if block.Version() >= clparams.DenebVersion && block.Block.Body.BlobKzgCommitments.Len() > 0 { + return true + } + } + return false +} + +// downloadAndProcessEip4844DA handles downloading and processing of EIP-4844 data availability blobs. +// It takes the highest slot processed and a list of signed beacon blocks as input. +// It returns the highest blob slot processed and an error if any.
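+//
+// Illustrative caller, mirroring the process function that forwardSync wires up
+// below: an error or a partial blob download reports no forward progress, so the
+// same slot range is retried rather than skipped:
+//
+//	highestBlobSlotProcessed, err := downloadAndProcessEip4844DA(ctx, logger, cfg, initialHighestSlotProcessed, blocks)
+//	if err != nil {
+//		return initialHighestSlotProcessed, err
+//	}
+//	if highestBlobSlotProcessed <= initialHighestSlotProcessed {
+//		return initialHighestSlotProcessed, nil // no forward progress on blobs
+//	}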
+func downloadAndProcessEip4844DA(ctx context.Context, logger log.Logger, cfg *Cfg, highestSlotProcessed uint64, blocks []*cltypes.SignedBeaconBlock) (highestBlobSlotProcessed uint64, err error) { + var ( + ids *solid.ListSSZ[*cltypes.BlobIdentifier] + blobs *network2.PeerAndSidecars + ) + + // Retrieve blob identifiers from the given blocks + ids, err = network2.BlobsIdentifiersFromBlocks(blocks) + if err != nil { + // Return an error if blob identifiers could not be retrieved + err = fmt.Errorf("failed to get blob identifiers: %w", err) + return + } + + // If there are no blobs to retrieve, return the highest slot processed + if ids.Len() == 0 { + return highestSlotProcessed, nil + } + + // Request blobs from the network + blobs, err = network2.RequestBlobsFrantically(ctx, cfg.rpc, ids) + if err != nil { + // Return an error if blobs could not be retrieved + err = fmt.Errorf("failed to get blobs: %w", err) + return + } + + var highestProcessed, inserted uint64 + + // Verify and insert blobs into the blob store + if highestProcessed, inserted, err = blob_storage.VerifyAgainstIdentifiersAndInsertIntoTheBlobStore(ctx, cfg.blobStore, ids, blobs.Responses, nil); err != nil { + // Ban the peer if verification fails + cfg.rpc.BanPeer(blobs.Peer) + // Return an error if blobs could not be verified + err = fmt.Errorf("failed to verify blobs: %w", err) + return + } + + // If all blobs were inserted successfully, return the highest processed slot + if inserted == uint64(ids.Len()) { + return highestProcessed, nil + } + + // If not all blobs were inserted, return the highest processed slot minus one + return highestProcessed - 1, err +} + +// processDownloadedBlockBatches processes a batch of downloaded blocks. +// It takes the highest block processed, a flag to determine if insertion is needed, and a list of signed beacon blocks as input. +// It returns the new highest block processed and an error if any. 
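+//
+// The insertion gating in the body below, restated: pre-Bellatrix blocks carry
+// no execution payload, so only Bellatrix+ blocks are handed to the block
+// collector, and only when the execution client supports insertion (the
+// shouldInsert flag computed in forwardSync):
+//
+//	if block.Version() >= clparams.BellatrixVersion && shouldInsert {
+//		err = cfg.blockCollector.AddBlock(block.Block)
+//	}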
+func processDownloadedBlockBatches(ctx context.Context, cfg *Cfg, highestBlockProcessed uint64, shouldInsert bool, blocks []*cltypes.SignedBeaconBlock) (newHighestBlockProcessed uint64, err error) { + // Pre-process the block batch to ensure that the blocks are sorted by slot in ascending order + sort.Slice(blocks, func(i, j int) bool { + return blocks[i].Block.Slot < blocks[j].Block.Slot + }) + + var ( + blockRoot common.Hash + st *state.CachingBeaconState + ) + newHighestBlockProcessed = highestBlockProcessed + + // Iterate over each block in the sorted list + for _, block := range blocks { + // Compute the hash of the current block + blockRoot, err = block.Block.HashSSZ() + if err != nil { + // Return an error if block hashing fails + err = fmt.Errorf("failed to hash block: %w", err) + return + } + + // Process the block + if err = processBlock(ctx, cfg, cfg.indiciesDB, block, false, true, false); err != nil { + // Return an error if block processing fails + err = fmt.Errorf("bad blocks segment received: %w", err) + return + } + + // Perform post-processing on the block + st, err = cfg.forkChoice.GetStateAtBlockRoot(blockRoot, false) + if err == nil && block.Block.Slot%(cfg.beaconCfg.SlotsPerEpoch*2) == 0 && st != nil { + // Dump the beacon state on disk if conditions are met + if err = cfg.forkChoice.DumpBeaconStateOnDisk(st); err != nil { + // Return an error if dumping the state fails + err = fmt.Errorf("failed to dump state: %w", err) + return + } + if err = saveHeadStateOnDiskIfNeeded(cfg, st); err != nil { + // Return an error if saving the head state fails + err = fmt.Errorf("failed to save head state: %w", err) + return + } + } + + // Update the highest block processed if the current block's slot is higher + if newHighestBlockProcessed < block.Block.Slot { + newHighestBlockProcessed = block.Block.Slot + } + + // If block version is less than BellatrixVersion or shouldInsert is false, skip insertion + if block.Version() < clparams.BellatrixVersion || !shouldInsert { + continue + } + + // Add the block to the block collector + if err = cfg.blockCollector.AddBlock(block.Block); err != nil { + // Return an error if adding the block to the collector fails + err = fmt.Errorf("failed to add block to collector: %w", err) + return + } + } + return +} + +// forwardSync (MAIN ROUTINE FOR ForwardSync) performs the forward synchronization of beacon blocks. 
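+//
+// The progress log in the body below estimates time remaining as
+// remaining_slots / observed_rate, with the rate sampled over one secsPerLog
+// window; an equivalent form, using the same variables as the code:
+//
+//	rate := float64(currentSlot.Load()-prevProgress) / float64(secsPerLog) // slots/s
+//	if rate > 0 {
+//		eta := time.Duration(float64(chainTipSlot-currentSlot.Load())/rate) * time.Second
+//	}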
+func forwardSync(ctx context.Context, logger log.Logger, cfg *Cfg, args Args) error { + var ( + shouldInsert = cfg.executionClient != nil && cfg.executionClient.SupportInsertion() // Check if the execution client supports insertion + finalizedCheckpoint = cfg.forkChoice.FinalizedCheckpoint() // Get the finalized checkpoint from fork choice + secsPerLog = 30 // Interval in seconds for logging progress + logTicker = time.NewTicker(time.Duration(secsPerLog) * time.Second) // Ticker for logging progress + downloader = network2.NewForwardBeaconDownloader(ctx, cfg.rpc) // Initialize a new forward beacon downloader + currentSlot atomic.Uint64 // Atomic variable to track the current slot + ) + + // Initialize the slot to download from the finalized checkpoint + currentSlot.Store(finalizedCheckpoint.Epoch() * cfg.beaconCfg.SlotsPerEpoch) + + // Always start from the current finalized checkpoint + downloader.SetHighestProcessedSlot(currentSlot.Load()) + + // Set the function to process downloaded blocks + downloader.SetProcessFunction(func(initialHighestSlotProcessed uint64, blocks []*cltypes.SignedBeaconBlock) (newHighestSlotProcessed uint64, err error) { + highestSlotProcessed, err := processDownloadedBlockBatches(ctx, cfg, initialHighestSlotProcessed, shouldInsert, blocks) + if err != nil { + logger.Warn("[Caplin] Failed to process block batch", "err", err) + return initialHighestSlotProcessed, err + } + + // Exit if we are pre-EIP-4844 + if !shouldProcessBlobs(blocks) { + currentSlot.Store(highestSlotProcessed) + return highestSlotProcessed, nil + } + + // Process blobs for EIP-4844 + highestBlobSlotProcessed, err := downloadAndProcessEip4844DA(ctx, logger, cfg, initialHighestSlotProcessed, blocks) + if err != nil { + logger.Warn("[Caplin] Failed to process blobs", "err", err) + return initialHighestSlotProcessed, err + } + if highestBlobSlotProcessed <= initialHighestSlotProcessed { + return initialHighestSlotProcessed, nil + } + currentSlot.Store(highestBlobSlotProcessed) + return highestBlobSlotProcessed, nil + }) + + // Get the current slot of the chain tip + chainTipSlot := cfg.ethClock.GetCurrentSlot() + logger.Info("[Caplin] Forward Sync", "from", currentSlot.Load(), "to", chainTipSlot) + prevProgress := currentSlot.Load() + + // Run the log loop until the highest processed slot reaches the chain tip slot + for downloader.GetHighestProcessedSlot() < chainTipSlot { + downloader.RequestMore(ctx) + + select { + case <-ctx.Done(): + // Return if the context is done + return ctx.Err() + case <-logTicker.C: + // Log progress at regular intervals + progressMade := chainTipSlot - currentSlot.Load() + distFromChainTip := time.Duration(progressMade*cfg.beaconCfg.SecondsPerSlot) * time.Second + timeProgress := currentSlot.Load() - prevProgress + estimatedTimeRemaining := 999 * time.Hour + if timeProgress > 0 { + estimatedTimeRemaining = time.Duration(float64(progressMade)/(float64(currentSlot.Load()-prevProgress)/float64(secsPerLog))) * time.Second + } + prevProgress = currentSlot.Load() + logger.Info("[Caplin] Forward Sync", "progress", currentSlot.Load(), "distance-from-chain-tip", distFromChainTip, "estimated-time-remaining", estimatedTimeRemaining) + default: + } + } + + return nil +} diff --git a/cl/phase1/stages/stage_history_download.go b/cl/phase1/stages/stage_history_download.go index 12c0678d6ef..835a1f7cb6c 100644 --- a/cl/phase1/stages/stage_history_download.go +++ b/cl/phase1/stages/stage_history_download.go @@ -18,6 +18,7 @@ package stages import ( "context" + "errors" "fmt" "math" 
"sync/atomic" @@ -151,6 +152,9 @@ func SpawnStageHistoryDownload(cfg StageHistoryReconstructionCfg, ctx context.Co destinationSlotForEL = frozenBlocksInEL - 1 } } + if slot == 0 { + return true, tx.Commit() + } return (!cfg.backfilling || slot <= destinationSlotForCL) && (slot <= destinationSlotForEL || isInElSnapshots), tx.Commit() }) prevProgress := cfg.downloader.Progress() @@ -361,11 +365,11 @@ func downloadBlobHistoryWorker(cfg StageHistoryReconstructionCfg, ctx context.Co continue } if block.Signature != header.Signature { - return fmt.Errorf("signature mismatch beetwen blob and stored block") + return errors.New("signature mismatch beetwen blob and stored block") } return nil } - return fmt.Errorf("block not in batch") + return errors.New("block not in batch") }) if err != nil { rpc.BanPeer(blobs.Peer) diff --git a/cl/rpc/rpc.go b/cl/rpc/rpc.go index a72d8f5ecdf..22e29ab2c46 100644 --- a/cl/rpc/rpc.go +++ b/cl/rpc/rpc.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "encoding/binary" + "errors" "fmt" "io" "time" @@ -105,7 +106,7 @@ func (b *BeaconRpcP2P) sendBlocksRequest(ctx context.Context, topic string, reqD } // Sanity check for message size. if encodedLn > uint64(maxMessageLength) { - return nil, message.Peer.Pid, fmt.Errorf("received message too big") + return nil, message.Peer.Pid, errors.New("received message too big") } // Read bytes using snappy into a new raw buffer of side encodedLn. @@ -122,7 +123,7 @@ func (b *BeaconRpcP2P) sendBlocksRequest(ctx context.Context, topic string, reqD // Fork digests respForkDigest := binary.BigEndian.Uint32(forkDigest) if respForkDigest == 0 { - return nil, message.Peer.Pid, fmt.Errorf("null fork digest") + return nil, message.Peer.Pid, errors.New("null fork digest") } version, err := b.ethClock.StateVersionByForkDigest(utils.Uint32ToBytes4(respForkDigest)) @@ -179,7 +180,7 @@ func (b *BeaconRpcP2P) sendBlobsSidecar(ctx context.Context, topic string, reqDa } // Sanity check for message size. if encodedLn > uint64(maxMessageLength) { - return nil, message.Peer.Pid, fmt.Errorf("received message too big") + return nil, message.Peer.Pid, errors.New("received message too big") } // Read bytes using snappy into a new raw buffer of side encodedLn. 
@@ -196,7 +197,7 @@ func (b *BeaconRpcP2P) sendBlobsSidecar(ctx context.Context, topic string, reqDa // Fork digests respForkDigest := binary.BigEndian.Uint32(forkDigest) if respForkDigest == 0 { - return nil, message.Peer.Pid, fmt.Errorf("null fork digest") + return nil, message.Peer.Pid, errors.New("null fork digest") } version, err := b.ethClock.StateVersionByForkDigest(utils.Uint32ToBytes4(respForkDigest)) diff --git a/cl/sentinel/communication/ssz_snappy/encoding.go b/cl/sentinel/communication/ssz_snappy/encoding.go index 5d841614f4f..54b587d47f3 100644 --- a/cl/sentinel/communication/ssz_snappy/encoding.go +++ b/cl/sentinel/communication/ssz_snappy/encoding.go @@ -20,6 +20,7 @@ import ( "bufio" "bytes" "encoding/binary" + "errors" "fmt" "io" "sync" @@ -88,7 +89,7 @@ func DecodeAndReadNoForkDigest(r io.Reader, val ssz.EncodableSSZ, version clpara return fmt.Errorf("unable to read varint from message prefix: %v", err) } if encodedLn > uint64(16*datasize.MB) { - return fmt.Errorf("payload too big") + return errors.New("payload too big") } sr := snappy.NewReader(r) diff --git a/cl/sentinel/discovery.go b/cl/sentinel/discovery.go index 53448906d0f..02a1fff5db3 100644 --- a/cl/sentinel/discovery.go +++ b/cl/sentinel/discovery.go @@ -18,7 +18,7 @@ package sentinel import ( "context" - "fmt" + "errors" "time" "github.com/libp2p/go-libp2p/core/network" @@ -42,7 +42,7 @@ func (s *Sentinel) ConnectWithPeer(ctx context.Context, info peer.AddrInfo) (err return nil } if s.peers.BanStatus(info.ID) { - return fmt.Errorf("refused to connect to bad peer") + return errors.New("refused to connect to bad peer") } ctxWithTimeout, cancel := context.WithTimeout(ctx, clparams.MaxDialTimeout) defer cancel() diff --git a/cl/sentinel/handlers/blobs_test.go b/cl/sentinel/handlers/blobs_test.go index 6465776cd69..12fee5be072 100644 --- a/cl/sentinel/handlers/blobs_test.go +++ b/cl/sentinel/handlers/blobs_test.go @@ -20,7 +20,7 @@ import ( "bytes" "context" "encoding/binary" - "fmt" + "errors" "io" "math" "testing" @@ -166,7 +166,7 @@ func TestBlobsByRangeHandler(t *testing.T) { // Fork digests respForkDigest := binary.BigEndian.Uint32(forkDigest) if respForkDigest == 0 { - require.NoError(t, fmt.Errorf("null fork digest")) + require.NoError(t, errors.New("null fork digest")) } version, err := ethClock.StateVersionByForkDigest(utils.Uint32ToBytes4(respForkDigest)) if err != nil { @@ -288,7 +288,7 @@ func TestBlobsByIdentifiersHandler(t *testing.T) { // Fork digests respForkDigest := binary.BigEndian.Uint32(forkDigest) if respForkDigest == 0 { - require.NoError(t, fmt.Errorf("null fork digest")) + require.NoError(t, errors.New("null fork digest")) } version, err := ethClock.StateVersionByForkDigest(utils.Uint32ToBytes4(respForkDigest)) if err != nil { diff --git a/cl/sentinel/peers/pool.go b/cl/sentinel/peers/pool.go index ad969212ad6..cfa9f9d8a36 100644 --- a/cl/sentinel/peers/pool.go +++ b/cl/sentinel/peers/pool.go @@ -17,7 +17,7 @@ package peers import ( - "fmt" + "errors" "sync" "sync/atomic" @@ -151,7 +151,7 @@ func (p *Pool) Request() (pid *Item, done func(), err error) { //grab a peer from our ringbuffer val, ok := p.queue.PopFront() if !ok { - return nil, nil, fmt.Errorf("no peers? ( :( > ") + return nil, nil, errors.New("no peers? 
( :( > ") } return val, func() { p.mu.Lock() diff --git a/cl/sentinel/sentinel_gossip_test.go b/cl/sentinel/sentinel_gossip_test.go index db099b8b9d7..9773e956e34 100644 --- a/cl/sentinel/sentinel_gossip_test.go +++ b/cl/sentinel/sentinel_gossip_test.go @@ -106,7 +106,7 @@ func TestSentinelGossipOnHardFork(t *testing.T) { // delay to make sure that the connection is established sub1.Publish(msg) }() - previousTopic := "" + var previousTopic string ans := <-ch require.Equal(t, ans.Data, msg) diff --git a/cl/sentinel/service/notifiers.go b/cl/sentinel/service/notifiers.go index 9a2c0a34d9a..5c35cb51693 100644 --- a/cl/sentinel/service/notifiers.go +++ b/cl/sentinel/service/notifiers.go @@ -17,7 +17,7 @@ package service import ( - "fmt" + "errors" "sync" ) @@ -58,7 +58,7 @@ func (g *gossipNotifier) addSubscriber() (chan gossipObject, int, error) { defer g.mu.Unlock() if len(g.notifiers) >= maxSubscribers { - return nil, -1, fmt.Errorf("too many subsribers, try again later") + return nil, -1, errors.New("too many subsribers, try again later") } ch := make(chan gossipObject, 1<<16) g.notifiers = append(g.notifiers, ch) @@ -70,7 +70,7 @@ func (g *gossipNotifier) removeSubscriber(id int) error { defer g.mu.Unlock() if len(g.notifiers) <= id { - return fmt.Errorf("invalid id, no subscription exist with this id") + return errors.New("invalid id, no subscription exist with this id") } close(g.notifiers[id]) g.notifiers = append(g.notifiers[:id], g.notifiers[id+1:]...) diff --git a/cl/sentinel/service/service.go b/cl/sentinel/service/service.go index 2084c1f8b11..798e31ca33a 100644 --- a/cl/sentinel/service/service.go +++ b/cl/sentinel/service/service.go @@ -19,6 +19,7 @@ package service import ( "bytes" "context" + "errors" "fmt" "io" "net/http" @@ -121,17 +122,17 @@ func (s *SentinelServer) PublishGossip(_ context.Context, msg *sentinelrpc.Gossi switch { case gossip.IsTopicBlobSidecar(msg.Name): if msg.SubnetId == nil { - return nil, fmt.Errorf("subnetId is required for blob sidecar") + return nil, errors.New("subnetId is required for blob sidecar") } subscription = manager.GetMatchingSubscription(gossip.TopicNameBlobSidecar(*msg.SubnetId)) case gossip.IsTopicSyncCommittee(msg.Name): if msg.SubnetId == nil { - return nil, fmt.Errorf("subnetId is required for sync_committee") + return nil, errors.New("subnetId is required for sync_committee") } subscription = manager.GetMatchingSubscription(gossip.TopicNameSyncCommittee(int(*msg.SubnetId))) case gossip.IsTopicBeaconAttestation(msg.Name): if msg.SubnetId == nil { - return nil, fmt.Errorf("subnetId is required for beacon attestation") + return nil, errors.New("subnetId is required for beacon attestation") } subscription = manager.GetMatchingSubscription(gossip.TopicNameBeaconAttestation(*msg.SubnetId)) default: @@ -368,7 +369,7 @@ func (s *SentinelServer) SetSubscribeExpiry(ctx context.Context, expiryReq *sent ) subs := s.sentinel.GossipManager().GetMatchingSubscription(topic) if subs == nil { - return nil, fmt.Errorf("no such subscription") + return nil, errors.New("no such subscription") } subs.OverwriteSubscriptionExpiry(expiryTime) return &sentinelrpc.EmptyMessage{}, nil diff --git a/cl/sentinel/utils.go b/cl/sentinel/utils.go index f108c91d257..4758815bd0d 100644 --- a/cl/sentinel/utils.go +++ b/cl/sentinel/utils.go @@ -18,6 +18,7 @@ package sentinel import ( "crypto/ecdsa" + "errors" "fmt" "net" "strings" @@ -41,11 +42,11 @@ func convertToInterfacePubkey(pubkey *ecdsa.PublicKey) (crypto.PubKey, error) { xVal, yVal := new(btcec.FieldVal), 
new(btcec.FieldVal) overflows := xVal.SetByteSlice(pubkey.X.Bytes()) if overflows { - return nil, fmt.Errorf("x value overflows") + return nil, errors.New("x value overflows") } overflows = yVal.SetByteSlice(pubkey.Y.Bytes()) if overflows { - return nil, fmt.Errorf("y value overflows") + return nil, errors.New("y value overflows") } newKey := crypto.PubKey((*crypto.Secp256k1PublicKey)(btcec.NewPublicKey(xVal, yVal))) // Zero out temporary values. @@ -85,7 +86,7 @@ func multiAddressBuilderWithID(ipAddr, protocol string, port uint, id peer.ID) ( return nil, fmt.Errorf("invalid ip address provided: %s", ipAddr) } if id.String() == "" { - return nil, fmt.Errorf("empty peer id given") + return nil, errors.New("empty peer id given") } if parsedIP.To4() != nil { return multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/%s/%s/%d/p2p/%s", ipAddr, protocol, port, id.String())) diff --git a/cl/spectest/Makefile b/cl/spectest/Makefile index b507c4613b1..5b6856347e7 100644 --- a/cl/spectest/Makefile +++ b/cl/spectest/Makefile @@ -13,4 +13,4 @@ clean: rm -rf tests mainnet: - CGO_CFLAGS=-D__BLST_PORTABLE__ go test -tags=spectest -run=/mainnet -failfast -v --timeout 30m + CGO_CFLAGS=-D__BLST_PORTABLE__ go test -tags=spectest -run=/mainnet -v --timeout 30m diff --git a/cl/spectest/consensus_tests/fork_choice.go b/cl/spectest/consensus_tests/fork_choice.go index e88fd21003b..9cd58e2f77a 100644 --- a/cl/spectest/consensus_tests/fork_choice.go +++ b/cl/spectest/consensus_tests/fork_choice.go @@ -199,7 +199,7 @@ func (b *ForkChoice) Run(t *testing.T, root fs.FS, c spectest.TestCase) (err err genesisState, err := initial_state.GetGenesisState(clparams.MainnetNetwork) require.NoError(t, err) - emitters := beaconevents.NewEmitters() + emitters := beaconevents.NewEventEmitter() _, beaconConfig := clparams.GetConfigsByNetwork(clparams.MainnetNetwork) ethClock := eth_clock.NewEthereumClock(genesisState.GenesisTime(), genesisState.GenesisValidatorsRoot(), beaconConfig) blobStorage := blob_storage.NewBlobStore(memdb.New("/tmp"), afero.NewMemMapFs(), math.MaxUint64, &clparams.MainnetBeaconConfig, ethClock) @@ -245,7 +245,7 @@ func (b *ForkChoice) Run(t *testing.T, root fs.FS, c spectest.TestCase) (err err continue } } - blobSidecarService := services.NewBlobSidecarService(ctx, &clparams.MainnetBeaconConfig, forkStore, nil, ethClock, true) + blobSidecarService := services.NewBlobSidecarService(ctx, &clparams.MainnetBeaconConfig, forkStore, nil, ethClock, emitters, true) blobs.Range(func(index int, value *cltypes.Blob, length int) bool { var proof libcommon.Bytes48 diff --git a/cl/spectest/consensus_tests/operations.go b/cl/spectest/consensus_tests/operations.go index af719ecee74..a00bdd0179d 100644 --- a/cl/spectest/consensus_tests/operations.go +++ b/cl/spectest/consensus_tests/operations.go @@ -17,7 +17,7 @@ package consensus_tests import ( - "fmt" + "errors" "io/fs" "os" "testing" @@ -63,7 +63,7 @@ func operationAttestationHandler(t *testing.T, root fs.FS, c spectest.TestCase) return err } if expectedError { - return fmt.Errorf("expected error") + return errors.New("expected error") } haveRoot, err := preState.HashSSZ() require.NoError(t, err) @@ -93,7 +93,7 @@ func operationAttesterSlashingHandler(t *testing.T, root fs.FS, c spectest.TestC return err } if expectedError { - return fmt.Errorf("expected error") + return errors.New("expected error") } haveRoot, err := preState.HashSSZ() require.NoError(t, err) @@ -123,7 +123,7 @@ func operationProposerSlashingHandler(t *testing.T, root fs.FS, c spectest.TestC return err } if 
expectedError { - return fmt.Errorf("expected error") + return errors.New("expected error") } haveRoot, err := preState.HashSSZ() require.NoError(t, err) @@ -155,7 +155,7 @@ func operationBlockHeaderHandler(t *testing.T, root fs.FS, c spectest.TestCase) return err } if expectedError { - return fmt.Errorf("expected error") + return errors.New("expected error") } haveRoot, err := preState.HashSSZ() require.NoError(t, err) @@ -185,7 +185,7 @@ func operationDepositHandler(t *testing.T, root fs.FS, c spectest.TestCase) erro return err } if expectedError { - return fmt.Errorf("expected error") + return errors.New("expected error") } haveRoot, err := preState.HashSSZ() require.NoError(t, err) @@ -215,7 +215,7 @@ func operationSyncAggregateHandler(t *testing.T, root fs.FS, c spectest.TestCase return err } if expectedError { - return fmt.Errorf("expected error") + return errors.New("expected error") } haveRoot, err := preState.HashSSZ() require.NoError(t, err) @@ -245,7 +245,7 @@ func operationVoluntaryExitHandler(t *testing.T, root fs.FS, c spectest.TestCase return err } if expectedError { - return fmt.Errorf("expected error") + return errors.New("expected error") } haveRoot, err := preState.HashSSZ() require.NoError(t, err) @@ -275,7 +275,7 @@ func operationWithdrawalHandler(t *testing.T, root fs.FS, c spectest.TestCase) e return err } if expectedError { - return fmt.Errorf("expected error") + return errors.New("expected error") } haveRoot, err := preState.HashSSZ() require.NoError(t, err) @@ -305,7 +305,7 @@ func operationSignedBlsChangeHandler(t *testing.T, root fs.FS, c spectest.TestCa return err } if expectedError { - return fmt.Errorf("expected error") + return errors.New("expected error") } haveRoot, err := preState.HashSSZ() require.NoError(t, err) diff --git a/cl/transition/impl/eth2/operations.go b/cl/transition/impl/eth2/operations.go index 3d9dab984f5..a0328bfda6d 100644 --- a/cl/transition/impl/eth2/operations.go +++ b/cl/transition/impl/eth2/operations.go @@ -63,7 +63,7 @@ func (I *impl) ProcessProposerSlashing( } if *h1 == *h2 { - return fmt.Errorf("proposee slashing headers are the same") + return errors.New("proposer slashing headers are the same") } proposer, err := s.ValidatorForValidatorIndex(int(h1.ProposerIndex)) @@ -125,7 +125,7 @@ func (I *impl) ProcessAttesterSlashing( return fmt.Errorf("error calculating indexed attestation 1 validity: %v", err) } if !valid { - return fmt.Errorf("invalid indexed attestation 1") + return errors.New("invalid indexed attestation 1") } valid, err = state.IsValidIndexedAttestation(s, att2) @@ -133,7 +133,7 @@ func (I *impl) ProcessAttesterSlashing( return fmt.Errorf("error calculating indexed attestation 2 validity: %v", err) } if !valid { - return fmt.Errorf("invalid indexed attestation 2") + return errors.New("invalid indexed attestation 2") } slashedAny := false @@ -158,7 +158,7 @@ func (I *impl) ProcessAttesterSlashing( } if !slashedAny { - return fmt.Errorf("no validators slashed") + return errors.New("no validators slashed") } return nil } @@ -186,7 +186,7 @@ func (I *impl) ProcessDeposit(s abstract.BeaconState, deposit *cltypes.Deposit) depositIndex, eth1Data.Root, ) { - return fmt.Errorf("processDepositForAltair: Could not validate deposit root") + return errors.New("processDepositForAltair: Could not validate deposit root") } // Increment index @@ -360,7 +360,7 @@ func (I *impl) ProcessWithdrawals( func (I *impl) ProcessExecutionPayload(s abstract.BeaconState, parentHash, prevRandao common.Hash, time uint64, payloadHeader
*cltypes.Eth1Header) error { if state.IsMergeTransitionComplete(s) { if parentHash != s.LatestExecutionPayloadHeader().BlockHash { - return fmt.Errorf("ProcessExecutionPayload: invalid eth1 chain. mismatching parent") + return errors.New("ProcessExecutionPayload: invalid eth1 chain. mismatching parent") } } if prevRandao != s.GetRandaoMixes(state.Epoch(s)) { @@ -371,7 +371,7 @@ func (I *impl) ProcessExecutionPayload(s abstract.BeaconState, parentHash, prevR ) } if time != state.ComputeTimestampAtSlot(s, s.Slot()) { - return fmt.Errorf("ProcessExecutionPayload: invalid Eth1 timestamp") + return errors.New("ProcessExecutionPayload: invalid Eth1 timestamp") } s.SetLatestExecutionPayloadHeader(payloadHeader) return nil @@ -490,13 +490,13 @@ func (I *impl) ProcessBlsToExecutionChange( if I.FullValidation { // Check the validator's withdrawal credentials prefix. if wc[0] != byte(beaconConfig.BLSWithdrawalPrefixByte) { - return fmt.Errorf("invalid withdrawal credentials prefix") + return errors.New("invalid withdrawal credentials prefix") } // Check the validator's withdrawal credentials against the provided message. hashedFrom := utils.Sha256(change.From[:]) if !bytes.Equal(hashedFrom[1:], wc[1:]) { - return fmt.Errorf("invalid withdrawal credentials") + return errors.New("invalid withdrawal credentials") } // Compute the signing domain and verify the message signature. @@ -517,7 +517,7 @@ func (I *impl) ProcessBlsToExecutionChange( return err } if !valid { - return fmt.Errorf("invalid signature") + return errors.New("invalid signature") } } credentials := wc @@ -658,7 +658,7 @@ func (I *impl) processAttestationPhase0( } if len(committee) != utils.GetBitlistLength(attestation.AggregationBits()) { - return nil, fmt.Errorf("processAttestationPhase0: mismatching aggregation bits size") + return nil, errors.New("processAttestationPhase0: mismatching aggregation bits size") } // Cached so it is performant. proposerIndex, err := s.GetBeaconProposerIndex() @@ -677,12 +677,12 @@ func (I *impl) processAttestationPhase0( // Depending of what slot we are on we put in either the current justified or previous justified. 
if isCurrentAttestation { if !data.Source().Equal(s.CurrentJustifiedCheckpoint()) { - return nil, fmt.Errorf("processAttestationPhase0: mismatching sources") + return nil, errors.New("processAttestationPhase0: mismatching sources") } s.AddCurrentEpochAtteastation(pendingAttestation) } else { if !data.Source().Equal(s.PreviousJustifiedCheckpoint()) { - return nil, fmt.Errorf("processAttestationPhase0: mismatching sources") + return nil, errors.New("processAttestationPhase0: mismatching sources") } s.AddPreviousEpochAttestation(pendingAttestation) } diff --git a/cl/transition/impl/eth2/validation.go b/cl/transition/impl/eth2/validation.go index 7f5c8c25445..05348d735ec 100644 --- a/cl/transition/impl/eth2/validation.go +++ b/cl/transition/impl/eth2/validation.go @@ -17,6 +17,7 @@ package eth2 import ( + "errors" "fmt" "github.com/Giulio2002/bls" @@ -49,7 +50,7 @@ func (I *impl) VerifyBlockSignature(s abstract.BeaconState, block *cltypes.Signe return fmt.Errorf("error validating block signature: %v", err) } if !valid { - return fmt.Errorf("block not valid") + return errors.New("block not valid") } return nil } diff --git a/cl/utils/eth_clock/ethereum_clock.go b/cl/utils/eth_clock/ethereum_clock.go index 6686f46eda6..de7f38d900a 100644 --- a/cl/utils/eth_clock/ethereum_clock.go +++ b/cl/utils/eth_clock/ethereum_clock.go @@ -32,6 +32,7 @@ var maximumClockDisparity = 500 * time.Millisecond type EthereumClock interface { GetSlotTime(slot uint64) time.Time GetCurrentSlot() uint64 + GetEpochAtSlot(slot uint64) uint64 IsSlotCurrentSlotWithMaximumClockDisparity(slot uint64) bool GetSlotByTime(time time.Time) uint64 GetCurrentEpoch() uint64 @@ -89,6 +90,10 @@ func (t *ethereumClockImpl) GetCurrentSlot() uint64 { return (now - t.genesisTime) / t.beaconCfg.SecondsPerSlot } +func (t *ethereumClockImpl) GetEpochAtSlot(slot uint64) uint64 { + return slot / t.beaconCfg.SlotsPerEpoch +} + func (t *ethereumClockImpl) IsSlotCurrentSlotWithMaximumClockDisparity(slot uint64) bool { slotTime := t.GetSlotTime(slot) currSlot := t.GetCurrentSlot() diff --git a/cl/utils/eth_clock/ethereum_clock_mock.go b/cl/utils/eth_clock/ethereum_clock_mock.go index 757028f484d..f6c1ce1f54b 100644 --- a/cl/utils/eth_clock/ethereum_clock_mock.go +++ b/cl/utils/eth_clock/ethereum_clock_mock.go @@ -310,6 +310,44 @@ func (c *MockEthereumClockGetCurrentSlotCall) DoAndReturn(f func() uint64) *Mock return c } +// GetEpochAtSlot mocks base method. +func (m *MockEthereumClock) GetEpochAtSlot(slot uint64) uint64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEpochAtSlot", slot) + ret0, _ := ret[0].(uint64) + return ret0 +} + +// GetEpochAtSlot indicates an expected call of GetEpochAtSlot. 
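+//
+// For reference, the real implementation (ethereum_clock.go above) is plain
+// integer division, slot/SlotsPerEpoch: with mainnet's 32 slots per epoch (an
+// assumed value, not stated in this diff), GetEpochAtSlot(100) == 3.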
+func (mr *MockEthereumClockMockRecorder) GetEpochAtSlot(slot any) *MockEthereumClockGetEpochAtSlotCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEpochAtSlot", reflect.TypeOf((*MockEthereumClock)(nil).GetEpochAtSlot), slot) + return &MockEthereumClockGetEpochAtSlotCall{Call: call} +} + +// MockEthereumClockGetEpochAtSlotCall wrap *gomock.Call +type MockEthereumClockGetEpochAtSlotCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockEthereumClockGetEpochAtSlotCall) Return(arg0 uint64) *MockEthereumClockGetEpochAtSlotCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockEthereumClockGetEpochAtSlotCall) Do(f func(uint64) uint64) *MockEthereumClockGetEpochAtSlotCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockEthereumClockGetEpochAtSlotCall) DoAndReturn(f func(uint64) uint64) *MockEthereumClockGetEpochAtSlotCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + // GetSlotByTime mocks base method. func (m *MockEthereumClock) GetSlotByTime(time time.Time) uint64 { m.ctrl.T.Helper() diff --git a/cl/validator/committee_subscription/committee_subscription.go b/cl/validator/committee_subscription/committee_subscription.go index 1f6859eec83..20841436bac 100644 --- a/cl/validator/committee_subscription/committee_subscription.go +++ b/cl/validator/committee_subscription/committee_subscription.go @@ -18,6 +18,7 @@ package committee_subscription import ( "context" + "errors" "fmt" "sync" "time" @@ -37,13 +38,13 @@ import ( ) var ( - ErrIgnore = fmt.Errorf("ignore") - ErrCommitteeIndexOutOfRange = fmt.Errorf("committee index out of range") - ErrWrongSubnet = fmt.Errorf("attestation is for the wrong subnet") + ErrIgnore = errors.New("ignore") + ErrCommitteeIndexOutOfRange = errors.New("committee index out of range") + ErrWrongSubnet = errors.New("attestation is for the wrong subnet") ErrNotInPropagationRange = fmt.Errorf("attestation is not in propagation range. 
%w", ErrIgnore) - ErrEpochMismatch = fmt.Errorf("epoch mismatch") - ErrExactlyOneBitSet = fmt.Errorf("exactly one aggregation bit should be set") - ErrAggregationBitsMismatch = fmt.Errorf("aggregation bits mismatch committee size") + ErrEpochMismatch = errors.New("epoch mismatch") + ErrExactlyOneBitSet = errors.New("exactly one aggregation bit should be set") + ErrAggregationBitsMismatch = errors.New("aggregation bits mismatch committee size") ) type CommitteeSubscribeMgmt struct { @@ -99,7 +100,7 @@ func (c *CommitteeSubscribeMgmt) AddAttestationSubscription(ctx context.Context, ) headState := c.syncedData.HeadState() if headState == nil { - return fmt.Errorf("head state not available") + return errors.New("head state not available") } log.Debug("Add attestation subscription", "slot", slot, "committeeIndex", cIndex, "isAggregator", p.IsAggregator, "validatorIndex", p.ValidatorIndex) diff --git a/cmd/capcli/cli.go b/cmd/capcli/cli.go index 447848cb1b5..bdfd69ea4f4 100644 --- a/cmd/capcli/cli.go +++ b/cmd/capcli/cli.go @@ -18,7 +18,9 @@ package main import ( "context" + "encoding/binary" "encoding/json" + "errors" "fmt" "io" "math" @@ -44,12 +46,13 @@ import ( "github.com/erigontech/erigon/cl/antiquary" "github.com/erigontech/erigon/cl/clparams" "github.com/erigontech/erigon/cl/clparams/initial_state" + "github.com/erigontech/erigon/cl/cltypes" "github.com/erigontech/erigon/cl/persistence/beacon_indicies" "github.com/erigontech/erigon/cl/persistence/format/snapshot_format" "github.com/erigontech/erigon/cl/persistence/format/snapshot_format/getters" state_accessors "github.com/erigontech/erigon/cl/persistence/state" "github.com/erigontech/erigon/cl/persistence/state/historical_states_reader" - "github.com/erigontech/erigon/cl/phase1/core" + "github.com/erigontech/erigon/cl/phase1/core/checkpoint_sync" "github.com/erigontech/erigon/cl/phase1/core/state" "github.com/erigontech/erigon/cl/phase1/network" "github.com/erigontech/erigon/cl/phase1/stages" @@ -139,7 +142,7 @@ func (c *Chain) Run(ctx *Context) error { dirs := datadir.New(c.Datadir) csn := freezeblocks.NewCaplinSnapshots(ethconfig.BlocksFreezing{}, beaconConfig, dirs, log.Root()) - bs, err := core.RetrieveBeaconState(ctx, beaconConfig, networkType) + bs, err := checkpoint_sync.NewRemoteCheckpointSync(beaconConfig, networkType).GetLatestBeaconState(ctx) if err != nil { return err } @@ -183,13 +186,63 @@ type ChainEndpoint struct { outputFolder } +func retrieveAndSanitizeBlockFromRemoteEndpoint(ctx context.Context, beaconConfig *clparams.BeaconChainConfig, uri string, expectedBlockRoot *libcommon.Hash) (*cltypes.SignedBeaconBlock, error) { + log.Debug("[Checkpoint Sync] Requesting beacon block", "uri", uri) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return nil, err + } + + req.Header.Set("Accept", "application/octet-stream") + if err != nil { + return nil, fmt.Errorf("checkpoint sync request failed %s", err) + } + r, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer func() { + err = r.Body.Close() + }() + if r.StatusCode != http.StatusOK { + return nil, fmt.Errorf("checkpoint sync failed, bad status code %d", r.StatusCode) + } + marshaled, err := io.ReadAll(r.Body) + if err != nil { + return nil, fmt.Errorf("checkpoint sync read failed %s", err) + } + if len(marshaled) < 108 { + return nil, errors.New("read failed, too short") + } + currentSlot := binary.LittleEndian.Uint64(marshaled[100:108]) + v := beaconConfig.GetCurrentStateVersion(currentSlot / 
beaconConfig.SlotsPerEpoch) + + block := cltypes.NewSignedBeaconBlock(beaconConfig) + err = block.DecodeSSZ(marshaled, int(v)) + if err != nil { + return nil, fmt.Errorf("checkpoint sync decode failed %s", err) + } + if expectedBlockRoot != nil { + has, err := block.Block.HashSSZ() + if err != nil { + return nil, fmt.Errorf("checkpoint sync decode failed %s", err) + } + if has != *expectedBlockRoot { + return nil, fmt.Errorf("checkpoint sync decode failed, unexpected block root %s", has) + } + } + return block, nil +} + func (c *ChainEndpoint) Run(ctx *Context) error { _, beaconConfig, ntype, err := clparams.GetConfigsByNetworkName(c.Chain) if err != nil { return err } log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StderrHandler)) - bs, err := core.RetrieveBeaconState(ctx, beaconConfig, ntype) + // Get latest state + checkPointSyncer := checkpoint_sync.NewRemoteCheckpointSync(beaconConfig, ntype) + bs, err := checkPointSyncer.GetLatestBeaconState(ctx) if err != nil { return err } @@ -208,9 +261,9 @@ func (c *ChainEndpoint) Run(ctx *Context) error { } log.Info("Hooked", "uri", baseUri) // Let's fetch the head first - currentBlock, err := core.RetrieveBlock(ctx, beaconConfig, fmt.Sprintf("%s/head", baseUri), nil) + currentBlock, err := retrieveAndSanitizeBlockFromRemoteEndpoint(ctx, beaconConfig, baseUri+"/head", nil) if err != nil { - return fmt.Errorf("failed to retrieve head: %w, uri: %s", err, fmt.Sprintf("%s/head", baseUri)) + return fmt.Errorf("failed to retrieve head: %w, uri: %s", err, baseUri+"/head") } currentRoot, err := currentBlock.Block.HashSSZ() if err != nil { @@ -244,7 +297,7 @@ func (c *ChainEndpoint) Run(ctx *Context) error { stringifiedRoot := common.Bytes2Hex(currentRoot[:]) // Let's fetch the head first - currentBlock, err := core.RetrieveBlock(ctx, beaconConfig, fmt.Sprintf("%s/0x%s", baseUri, stringifiedRoot), (*libcommon.Hash)(¤tRoot)) + currentBlock, err := retrieveAndSanitizeBlockFromRemoteEndpoint(ctx, beaconConfig, fmt.Sprintf("%s/0x%s", baseUri, stringifiedRoot), (*libcommon.Hash)(¤tRoot)) if err != nil { return false, fmt.Errorf("failed to retrieve block: %w, uri: %s", err, fmt.Sprintf("%s/0x%s", baseUri, stringifiedRoot)) } @@ -388,7 +441,7 @@ func (c *CheckSnapshots) Run(ctx *Context) error { if genesisHeader == nil { log.Warn("beaconIndices up to", "block", to, "caplinSnapIndexMax", csn.IndicesMax()) - return fmt.Errorf("genesis header is nil") + return errors.New("genesis header is nil") } previousBlockRoot, err := genesisHeader.Header.HashSSZ() if err != nil { @@ -601,7 +654,7 @@ type ArchiveSanitizer struct { func getHead(beaconApiURL string) (uint64, error) { headResponse := map[string]interface{}{} - req, err := http.NewRequest("GET", fmt.Sprintf("%s/eth/v2/debug/beacon/heads", beaconApiURL), nil) + req, err := http.NewRequest("GET", beaconApiURL+"/eth/v2/debug/beacon/heads", nil) if err != nil { return 0, err } @@ -616,12 +669,12 @@ func getHead(beaconApiURL string) (uint64, error) { } data := headResponse["data"].([]interface{}) if len(data) == 0 { - return 0, fmt.Errorf("no head found") + return 0, errors.New("no head found") } head := data[0].(map[string]interface{}) slotStr, ok := head["slot"].(string) if !ok { - return 0, fmt.Errorf("no slot found") + return 0, errors.New("no slot found") } slot, err := strconv.ParseUint(slotStr, 10, 64) if err != nil { @@ -650,7 +703,7 @@ func getStateRootAtSlot(beaconApiURL string, slot uint64) (libcommon.Hash, error } data := response["data"].(map[string]interface{}) if len(data) == 0 { - return 
libcommon.Hash{}, fmt.Errorf("no head found") + return libcommon.Hash{}, errors.New("no head found") } rootStr := data["root"].(string) @@ -781,8 +834,8 @@ func (b *BenchmarkNode) Run(ctx *Context) error { for i := uint64(startSlot); i < headSlot; i += uint64(interval) { uri := b.BaseURL + b.Endpoint - uri = strings.Replace(uri, "{slot}", fmt.Sprintf("%d", i), 1) - uri = strings.Replace(uri, "{epoch}", fmt.Sprintf("%d", i/beaconConfig.SlotsPerEpoch), 1) + uri = strings.Replace(uri, "{slot}", strconv.FormatUint(i, 10), 1) + uri = strings.Replace(uri, "{epoch}", strconv.FormatUint(i/beaconConfig.SlotsPerEpoch, 10), 1) elapsed, err := timeRequest(uri, b.Accept, b.Method, b.Body) if err != nil { log.Warn("Failed to benchmark", "error", err, "uri", uri) diff --git a/cmd/caplin/caplin1/run.go b/cmd/caplin/caplin1/run.go index 0032ec9b213..a8c78eabe38 100644 --- a/cmd/caplin/caplin1/run.go +++ b/cmd/caplin/caplin1/run.go @@ -39,6 +39,7 @@ import ( "github.com/erigontech/erigon/cl/clparams/initial_state" "github.com/erigontech/erigon/cl/cltypes" "github.com/erigontech/erigon/cl/cltypes/solid" + "github.com/erigontech/erigon/cl/monitor" "github.com/erigontech/erigon/cl/rpc" "github.com/erigontech/erigon/cl/sentinel" "github.com/erigontech/erigon/cl/sentinel/service" @@ -119,7 +120,13 @@ func OpenCaplinDatabase(ctx context.Context, func RunCaplinPhase1(ctx context.Context, engine execution_client.ExecutionEngine, config *ethconfig.Config, networkConfig *clparams.NetworkConfig, beaconConfig *clparams.BeaconChainConfig, ethClock eth_clock.EthereumClock, state *state.CachingBeaconState, dirs datadir.Dirs, eth1Getter snapshot_format.ExecutionBlockReaderByNumber, - snDownloader proto_downloader.DownloaderClient, backfilling, blobBackfilling bool, states bool, indexDB kv.RwDB, blobStorage blob_storage.BlobStorage, creds credentials.TransportCredentials, snBuildSema *semaphore.Weighted, caplinOptions ...CaplinOption) error { + snDownloader proto_downloader.DownloaderClient, indexDB kv.RwDB, blobStorage blob_storage.BlobStorage, creds credentials.TransportCredentials, snBuildSema *semaphore.Weighted, caplinOptions ...CaplinOption) error { + var ( + backfilling = config.CaplinConfig.Backfilling + blobBackfilling = config.CaplinConfig.BlobBackfilling + states = config.CaplinConfig.Archive + ) + ctx, cn := context.WithCancel(ctx) defer cn() @@ -146,7 +153,7 @@ func RunCaplinPhase1(ctx context.Context, engine execution_client.ExecutionEngin syncedDataManager := synced_data.NewSyncedDataManager(true, beaconConfig) syncContributionPool := sync_contribution_pool.NewSyncContributionPool(beaconConfig) - emitters := beaconevents.NewEmitters() + emitters := beaconevents.NewEventEmitter() aggregationPool := aggregation.NewAggregationPool(ctx, beaconConfig, networkConfig, ethClock) forkChoice, err := forkchoice.NewForkChoiceStore(ethClock, state, engine, pool, fork_graph.NewForkGraphDisk(state, fcuFs, config.BeaconRouter), emitters, syncedDataManager, blobStorage) if err != nil { @@ -193,18 +200,19 @@ func RunCaplinPhase1(ctx context.Context, engine execution_client.ExecutionEngin if err != nil { return err } + validatorMonitor := monitor.NewValidatorMonitor(config.CaplinConfig.EnableValidatorMonitor, forkChoice, ethClock, beaconConfig, syncedDataManager) beaconRpc := rpc.NewBeaconRpcP2P(ctx, sentinel, beaconConfig, ethClock) committeeSub := committee_subscription.NewCommitteeSubscribeManagement(ctx, indexDB, beaconConfig, networkConfig, ethClock, sentinel, state, aggregationPool, syncedDataManager) // Define gossip 
services - blockService := services.NewBlockService(ctx, indexDB, forkChoice, syncedDataManager, ethClock, beaconConfig, emitters) - blobService := services.NewBlobSidecarService(ctx, beaconConfig, forkChoice, syncedDataManager, ethClock, false) + blockService := services.NewBlockService(ctx, indexDB, forkChoice, syncedDataManager, ethClock, beaconConfig, emitters, validatorMonitor) + blobService := services.NewBlobSidecarService(ctx, beaconConfig, forkChoice, syncedDataManager, ethClock, emitters, false) syncCommitteeMessagesService := services.NewSyncCommitteeMessagesService(beaconConfig, ethClock, syncedDataManager, syncContributionPool, false) - attestationService := services.NewAttestationService(ctx, forkChoice, committeeSub, ethClock, syncedDataManager, beaconConfig, networkConfig) + attestationService := services.NewAttestationService(ctx, forkChoice, committeeSub, ethClock, syncedDataManager, beaconConfig, networkConfig, emitters) syncContributionService := services.NewSyncContributionService(syncedDataManager, beaconConfig, syncContributionPool, ethClock, emitters, false) aggregateAndProofService := services.NewAggregateAndProofService(ctx, syncedDataManager, forkChoice, beaconConfig, pool, false) voluntaryExitService := services.NewVoluntaryExitService(pool, emitters, syncedDataManager, beaconConfig, ethClock) blsToExecutionChangeService := services.NewBLSToExecutionChangeService(pool, emitters, syncedDataManager, beaconConfig) - proposerSlashingService := services.NewProposerSlashingService(pool, syncedDataManager, beaconConfig, ethClock) + proposerSlashingService := services.NewProposerSlashingService(pool, syncedDataManager, beaconConfig, ethClock, emitters) // Create the gossip manager gossipManager := network.NewGossipReceiver(sentinel, forkChoice, beaconConfig, ethClock, emitters, committeeSub, blockService, blobService, syncCommitteeMessagesService, syncContributionService, aggregateAndProofService, @@ -315,6 +323,7 @@ func RunCaplinPhase1(ctx context.Context, engine execution_client.ExecutionEngin blsToExecutionChangeService, proposerSlashingService, option.builderClient, + validatorMonitor, ) go beacon.ListenAndServe(&beacon.LayeredBeaconHandler{ ArchiveApi: apiHandler, @@ -322,7 +331,28 @@ func RunCaplinPhase1(ctx context.Context, engine execution_client.ExecutionEngin log.Info("Beacon API started", "addr", config.BeaconRouter.Address) } - stageCfg := stages.ClStagesCfg(beaconRpc, antiq, ethClock, beaconConfig, state, engine, gossipManager, forkChoice, indexDB, csn, rcsn, dirs.Tmp, uint64(config.LoopBlockLimit), backfilling, blobBackfilling, syncedDataManager, emitters, blobStorage, attestationProducer) + stageCfg := stages.ClStagesCfg( + beaconRpc, + antiq, + ethClock, + beaconConfig, + state, + engine, + gossipManager, + forkChoice, + indexDB, + csn, + rcsn, + dirs, + uint64(config.LoopBlockLimit), + backfilling, + blobBackfilling, + syncedDataManager, + emitters, + blobStorage, + attestationProducer, + validatorMonitor, + ) sync := stages.ConsensusClStages(ctx, stageCfg) logger.Info("[Caplin] starting clstages loop") diff --git a/cmd/caplin/main.go b/cmd/caplin/main.go index a527cca4abc..0d25632e5df 100644 --- a/cmd/caplin/main.go +++ b/cmd/caplin/main.go @@ -29,7 +29,8 @@ import ( "github.com/erigontech/erigon-lib/common/mem" "github.com/erigontech/erigon-lib/log/v3" "github.com/erigontech/erigon/cl/beacon/beacon_router_configuration" - "github.com/erigontech/erigon/cl/phase1/core" + "github.com/erigontech/erigon/cl/clparams" + 
"github.com/erigontech/erigon/cl/phase1/core/checkpoint_sync" "github.com/erigontech/erigon/cl/phase1/core/state" execution_client2 "github.com/erigontech/erigon/cl/phase1/execution_client" "github.com/erigontech/erigon/cl/utils/eth_clock" @@ -92,7 +93,7 @@ func runCaplinNode(cliCtx *cli.Context) error { if cfg.InitialSync { state = cfg.InitalState } else { - state, err = core.RetrieveBeaconState(ctx, cfg.BeaconCfg, cfg.NetworkType) + state, err = checkpoint_sync.NewRemoteCheckpointSync(cfg.BeaconCfg, cfg.NetworkType).GetLatestBeaconState(ctx) if err != nil { return err } @@ -160,5 +161,6 @@ func runCaplinNode(cliCtx *cli.Context) error { CaplinDiscoveryPort: uint64(cfg.Port), CaplinDiscoveryTCPPort: uint64(cfg.ServerTcpPort), BeaconRouter: rcfg, - }, cfg.NetworkCfg, cfg.BeaconCfg, ethClock, state, cfg.Dirs, nil, nil, false, false, false, indiciesDB, blobStorage, nil, blockSnapBuildSema, options...) + CaplinConfig: clparams.CaplinConfig{}, + }, cfg.NetworkCfg, cfg.BeaconCfg, ethClock, state, cfg.Dirs, nil, nil, indiciesDB, blobStorage, nil, blockSnapBuildSema, options...) } diff --git a/cmd/commitment-prefix/main.go b/cmd/commitment-prefix/main.go index e192351d857..c4ae16623b1 100644 --- a/cmd/commitment-prefix/main.go +++ b/cmd/commitment-prefix/main.go @@ -17,12 +17,14 @@ package main import ( + "errors" "flag" "fmt" "io" "os" "path" "path/filepath" + "strconv" "sync" "github.com/c2h5oh/datasize" @@ -106,7 +108,7 @@ func proceedFiles(files []string) { panic(err) } } - outPath := path.Join(dir, fmt.Sprintf("%s.html", "analysis")) + outPath := path.Join(dir, "analysis.html") fmt.Printf("rendering total graph to %s\n", outPath) f, err := os.Create(outPath) @@ -184,7 +186,7 @@ func extractKVPairFromCompressed(filename string, keysSink chan commitment.Branc for getter.HasNext() { key, _ := getter.Next(nil) if !getter.HasNext() { - return fmt.Errorf("invalid key/value pair during decompression") + return errors.New("invalid key/value pair during decompression") } val, afterValPos := getter.Next(nil) cpair++ @@ -245,7 +247,7 @@ func processCommitmentFile(fpath string) (*overallStat, error) { func prefixLenCountChart(fname string, data *overallStat) *charts.Pie { items := make([]opts.PieData, 0) for prefSize, count := range data.prefCount { - items = append(items, opts.PieData{Name: fmt.Sprintf("%d", prefSize), Value: count}) + items = append(items, opts.PieData{Name: strconv.FormatUint(prefSize, 10), Value: count}) } pie := charts.NewPie() @@ -268,7 +270,7 @@ func fileContentsMapChart(fileName string, data *overallStat) *charts.TreeMap { TreeMap[keysIndex].Children = make([]opts.TreeMapNode, 0) for prefSize, stat := range data.prefixes { TreeMap[keysIndex].Children = append(TreeMap[keysIndex].Children, opts.TreeMapNode{ - Name: fmt.Sprintf("%d", prefSize), + Name: strconv.FormatUint(prefSize, 10), Value: int(stat.KeySize), }) } diff --git a/cmd/devnet/contracts/steps/l1l2transfers.go b/cmd/devnet/contracts/steps/l1l2transfers.go index 0a6d05d3558..08b0ffd0416 100644 --- a/cmd/devnet/contracts/steps/l1l2transfers.go +++ b/cmd/devnet/contracts/steps/l1l2transfers.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "math" "math/big" @@ -132,7 +133,7 @@ func GenerateSyncEvents(ctx context.Context, senderName string, numberOfTransfer } if !sendConfirmed { - return fmt.Errorf("No post sync log received") + return errors.New("No post sync log received") } auth.Nonce = (&big.Int{}).Add(auth.Nonce, big.NewInt(1)) diff --git a/cmd/devnet/devnetutils/utils.go 
b/cmd/devnet/devnetutils/utils.go index 6e89b5ee563..2a475b1fae4 100644 --- a/cmd/devnet/devnetutils/utils.go +++ b/cmd/devnet/devnetutils/utils.go @@ -132,7 +132,7 @@ func RandomInt(max int) int { func NamespaceAndSubMethodFromMethod(method string) (string, string, error) { parts := strings.SplitN(method, "_", 2) if len(parts) != 2 { - return "", "", fmt.Errorf("invalid string to split") + return "", "", errors.New("invalid string to split") } return parts[0], parts[1], nil } diff --git a/cmd/devnet/scenarios/errors.go b/cmd/devnet/scenarios/errors.go index 1aabb477cdb..2e7ce7ab0f1 100644 --- a/cmd/devnet/scenarios/errors.go +++ b/cmd/devnet/scenarios/errors.go @@ -16,10 +16,13 @@ package scenarios -import "fmt" +import ( + "errors" + "fmt" +) // ErrUndefined is returned in case if step definition was not found -var ErrUndefined = fmt.Errorf("step is undefined") +var ErrUndefined = errors.New("step is undefined") type ScenarioError struct { error diff --git a/cmd/devnet/services/accounts/faucet.go b/cmd/devnet/services/accounts/faucet.go index daf0233a754..f5a5c8c6b62 100644 --- a/cmd/devnet/services/accounts/faucet.go +++ b/cmd/devnet/services/accounts/faucet.go @@ -18,6 +18,7 @@ package accounts import ( "context" + "errors" "fmt" "math/big" "strings" @@ -173,7 +174,7 @@ func (f *Faucet) Send(ctx context.Context, destination *accounts.Account, eth fl } if f.transactOpts == nil { - return nil, libcommon.Hash{}, fmt.Errorf("faucet not initialized") + return nil, libcommon.Hash{}, errors.New("faucet not initialized") } node := devnet.SelectNode(ctx) diff --git a/cmd/devnet/services/polygon/checkpoint.go b/cmd/devnet/services/polygon/checkpoint.go index c1f9b7b1c6d..cbf809e4436 100644 --- a/cmd/devnet/services/polygon/checkpoint.go +++ b/cmd/devnet/services/polygon/checkpoint.go @@ -61,7 +61,7 @@ func (c CheckpointBlock) GetSignBytes() ([]byte, error) { } return sdk.SortJSON(b)*/ - return nil, fmt.Errorf("TODO") + return nil, errors.New("TODO") } type CheckpointAck struct { @@ -588,7 +588,7 @@ func (h *Heimdall) handleRootHeaderBlock(event *contracts.TestRootChainNewHeader if ack.StartBlock != h.pendingCheckpoint.StartBlock().Uint64() { h.logger.Error("Invalid start block", "startExpected", h.pendingCheckpoint.StartBlock, "startReceived", ack.StartBlock) - return fmt.Errorf("invalid Checkpoint Ack: Invalid start block") + return errors.New("invalid Checkpoint Ack: Invalid start block") } // Return err if start and end matches but contract root hash doesn't match @@ -603,7 +603,7 @@ func (h *Heimdall) handleRootHeaderBlock(event *contracts.TestRootChainNewHeader "rootRecieved", ack.RootHash.String(), ) - return fmt.Errorf("invalid Checkpoint Ack: Invalid root hash") + return errors.New("invalid Checkpoint Ack: Invalid root hash") } h.latestCheckpoint = &ack diff --git a/cmd/devnet/services/polygon/heimdall.go b/cmd/devnet/services/polygon/heimdall.go index 018a2cd0d15..c96c7fc10ee 100644 --- a/cmd/devnet/services/polygon/heimdall.go +++ b/cmd/devnet/services/polygon/heimdall.go @@ -20,7 +20,6 @@ import ( "context" "encoding/json" "errors" - "fmt" "math/big" "net" "net/http" @@ -178,7 +177,7 @@ func (h *Heimdall) FetchSpan(ctx context.Context, spanID uint64) (*heimdall.Span nextSpan.StartBlock = 1 //256 } else { if spanID != uint64(h.currentSpan.Id+1) { - return nil, fmt.Errorf("can't initialize span: non consecutive span") + return nil, errors.New("can't initialize span: non consecutive span") } nextSpan.StartBlock = h.currentSpan.EndBlock + 1 @@ -202,7 +201,7 @@ func (h *Heimdall) 
FetchSpan(ctx context.Context, spanID uint64) (*heimdall.Span } func (h *Heimdall) FetchLatestSpan(ctx context.Context) (*heimdall.Span, error) { - return nil, fmt.Errorf("TODO") + return nil, errors.New("TODO") } func (h *Heimdall) currentSprintLength() int { @@ -216,51 +215,50 @@ func (h *Heimdall) currentSprintLength() int { func (h *Heimdall) getSpanOverrideHeight() uint64 { return 0 //MainChain: 8664000 - //MumbaiChain: 10205000 } func (h *Heimdall) FetchCheckpoint(ctx context.Context, number int64) (*heimdall.Checkpoint, error) { - return nil, fmt.Errorf("TODO") + return nil, errors.New("TODO") } func (h *Heimdall) FetchCheckpointCount(ctx context.Context) (int64, error) { - return 0, fmt.Errorf("TODO") + return 0, errors.New("TODO") } func (h *Heimdall) FetchCheckpoints(ctx context.Context, page uint64, limit uint64) ([]*heimdall.Checkpoint, error) { - return nil, fmt.Errorf("TODO") + return nil, errors.New("TODO") } func (h *Heimdall) FetchMilestone(ctx context.Context, number int64) (*heimdall.Milestone, error) { - return nil, fmt.Errorf("TODO") + return nil, errors.New("TODO") } func (h *Heimdall) FetchMilestoneCount(ctx context.Context) (int64, error) { - return 0, fmt.Errorf("TODO") + return 0, errors.New("TODO") } func (h *Heimdall) FetchFirstMilestoneNum(ctx context.Context) (int64, error) { - return 0, fmt.Errorf("TODO") + return 0, errors.New("TODO") } func (h *Heimdall) FetchNoAckMilestone(ctx context.Context, milestoneID string) error { - return fmt.Errorf("TODO") + return errors.New("TODO") } func (h *Heimdall) FetchLastNoAckMilestone(ctx context.Context) (string, error) { - return "", fmt.Errorf("TODO") + return "", errors.New("TODO") } func (h *Heimdall) FetchMilestoneID(ctx context.Context, milestoneID string) error { - return fmt.Errorf("TODO") + return errors.New("TODO") } func (h *Heimdall) FetchStateSyncEvents(ctx context.Context, fromID uint64, to time.Time, limit int) ([]*heimdall.EventRecordWithTime, error) { - return nil, fmt.Errorf("TODO") + return nil, errors.New("TODO") } func (h *Heimdall) FetchStateSyncEvent(ctx context.Context, id uint64) (*heimdall.EventRecordWithTime, error) { - return nil, fmt.Errorf("TODO") + return nil, errors.New("TODO") } func (h *Heimdall) Close() { diff --git a/cmd/devnet/services/polygon/heimdall_test.go b/cmd/devnet/services/polygon/heimdall_test.go index 29e31540ae7..26c9d350da2 100644 --- a/cmd/devnet/services/polygon/heimdall_test.go +++ b/cmd/devnet/services/polygon/heimdall_test.go @@ -40,14 +40,14 @@ func TestHeimdallServer(t *testing.T) { { EventRecord: heimdall.EventRecord{ ID: 1, - ChainID: "80001", + ChainID: "80002", }, Time: time.Now(), }, { EventRecord: heimdall.EventRecord{ ID: 2, - ChainID: "80001", + ChainID: "80002", }, Time: time.Now(), }, @@ -58,7 +58,7 @@ func TestHeimdallServer(t *testing.T) { Id: 1, StartBlock: 1000, EndBlock: 2000, - ChainID: "80001", + ChainID: "80002", } client.EXPECT().FetchSpan(gomock.Any(), gomock.Any()).AnyTimes().Return(span, nil) @@ -66,7 +66,7 @@ func TestHeimdallServer(t *testing.T) { Fields: heimdall.WaypointFields{ StartBlock: big.NewInt(1000), EndBlock: big.NewInt(1999), - ChainID: "80001", + ChainID: "80002", }, } client.EXPECT().FetchCheckpoint(gomock.Any(), gomock.Any()).AnyTimes().Return(checkpoint1, nil) diff --git a/cmd/devnet/services/polygon/proofgenerator.go b/cmd/devnet/services/polygon/proofgenerator.go index 4c8fb7c12d8..4fd7d98e3f5 100644 --- a/cmd/devnet/services/polygon/proofgenerator.go +++ b/cmd/devnet/services/polygon/proofgenerator.go @@ -83,7 +83,7 @@ 
func (pg *ProofGenerator) GenerateExitPayload(ctx context.Context, burnTxHash li logger := devnet.Logger(ctx) if pg.heimdall == nil || pg.heimdall.rootChainBinding == nil { - return nil, fmt.Errorf("ProofGenerator not initialized") + return nil, errors.New("ProofGenerator not initialized") } logger.Info("Checking for checkpoint status", "hash", burnTxHash) @@ -95,7 +95,7 @@ func (pg *ProofGenerator) GenerateExitPayload(ctx context.Context, burnTxHash li } if !isCheckpointed { - return nil, fmt.Errorf("eurn transaction has not been checkpointed yet") + return nil, errors.New("burn transaction has not been checkpointed yet") } // build payload for exit @@ -106,11 +106,11 @@ func (pg *ProofGenerator) GenerateExitPayload(ctx context.Context, burnTxHash li return nil, fmt.Errorf("block not included: %w", err) } - return nil, fmt.Errorf("null receipt received") + return nil, errors.New("null receipt received") } if len(result) == 0 { - return nil, fmt.Errorf("null result received") + return nil, errors.New("null result received") } return result, nil @@ -165,11 +165,11 @@ func (pg *ProofGenerator) buildPayloadForExit(ctx context.Context, burnTxHash li node := devnet.SelectBlockProducer(ctx) if node == nil { - return nil, fmt.Errorf("no node available") + return nil, errors.New("no node available") } if index < 0 { - return nil, fmt.Errorf("index must not negative") + return nil, errors.New("index must not be negative") } var receipt *types.Receipt @@ -183,7 +183,7 @@ func (pg *ProofGenerator) buildPayloadForExit(ctx context.Context, burnTxHash li } if lastChildBlockNum < txBlockNum { - return nil, fmt.Errorf("burn transaction has not been checkpointed as yet") + return nil, errors.New("burn transaction has not been checkpointed as yet") } // step 2- get transaction receipt from txhash and @@ -248,7 +248,7 @@ func (pg *ProofGenerator) buildPayloadForExit(ctx context.Context, burnTxHash li } if logIndex < 0 { - return nil, fmt.Errorf("log not found in receipt") + return nil, errors.New("log not found in receipt") } parentNodesBytes, err := rlp.EncodeToBytes(receiptProof.parentNodes) @@ -329,7 +329,7 @@ func getReceiptProof(ctx context.Context, node requests.RequestGenerator, receip result, parents, ok := receiptsTrie.FindPath(path) if !ok { - return nil, fmt.Errorf("node does not contain the key") + return nil, errors.New("node does not contain the key") } var nodeValue any diff --git a/cmd/devnet/services/polygon/proofgenerator_test.go b/cmd/devnet/services/polygon/proofgenerator_test.go index ecc01979643..9e5eaa5aeed 100644 --- a/cmd/devnet/services/polygon/proofgenerator_test.go +++ b/cmd/devnet/services/polygon/proofgenerator_test.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "crypto/ecdsa" + "errors" "fmt" "math" "math/big" @@ -200,7 +201,7 @@ func (rg *requestGenerator) GetTransactionReceipt(ctx context.Context, hash libc } } - return nil, fmt.Errorf("tx not found in block") + return nil, errors.New("tx not found in block") } type blockReader struct { @@ -213,7 +214,7 @@ func (reader blockReader) BlockByNumber(ctx context.Context, db kv.Tx, number ui return reader.chain.Blocks[number], nil } - return nil, fmt.Errorf("block not found") + return nil, errors.New("block not found") } func (reader blockReader) HeaderByNumber(ctx context.Context, txn kv.Getter, blockNum uint64) (*types.Header, error) { @@ -221,7 +222,7 @@ func (reader blockReader) HeaderByNumber(ctx context.Context, txn kv.Getter, blo return reader.chain.Headers[blockNum], nil } - return
nil, errors.New("header not found") } func TestMerkle(t *testing.T) { diff --git a/cmd/devnet/transactions/block.go b/cmd/devnet/transactions/block.go index 47eb2a2bfab..30e59c48fe5 100644 --- a/cmd/devnet/transactions/block.go +++ b/cmd/devnet/transactions/block.go @@ -18,6 +18,7 @@ package transactions import ( "context" + "errors" "fmt" "time" @@ -67,7 +68,7 @@ func searchBlockForHashes( logger := devnet.Logger(ctx) if len(hashmap) == 0 { - return nil, fmt.Errorf("no hashes to search for") + return nil, errors.New("no hashes to search for") } txToBlock := make(map[libcommon.Hash]uint64, len(hashmap)) @@ -76,7 +77,7 @@ func searchBlockForHashes( // get a block from the new heads channel if headsSub == nil { - return nil, fmt.Errorf("no block heads subscription") + return nil, errors.New("no block heads subscription") } var blockCount int @@ -104,7 +105,7 @@ func searchBlockForHashes( logger.Error("Missing Tx", "txHash", h) } - return nil, fmt.Errorf("timeout when searching for tx") + return nil, errors.New("timeout when searching for tx") } } } diff --git a/cmd/diag/main.go b/cmd/diag/main.go index fab1ad3e178..1224d408fa5 100644 --- a/cmd/diag/main.go +++ b/cmd/diag/main.go @@ -31,6 +31,7 @@ import ( "github.com/erigontech/erigon/cmd/diag/db" "github.com/erigontech/erigon/cmd/diag/downloader" "github.com/erigontech/erigon/cmd/diag/stages" + sinfo "github.com/erigontech/erigon/cmd/diag/sysinfo" "github.com/erigontech/erigon/cmd/diag/ui" "github.com/erigontech/erigon/cmd/snapshots/sync" "github.com/erigontech/erigon/cmd/utils" @@ -52,6 +53,7 @@ func main() { &stages.Command, &db.Command, &ui.Command, + &sinfo.Command, } app.Flags = []cli.Flag{} diff --git a/cmd/diag/sysinfo/sysinfo.go b/cmd/diag/sysinfo/sysinfo.go new file mode 100644 index 00000000000..f4e776d7a6d --- /dev/null +++ b/cmd/diag/sysinfo/sysinfo.go @@ -0,0 +1,144 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . 
+ +package sysinfo + +import ( + "fmt" + "strconv" + "strings" + + "github.com/urfave/cli/v2" + + "github.com/erigontech/erigon-lib/diagnostics" + "github.com/erigontech/erigon/cmd/diag/flags" + "github.com/erigontech/erigon/cmd/diag/util" +) + +var ( + ExportPathFlag = cli.StringFlag{ + Name: "export.path", + Aliases: []string{"ep"}, + Usage: "Path to folder for export result", + Required: true, + Value: "", + } + + ExportFileNameFlag = cli.StringFlag{ + Name: "export.file", + Aliases: []string{"ef"}, + Usage: "File name to export result, default is sysinfo.txt", + Required: false, + Value: "sysinfo.txt", + } +) + +var Command = cli.Command{ + Name: "sysinfo", + Aliases: []string{"sinfo"}, + ArgsUsage: "", + Action: collectInfo, + Flags: []cli.Flag{ + &flags.DebugURLFlag, + &ExportPathFlag, + &ExportFileNameFlag, + }, + Description: "Collect information about the system and save it to a file to provide to a support person", +} + +func collectInfo(cliCtx *cli.Context) error { + data, err := getData(cliCtx) + if err != nil { + util.RenderError(err) + } + + var builder strings.Builder + builder.WriteString("Disk info:\n") + builder.WriteString(data.Disk.Details) + builder.WriteString("\n\n") + builder.WriteString("CPU info:\n") + writeCPUToStringBuilder(data.CPU, &builder) + + // Save data to file + err = util.SaveDataToFile(cliCtx.String(ExportPathFlag.Name), cliCtx.String(ExportFileNameFlag.Name), builder.String()) + if err != nil { + util.RenderError(err) + } + + return nil +} + +func writeCPUToStringBuilder(cpuInfo []diagnostics.CPUInfo, builder *strings.Builder) { + spacing := calculateSpacing([]string{"CPU", "VendorID", "Family", "Model", "Stepping", "PhysicalID", "CoreID", "Cores", "ModelName", "Mhz", "CacheSize", "Flags", "Microcode"}) + + for _, cpu := range cpuInfo { + writeStringToBuilder(builder, "CPU", strconv.Itoa(int(cpu.CPU)), spacing) + writeStringToBuilder(builder, "VendorID", cpu.VendorID, spacing) + writeStringToBuilder(builder, "Family", cpu.Family, spacing) + writeStringToBuilder(builder, "Model", cpu.Model, spacing) + writeStringToBuilder(builder, "Stepping", strconv.Itoa(int(cpu.Stepping)), spacing) + writeStringToBuilder(builder, "PhysicalID", cpu.PhysicalID, spacing) + writeStringToBuilder(builder, "CoreID", cpu.CoreID, spacing) + writeStringToBuilder(builder, "Cores", strconv.Itoa(int(cpu.Cores)), spacing) + writeStringToBuilder(builder, "ModelName", cpu.ModelName, spacing) + writeStringToBuilder(builder, "Mhz", fmt.Sprintf("%g", cpu.Mhz), spacing) + writeStringToBuilder(builder, "CacheSize", strconv.Itoa(int(cpu.CacheSize)), spacing) + writeStringToBuilder(builder, "Flags", strings.Join(cpu.Flags, ", "), spacing) + writeStringToBuilder(builder, "Microcode", cpu.Microcode, spacing) + } +} + +func calculateSpacing(keysArray []string) int { + max := 0 + for _, key := range keysArray { + if len(key) > max { + max = len(key) + } + } + + return max + 3 +} + +func writeStringToBuilder(result *strings.Builder, name string, value string, spacing int) { + margin := 3 + if value == "" { + value = "N/A" + } + + writeSpacesToBuilder(result, margin) + result.WriteString(name) + result.WriteString(":") + writeSpacesToBuilder(result, spacing-len(name)-1) + result.WriteString(value) + result.WriteString("\n") +} + +func writeSpacesToBuilder(result *strings.Builder, spaces int) { + result.WriteString(strings.Repeat(" ", spaces)) +} + +func getData(cliCtx *cli.Context) (diagnostics.HardwareInfo, error) { + var data diagnostics.HardwareInfo + url := "http://" +
cliCtx.String(flags.DebugURLFlag.Name) + flags.ApiPath + "/hardware-info" + + err := util.MakeHttpGetCall(cliCtx.Context, url, &data) + + if err != nil { + return data, err + } + + return data, nil +} diff --git a/cmd/diag/ui/ui.go b/cmd/diag/ui/ui.go index e8c7532f781..434251f7fac 100644 --- a/cmd/diag/ui/ui.go +++ b/cmd/diag/ui/ui.go @@ -128,8 +128,8 @@ func runUI(cli *cli.Context) error { } }() - uiUrl := fmt.Sprintf("http://%s", listenUrl) - fmt.Println(text.Hyperlink(uiUrl, fmt.Sprintf("UI running on %s", uiUrl))) + uiUrl := "http://" + listenUrl + fmt.Println(text.Hyperlink(uiUrl, "UI running on "+uiUrl)) wg.Wait() // Wait for the server goroutine to finish return nil diff --git a/cmd/diag/util/util.go b/cmd/diag/util/util.go index 1f571098773..b27307db9e6 100644 --- a/cmd/diag/util/util.go +++ b/cmd/diag/util/util.go @@ -19,6 +19,7 @@ package util import ( "context" "encoding/json" + "errors" "fmt" "io" "net/http" @@ -43,7 +44,7 @@ func MakeHttpGetCall(ctx context.Context, url string, data interface{}) error { resp, err := client.Do(req) if err != nil { if strings.Contains(err.Error(), "connection refused") { - return fmt.Errorf("it looks like the Erigon node is not running, is running incorrectly, or you have specified the wrong diagnostics URL. If you run the Erigon node with the '--diagnostics.endpoint.addr' or '--diagnostics.endpoint.port' flags, you must also specify the '--debug.addr' flag with the same address and port") + return errors.New("it looks like the Erigon node is not running, is running incorrectly, or you have specified the wrong diagnostics URL. If you run the Erigon node with the '--diagnostics.endpoint.addr' or '--diagnostics.endpoint.port' flags, you must also specify the '--debug.addr' flag with the same address and port") } return err } @@ -57,7 +58,7 @@ func MakeHttpGetCall(ctx context.Context, url string, data interface{}) error { err = json.Unmarshal(body, &data) if err != nil { if err.Error() == "invalid character 'p' after top-level value" { - return fmt.Errorf("diagnostics was not initialized yet. Please try again in a few seconds") + return errors.New("diagnostics was not initialized yet. 
Please try again in a few seconds") } return err @@ -111,3 +112,36 @@ func RenderError(err error) { txt := text.Colors{text.FgWhite, text.BgRed} fmt.Printf("%s %s\n", txt.Sprint("[ERROR]"), err) } + +func SaveDataToFile(filePath string, fileName string, data string) error { + // check if the folder exists + if _, err := os.Stat(filePath); os.IsNotExist(err) { + err := os.MkdirAll(filePath, 0755) + if err != nil { + return err + } + } + + fullPath := MakePath(filePath, fileName) + + file, err := os.Create(fullPath) + if err != nil { + return err + } + defer file.Close() + + _, err = file.WriteString(data + "\n") + if err != nil { + return err + } + + return nil +} + +func MakePath(filePath string, fileName string) string { + if filePath[len(filePath)-1] == '/' { + filePath = filePath[:len(filePath)-1] + } + + return fmt.Sprintf("%s/%s", filePath, fileName) +} diff --git a/cmd/downloader/main.go b/cmd/downloader/main.go index c79cc9fd8c2..63cf24af3fa 100644 --- a/cmd/downloader/main.go +++ b/cmd/downloader/main.go @@ -101,6 +101,7 @@ var ( disableIPV4 bool seedbox bool dbWritemap bool + all bool ) func init() { @@ -132,6 +133,7 @@ func init() { withFile(createTorrent) withChainFlag(createTorrent) rootCmd.AddCommand(createTorrent) + createTorrent.Flags().BoolVar(&all, "all", false, "Produce all possible .torrent files") rootCmd.AddCommand(torrentCat) rootCmd.AddCommand(torrentMagnet) @@ -252,6 +254,10 @@ func Downloader(ctx context.Context, logger log.Logger) error { cfg.AddTorrentsFromDisk = true // always true unless using uploader - which wants control of torrent files + if seedbox { + snapcfg.LoadRemotePreverified() + } + d, err := downloader.New(ctx, cfg, logger, log.LvlInfo, seedbox) if err != nil { return err } @@ -299,10 +305,10 @@ func Downloader(ctx context.Context, logger log.Logger) error { var createTorrent = &cobra.Command{ Use: "torrent_create", - Example: "go run ./cmd/downloader torrent_create --datadir= --file=", + Example: "go run ./cmd/downloader torrent_create --datadir= --file= ", RunE: func(cmd *cobra.Command, args []string) error { dirs := datadir.New(datadirCli) - createdAmount, err := downloader.BuildTorrentFilesIfNeed(cmd.Context(), dirs, downloader.NewAtomicTorrentFS(dirs.Snap), chain, nil) + createdAmount, err := downloader.BuildTorrentFilesIfNeed(cmd.Context(), dirs, downloader.NewAtomicTorrentFS(dirs.Snap), chain, nil, all) if err != nil { return err } @@ -353,7 +359,7 @@ var torrentCat = &cobra.Command{ Example: "go run ./cmd/downloader torrent_cat ", RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { - return fmt.Errorf("please pass .torrent file path by first argument") + return errors.New("please pass the .torrent file path as the first argument") } fPath := args[0] mi, err := metainfo.LoadFromFile(fPath) @@ -413,7 +419,7 @@ var torrentMagnet = &cobra.Command{ Example: "go run ./cmd/downloader torrent_magnet ", RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { - return fmt.Errorf("please pass .torrent file path by first argument") + return errors.New("please pass the .torrent file path as the first argument") } fPath := args[0] mi, err := metainfo.LoadFromFile(fPath) @@ -486,7 +492,7 @@ func manifestVerify(ctx context.Context, logger log.Logger) error { func manifest(ctx context.Context, logger log.Logger) error { dirs := datadir.New(datadirCli) - files, err := downloader.SeedableFiles(dirs, chain) + files, err := downloader.SeedableFiles(dirs, chain, all) if err != nil { return err } @@ -552,7 +558,7 @@ func
doPrintTorrentHashes(ctx context.Context, logger log.Logger) error { return err } } - createdAmount, err := downloader.BuildTorrentFilesIfNeed(ctx, dirs, tf, chain, nil) + createdAmount, err := downloader.BuildTorrentFilesIfNeed(ctx, dirs, tf, chain, nil, all) if err != nil { return fmt.Errorf("BuildTorrentFilesIfNeed: %w", err) } diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go index e2c6fae99f1..7261ee968ad 100644 --- a/cmd/evm/internal/t8ntool/transition.go +++ b/cmd/evm/internal/t8ntool/transition.go @@ -398,21 +398,21 @@ func getTransaction(txJson jsonrpc.RPCTransaction) (types.Transaction, error) { if txJson.Value != nil { value, overflow = uint256.FromBig(txJson.Value.ToInt()) if overflow { - return nil, fmt.Errorf("value field caused an overflow (uint256)") + return nil, errors.New("value field caused an overflow (uint256)") } } if txJson.GasPrice != nil { gasPrice, overflow = uint256.FromBig(txJson.GasPrice.ToInt()) if overflow { - return nil, fmt.Errorf("gasPrice field caused an overflow (uint256)") + return nil, errors.New("gasPrice field caused an overflow (uint256)") } } if txJson.ChainID != nil { chainId, overflow = uint256.FromBig(txJson.ChainID.ToInt()) if overflow { - return nil, fmt.Errorf("chainId field caused an overflow (uint256)") + return nil, errors.New("chainId field caused an overflow (uint256)") } } @@ -448,14 +448,14 @@ func getTransaction(txJson jsonrpc.RPCTransaction) (types.Transaction, error) { if txJson.Tip != nil { tip, overflow = uint256.FromBig(txJson.Tip.ToInt()) if overflow { - return nil, fmt.Errorf("maxPriorityFeePerGas field caused an overflow (uint256)") + return nil, errors.New("maxPriorityFeePerGas field caused an overflow (uint256)") } } if txJson.FeeCap != nil { feeCap, overflow = uint256.FromBig(txJson.FeeCap.ToInt()) if overflow { - return nil, fmt.Errorf("maxFeePerGas field caused an overflow (uint256)") + return nil, errors.New("maxFeePerGas field caused an overflow (uint256)") } } diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go index 70ca45700a4..380cae72f75 100644 --- a/cmd/evm/runner.go +++ b/cmd/evm/runner.go @@ -24,6 +24,7 @@ import ( "context" "encoding/json" "fmt" + "github.com/erigontech/erigon-lib/common/datadir" "io" "math/big" "os" @@ -158,7 +159,7 @@ func runCmd(ctx *cli.Context) error { defer db.Close() if ctx.String(GenesisFlag.Name) != "" { gen := readGenesis(ctx.String(GenesisFlag.Name)) - core.MustCommitGenesis(gen, db, "", log.Root()) + core.MustCommitGenesis(gen, db, datadir.New(""), log.Root()) genesisConfig = gen chainConfig = gen.Config } else { diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go index 184274bc748..42349fe52b5 100644 --- a/cmd/evm/staterunner.go +++ b/cmd/evm/staterunner.go @@ -168,7 +168,7 @@ func aggregateResultsFromStateTests( // Run the test and aggregate the result result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true} - statedb, root, err := test.Run(tx, st, cfg) + statedb, root, err := test.Run(tx, st, cfg, dirs) if err != nil { // Test failed, mark as so and dump any state to aid debugging result.Pass, result.Error = false, err.Error() diff --git a/cmd/evm/t8n_test.go b/cmd/evm/t8n_test.go index 10bef940668..9f742f1aca2 100644 --- a/cmd/evm/t8n_test.go +++ b/cmd/evm/t8n_test.go @@ -27,7 +27,7 @@ import ( "strings" "testing" - "github.com/docker/docker/pkg/reexec" + "github.com/erigontech/erigon/internal/reexec" "github.com/erigontech/erigon/turbo/cmdtest" ) diff --git a/cmd/hack/db/lmdb.go b/cmd/hack/db/lmdb.go index 
52e64f2570d..a4a745641b4 100644 --- a/cmd/hack/db/lmdb.go +++ b/cmd/hack/db/lmdb.go @@ -27,6 +27,7 @@ import ( "os" "os/exec" "path/filepath" + "strconv" "strings" "github.com/erigontech/erigon-lib/kv" @@ -122,14 +123,14 @@ func _64(page []byte, pos int) uint64 { func pagesToString(pages []uint32) (out string) { if len(pages) == 1 { - out += fmt.Sprint(pages[0]) + out += strconv.FormatUint(uint64(pages[0]), 10) return } if len(pages) == 2 { - out += fmt.Sprint(pages[0]) + out += strconv.FormatUint(uint64(pages[0]), 10) out += ", " - out += fmt.Sprint(pages[1]) + out += strconv.FormatUint(uint64(pages[1]), 10) return } @@ -166,7 +167,7 @@ func pagesToString(pages []uint32) (out string) { if i < len(container)-1 { out += fmt.Sprintf("%d, ", n) } else { - out += fmt.Sprintf("%d", n) + out += strconv.FormatUint(uint64(n), 10) } } @@ -189,7 +190,7 @@ func pagesToString(pages []uint32) (out string) { if i < len(container)-1 { out += fmt.Sprintf("%d, ", n) } else { - out += fmt.Sprintf("%d", n) + out += strconv.FormatUint(uint64(n), 10) } } @@ -1109,7 +1110,7 @@ func _conditions(f io.ReaderAt, visStream io.Writer, node *mdbx_node, _header *h for _, subNode := range subHeader.nodes { val := string(subNode.data[:subNode.ksize]) - *out += fmt.Sprintf("|%s", val) + *out += "|" + val } *out += "}" @@ -1144,7 +1145,7 @@ func readPages(f io.ReaderAt, visStream io.Writer, pgno uint32, blockID *int, pa *blockID++ pBlock := *blockID - fillcolor := "" + var fillcolor string if _isBranch { fillcolor = colors["purple"] } else { @@ -1240,7 +1241,7 @@ func freeDBPages(f io.ReaderAt, visStream io.Writer, freeRoot uint32) error { out += fmt.Sprintf("txid(%v)", txnID) out += fmt.Sprintf("(ON %d OVERFLOW PAGES)=", overflowPages) for i := 0; i < overflowPages; i++ { - out += fmt.Sprintf("%d", int(node.pgno)+i) + out += strconv.Itoa(int(node.pgno) + i) if i+1 < overflowPages { out += ", " } diff --git a/cmd/hack/flow/flow.go b/cmd/hack/flow/flow.go index 2a964c253eb..8486203ae37 100644 --- a/cmd/hack/flow/flow.go +++ b/cmd/hack/flow/flow.go @@ -286,11 +286,11 @@ func batchServer() { } func si64(n int64) string { - return fmt.Sprintf("%v", n) + return strconv.FormatInt(n, 10) } func sui64(n uint64) string { - return fmt.Sprintf("%v", n) + return strconv.FormatUint(n, 10) } /* @@ -742,11 +742,11 @@ type cfgJobResult struct { } func sb(b bool) string { - return fmt.Sprintf("%v", b) + return strconv.FormatBool(b) } func si(i int) string { - return fmt.Sprintf("%v", i) + return strconv.Itoa(i) } func percent(n int, d int) string { diff --git a/cmd/hack/hack.go b/cmd/hack/hack.go index 1215c7cba18..2943df39794 100644 --- a/cmd/hack/hack.go +++ b/cmd/hack/hack.go @@ -24,7 +24,6 @@ import ( "encoding/json" "flag" "fmt" - "math/big" "net/http" _ "net/http/pprof" //nolint:gosec "os" @@ -53,7 +52,6 @@ import ( "github.com/erigontech/erigon/cmd/hack/flow" "github.com/erigontech/erigon/cmd/hack/tool" "github.com/erigontech/erigon/common" - "github.com/erigontech/erigon/common/paths" "github.com/erigontech/erigon/core" "github.com/erigontech/erigon/core/rawdb" "github.com/erigontech/erigon/core/rawdb/blockio" @@ -142,7 +140,7 @@ func printCurrentBlockNumber(chaindata string) { } func blocksIO(db kv.RoDB) (services.FullBlockReader, *blockio.BlockWriter) { - br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", 0, log.New()), nil /* BorSnapshots */) + br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{}, "", 0, log.New()), nil /* BorSnapshots 
*/) bw := blockio.NewBlockWriter() return br, bw } @@ -168,56 +166,6 @@ func printTxHashes(chaindata string, block uint64) error { return nil } -func repairCurrent() { - historyDb := mdbx.MustOpen("/Volumes/tb4/erigon/ropsten/geth/chaindata") - defer historyDb.Close() - currentDb := mdbx.MustOpen("statedb") - defer currentDb.Close() - tool.Check(historyDb.Update(context.Background(), func(tx kv.RwTx) error { - return tx.ClearBucket(kv.HashedStorage) - })) - tool.Check(historyDb.Update(context.Background(), func(tx kv.RwTx) error { - newB, err := tx.RwCursor(kv.HashedStorage) - if err != nil { - return err - } - count := 0 - if err := currentDb.View(context.Background(), func(ctx kv.Tx) error { - c, err := ctx.Cursor(kv.HashedStorage) - if err != nil { - return err - } - for k, v, err := c.First(); k != nil; k, v, err = c.Next() { - if err != nil { - return err - } - tool.Check(newB.Put(k, v)) - count++ - if count == 10000 { - fmt.Printf("Copied %d storage items\n", count) - } - } - return nil - }); err != nil { - return err - } - return nil - })) -} - -func dumpStorage() { - db := mdbx.MustOpen(paths.DefaultDataDir() + "/geth/chaindata") - defer db.Close() - if err := db.View(context.Background(), func(tx kv.Tx) error { - return tx.ForEach(kv.E2StorageHistory, nil, func(k, v []byte) error { - fmt.Printf("%x %x\n", k, v) - return nil - }) - }); err != nil { - panic(err) - } -} - func printBucket(chaindata string) { db := mdbx.MustOpen(chaindata) defer db.Close() @@ -334,7 +282,6 @@ func extractHeaders(chaindata string, block uint64, blockTotalOrOffset int64) er func extractBodies(datadir string) error { snaps := freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{ - Enabled: true, KeepBlocks: true, ProduceE2: false, }, filepath.Join(datadir, "snapshots"), 0, log.New()) @@ -469,62 +416,6 @@ func snapSizes(chaindata string) error { return nil } -func fixTd(chaindata string) error { - db := mdbx.MustOpen(chaindata) - defer db.Close() - tx, err := db.BeginRw(context.Background()) - if err != nil { - return err - } - defer tx.Rollback() - c, err1 := tx.RwCursor(kv.Headers) - if err1 != nil { - return err1 - } - defer c.Close() - var k, v []byte - for k, v, err = c.First(); err == nil && k != nil; k, v, err = c.Next() { - hv, herr := tx.GetOne(kv.HeaderTD, k) - if herr != nil { - return herr - } - if hv == nil { - fmt.Printf("Missing TD record for %x, fixing\n", k) - var header types.Header - if err = rlp.DecodeBytes(v, &header); err != nil { - return fmt.Errorf("decoding header from %x: %w", v, err) - } - if header.Number.Uint64() == 0 { - continue - } - var parentK [40]byte - binary.BigEndian.PutUint64(parentK[:], header.Number.Uint64()-1) - copy(parentK[8:], header.ParentHash[:]) - var parentTdRec []byte - if parentTdRec, err = tx.GetOne(kv.HeaderTD, parentK[:]); err != nil { - return fmt.Errorf("reading parentTd Rec for %d: %w", header.Number.Uint64(), err) - } - var parentTd big.Int - if err = rlp.DecodeBytes(parentTdRec, &parentTd); err != nil { - return fmt.Errorf("decoding parent Td record for block %d, from %x: %w", header.Number.Uint64(), parentTdRec, err) - } - var td big.Int - td.Add(&parentTd, header.Difficulty) - var newHv []byte - if newHv, err = rlp.EncodeToBytes(&td); err != nil { - return fmt.Errorf("encoding td record for block %d: %w", header.Number.Uint64(), err) - } - if err = tx.Put(kv.HeaderTD, k, newHv); err != nil { - return err - } - } - } - if err != nil { - return err - } - return tx.Commit() -} - func advanceExec(chaindata string) error { db := 
mdbx.MustOpen(chaindata) defer db.Close() @@ -782,7 +673,7 @@ func chainConfig(name string) error { if chainConfig == nil { return fmt.Errorf("unknown name: %s", name) } - f, err := os.Create(filepath.Join("params", "chainspecs", fmt.Sprintf("%s.json", name))) + f, err := os.Create(filepath.Join("params", "chainspecs", name+".json")) if err != nil { return err } @@ -955,25 +846,6 @@ func iterate(filename string, prefix string) error { return nil } -func readSeg(chaindata string) error { - vDecomp, err := seg.NewDecompressor(chaindata) - if err != nil { - return err - } - defer vDecomp.Close() - g := vDecomp.MakeGetter() - var buf []byte - var count int - var offset, nextPos uint64 - for g.HasNext() { - buf, nextPos = g.Next(buf[:0]) - fmt.Printf("offset: %d, val: %x\n", offset, buf) - offset = nextPos - count++ - } - return nil -} - func main() { debug.RaiseFdLimit() flag.Parse() @@ -1006,9 +878,6 @@ func main() { case "testBlockHashes": testBlockHashes(*chaindata, *block, libcommon.HexToHash(*hash)) - case "dumpStorage": - dumpStorage() - case "current": printCurrentBlockNumber(*chaindata) @@ -1033,18 +902,12 @@ func main() { case "extractBodies": err = extractBodies(*chaindata) - case "repairCurrent": - repairCurrent() - case "printTxHashes": printTxHashes(*chaindata, uint64(*block)) case "snapSizes": err = snapSizes(*chaindata) - case "fixTd": - err = fixTd(*chaindata) - case "advanceExec": err = advanceExec(*chaindata) @@ -1070,8 +933,6 @@ func main() { err = iterate(*chaindata, *account) case "rmSnKey": err = rmSnKey(*chaindata) - case "readSeg": - err = readSeg(*chaindata) } if err != nil { diff --git a/cmd/integration/commands/refetence_db.go b/cmd/integration/commands/refetence_db.go index 8721bec5367..520e5c30bf0 100644 --- a/cmd/integration/commands/refetence_db.go +++ b/cmd/integration/commands/refetence_db.go @@ -20,6 +20,7 @@ import ( "bufio" "bytes" "context" + "encoding/hex" "errors" "fmt" "os" @@ -499,7 +500,7 @@ MainLoop: case <-ctx.Done(): return ctx.Err() case <-commitEvery.C: - logger.Info("Progress", "bucket", bucket, "key", fmt.Sprintf("%x", k)) + logger.Info("Progress", "bucket", bucket, "key", hex.EncodeToString(k)) } } err = fileScanner.Err() diff --git a/cmd/integration/commands/reset_state.go b/cmd/integration/commands/reset_state.go index 8019fff864b..7958504f3d2 100644 --- a/cmd/integration/commands/reset_state.go +++ b/cmd/integration/commands/reset_state.go @@ -138,8 +138,8 @@ func printStages(tx kv.Tx, snapshots *freezeblocks.RoSnapshots, borSn *freezeblo } fmt.Fprintf(w, "--\n") fmt.Fprintf(w, "prune distance: %s\n\n", pm.String()) - fmt.Fprintf(w, "blocks.v2: %t, segments=%d, indices=%d\n", snapshots.Cfg().Enabled, snapshots.SegmentsMax(), snapshots.IndicesMax()) - fmt.Fprintf(w, "blocks.bor.v2: segments=%d, indices=%d\n\n", borSn.SegmentsMax(), borSn.IndicesMax()) + fmt.Fprintf(w, "blocks: segments=%d, indices=%d\n", snapshots.SegmentsMax(), snapshots.IndicesMax()) + fmt.Fprintf(w, "blocks.bor: segments=%d, indices=%d\n\n", borSn.SegmentsMax(), borSn.IndicesMax()) _, lastBlockInHistSnap, _ := rawdbv3.TxNums.FindBlockNum(tx, agg.EndTxNumMinimax()) _lb, _lt, _ := rawdbv3.TxNums.Last(tx) diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go index a74c50a03bd..a570c99a03d 100644 --- a/cmd/integration/commands/stages.go +++ b/cmd/integration/commands/stages.go @@ -80,7 +80,6 @@ import ( "github.com/erigontech/erigon/turbo/services" "github.com/erigontech/erigon/turbo/shards" 
"github.com/erigontech/erigon/turbo/snapshotsync/freezeblocks" - "github.com/erigontech/erigon/turbo/snapshotsync/snap" stages2 "github.com/erigontech/erigon/turbo/stages" ) @@ -447,42 +446,6 @@ var cmdSetPrune = &cobra.Command{ }, } -var cmdSetSnap = &cobra.Command{ - Use: "force_set_snap", - Short: "Override existing --snapshots flag value (if you know what you are doing)", - Run: func(cmd *cobra.Command, args []string) { - logger := debug.SetupCobra(cmd, "integration") - db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) - if err != nil { - logger.Error("Opening DB", "error", err) - return - } - defer db.Close() - sn, borSn, agg, _ := allSnapshots(cmd.Context(), db, logger) - defer sn.Close() - defer borSn.Close() - defer agg.Close() - - cfg := sn.Cfg() - flags := cmd.Flags() - if flags.Lookup("snapshots") != nil { - cfg.Enabled, err = flags.GetBool("snapshots") - if err != nil { - panic(err) - } - } - - if err := db.Update(context.Background(), func(tx kv.RwTx) error { - return snap.ForceSetFlags(tx, cfg) - }); err != nil { - if !errors.Is(err, context.Canceled) { - logger.Error(err.Error()) - } - return - } - }, -} - func init() { withConfig(cmdPrintStages) withDataDir(cmdPrintStages) @@ -601,13 +564,6 @@ func init() { withHeimdall(cmdRunMigrations) rootCmd.AddCommand(cmdRunMigrations) - withConfig(cmdSetSnap) - withDataDir2(cmdSetSnap) - withChain(cmdSetSnap) - cmdSetSnap.Flags().Bool("snapshots", false, "") - must(cmdSetSnap.MarkFlagRequired("snapshots")) - rootCmd.AddCommand(cmdSetSnap) - withConfig(cmdSetPrune) withDataDir(cmdSetPrune) withChain(cmdSetPrune) @@ -805,7 +761,7 @@ func stageBorHeimdall(db kv.RwDB, ctx context.Context, unwindTypes []string, log } unwindState := sync.NewUnwindState(stages.BorHeimdall, stageState.BlockNumber-unwind, stageState.BlockNumber, true, false) - cfg := stagedsync.StageBorHeimdallCfg(db, nil, miningState, *chainConfig, nil, nil, nil, nil, nil, nil, nil, false, unwindTypes) + cfg := stagedsync.StageBorHeimdallCfg(db, nil, miningState, *chainConfig, nil, nil, nil, nil, nil, nil, false, unwindTypes) if err := stagedsync.BorHeimdallUnwind(unwindState, ctx, stageState, tx, cfg); err != nil { return err } @@ -834,7 +790,7 @@ func stageBorHeimdall(db kv.RwDB, ctx context.Context, unwindTypes []string, log recents = bor.Recents signatures = bor.Signatures } - cfg := stagedsync.StageBorHeimdallCfg(db, snapDb, miningState, *chainConfig, heimdallClient, blockReader, nil, nil, nil, recents, signatures, false, unwindTypes) + cfg := stagedsync.StageBorHeimdallCfg(db, snapDb, miningState, *chainConfig, heimdallClient, blockReader, nil, nil, recents, signatures, false, unwindTypes) stageState := stage(sync, tx, nil, stages.BorHeimdall) if err := stagedsync.BorHeimdallForward(stageState, sync, ctx, tx, cfg, logger); err != nil { @@ -865,11 +821,11 @@ func stageBodies(db kv.RwDB, ctx context.Context, logger log.Logger) error { if unwind > 0 { if unwind > s.BlockNumber { - return fmt.Errorf("cannot unwind past 0") + return errors.New("cannot unwind past 0") } u := sync.NewUnwindState(stages.Bodies, s.BlockNumber-unwind, s.BlockNumber, true, false) - cfg := stagedsync.StageBodiesCfg(db, nil, nil, nil, nil, nil, 0, *chainConfig, br, bw, nil) + cfg := stagedsync.StageBodiesCfg(db, nil, nil, nil, nil, nil, 0, *chainConfig, br, bw) if err := stagedsync.UnwindBodiesStage(u, tx, cfg, ctx); err != nil { return err } @@ -959,7 +915,7 @@ func stageSenders(db kv.RwDB, ctx context.Context, logger log.Logger) error { return err } - cfg := 
stagedsync.StageSendersCfg(db, chainConfig, sync.Cfg(), false, tmpdir, pm, br, nil, nil) + cfg := stagedsync.StageSendersCfg(db, chainConfig, sync.Cfg(), false, tmpdir, pm, br, nil) if unwind > 0 { u := sync.NewUnwindState(stages.Senders, s.BlockNumber-unwind, s.BlockNumber, true, false) if err = stagedsync.UnwindSendersStage(u, tx, cfg, ctx); err != nil { @@ -1029,7 +985,7 @@ func stageExec(db kv.RwDB, ctx context.Context, logger log.Logger) error { br, _ := blocksIO(db, logger) cfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, chainConfig, engine, vmConfig, nil, /*stateStream=*/ false, - /*badBlockHalt=*/ true, dirs, br, nil, genesis, syncCfg, agg, nil) + /*badBlockHalt=*/ true, dirs, br, nil, genesis, syncCfg, nil) if unwind > 0 { if err := db.View(ctx, func(tx kv.Tx) error { @@ -1309,15 +1265,9 @@ var _aggSingleton *libstate.Aggregator func allSnapshots(ctx context.Context, db kv.RoDB, logger log.Logger) (*freezeblocks.RoSnapshots, *freezeblocks.BorRoSnapshots, *libstate.Aggregator, *freezeblocks.CaplinSnapshots) { openSnapshotOnce.Do(func() { - var useSnapshots bool - _ = db.View(context.Background(), func(tx kv.Tx) error { - useSnapshots, _ = snap.Enabled(tx) - return nil - }) dirs := datadir.New(datadirCli) - //useSnapshots = true - snapCfg := ethconfig.NewSnapCfg(useSnapshots, true, true, true) + snapCfg := ethconfig.NewSnapCfg(true, true, true) _allSnapshotsSingleton = freezeblocks.NewRoSnapshots(snapCfg, dirs.Snap, 0, logger) _allBorSnapshotsSingleton = freezeblocks.NewBorRoSnapshots(snapCfg, dirs.Snap, 0, logger) @@ -1330,57 +1280,55 @@ func allSnapshots(ctx context.Context, db kv.RoDB, logger log.Logger) (*freezebl _aggSingleton.SetProduceMod(snapCfg.ProduceE3) - if useSnapshots { - g := &errgroup.Group{} - g.Go(func() error { - _allSnapshotsSingleton.OptimisticalyReopenFolder() - return nil - }) - g.Go(func() error { - _allBorSnapshotsSingleton.OptimisticalyReopenFolder() - return nil - }) - g.Go(func() error { return _aggSingleton.OpenFolder() }) - g.Go(func() error { - chainConfig := fromdb.ChainConfig(db) - var beaconConfig *clparams.BeaconChainConfig - _, beaconConfig, _, err = clparams.GetConfigsByNetworkName(chainConfig.ChainName) - if err == nil { - _allCaplinSnapshotsSingleton = freezeblocks.NewCaplinSnapshots(snapCfg, beaconConfig, dirs, logger) - if err = _allCaplinSnapshotsSingleton.ReopenFolder(); err != nil { - return err - } - _allCaplinSnapshotsSingleton.LogStat("caplin") + g := &errgroup.Group{} + g.Go(func() error { + _allSnapshotsSingleton.OptimisticalyReopenFolder() + return nil + }) + g.Go(func() error { + _allBorSnapshotsSingleton.OptimisticalyReopenFolder() + return nil + }) + g.Go(func() error { return _aggSingleton.OpenFolder() }) + g.Go(func() error { + chainConfig := fromdb.ChainConfig(db) + var beaconConfig *clparams.BeaconChainConfig + _, beaconConfig, _, err = clparams.GetConfigsByNetworkName(chainConfig.ChainName) + if err == nil { + _allCaplinSnapshotsSingleton = freezeblocks.NewCaplinSnapshots(snapCfg, beaconConfig, dirs, logger) + if err = _allCaplinSnapshotsSingleton.ReopenFolder(); err != nil { + return err } - return nil - }) + _allCaplinSnapshotsSingleton.LogStat("caplin") + } + return nil + }) - g.Go(func() error { - ls, er := os.Stat(filepath.Join(dirs.Snap, downloader.ProhibitNewDownloadsFileName)) - mtime := time.Time{} - if er == nil { - mtime = ls.ModTime() - } - logger.Info("[downloads]", "locked", er == nil, "at", mtime.Format("02 Jan 06 15:04 2006")) - return nil - }) - err := g.Wait() - if err != nil { - panic(err) + 
g.Go(func() error { + ls, er := os.Stat(filepath.Join(dirs.Snap, downloader.ProhibitNewDownloadsFileName)) + mtime := time.Time{} + if er == nil { + mtime = ls.ModTime() } + logger.Info("[downloads]", "locked", er == nil, "at", mtime.Format("02 Jan 06 15:04 2006")) + return nil + }) + err = g.Wait() + if err != nil { + panic(err) + } - _allSnapshotsSingleton.LogStat("blocks") - _allBorSnapshotsSingleton.LogStat("bor") - _ = db.View(context.Background(), func(tx kv.Tx) error { - ac := _aggSingleton.BeginFilesRo() - defer ac.Close() - ac.LogStats(tx, func(endTxNumMinimax uint64) (uint64, error) { - _, histBlockNumProgress, err := rawdbv3.TxNums.FindBlockNum(tx, endTxNumMinimax) - return histBlockNumProgress, err - }) - return nil + _allSnapshotsSingleton.LogStat("blocks") + _allBorSnapshotsSingleton.LogStat("bor") + _ = db.View(context.Background(), func(tx kv.Tx) error { + ac := _aggSingleton.BeginFilesRo() + defer ac.Close() + ac.LogStats(tx, func(endTxNumMinimax uint64) (uint64, error) { + _, histBlockNumProgress, err := rawdbv3.TxNums.FindBlockNum(tx, endTxNumMinimax) + return histBlockNumProgress, err }) - } + return nil + }) }) return _allSnapshotsSingleton, _allBorSnapshotsSingleton, _aggSingleton, _allCaplinSnapshotsSingleton } @@ -1408,7 +1356,7 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig, events := shards.NewEvents() genesis := core.GenesisBlockByChainName(chain) - chainConfig, genesisBlock, genesisErr := core.CommitGenesisBlock(db, genesis, "", logger) + chainConfig, genesisBlock, genesisErr := core.CommitGenesisBlock(db, genesis, dirs, logger) if _, ok := genesisErr.(*chain2.ConfigCompatError); genesisErr != nil && !ok { panic(genesisErr) } @@ -1500,7 +1448,7 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig, cfg.Sync, stagedsync.MiningStages(ctx, stagedsync.StageMiningCreateBlockCfg(db, miner, *chainConfig, engine, nil, nil, dirs.Tmp, blockReader), - stagedsync.StageBorHeimdallCfg(db, snapDb, miner, *chainConfig, heimdallClient, blockReader, nil, nil, nil, recents, signatures, false, unwindTypes), + stagedsync.StageBorHeimdallCfg(db, snapDb, miner, *chainConfig, heimdallClient, blockReader, nil, nil, recents, signatures, false, unwindTypes), stagedsync.StageExecuteBlocksCfg( db, cfg.Prune, @@ -1516,10 +1464,9 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig, sentryControlServer.Hd, cfg.Genesis, cfg.Sync, - agg, nil, ), - stagedsync.StageSendersCfg(db, sentryControlServer.ChainConfig, cfg.Sync, false, dirs.Tmp, cfg.Prune, blockReader, sentryControlServer.Hd, nil), + stagedsync.StageSendersCfg(db, sentryControlServer.ChainConfig, cfg.Sync, false, dirs.Tmp, cfg.Prune, blockReader, sentryControlServer.Hd), stagedsync.StageMiningExecCfg(db, miner, events, *chainConfig, engine, &vm.Config{}, dirs.Tmp, nil, 0, nil, nil, blockReader), stagedsync.StageMiningFinishCfg(db, *chainConfig, engine, miner, miningCancel, blockReader, builder.NewLatestBlockBuiltStore()), ), @@ -1586,5 +1533,5 @@ func initConsensusEngine(ctx context.Context, cc *chain2.Config, dir string, db consensusConfig = &config.Ethash } return ethconsensusconfig.CreateConsensusEngine(ctx, &nodecfg.Config{Dirs: datadir.New(dir)}, cc, consensusConfig, config.Miner.Notify, config.Miner.Noverify, - heimdallClient, config.WithoutHeimdall, blockReader, db.ReadOnly(), logger, nil), heimdallClient + heimdallClient, config.WithoutHeimdall, blockReader, db.ReadOnly(), logger, nil, nil), heimdallClient } diff --git 
a/cmd/integration/commands/state_stages.go b/cmd/integration/commands/state_stages.go index 974d082fe15..37c958ef560 100644 --- a/cmd/integration/commands/state_stages.go +++ b/cmd/integration/commands/state_stages.go @@ -179,7 +179,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. syncCfg.ReconWorkerCount = int(reconWorkers) br, _ := blocksIO(db, logger1) - execCfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, chainConfig, engine, vmConfig, changesAcc, false, true, dirs, br, nil, genesis, syncCfg, agg, nil) + execCfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, chainConfig, engine, vmConfig, changesAcc, false, true, dirs, br, nil, genesis, syncCfg, nil) execUntilFunc := func(execToBlock uint64) stagedsync.ExecFunc { return func(badBlockUnwind bool, s *stagedsync.StageState, unwinder stagedsync.Unwinder, txc wrap.TxContainer, logger log.Logger) error { @@ -410,7 +410,7 @@ func loopExec(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) initialCycle := false br, _ := blocksIO(db, logger) - cfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, chainConfig, engine, vmConfig, nil, false, true, dirs, br, nil, genesis, syncCfg, agg, nil) + cfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, chainConfig, engine, vmConfig, nil, false, true, dirs, br, nil, genesis, syncCfg, nil) // set block limit of execute stage sync.MockExecFunc(stages.Execution, func(badBlockUnwind bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, txc wrap.TxContainer, logger log.Logger) error { diff --git a/cmd/pics/pics.go b/cmd/pics/pics.go index 0487fe88900..64ba7fe454a 100644 --- a/cmd/pics/pics.go +++ b/cmd/pics/pics.go @@ -22,6 +22,7 @@ import ( "os" "os/exec" "sort" + "strconv" libcommon "github.com/erigontech/erigon-lib/common" @@ -61,7 +62,7 @@ func prefixGroups1() { visual.StartGraph(f, false) for i, key := range keys { visual.QuadVertical(f, []byte(key), len(key), fmt.Sprintf("q_%x", key)) - visual.Circle(f, fmt.Sprintf("e_%d", i), fmt.Sprintf("%d", i), false) + visual.Circle(f, fmt.Sprintf("e_%d", i), strconv.Itoa(i), false) fmt.Fprintf(f, `q_%x -> e_%d; `, key, i) @@ -90,7 +91,7 @@ func prefixGroups2() { visual.StartGraph(f, false) for i, key := range keys { visual.QuadVertical(f, []byte(key), len(key), fmt.Sprintf("q_%x", key)) - visual.Circle(f, fmt.Sprintf("e_%d", i), fmt.Sprintf("%d", i), false) + visual.Circle(f, fmt.Sprintf("e_%d", i), strconv.Itoa(i), false) fmt.Fprintf(f, `q_%x -> e_%d; `, key, i) @@ -176,7 +177,7 @@ q_%x->q_%x; } // Display the key visual.QuadVertical(f, []byte(key), len(key), fmt.Sprintf("q_%x", key)) - visual.Circle(f, fmt.Sprintf("e_%d", i), fmt.Sprintf("%d", i), false) + visual.Circle(f, fmt.Sprintf("e_%d", i), strconv.Itoa(i), false) fmt.Fprintf(f, `q_%x -> e_%d; `, key, i) @@ -218,7 +219,7 @@ func prefixGroups4() { for j := 0; j < len(hexKey); j++ { hexKey[j] = key[2*j+1] | (key[2*j] << 4) } - vs := fmt.Sprintf("%d", i) + vs := strconv.Itoa(i) tr.Update(hexKey, []byte(vs)) hightlights = append(hightlights, []byte(key)) } @@ -258,7 +259,7 @@ func prefixGroups5() { for j := 0; j < len(hexKey); j++ { hexKey[j] = key[2*j+1] | (key[2*j] << 4) } - vs := fmt.Sprintf("%d", i) + vs := strconv.Itoa(i) tr.Update(hexKey, []byte(vs)) hightlights = append(hightlights, []byte(key)) folds = append(folds, hexKey) @@ -300,7 +301,7 @@ func prefixGroups6() { for j := 0; j < len(hexKey); j++ { hexKey[j] = key[2*j+1] | (key[2*j] << 4) } - vs := fmt.Sprintf("%d", i) + vs := strconv.Itoa(i) 
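// The fmt-to-strconv rewrites in the hunks around this point all follow the
// same mechanical rule: when the only job is converting a single value to a
// string, strconv (or plain concatenation) is more direct than fmt's
// reflection-based formatting. A rough map of the equivalences applied across
// this patch (illustrative sketch only, not part of any file here):
//
//	fmt.Sprintf("%d", i)        -> strconv.Itoa(i)            // i is an int
//	fmt.Sprintf("%d", u)        -> strconv.FormatUint(u, 10)  // u is a uint64
//	fmt.Sprintf("%v", b)        -> strconv.FormatBool(b)      // b is a bool
//	fmt.Sprintf("http://%s", s) -> "http://" + s              // fixed prefix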
tr.Update(hexKey, []byte(vs)) hightlights = append(hightlights, []byte(key)) folds = append(folds, hexKey) @@ -343,7 +344,7 @@ func prefixGroups7() { for j := 0; j < len(hexKey); j++ { hexKey[j] = key[2*j+1] | (key[2*j] << 4) } - vs := fmt.Sprintf("%d", i) + vs := strconv.Itoa(i) tr.Update(hexKey, []byte(vs)) hightlights = append(hightlights, []byte(key)) folds = append(folds, hexKey) @@ -388,7 +389,7 @@ func prefixGroups8() { for j := 0; j < len(hexKey); j++ { hexKey[j] = key[2*j+1] | (key[2*j] << 4) } - vs := fmt.Sprintf("%d", i) + vs := strconv.Itoa(i) tr.Update(hexKey, []byte(vs)) hightlights = append(hightlights, []byte(key)) switch i { diff --git a/cmd/release/go.mod b/cmd/release/go.mod deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/cmd/rpcdaemon/cli/config.go b/cmd/rpcdaemon/cli/config.go index ba9d8f8c83d..52249b5bb84 100644 --- a/cmd/rpcdaemon/cli/config.go +++ b/cmd/rpcdaemon/cli/config.go @@ -82,8 +82,6 @@ import ( "github.com/erigontech/erigon/turbo/rpchelper" "github.com/erigontech/erigon/turbo/services" "github.com/erigontech/erigon/turbo/snapshotsync/freezeblocks" - "github.com/erigontech/erigon/turbo/snapshotsync/snap" - // Force-load native and js packages, to trigger registration _ "github.com/erigontech/erigon/eth/tracers/js" _ "github.com/erigontech/erigon/eth/tracers/native" @@ -324,7 +322,7 @@ func RemoteServices(ctx context.Context, cfg *httpcfg.HttpCfg, logger log.Logger stateCache kvcache.Cache, blockReader services.FullBlockReader, engine consensus.EngineReader, ff *rpchelper.Filters, err error) { if !cfg.WithDatadir && cfg.PrivateApiAddr == "" { - return nil, nil, nil, nil, nil, nil, nil, ff, fmt.Errorf("either remote db or local db must be specified") + return nil, nil, nil, nil, nil, nil, nil, ff, errors.New("either remote db or local db must be specified") } creds, err := grpcutil.TLS(cfg.TLSCACert, cfg.TLSCertfile, cfg.TLSKeyFile) if err != nil { @@ -380,20 +378,12 @@ func RemoteServices(ctx context.Context, cfg *httpcfg.HttpCfg, logger log.Logger if err != nil { return err } - cfg.Snap.Enabled, err = snap.Enabled(tx) - if err != nil { - return err - } return nil }); err != nil { return nil, nil, nil, nil, nil, nil, nil, ff, err } if cc == nil { - return nil, nil, nil, nil, nil, nil, nil, ff, fmt.Errorf("chain config not found in db. Need start erigon at least once on this db") - } - cfg.Snap.Enabled = cfg.Snap.Enabled || cfg.Sync.UseSnapshots - if !cfg.Snap.Enabled { - logger.Info("Use --snapshots=false") + return nil, nil, nil, nil, nil, nil, nil, ff, errors.New("chain config not found in db. Need to start Erigon at least once on this db") } // Configure snapshots diff --git a/cmd/rpcdaemon/graphql/graph/schema.resolvers.go b/cmd/rpcdaemon/graphql/graph/schema.resolvers.go index 52b79fb03d6..9932c43bd13 100644 --- a/cmd/rpcdaemon/graphql/graph/schema.resolvers.go +++ b/cmd/rpcdaemon/graphql/graph/schema.resolvers.go @@ -20,7 +20,7 @@ import ( // SendRawTransaction is the resolver for the sendRawTransaction field. func (r *mutationResolver) SendRawTransaction(ctx context.Context, data string) (string, error) { - panic(fmt.Errorf("not implemented: SendRawTransaction - sendRawTransaction")) + panic("not implemented: SendRawTransaction - sendRawTransaction") } // Block is the resolver for the block field. @@ -208,32 +208,32 @@ func (r *queryResolver) Blocks(ctx context.Context, from *uint64, to *uint64) ([ // Pending is the resolver for the pending field.
func (r *queryResolver) Pending(ctx context.Context) (*model.Pending, error) { - panic(fmt.Errorf("not implemented: Pending - pending")) + panic("not implemented: Pending - pending") } // Transaction is the resolver for the transaction field. func (r *queryResolver) Transaction(ctx context.Context, hash string) (*model.Transaction, error) { - panic(fmt.Errorf("not implemented: Transaction - transaction")) + panic("not implemented: Transaction - transaction") } // Logs is the resolver for the logs field. func (r *queryResolver) Logs(ctx context.Context, filter model.FilterCriteria) ([]*model.Log, error) { - panic(fmt.Errorf("not implemented: Logs - logs")) + panic("not implemented: Logs - logs") } // GasPrice is the resolver for the gasPrice field. func (r *queryResolver) GasPrice(ctx context.Context) (string, error) { - panic(fmt.Errorf("not implemented: GasPrice - gasPrice")) + panic("not implemented: GasPrice - gasPrice") } // MaxPriorityFeePerGas is the resolver for the maxPriorityFeePerGas field. func (r *queryResolver) MaxPriorityFeePerGas(ctx context.Context) (string, error) { - panic(fmt.Errorf("not implemented: MaxPriorityFeePerGas - maxPriorityFeePerGas")) + panic("not implemented: MaxPriorityFeePerGas - maxPriorityFeePerGas") } // Syncing is the resolver for the syncing field. func (r *queryResolver) Syncing(ctx context.Context) (*model.SyncState, error) { - panic(fmt.Errorf("not implemented: Syncing - syncing")) + panic("not implemented: Syncing - syncing") } // ChainID is the resolver for the chainID field. diff --git a/cmd/rpcdaemon/health/check_block.go b/cmd/rpcdaemon/health/check_block.go index b6cab8ed2b1..877a515d131 100644 --- a/cmd/rpcdaemon/health/check_block.go +++ b/cmd/rpcdaemon/health/check_block.go @@ -18,6 +18,7 @@ package health import ( "context" + "errors" "fmt" "github.com/erigontech/erigon/rpc" @@ -25,7 +26,7 @@ import ( func checkBlockNumber(blockNumber rpc.BlockNumber, api EthAPI) error { if api == nil { - return fmt.Errorf("no connection to the Erigon server or `eth` namespace isn't enabled") + return errors.New("no connection to the Erigon server or `eth` namespace isn't enabled") } data, err := api.GetBlockByNumber(context.TODO(), blockNumber, false) if err != nil { diff --git a/cmd/rpcdaemon/health/check_peers.go b/cmd/rpcdaemon/health/check_peers.go index e5b0587525c..d073fd76ca8 100644 --- a/cmd/rpcdaemon/health/check_peers.go +++ b/cmd/rpcdaemon/health/check_peers.go @@ -28,7 +28,7 @@ var ( func checkMinPeers(minPeerCount uint, api NetAPI) error { if api == nil { - return fmt.Errorf("no connection to the Erigon server or `net` namespace isn't enabled") + return errors.New("no connection to the Erigon server or `net` namespace isn't enabled") } peerCount, err := api.PeerCount(context.TODO()) diff --git a/cmd/rpcdaemon/main.go b/cmd/rpcdaemon/main.go index 5a6fb2acbc4..2f8453b1926 100644 --- a/cmd/rpcdaemon/main.go +++ b/cmd/rpcdaemon/main.go @@ -22,12 +22,13 @@ import ( "fmt" "os" + "github.com/spf13/cobra" + "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon/cmd/rpcdaemon/cli" "github.com/erigontech/erigon/rpc" "github.com/erigontech/erigon/turbo/debug" "github.com/erigontech/erigon/turbo/jsonrpc" - "github.com/spf13/cobra" _ "github.com/erigontech/erigon/core/snaptype" //hack _ "github.com/erigontech/erigon/polygon/bor/snaptype" //hack @@ -50,7 +51,7 @@ func main() { defer engine.Close() // ToDo @blxdyx support query blob data in Rpcdaemon - apiList := jsonrpc.APIList(db, backend, txPool, mining, ff, stateCache, blockReader, 
cfg, engine, logger) + apiList := jsonrpc.APIList(db, backend, txPool, mining, ff, stateCache, blockReader, cfg, engine, logger, nil) rpc.PreAllocateRPCMetricLabels(apiList) if err := cli.StartRpcServer(ctx, cfg, apiList, logger); err != nil { logger.Error(err.Error()) diff --git a/cmd/rpcdaemon/rpcservices/eth_backend.go b/cmd/rpcdaemon/rpcservices/eth_backend.go index 8356ab2279e..b58ae045789 100644 --- a/cmd/rpcdaemon/rpcservices/eth_backend.go +++ b/cmd/rpcdaemon/rpcservices/eth_backend.go @@ -331,7 +331,7 @@ func (back *RemoteBackend) Span(ctx context.Context, tx kv.Getter, spanId uint64 } func (r *RemoteBackend) LastMilestoneId(ctx context.Context, tx kv.Tx) (uint64, bool, error) { - return 0, false, fmt.Errorf("not implemented") + return 0, false, errors.New("not implemented") } func (r *RemoteBackend) Milestone(ctx context.Context, tx kv.Getter, spanId uint64) ([]byte, error) { @@ -339,7 +339,7 @@ func (r *RemoteBackend) Milestone(ctx context.Context, tx kv.Getter, spanId uint } func (r *RemoteBackend) LastCheckpointId(ctx context.Context, tx kv.Tx) (uint64, bool, error) { - return 0, false, fmt.Errorf("not implemented") + return 0, false, errors.New("not implemented") } func (r *RemoteBackend) Checkpoint(ctx context.Context, tx kv.Getter, spanId uint64) ([]byte, error) { diff --git a/cmd/rpctest/rpctest/bench1.go b/cmd/rpctest/rpctest/bench1.go index d394c681439..cf9632b0ed7 100644 --- a/cmd/rpctest/rpctest/bench1.go +++ b/cmd/rpctest/rpctest/bench1.go @@ -19,6 +19,7 @@ package rpctest import ( "bytes" "encoding/base64" + "errors" "fmt" "net/http" "os" @@ -161,7 +162,7 @@ func Bench1(erigonURL, gethURL string, needCompare bool, fullTest bool, blockFro printStorageRange(sm) fmt.Printf("================smg\n") printStorageRange(smg) - return fmt.Errorf("Storage range different\n") + return errors.New("Storage range different\n") } } } @@ -229,7 +230,7 @@ func Bench1(erigonURL, gethURL string, needCompare bool, fullTest bool, blockFro fmt.Printf("Different receipts block %d, txn %s\n", bn, txn.Hash) print(client, routes[Geth], reqGen.getTransactionReceipt(txn.Hash)) print(client, routes[Erigon], reqGen.getTransactionReceipt(txn.Hash)) - return fmt.Errorf("Receipts are different\n") + return errors.New("Receipts are different\n") } } } @@ -323,7 +324,7 @@ func Bench1(erigonURL, gethURL string, needCompare bool, fullTest bool, blockFro fmt.Printf("Different next page keys: %x geth %x", page, pageGeth) } if !compareAccountRanges(accRangeErigon, accRangeGeth) { - return fmt.Errorf("Different in account ranges tx\n") + return errors.New("Different in account ranges tx\n") } } } diff --git a/cmd/rpctest/rpctest/bench3.go b/cmd/rpctest/rpctest/bench3.go index b9241f1ea47..e283b9ae3e0 100644 --- a/cmd/rpctest/rpctest/bench3.go +++ b/cmd/rpctest/rpctest/bench3.go @@ -18,6 +18,7 @@ package rpctest import ( "encoding/base64" + "errors" "fmt" "net/http" "time" @@ -80,7 +81,7 @@ func Bench3(erigon_url, geth_url string) error { } if !compareAccountRanges(accRangeTG, accRangeGeth) { - return fmt.Errorf("Different in account ranges tx\n") + return errors.New("Different in account ranges tx\n") } fmt.Println("debug_accountRanges... 
OK!") @@ -164,7 +165,7 @@ func Bench3(erigon_url, geth_url string) error { } fmt.Printf("storageRange g: %d\n", len(smg)) if !compareStorageRanges(sm, smg) { - return fmt.Errorf("Different in storage ranges tx\n") + return errors.New("Different in storage ranges tx\n") } return nil diff --git a/cmd/rpctest/rpctest/bench7.go b/cmd/rpctest/rpctest/bench7.go index 8167b1c6211..a4eee0e8414 100644 --- a/cmd/rpctest/rpctest/bench7.go +++ b/cmd/rpctest/rpctest/bench7.go @@ -17,6 +17,7 @@ package rpctest import ( + "errors" "fmt" "net/http" "time" @@ -90,7 +91,7 @@ func Bench7(erigonURL, gethURL string) error { printStorageRange(sm) fmt.Printf("================smg\n") printStorageRange(smg) - return fmt.Errorf("storage are different") + return errors.New("storage are different") } fmt.Printf("storageRanges: %d\n", len(sm)) return nil diff --git a/cmd/sentinel/main.go b/cmd/sentinel/main.go index ccfcdc5a1bb..6416044190f 100644 --- a/cmd/sentinel/main.go +++ b/cmd/sentinel/main.go @@ -23,7 +23,7 @@ import ( "github.com/erigontech/erigon-lib/common/disk" "github.com/erigontech/erigon-lib/common/mem" - "github.com/erigontech/erigon/cl/phase1/core" + "github.com/erigontech/erigon/cl/phase1/core/checkpoint_sync" "github.com/erigontech/erigon/cl/sentinel" "github.com/erigontech/erigon/cl/sentinel/service" "github.com/erigontech/erigon/cl/utils/eth_clock" @@ -60,7 +60,7 @@ func runSentinelNode(cliCtx *cli.Context) error { go mem.LogMemStats(cliCtx.Context, log.Root()) go disk.UpdateDiskStats(cliCtx.Context, log.Root()) - bs, err := core.RetrieveBeaconState(context.Background(), cfg.BeaconCfg, cfg.NetworkType) + bs, err := checkpoint_sync.NewRemoteCheckpointSync(cfg.BeaconCfg, cfg.NetworkType).GetLatestBeaconState(cliCtx.Context) if err != nil { return err } diff --git a/cmd/sentinel/sentinelcli/cliSettings.go b/cmd/sentinel/sentinelcli/cliSettings.go index f65c85e0f89..0a155340d9d 100644 --- a/cmd/sentinel/sentinelcli/cliSettings.go +++ b/cmd/sentinel/sentinelcli/cliSettings.go @@ -17,6 +17,7 @@ package sentinelcli import ( + "errors" "fmt" "github.com/erigontech/erigon/cl/clparams" @@ -60,7 +61,7 @@ func SetupSentinelCli(ctx *cli.Context) (*SentinelCliCfg, error) { return nil, err } if ctx.String(sentinelflags.GenesisSSZFlag.Name) == "" { - return nil, fmt.Errorf("no genesis file provided") + return nil, errors.New("no genesis file provided") } } diff --git a/cmd/snapshots/cmp/cmp.go b/cmd/snapshots/cmp/cmp.go index f34fca1fed0..c66c13493db 100644 --- a/cmd/snapshots/cmp/cmp.go +++ b/cmd/snapshots/cmp/cmp.go @@ -19,6 +19,7 @@ package cmp import ( "bytes" "context" + "errors" "fmt" "io/fs" "os" @@ -221,11 +222,11 @@ func cmp(cliCtx *cli.Context) error { } if session1 == nil { - return fmt.Errorf("no first session established") + return errors.New("no first session established") } if session2 == nil { - return fmt.Errorf("no second session established") + return errors.New("no second session established") } logger.Info(fmt.Sprintf("Starting compare: %s==%s", loc1.String(), loc2.String()), "first", firstBlock, "last", lastBlock, "types", snapTypes, "dir", tempDir) @@ -439,7 +440,7 @@ func (c comparitor) compareHeaders(ctx context.Context, f1ents []fs.DirEntry, f2 g.SetLimit(2) g.Go(func() error { - logger.Info(fmt.Sprintf("Downloading %s", ent1.Name()), "entry", fmt.Sprint(i1+1, "/", len(f1ents))) + logger.Info("Downloading "+ent1.Name(), "entry", fmt.Sprint(i1+1, "/", len(f1ents))) startTime := time.Now() defer func() { atomic.AddUint64(&downloadTime, uint64(time.Since(startTime))) @@ -460,7 +461,7 @@
func (c comparitor) compareHeaders(ctx context.Context, f1ents []fs.DirEntry, f2 atomic.AddUint64(&downloadTime, uint64(time.Since(startTime))) }() - logger.Info(fmt.Sprintf("Downloading %s", ent2.Name()), "entry", fmt.Sprint(i2+1, "/", len(f2ents)), "size", datasize.ByteSize(ent2Info.Size())) + logger.Info("Downloading "+ent2.Name(), "entry", fmt.Sprint(i2+1, "/", len(f2ents)), "size", datasize.ByteSize(ent2Info.Size())) err := c.session2.Download(gctx, ent2.Name()) if err != nil { @@ -477,7 +478,6 @@ func (c comparitor) compareHeaders(ctx context.Context, f1ents []fs.DirEntry, f2 info1, _, _ := snaptype.ParseFileName(c.session1.LocalFsRoot(), ent1.Name()) f1snaps := freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{ - Enabled: true, ProduceE2: false, NoDownloader: true, }, info1.Dir(), info1.From, logger) @@ -487,7 +487,6 @@ func (c comparitor) compareHeaders(ctx context.Context, f1ents []fs.DirEntry, f2 info2, _, _ := snaptype.ParseFileName(c.session2.LocalFsRoot(), ent1.Name()) f2snaps := freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{ - Enabled: true, ProduceE2: false, NoDownloader: true, }, info2.Dir(), info2.From, logger) @@ -615,7 +614,7 @@ func (c comparitor) compareBodies(ctx context.Context, f1ents []*BodyEntry, f2en atomic.AddUint64(&downloadTime, uint64(time.Since(startTime))) }() - logger.Info(fmt.Sprintf("Downloading %s", ent1.Body.Name()), "entry", fmt.Sprint(i1+1, "/", len(f1ents))) + logger.Info("Downloading "+ent1.Body.Name(), "entry", fmt.Sprint(i1+1, "/", len(f1ents))) return c.session1.Download(ctx, ent1.Body.Name()) }() @@ -631,7 +630,7 @@ func (c comparitor) compareBodies(ctx context.Context, f1ents []*BodyEntry, f2en atomic.AddUint64(&indexTime, uint64(time.Since(startTime))) }() - logger.Info(fmt.Sprintf("Indexing %s", ent1.Body.Name())) + logger.Info("Indexing " + ent1.Body.Name()) return coresnaptype.Bodies.BuildIndexes(ctx, info, c.chainConfig(), c.session1.LocalFsRoot(), nil, log.LvlDebug, logger) }) @@ -647,7 +646,7 @@ func (c comparitor) compareBodies(ctx context.Context, f1ents []*BodyEntry, f2en defer func() { atomic.AddUint64(&downloadTime, uint64(time.Since(startTime))) }() - logger.Info(fmt.Sprintf("Downloading %s", ent1.Transactions.Name()), "entry", fmt.Sprint(i1+1, "/", len(f1ents))) + logger.Info("Downloading "+ent1.Transactions.Name(), "entry", fmt.Sprint(i1+1, "/", len(f1ents))) return c.session1.Download(ctx, ent1.Transactions.Name()) }() @@ -670,7 +669,7 @@ func (c comparitor) compareBodies(ctx context.Context, f1ents []*BodyEntry, f2en atomic.AddUint64(&indexTime, uint64(time.Since(startTime))) }() - logger.Info(fmt.Sprintf("Indexing %s", ent1.Transactions.Name())) + logger.Info("Indexing " + ent1.Transactions.Name()) return coresnaptype.Transactions.BuildIndexes(ctx, info, c.chainConfig(), c.session1.LocalFsRoot(), nil, log.LvlDebug, logger) }) @@ -690,7 +689,7 @@ func (c comparitor) compareBodies(ctx context.Context, f1ents []*BodyEntry, f2en atomic.AddUint64(&downloadTime, uint64(time.Since(startTime))) }() - logger.Info(fmt.Sprintf("Downloading %s", ent2.Body.Name()), "entry", fmt.Sprint(i2+1, "/", len(f2ents))) + logger.Info("Downloading "+ent2.Body.Name(), "entry", fmt.Sprint(i2+1, "/", len(f2ents))) return c.session2.Download(ctx, ent2.Body.Name()) }() @@ -706,7 +705,7 @@ func (c comparitor) compareBodies(ctx context.Context, f1ents []*BodyEntry, f2en atomic.AddUint64(&indexTime, uint64(time.Since(startTime))) }() - logger.Info(fmt.Sprintf("Indexing %s", ent2.Body.Name())) + logger.Info("Indexing " + ent2.Body.Name()) return 
coresnaptype.Bodies.BuildIndexes(ctx, info, c.chainConfig(), c.session1.LocalFsRoot(), nil, log.LvlDebug, logger) }) @@ -724,7 +723,7 @@ func (c comparitor) compareBodies(ctx context.Context, f1ents []*BodyEntry, f2en atomic.AddUint64(&downloadTime, uint64(time.Since(startTime))) }() - logger.Info(fmt.Sprintf("Downloading %s", ent2.Transactions.Name()), "entry", fmt.Sprint(i2+1, "/", len(f2ents))) + logger.Info("Downloading "+ent2.Transactions.Name(), "entry", fmt.Sprint(i2+1, "/", len(f2ents))) return c.session2.Download(ctx, ent2.Transactions.Name()) }() @@ -747,7 +746,7 @@ func (c comparitor) compareBodies(ctx context.Context, f1ents []*BodyEntry, f2en atomic.AddUint64(&indexTime, uint64(time.Since(startTime))) }() - logger.Info(fmt.Sprintf("Indexing %s", ent2.Transactions.Name())) + logger.Info("Indexing " + ent2.Transactions.Name()) return coresnaptype.Transactions.BuildIndexes(ctx, info, c.chainConfig(), c.session2.LocalFsRoot(), nil, log.LvlDebug, logger) }) @@ -758,7 +757,6 @@ func (c comparitor) compareBodies(ctx context.Context, f1ents []*BodyEntry, f2en info1, _, _ := snaptype.ParseFileName(c.session1.LocalFsRoot(), ent1.Body.Name()) f1snaps := freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{ - Enabled: true, ProduceE2: false, NoDownloader: true, }, info1.Dir(), info1.From, logger) @@ -768,7 +766,6 @@ func (c comparitor) compareBodies(ctx context.Context, f1ents []*BodyEntry, f2en info2, _, _ := snaptype.ParseFileName(c.session2.LocalFsRoot(), ent2.Body.Name()) f2snaps := freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{ - Enabled: true, ProduceE2: false, NoDownloader: true, }, info2.Dir(), info2.From, logger) diff --git a/cmd/snapshots/copy/copy.go b/cmd/snapshots/copy/copy.go index f3b714c5762..99c722c9360 100644 --- a/cmd/snapshots/copy/copy.go +++ b/cmd/snapshots/copy/copy.go @@ -18,6 +18,7 @@ package copy import ( "context" + "errors" "fmt" "io/fs" "path/filepath" @@ -125,7 +126,7 @@ func copy(cliCtx *cli.Context) error { switch dst.LType { case sync.TorrentFs: - return fmt.Errorf("can't copy to torrent - need intermediate local fs") + return errors.New("can't copy to torrent - need intermediate local fs") case sync.RemoteFs: if rcCli == nil { @@ -238,26 +239,26 @@ func copy(cliCtx *cli.Context) error { } func torrentToLocal(torrentCli *sync.TorrentClient, src *sync.Locator, dst *sync.Locator, from uint64, to uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) error { - return fmt.Errorf("TODO") + return errors.New("TODO") } func torrentToRemote(torrentCli *sync.TorrentClient, rcCli *downloader.RCloneClient, src *sync.Locator, dst *sync.Locator, from uint64, to uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) error { - return fmt.Errorf("TODO") + return errors.New("TODO") } func localToRemote(rcCli *downloader.RCloneClient, src *sync.Locator, dst *sync.Locator, from uint64, to uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) error { - return fmt.Errorf("TODO") + return errors.New("TODO") } func localToLocal(src *sync.Locator, dst *sync.Locator, from uint64, to uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) error { - return fmt.Errorf("TODO") + return errors.New("TODO") } func remoteToLocal(ctx context.Context, rcCli *downloader.RCloneClient, src *sync.Locator, dst *sync.Locator, from uint64, to uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) error { logger := sync.Logger(ctx) if rcCli == nil { - return fmt.Errorf("no remote downloader") + return errors.New("no remote 
downloader") } session, err := rcCli.NewSession(ctx, dst.Root, src.Src+":"+src.Root, nil) @@ -281,7 +282,7 @@ func remoteToLocal(ctx context.Context, rcCli *downloader.RCloneClient, src *syn } func remoteToRemote(rcCli *downloader.RCloneClient, src *sync.Locator, dst *sync.Locator, from uint64, to uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) error { - return fmt.Errorf("TODO") + return errors.New("TODO") } type sinf struct { diff --git a/cmd/snapshots/manifest/manifest.go b/cmd/snapshots/manifest/manifest.go index 2e84d0c892d..617b442a19b 100644 --- a/cmd/snapshots/manifest/manifest.go +++ b/cmd/snapshots/manifest/manifest.go @@ -20,6 +20,7 @@ import ( "bufio" "bytes" "context" + "errors" "fmt" "io/fs" "os" @@ -99,7 +100,7 @@ func manifest(cliCtx *cli.Context, command string) error { pos := 0 if cliCtx.Args().Len() == 0 { - return fmt.Errorf("missing manifest location") + return errors.New("missing manifest location") } arg := cliCtx.Args().Get(pos) @@ -144,7 +145,7 @@ func manifest(cliCtx *cli.Context, command string) error { } if src != nil && srcSession == nil { - return fmt.Errorf("no src session established") + return errors.New("no src session established") } logger.Debug("Starting manifest " + command) diff --git a/cmd/snapshots/sync/sync.go b/cmd/snapshots/sync/sync.go index fdec668f448..a4f87d06f8d 100644 --- a/cmd/snapshots/sync/sync.go +++ b/cmd/snapshots/sync/sync.go @@ -19,6 +19,7 @@ package sync import ( "bufio" "context" + "errors" "fmt" "io/fs" "os" @@ -139,7 +140,7 @@ func ParseLocator(value string) (*Locator, error) { }, nil } - return nil, fmt.Errorf("Invalid locator syntax") + return nil, errors.New("Invalid locator syntax") } type TorrentClient struct { diff --git a/cmd/snapshots/torrents/torrents.go b/cmd/snapshots/torrents/torrents.go index b5ba219e168..49e20614fa8 100644 --- a/cmd/snapshots/torrents/torrents.go +++ b/cmd/snapshots/torrents/torrents.go @@ -18,6 +18,7 @@ package torrents import ( "context" + "errors" "fmt" "os" "path/filepath" @@ -136,7 +137,7 @@ func torrents(cliCtx *cli.Context, command string) error { } if src == nil { - return fmt.Errorf("missing data source") + return errors.New("missing data source") } var rcCli *downloader.RCloneClient @@ -188,7 +189,7 @@ func torrents(cliCtx *cli.Context, command string) error { } if src != nil && srcSession == nil { - return fmt.Errorf("no src session established") + return errors.New("no src session established") } logger.Debug("Starting torrents " + command) @@ -199,14 +200,14 @@ func torrents(cliCtx *cli.Context, command string) error { case "update": startTime := time.Now() - logger.Info(fmt.Sprintf("Starting update: %s", src.String()), "first", firstBlock, "last", lastBlock, "dir", tempDir) + logger.Info("Starting update: "+src.String(), "first", firstBlock, "last", lastBlock, "dir", tempDir) err := updateTorrents(cliCtx.Context, srcSession, firstBlock, lastBlock, logger) if err == nil { - logger.Info(fmt.Sprintf("Finished update: %s", src.String()), "elapsed", time.Since(startTime)) + logger.Info("Finished update: "+src.String(), "elapsed", time.Since(startTime)) } else { - logger.Info(fmt.Sprintf("Aborted update: %s", src.String()), "err", err) + logger.Info("Aborted update: "+src.String(), "err", err) } return err @@ -214,14 +215,14 @@ func torrents(cliCtx *cli.Context, command string) error { case "verify": startTime := time.Now() - logger.Info(fmt.Sprintf("Starting verify: %s", src.String()), "first", firstBlock, "last", lastBlock, "dir", tempDir) + logger.Info("Starting 
verify: "+src.String(), "first", firstBlock, "last", lastBlock, "dir", tempDir) err := verifyTorrents(cliCtx.Context, srcSession, firstBlock, lastBlock, logger) if err == nil { - logger.Info(fmt.Sprintf("Verified: %s", src.String()), "elapsed", time.Since(startTime)) + logger.Info("Verified: "+src.String(), "elapsed", time.Since(startTime)) } else { - logger.Info(fmt.Sprintf("Verification failed: %s", src.String()), "err", err) + logger.Info("Verification failed: "+src.String(), "err", err) } return err @@ -388,7 +389,7 @@ func updateTorrents(ctx context.Context, srcSession *downloader.RCloneSession, f } } - logger.Info(fmt.Sprintf("Updating %s", file+".torrent")) + logger.Info("Updating " + file + ".torrent") err := srcSession.Download(gctx, file) @@ -445,7 +446,7 @@ func verifyTorrents(ctx context.Context, srcSession *downloader.RCloneSession, f } } - logger.Info(fmt.Sprintf("Validating %s", file+".torrent")) + logger.Info("Validating " + file + ".torrent") var mi *metainfo.MetaInfo diff --git a/cmd/snapshots/verify/verify.go b/cmd/snapshots/verify/verify.go index a1e6099ea22..06f76b81a04 100644 --- a/cmd/snapshots/verify/verify.go +++ b/cmd/snapshots/verify/verify.go @@ -17,6 +17,7 @@ package verify import ( + "errors" "fmt" "os" "path/filepath" @@ -249,11 +250,11 @@ func verify(cliCtx *cli.Context) error { } if src != nil && srcSession == nil { - return fmt.Errorf("no src session established") + return errors.New("no src session established") } if dstSession == nil { - return fmt.Errorf("no dst session established") + return errors.New("no dst session established") } if srcSession == nil { @@ -264,5 +265,5 @@ func verify(cliCtx *cli.Context) error { } func verifySnapshots(srcSession sync.DownloadSession, rcSession sync.DownloadSession, from uint64, to uint64, snapTypes []snaptype.Type, torrents, hashes, manifest bool) error { - return fmt.Errorf("TODO") + return errors.New("TODO") } diff --git a/cmd/state/commands/opcode_tracer.go b/cmd/state/commands/opcode_tracer.go index 0fa5b1ea3c8..b07352d6e75 100644 --- a/cmd/state/commands/opcode_tracer.go +++ b/cmd/state/commands/opcode_tracer.go @@ -200,7 +200,7 @@ func (ot *opcodeTracer) captureStartOrEnter(from, to libcommon.Address, create b ot.txsInDepth = append(ot.txsInDepth, 0) ls := len(ot.stack) - txnAddr := "" + var txnAddr string if ls > 0 { txnAddr = ot.stack[ls-1].TxnAddr + "-" + strconv.Itoa(int(ot.txsInDepth[ot.depth])) // fmt.Sprintf("%s-%d", ot.stack[ls-1].TxAddr, ot.txsInDepth[depth]) } else { @@ -250,7 +250,7 @@ func (ot *opcodeTracer) captureEndOrExit(err error) { } } - errstr := "" + var errstr string if err != nil { errstr = err.Error() currentEntry.Fault = errstr @@ -441,7 +441,7 @@ func OpcodeTracer(genesis *types.Genesis, blockNum uint64, chaindata string, num defer historyTx.Rollback() dirs := datadir2.New(filepath.Dir(chainDb.(*mdbx.MdbxKV).Path())) - blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, dirs.Snap, 0, log.New()), nil /* BorSnapshots */) + blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{}, dirs.Snap, 0, log.New()), nil /* BorSnapshots */) chainConfig := genesis.Config vmConfig := vm.Config{Tracer: ot, Debug: true} @@ -491,7 +491,6 @@ func OpcodeTracer(genesis *types.Genesis, blockNum uint64, chaindata string, num if fopsWriter != nil { fopsWriter.Flush() fops.Close() - fops = nil } lo := len(chanOpcodes) @@ -562,7 +561,6 @@ func OpcodeTracer(genesis *types.Genesis, blockNum uint64, chaindata 
string, num } fWriter.Flush() f.Close() - f = nil } lsp := len(chanSegPrefix) diff --git a/cmd/state/exec3/historical_trace_worker.go b/cmd/state/exec3/historical_trace_worker.go index 27f446c711b..2b961ef403f 100644 --- a/cmd/state/exec3/historical_trace_worker.go +++ b/cmd/state/exec3/historical_trace_worker.go @@ -149,7 +149,7 @@ func (rw *HistoricalTraceWorker) RunTxTask(txTask *state.TxTask) { case txTask.TxIndex == -1: if txTask.BlockNum == 0 { // Genesis block - _, ibs, err = core.GenesisToBlock(rw.execArgs.Genesis, rw.execArgs.Dirs.Tmp, rw.logger) + _, ibs, err = core.GenesisToBlock(rw.execArgs.Genesis, rw.execArgs.Dirs, rw.logger) if err != nil { panic(fmt.Errorf("GenesisToBlock: %w", err)) } diff --git a/cmd/state/exec3/state.go b/cmd/state/exec3/state.go index 6d434ac4cab..57202d94744 100644 --- a/cmd/state/exec3/state.go +++ b/cmd/state/exec3/state.go @@ -201,7 +201,7 @@ func (rw *Worker) RunTxTaskNoLock(txTask *state.TxTask) { if txTask.BlockNum == 0 { //fmt.Printf("txNum=%d, blockNum=%d, Genesis\n", txTask.TxNum, txTask.BlockNum) - _, ibs, err = core.GenesisToBlock(rw.genesis, rw.dirs.Tmp, rw.logger) + _, ibs, err = core.GenesisToBlock(rw.genesis, rw.dirs, rw.logger) if err != nil { panic(err) } diff --git a/cmd/state/exec3/state_recon.go b/cmd/state/exec3/state_recon.go index 9e2962d5439..c8f5658ad5f 100644 --- a/cmd/state/exec3/state_recon.go +++ b/cmd/state/exec3/state_recon.go @@ -20,6 +20,7 @@ import ( "context" "encoding/binary" "fmt" + "github.com/erigontech/erigon-lib/common/datadir" "github.com/erigontech/erigon/core/systemcontracts" "sync" @@ -250,8 +251,9 @@ type ReconWorker struct { isPoSA bool posa consensus.PoSA - evm *vm.EVM - ibs *state.IntraBlockState + evm *vm.EVM + ibs *state.IntraBlockState + dirs datadir.Dirs } func NewReconWorker(lock sync.Locker, ctx context.Context, rs *state.ReconState, @@ -288,6 +290,10 @@ func (rw *ReconWorker) SetChainTx(chainTx kv.Tx) { rw.stateWriter.SetChainTx(chainTx) } +func (rw *ReconWorker) SetDirs(dirs datadir.Dirs) { + rw.dirs = dirs +} + func (rw *ReconWorker) Run() error { for txTask, ok, err := rw.rs.Schedule(rw.ctx); ok || err != nil; txTask, ok, err = rw.rs.Schedule(rw.ctx) { if err != nil { @@ -316,7 +322,7 @@ func (rw *ReconWorker) runTxTask(txTask *state.TxTask) error { if txTask.BlockNum == 0 && txTask.TxIndex == -1 { //fmt.Printf("txNum=%d, blockNum=%d, Genesis\n", txTask.TxNum, txTask.BlockNum) // Genesis block - _, ibs, err = core.GenesisToBlock(rw.genesis, "", rw.logger) + _, ibs, err = core.GenesisToBlock(rw.genesis, rw.dirs, rw.logger) if err != nil { return err } diff --git a/cmd/state/verify/verify_txlookup.go b/cmd/state/verify/verify_txlookup.go index 4a7bb50ddf0..cc23ee1c30a 100644 --- a/cmd/state/verify/verify_txlookup.go +++ b/cmd/state/verify/verify_txlookup.go @@ -39,7 +39,7 @@ import ( func blocksIO(db kv.RoDB) (services.FullBlockReader, *blockio.BlockWriter) { dirs := datadir2.New(filepath.Dir(db.(*mdbx.MdbxKV).Path())) - br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, dirs.Snap, 0, log.New()), nil /* BorSnapshots */) + br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{}, dirs.Snap, 0, log.New()), nil /* BorSnapshots */) bw := blockio.NewBlockWriter() return br, bw } diff --git a/cmd/txpool/main.go b/cmd/txpool/main.go index 5f92d7518e9..f1b93709eae 100644 --- a/cmd/txpool/main.go +++ b/cmd/txpool/main.go @@ -76,6 +76,8 @@ var ( noTxGossip bool + mdbxWriteMap bool + commitEvery time.Duration ) @@ -103,6 
+105,7 @@ func init() { rootCmd.PersistentFlags().Uint64Var(&blobPriceBump, "txpool.blobpricebump", txpoolcfg.DefaultConfig.BlobPriceBump, "Price bump percentage to replace an existing blob (type-3) transaction") rootCmd.PersistentFlags().DurationVar(&commitEvery, utils.TxPoolCommitEveryFlag.Name, utils.TxPoolCommitEveryFlag.Value, utils.TxPoolCommitEveryFlag.Usage) rootCmd.PersistentFlags().BoolVar(&noTxGossip, utils.TxPoolGossipDisableFlag.Name, utils.TxPoolGossipDisableFlag.Value, utils.TxPoolGossipDisableFlag.Usage) + rootCmd.PersistentFlags().BoolVar(&mdbxWriteMap, utils.DbWriteMapFlag.Name, utils.DbWriteMapFlag.Value, utils.DbWriteMapFlag.Usage) rootCmd.Flags().StringSliceVar(&traceSenders, utils.TxPoolTraceSendersFlag.Name, []string{}, utils.TxPoolTraceSendersFlag.Usage) } @@ -171,6 +174,7 @@ func doTxpool(ctx context.Context, logger log.Logger) error { cfg.PriceBump = priceBump cfg.BlobPriceBump = blobPriceBump cfg.NoGossip = noTxGossip + cfg.MdbxWriteMap = mdbxWriteMap cacheConfig := kvcache.DefaultCoherentConfig cacheConfig.MetricsLabel = "txpool" diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index c6477a2d5d5..a4df41a1aea 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -766,7 +766,7 @@ var ( } DbWriteMapFlag = cli.BoolFlag{ Name: "db.writemap", - Usage: "Enable WRITE_MAP feauture for fast database writes and fast commit times", + Usage: "Enable WRITE_MAP feature for fast database writes and fast commit times", Value: true, } @@ -856,6 +856,11 @@ var ( Usage: "MEV relay endpoint. Caplin runs in builder mode if this is set", Value: "", } + CaplinValidatorMonitorFlag = cli.BoolFlag{ + Name: "caplin.validator-monitor", + Usage: "Enable caplin validator monitoring metrics", + Value: false, + } SentinelAddrFlag = cli.StringFlag{ Name: "sentinel.addr", @@ -996,6 +1001,11 @@ var ( Usage: "disable blob pruning in caplin", Value: false, } + CaplinDisableCheckpointSyncFlag = cli.BoolFlag{ + Name: "caplin.checkpoint-sync.disable", + Usage: "disable checkpoint sync in caplin", + Value: false, + } CaplinArchiveFlag = cli.BoolFlag{ Name: "caplin.archive", Usage: "enables archival node in caplin", @@ -1024,7 +1034,7 @@ var ( DiagEndpointAddrFlag = cli.StringFlag{ Name: "diagnostics.endpoint.addr", Usage: "Diagnostics HTTP server listening interface", - Value: "0.0.0.0", + Value: "127.0.0.1", } DiagEndpointPortFlag = cli.UintFlag{ Name: "diagnostics.endpoint.port", @@ -1407,6 +1417,8 @@ func setDataDir(ctx *cli.Context, cfg *nodecfg.Config) { } else { cfg.Dirs = datadir.New(paths.DataDirForNetwork(paths.DefaultDataDir(), ctx.String(ChainFlag.Name))) } + snapcfg.LoadRemotePreverified() + cfg.MdbxPageSize = flags.DBPageSizeFlagUnmarshal(ctx, DbPageSizeFlag.Name, DbPageSizeFlag.Usage) if err := cfg.MdbxDBSizeLimit.UnmarshalText([]byte(ctx.String(DbSizeLimitFlag.Name))); err != nil { panic(err) @@ -1522,6 +1534,9 @@ func setTxPool(ctx *cli.Context, fullCfg *ethconfig.Config) { if ctx.IsSet(TxPoolBlobPriceBumpFlag.Name) { fullCfg.TxPool.BlobPriceBump = ctx.Uint64(TxPoolBlobPriceBumpFlag.Name) } + if ctx.IsSet(DbWriteMapFlag.Name) { + fullCfg.TxPool.MdbxWriteMap = ctx.Bool(DbWriteMapFlag.Name) + } cfg.CommitEvery = common2.RandomizeDuration(ctx.Duration(TxPoolCommitEveryFlag.Name)) } @@ -1694,8 +1709,10 @@ func setCaplin(ctx *cli.Context, cfg *ethconfig.Config) { // More granularity here. 
cfg.CaplinConfig.BlobBackfilling = ctx.Bool(CaplinBlobBackfillingFlag.Name) cfg.CaplinConfig.BlobPruningDisabled = ctx.Bool(CaplinDisableBlobPruningFlag.Name) + cfg.CaplinConfig.DisabledCheckpointSync = ctx.Bool(CaplinDisableCheckpointSyncFlag.Name) cfg.CaplinConfig.Archive = ctx.Bool(CaplinArchiveFlag.Name) cfg.CaplinConfig.MevRelayUrl = ctx.String(CaplinMevRelayUrl.Name) + cfg.CaplinConfig.EnableValidatorMonitor = ctx.Bool(CaplinValidatorMonitorFlag.Name) if checkpointUrls := ctx.StringSlice(CaplinCheckpointSyncUrlFlag.Name); len(checkpointUrls) > 0 { clparams.ConfigurableCheckpointsURLs = checkpointUrls } diff --git a/cmd/utils/flags/helpers.go b/cmd/utils/flags/helpers.go index 42ba29d30f7..5d2aecefc79 100644 --- a/cmd/utils/flags/helpers.go +++ b/cmd/utils/flags/helpers.go @@ -92,7 +92,7 @@ func doMigrateFlags(ctx *cli.Context) { for _, parent := range ctx.Lineage()[1:] { if parent.IsSet(name) { // When iterating across the lineage, we will be served both - // the 'canon' and alias formats of all commmands. In most cases, + // the 'canon' and alias formats of all commands. In most cases, // it's fine to set it in the ctx multiple times (one for each // name), however, the Slice-flags are not fine. // The slice-flags accumulate, so if we set it once as diff --git a/common/paths/paths.go b/common/paths/paths.go index 14ce226a3e6..772c33f7457 100644 --- a/common/paths/paths.go +++ b/common/paths/paths.go @@ -100,8 +100,6 @@ func DataDirForNetwork(datadir string, network string) string { return "" // unless explicitly requested, use memory databases case networkname.HoleskyChainName: return networkDataDirCheckingLegacy(datadir, "holesky") - case networkname.MumbaiChainName: - return networkDataDirCheckingLegacy(datadir, "mumbai") case networkname.AmoyChainName: return networkDataDirCheckingLegacy(datadir, "amoy") case networkname.BorMainnetChainName: diff --git a/common/types.go b/common/types.go index 8f8dfee50f4..ae31348b168 100644 --- a/common/types.go +++ b/common/types.go @@ -24,7 +24,6 @@ import ( "encoding/hex" "encoding/json" "errors" - "fmt" "reflect" "strings" @@ -104,9 +103,9 @@ func (ma *MixedcaseAddress) UnmarshalJSON(input []byte) error { // MarshalJSON marshals the original value func (ma *MixedcaseAddress) MarshalJSON() ([]byte, error) { if strings.HasPrefix(ma.original, "0x") || strings.HasPrefix(ma.original, "0X") { - return json.Marshal(fmt.Sprintf("0x%s", ma.original[2:])) + return json.Marshal("0x" + ma.original[2:]) } - return json.Marshal(fmt.Sprintf("0x%s", ma.original)) + return json.Marshal("0x" + ma.original) } // Address returns the address @@ -117,9 +116,9 @@ func (ma *MixedcaseAddress) Address() libcommon.Address { // String implements fmt.Stringer func (ma *MixedcaseAddress) String() string { if ma.ValidChecksum() { - return fmt.Sprintf("%s [chksum ok]", ma.original) + return ma.original + " [chksum ok]" } - return fmt.Sprintf("%s [chksum INVALID]", ma.original) + return ma.original + " [chksum INVALID]" } // ValidChecksum returns true if the address has valid checksum diff --git a/consensus/aura/aura.go b/consensus/aura/aura.go index 9918bbecc91..bd0be5fad59 100644 --- a/consensus/aura/aura.go +++ b/consensus/aura/aura.go @@ -251,11 +251,11 @@ func NewAuRa(spec *chain.AuRaConfig, db kv.RwDB) (*AuRa, error) { } if _, ok := auraParams.StepDurations[0]; !ok { - return nil, fmt.Errorf("authority Round step 0 duration is undefined") + return nil, errors.New("authority Round step 0 duration is undefined") } for _, v := range auraParams.StepDurations { if v == 0 { 
- return nil, fmt.Errorf("authority Round step duration cannot be 0") + return nil, errors.New("authority Round step duration cannot be 0") } } //shouldTimeout := auraParams.StartStep == nil @@ -276,7 +276,7 @@ func NewAuRa(spec *chain.AuRaConfig, db kv.RwDB) (*AuRa, error) { dur := auraParams.StepDurations[time] step, t, ok := nextStepTimeDuration(durInfo, time) if !ok { - return nil, fmt.Errorf("timestamp overflow") + return nil, errors.New("timestamp overflow") } durInfo.TransitionStep = step durInfo.TransitionTimestamp = t @@ -1059,7 +1059,7 @@ func (c *AuRa) epochSet(chain consensus.ChainHeaderReader, e *NonTransactionalEp finalityChecker, epochTransitionNumber, ok := c.EpochManager.zoomToAfter(chain, e, c.cfg.Validators, h.ParentHash, call) if !ok { - return nil, 0, fmt.Errorf("unable to zoomToAfter to epoch") + return nil, 0, errors.New("unable to zoomToAfter to epoch") } return finalityChecker.signers, epochTransitionNumber, nil } diff --git a/consensus/aura/aura_test.go b/consensus/aura/aura_test.go index 5aae977aefb..af080be3825 100644 --- a/consensus/aura/aura_test.go +++ b/consensus/aura/aura_test.go @@ -17,6 +17,7 @@ package aura_test import ( + "github.com/erigontech/erigon-lib/common/datadir" "math/big" "strings" "testing" @@ -41,7 +42,7 @@ import ( func TestEmptyBlock(t *testing.T) { require := require.New(t) genesis := core.GnosisGenesisBlock() - genesisBlock, _, err := core.GenesisToBlock(genesis, "", log.Root()) + genesisBlock, _, err := core.GenesisToBlock(genesis, datadir.New(t.TempDir()), log.Root()) require.NoError(err) genesis.Config.TerminalTotalDifficultyPassed = false diff --git a/consensus/aura/rolling_finality.go b/consensus/aura/rolling_finality.go index 3071fefafe7..24d2b7a8aa3 100644 --- a/consensus/aura/rolling_finality.go +++ b/consensus/aura/rolling_finality.go @@ -18,6 +18,7 @@ package aura import ( "container/list" + "errors" "fmt" libcommon "github.com/erigontech/erigon-lib/common" @@ -72,7 +73,7 @@ func (f *RollingFinality) clear() { func (f *RollingFinality) push(head libcommon.Hash, num uint64, signers []libcommon.Address) (newlyFinalized []unAssembledHeader, err error) { for i := range signers { if !f.hasSigner(signers[i]) { - return nil, fmt.Errorf("unknown validator") + return nil, errors.New("unknown validator") } } diff --git a/consensus/aura/validators.go b/consensus/aura/validators.go index a3c63044037..1fa742f73da 100644 --- a/consensus/aura/validators.go +++ b/consensus/aura/validators.go @@ -18,6 +18,7 @@ package aura import ( "container/list" + "errors" "fmt" "math" "sort" @@ -333,7 +334,7 @@ func (s *SimpleList) defaultCaller(blockHash libcommon.Hash) (Call, error) { } func (s *SimpleList) getWithCaller(parentHash libcommon.Hash, nonce uint, caller consensus.Call) (libcommon.Address, error) { if len(s.validators) == 0 { - return libcommon.Address{}, fmt.Errorf("cannot operate with an empty validator set") + return libcommon.Address{}, errors.New("cannot operate with an empty validator set") } return s.validators[nonce%uint(len(s.validators))], nil } diff --git a/consensus/clique/snapshot.go b/consensus/clique/snapshot.go index 7c57e90587b..49573930c55 100644 --- a/consensus/clique/snapshot.go +++ b/consensus/clique/snapshot.go @@ -24,6 +24,8 @@ import ( "context" "errors" "fmt" + "maps" + "slices" "sort" "time" @@ -327,27 +329,15 @@ func (s *Snapshot) apply(sigcache *lru.ARCCache[libcommon.Hash, libcommon.Addres // copy creates a deep copy of the snapshot, though not the individual votes. 
func (s *Snapshot) copy() *Snapshot { - cpy := &Snapshot{ + return &Snapshot{ config: s.config, Number: s.Number, Hash: s.Hash, - Signers: make(map[libcommon.Address]struct{}), - Recents: make(map[uint64]libcommon.Address), - Votes: make([]*Vote, len(s.Votes)), - Tally: make(map[libcommon.Address]Tally), - } - for signer := range s.Signers { - cpy.Signers[signer] = struct{}{} + Signers: maps.Clone(s.Signers), + Recents: maps.Clone(s.Recents), + Votes: slices.Clone(s.Votes), + Tally: maps.Clone(s.Tally), } - for block, signer := range s.Recents { - cpy.Recents[block] = signer - } - for address, tally := range s.Tally { - cpy.Tally[address] = tally - } - copy(cpy.Votes, s.Votes) - - return cpy } // signers retrieves the list of authorized signers in ascending order. diff --git a/consensus/merge/merge.go b/consensus/merge/merge.go index 020238df37c..8b0abf97101 100644 --- a/consensus/merge/merge.go +++ b/consensus/merge/merge.go @@ -21,6 +21,7 @@ import ( "errors" "fmt" "github.com/erigontech/erigon-lib/kv" + "math" "math/big" "reflect" @@ -209,13 +210,13 @@ func (s *Merge) Finalize(config *chain.Config, header *types.Header, state *stat return nil, nil, nil, fmt.Errorf("error: invalid requests root hash in header, expected: %v, got :%v", header.RequestsRoot, rh) } if !reflect.DeepEqual(requestsInBlock.Deposits(), depositReqs.Deposits()) { - return nil, nil, nil, fmt.Errorf("error: invalid EIP-6110 Deposit Requests in block") + return nil, nil, nil, errors.New("error: invalid EIP-6110 Deposit Requests in block") } if !reflect.DeepEqual(requestsInBlock.Withdrawals(), withdrawalReqs.Withdrawals()) { - return nil, nil, nil, fmt.Errorf("error: invalid EIP-7002 Withdrawal requests in block") + return nil, nil, nil, errors.New("error: invalid EIP-7002 Withdrawal requests in block") } if !reflect.DeepEqual(requestsInBlock.Consolidations(), consolidations.Consolidations()) { - return nil, nil, nil, fmt.Errorf("error: invalid EIP-7251 Consolidation requests in block") + return nil, nil, nil, errors.New("error: invalid EIP-7251 Consolidation requests in block") } } } @@ -302,13 +303,13 @@ func (s *Merge) verifyHeader(chain consensus.ChainHeaderReader, header, parent * // Verify existence / non-existence of withdrawalsHash shanghai := chain.Config().IsShanghai(header.Number.Uint64(), header.Time) if shanghai && header.WithdrawalsHash == nil { - return fmt.Errorf("missing withdrawalsHash") + return errors.New("missing withdrawalsHash") } if !shanghai && header.WithdrawalsHash != nil { return consensus.ErrUnexpectedWithdrawals } - if !chain.Config().IsCancun(0, header.Time) { + if !chain.Config().IsCancun(math.MaxUint64, header.Time) { return misc.VerifyAbsenceOfCancunHeaderFields(header) } if err := misc.VerifyPresenceOfCancunHeaderFields(header); err != nil { @@ -322,7 +323,7 @@ func (s *Merge) verifyHeader(chain consensus.ChainHeaderReader, header, parent * // Verify existence / non-existence of requestsRoot prague := chain.Config().IsPrague(header.Time) if prague && header.RequestsRoot == nil { - return fmt.Errorf("missing requestsRoot") + return errors.New("missing requestsRoot") } if !prague && header.RequestsRoot != nil { return consensus.ErrUnexpectedRequests @@ -352,7 +353,7 @@ func (s *Merge) Initialize(config *chain.Config, chain consensus.ChainHeaderRead if !misc.IsPoSHeader(header) { s.eth1Engine.Initialize(config, chain, header, state, syscall, logger, tracer) } - if chain.Config().IsCancun(0, header.Time) { + if chain.Config().IsCancun(math.MaxUint64, header.Time) { 
misc.ApplyBeaconRootEip4788(header.ParentBeaconBlockRoot, func(addr libcommon.Address, data []byte) ([]byte, error) { return syscall(addr, data, state, header, false /* constCall */) }, tracer) diff --git a/consensus/misc/eip1559.go b/consensus/misc/eip1559.go index 47a0bb03a9a..26e5776ae1e 100644 --- a/consensus/misc/eip1559.go +++ b/consensus/misc/eip1559.go @@ -20,6 +20,7 @@ package misc import ( + "errors" "fmt" "math/big" @@ -50,7 +51,7 @@ func VerifyEip1559Header(config *chain.Config, parent, header *types.Header, ski } // Verify the header is not malformed if header.BaseFee == nil { - return fmt.Errorf("header is missing baseFee") + return errors.New("header is missing baseFee") } // Verify the baseFee is correct based on the parent header. expectedBaseFee := CalcBaseFee(config, parent) @@ -69,7 +70,7 @@ func (f eip1559Calculator) CurrentFees(chainConfig *chain.Config, db kv.Getter) hash := rawdb.ReadHeadHeaderHash(db) if hash == (common.Hash{}) { - return 0, 0, 0, 0, fmt.Errorf("can't get head header hash") + return 0, 0, 0, 0, errors.New("can't get head header hash") } currentHeader, err := rawdb.ReadHeaderByHash(db, hash) diff --git a/consensus/misc/eip4844.go b/consensus/misc/eip4844.go index 37e15667778..aff753b4b9d 100644 --- a/consensus/misc/eip4844.go +++ b/consensus/misc/eip4844.go @@ -79,13 +79,13 @@ func FakeExponential(factor, denom *uint256.Int, excessBlobGas uint64) (*uint256 // VerifyPresenceOfCancunHeaderFields checks that the fields introduced in Cancun (EIP-4844, EIP-4788) are present. func VerifyPresenceOfCancunHeaderFields(header *types.Header) error { if header.BlobGasUsed == nil { - return fmt.Errorf("header is missing blobGasUsed") + return errors.New("header is missing blobGasUsed") } if header.ExcessBlobGas == nil { - return fmt.Errorf("header is missing excessBlobGas") + return errors.New("header is missing excessBlobGas") } if header.ParentBeaconBlockRoot != nil { - return fmt.Errorf("header has no nil ParentBeaconBlockRoot") + return errors.New("header has non-nil ParentBeaconBlockRoot") } if header.WithdrawalsHash == nil || *header.WithdrawalsHash != types.EmptyRootHash { return errors.New("header has wrong WithdrawalsHash") diff --git a/core/allocs/mumbai.json b/core/allocs/mumbai.json deleted file mode 100644 index e90415e3193..00000000000 --- a/core/allocs/mumbai.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "0000000000000000000000000000000000001000": { - "balance": "0x0", - "code": 
"0x608060405234801561001057600080fd5b50600436106101f05760003560e01c806360c8614d1161010f578063af26aa96116100a2578063d5b844eb11610071578063d5b844eb14610666578063dcf2793a14610684578063e3b7c924146106b6578063f59cf565146106d4576101f0565b8063af26aa96146105c7578063b71d7a69146105e7578063b7ab4db514610617578063c1b3c91914610636576101f0565b806370ba5707116100de57806370ba57071461052b57806398ab2b621461055b5780639d11b80714610579578063ae756451146105a9576101f0565b806360c8614d1461049c57806365b3a1e2146104bc57806366332354146104db578063687a9bd6146104f9576101f0565b80633434735f1161018757806344d6528f1161015657806344d6528f146103ee5780634dbc959f1461041e57806355614fcc1461043c578063582a8d081461046c576101f0565b80633434735f1461035257806335ddfeea1461037057806343ee8213146103a057806344c15cb1146103be576101f0565b806323f2a73f116101c357806323f2a73f146102a45780632bc06564146102d45780632de3a180146102f25780632eddf35214610322576101f0565b8063047a6c5b146101f55780630c35b1cb146102275780631270b5741461025857806323c2a2b414610288575b600080fd5b61020f600480360361020a9190810190612b24565b610706565b60405161021e93929190613463565b60405180910390f35b610241600480360361023c9190810190612b24565b61075d565b60405161024f929190613284565b60405180910390f35b610272600480360361026d9190810190612b4d565b610939565b60405161027f91906132bb565b60405180910390f35b6102a2600480360361029d9190810190612c2c565b610a91565b005b6102be60048036036102b99190810190612b4d565b61112a565b6040516102cb91906132bb565b60405180910390f35b6102dc611281565b6040516102e99190613411565b60405180910390f35b61030c60048036036103079190810190612a81565b611286565b60405161031991906132d6565b60405180910390f35b61033c60048036036103379190810190612b24565b611307565b6040516103499190613411565b60405180910390f35b61035a611437565b6040516103679190613269565b60405180910390f35b61038a60048036036103859190810190612abd565b61144f565b60405161039791906132bb565b60405180910390f35b6103a861151a565b6040516103b591906132d6565b60405180910390f35b6103d860048036036103d39190810190612b89565b611531565b6040516103e59190613411565b60405180910390f35b61040860048036036104039190810190612b4d565b611619565b60405161041591906133f6565b60405180910390f35b610426611781565b6040516104339190613411565b60405180910390f35b61045660048036036104519190810190612a06565b611791565b60405161046391906132bb565b60405180910390f35b61048660048036036104819190810190612a2f565b6117ab565b60405161049391906132d6565b60405180910390f35b6104a4611829565b6040516104b393929190613463565b60405180910390f35b6104c461189d565b6040516104d2929190613284565b60405180910390f35b6104e3611b6e565b6040516104f09190613411565b60405180910390f35b610513600480360361050e9190810190612bf0565b611b73565b6040516105229392919061342c565b60405180910390f35b61054560048036036105409190810190612a06565b611bd7565b60405161055291906132bb565b60405180910390f35b610563611bf1565b60405161057091906132d6565b60405180910390f35b610593600480360361058e9190810190612b24565b611c08565b6040516105a09190613411565b60405180910390f35b6105b1611d39565b6040516105be91906132d6565b60405180910390f35b6105cf611d50565b6040516105de93929190613463565b60405180910390f35b61060160048036036105fc9190810190612b24565b611db1565b60405161060e9190613411565b60405180910390f35b61061f611eb1565b60405161062d929190613284565b60405180910390f35b610650600480360361064b9190810190612b24565b611ec5565b60405161065d9190613411565b60405180910390f35b61066e611ee6565b60405161067b919061349a565b60405180910390f35b61069e60048036036106999190810190612bf0565b611eeb565b6040516106ad9392919061342c565b60405180910390f35b6106be611f4f565b6040516106cb9190613411565b60405180910390f35b6106ee60048036036106e99190810190612b24565b611f61565b60
40516106fd93929190613463565b60405180910390f35b60008060006002600085815260200190815260200160002060000154600260008681526020019081526020016000206001015460026000878152602001908152602001600020600201549250925092509193909250565b60608060ff83116107795761077061189d565b91509150610934565b600061078484611db1565b9050606060016000838152602001908152602001600020805490506040519080825280602002602001820160405280156107cd5781602001602082028038833980820191505090505b509050606060016000848152602001908152602001600020805490506040519080825280602002602001820160405280156108175781602001602082028038833980820191505090505b50905060008090505b60016000858152602001908152602001600020805490508110156109295760016000858152602001908152602001600020818154811061085c57fe5b906000526020600020906003020160020160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1683828151811061089a57fe5b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff16815250506001600085815260200190815260200160002081815481106108f257fe5b90600052602060002090600302016001015482828151811061091057fe5b6020026020010181815250508080600101915050610820565b508181945094505050505b915091565b6000606060016000858152602001908152602001600020805480602002602001604051908101604052809291908181526020016000905b82821015610a0c578382906000526020600020906003020160405180606001604052908160008201548152602001600182015481526020016002820160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152505081526020019060010190610970565b50505050905060008090505b8151811015610a84578373ffffffffffffffffffffffffffffffffffffffff16828281518110610a4457fe5b60200260200101516040015173ffffffffffffffffffffffffffffffffffffffff161415610a7757600192505050610a8b565b8080600101915050610a18565b5060009150505b92915050565b73fffffffffffffffffffffffffffffffffffffffe73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610b13576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610b0a906133d6565b60405180910390fd5b6000610b1d611781565b90506000811415610b3157610b30611f8b565b5b610b456001826122ac90919063ffffffff16565b8814610b86576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610b7d90613356565b60405180910390fd5b868611610bc8576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610bbf906133b6565b60405180910390fd5b6000604060018989030181610bd957fe5b0614610c1a576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610c1190613396565b60405180910390fd5b8660026000838152602001908152602001600020600101541115610c73576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610c6a90613336565b60405180910390fd5b6000600260008a81526020019081526020016000206000015414610ccc576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610cc390613376565b60405180910390fd5b604051806060016040528089815260200188815260200187815250600260008a8152602001908152602001600020600082015181600001556020820151816001015560408201518160020155905050600388908060018154018082558091505090600182039060005260206000200160009091929091909150555060008060008a815260200190815260200160002081610d669190612800565b506000600160008a815260200190815260200160002081610d879190612800565b506060610ddf610dda87878080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050506122cb565b6122f9565b905
060008090505b8151811015610f51576060610e0e838381518110610e0157fe5b60200260200101516122f9565b90506000808c81526020019081526020016000208054809190600101610e349190612800565b506040518060600160405280610e5d83600081518110610e5057fe5b60200260200101516123d6565b8152602001610e7f83600181518110610e7257fe5b60200260200101516123d6565b8152602001610ea183600281518110610e9457fe5b6020026020010151612447565b73ffffffffffffffffffffffffffffffffffffffff168152506000808d81526020019081526020016000208381548110610ed757fe5b9060005260206000209060030201600082015181600001556020820151816001015560408201518160020160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550905050508080600101915050610de7565b506060610fa9610fa486868080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050506122cb565b6122f9565b905060008090505b815181101561111d576060610fd8838381518110610fcb57fe5b60200260200101516122f9565b9050600160008d81526020019081526020016000208054809190600101610fff9190612800565b5060405180606001604052806110288360008151811061101b57fe5b60200260200101516123d6565b815260200161104a8360018151811061103d57fe5b60200260200101516123d6565b815260200161106c8360028151811061105f57fe5b6020026020010151612447565b73ffffffffffffffffffffffffffffffffffffffff16815250600160008e815260200190815260200160002083815481106110a357fe5b9060005260206000209060030201600082015181600001556020820151816001015560408201518160020160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550905050508080600101915050610fb1565b5050505050505050505050565b60006060600080858152602001908152602001600020805480602002602001604051908101604052809291908181526020016000905b828210156111fc578382906000526020600020906003020160405180606001604052908160008201548152602001600182015481526020016002820160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152505081526020019060010190611160565b50505050905060008090505b8151811015611274578373ffffffffffffffffffffffffffffffffffffffff1682828151811061123457fe5b60200260200101516040015173ffffffffffffffffffffffffffffffffffffffff1614156112675760019250505061127b565b8080600101915050611208565b5060009150505b92915050565b604081565b60006002600160f81b84846040516020016112a3939291906131d6565b6040516020818303038152906040526040516112bf9190613213565b602060405180830381855afa1580156112dc573d6000803e3d6000fd5b5050506040513d601f19601f820116820180604052506112ff9190810190612a58565b905092915050565b60006060600080848152602001908152602001600020805480602002602001604051908101604052809291908181526020016000905b828210156113d9578382906000526020600020906003020160405180606001604052908160008201548152602001600182015481526020016002820160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815250508152602001906001019061133d565b505050509050600080905060008090505b825181101561142c5761141d83828151811061140257fe5b602002602001015160200151836122ac90919063ffffffff16565b915080806001019150506113ea565b508092505050919050565b73fffffffffffffffffffffffffffffffffffffffe81565b600080600080859050600060218087518161146657fe5b04029050600081111561147f5761147c876117ab565b91505b6000602190505b818111611509576000600182038801519050818801519550806000602081106114ab57fe5b1a60f81b9450600060f81b857effffffffffffffffffffffffffffffffffffffffffffffffffff
ffffffffff191614156114f0576114e98685611286565b93506114fd565b6114fa8487611286565b93505b50602181019050611486565b508782149450505050509392505050565b60405161152690613254565b604051809103902081565b60008060009050600080905060008090505b84518167ffffffffffffffff16101561160c57606061156e868367ffffffffffffffff16604161246a565b9050600061158582896124f690919063ffffffff16565b905061158f612832565b6115998a83611619565b90506115a58a8361112a565b80156115dc57508473ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff16115b156115fe578194506115fb8160200151876122ac90919063ffffffff16565b95505b505050604181019050611543565b5081925050509392505050565b611621612832565b6060600080858152602001908152602001600020805480602002602001604051908101604052809291908181526020016000905b828210156116f1578382906000526020600020906003020160405180606001604052908160008201548152602001600182015481526020016002820160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152505081526020019060010190611655565b50505050905060008090505b8151811015611779578373ffffffffffffffffffffffffffffffffffffffff1682828151811061172957fe5b60200260200101516040015173ffffffffffffffffffffffffffffffffffffffff16141561176c5781818151811061175d57fe5b60200260200101519250611779565b80806001019150506116fd565b505092915050565b600061178c43611db1565b905090565b60006117a461179e611781565b8361112a565b9050919050565b60006002600060f81b836040516020016117c69291906131aa565b6040516020818303038152906040526040516117e29190613213565b602060405180830381855afa1580156117ff573d6000803e3d6000fd5b5050506040513d601f19601f820116820180604052506118229190810190612a58565b9050919050565b60008060008061184a600161183c611781565b6122ac90919063ffffffff16565b905060026000828152602001908152602001600020600001546002600083815260200190815260200160002060010154600260008481526020019081526020016000206002015493509350935050909192565b606080606060056040519080825280602002602001820160405280156118d25781602001602082028038833980820191505090505b50905073c26880a0af2ea0c7e8130e6ec47af756465452e8816000815181106118f757fe5b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505073be188d6641e8b680743a4815dfa0f6208038960f8160018151811061195357fe5b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505073c275dc8be39f50d12f66b6a63629c39da5bae5bd816002815181106119af57fe5b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505073f903ba9e006193c1527bfbe65fe2123704ea3f9981600381518110611a0b57fe5b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505073928ed6a3e94437bbd316ccad78479f1d163a6a8c81600481518110611a6757fe5b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505060606005604051908082528060200260200182016040528015611ad35781602001602082028038833980820191505090505b50905061271081600081518110611ae657fe5b60200260200101818152505061271081600181518110611b0257fe5b60200260200101818152505061271081600281518110611b1e57fe5b60200260200101818152505061271081600381518110611b3a57fe5b60200260200101818152505061271081600481518110611b5657fe5b60200260200101818152505081819350935050509091565b60ff81565b60016020528160005260406000208181548110611b8c57fe5b9060005260206000209060030201600091509150508060000154908060010154908060020160009054906101000a900473fffffffffffff
fffffffffffffffffffffffffff16905083565b6000611bea611be4611781565b83610939565b9050919050565b604051611bfd9061322a565b604051809103902081565b6000606060016000848152602001908152602001600020805480602002602001604051908101604052809291908181526020016000905b82821015611cdb578382906000526020600020906003020160405180606001604052908160008201548152602001600182015481526020016002820160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152505081526020019060010190611c3f565b505050509050600080905060008090505b8251811015611d2e57611d1f838281518110611d0457fe5b602002602001015160200151836122ac90919063ffffffff16565b91508080600101915050611cec565b508092505050919050565b604051611d459061323f565b604051809103902081565b600080600080611d5e611781565b905060026000828152602001908152602001600020600001546002600083815260200190815260200160002060010154600260008481526020019081526020016000206002015493509350935050909192565b60008060038054905090505b6000811115611e7157611dce612869565b6002600060036001850381548110611de257fe5b906000526020600020015481526020019081526020016000206040518060600160405290816000820154815260200160018201548152602001600282015481525050905083816020015111158015611e3f57506000816040015114155b8015611e4f575080604001518411155b15611e6257806000015192505050611eac565b50808060019003915050611dbd565b5060006003805490501115611ea757600360016003805490500381548110611e9557fe5b90600052602060002001549050611eac565b600090505b919050565b606080611ebd4361075d565b915091509091565b60038181548110611ed257fe5b906000526020600020016000915090505481565b600281565b60006020528160005260406000208181548110611f0457fe5b9060005260206000209060030201600091509150508060000154908060010154908060020160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905083565b600060404381611f5b57fe5b04905090565b60026020528060005260406000206000915090508060000154908060010154908060020154905083565b606080611f9661189d565b8092508193505050600080905060405180606001604052808281526020016000815260200160ff81525060026000838152602001908152602001600020600082015181600001556020820151816001015560408201518160020155905050600381908060018154018082558091505090600182039060005260206000200160009091929091909150555060008060008381526020019081526020016000208161203f9190612800565b50600060016000838152602001908152602001600020816120609190612800565b5060008090505b83518110156121825760008083815260200190815260200160002080548091906001016120949190612800565b5060405180606001604052808281526020018483815181106120b257fe5b602002602001015181526020018583815181106120cb57fe5b602002602001015173ffffffffffffffffffffffffffffffffffffffff16815250600080848152602001908152602001600020828154811061210957fe5b9060005260206000209060030201600082015181600001556020820151816001015560408201518160020160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055509050508080600101915050612067565b5060008090505b83518110156122a6576001600083815260200190815260200160002080548091906001016121b79190612800565b5060405180606001604052808281526020018483815181106121d557fe5b602002602001015181526020018583815181106121ee57fe5b602002602001015173ffffffffffffffffffffffffffffffffffffffff1681525060016000848152602001908152602001600020828154811061222d57fe5b9060005260206000209060030201600082015181600001556020820151816001015560408201518160020160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055509050508080600101915050612189565b50505050565b600080828401905083
8110156122c157600080fd5b8091505092915050565b6122d361288a565b600060208301905060405180604001604052808451815260200182815250915050919050565b606061230482612600565b61230d57600080fd5b60006123188361264e565b905060608160405190808252806020026020018201604052801561235657816020015b6123436128a4565b81526020019060019003908161233b5790505b509050600061236885602001516126bf565b8560200151019050600080600090505b848110156123c95761238983612748565b91506040518060400160405280838152602001848152508482815181106123ac57fe5b602002602001018190525081830192508080600101915050612378565b5082945050505050919050565b60008082600001511180156123f057506021826000015111155b6123f957600080fd5b600061240883602001516126bf565b9050600081846000015103905060008083866020015101905080519150602083101561243b57826020036101000a820491505b81945050505050919050565b6000601582600001511461245a57600080fd5b612463826123d6565b9050919050565b60608183018451101561247c57600080fd5b6060821560008114612499576040519150602082016040526124ea565b6040519150601f8416801560200281840101858101878315602002848b0101015b818310156124d757805183526020830192506020810190506124ba565b50868552601f19601f8301166040525050505b50809150509392505050565b600080600080604185511461251157600093505050506125fa565b602085015192506040850151915060ff6041860151169050601b8160ff16101561253c57601b810190505b601b8160ff16141580156125545750601c8160ff1614155b1561256557600093505050506125fa565b60006001878386866040516000815260200160405260405161258a94939291906132f1565b6020604051602081039080840390855afa1580156125ac573d6000803e3d6000fd5b505050602060405103519050600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614156125f257600080fd5b809450505050505b92915050565b600080826000015114156126175760009050612649565b60008083602001519050805160001a915060c060ff168260ff16101561264257600092505050612649565b6001925050505b919050565b6000808260000151141561266557600090506126ba565b6000809050600061267984602001516126bf565b84602001510190506000846000015185602001510190505b808210156126b3576126a282612748565b820191508280600101935050612691565b8293505050505b919050565b600080825160001a9050608060ff168110156126df576000915050612743565b60b860ff16811080612704575060c060ff168110158015612703575060f860ff1681105b5b15612713576001915050612743565b60c060ff168110156127335760018060b80360ff16820301915050612743565b60018060f80360ff168203019150505b919050565b6000806000835160001a9050608060ff1681101561276957600191506127f6565b60b860ff16811015612786576001608060ff1682030191506127f5565b60c060ff168110156127b65760b78103600185019450806020036101000a855104600182018101935050506127f4565b60f860ff168110156127d357600160c060ff1682030191506127f3565b60f78103600185019450806020036101000a855104600182018101935050505b5b5b5b8192505050919050565b81548183558181111561282d5760030281600302836000526020600020918201910161282c91906128be565b5b505050565b60405180606001604052806000815260200160008152602001600073ffffffffffffffffffffffffffffffffffffffff1681525090565b60405180606001604052806000815260200160008152602001600081525090565b604051806040016040528060008152602001600081525090565b604051806040016040528060008152602001600081525090565b61291191905b8082111561290d5760008082016000905560018201600090556002820160006101000a81549073ffffffffffffffffffffffffffffffffffffffff0219169055506003016128c4565b5090565b90565b60008135905061292381613693565b92915050565b600081359050612938816136aa565b92915050565b60008151905061294d816136aa565b92915050565b60008083601f84011261296557600080fd5b8235905067ffffffffffffffff81111561297e57600080fd5b60208301915083600182028301111561299657600080fd5b9250929050565b600082601f830112612
9ae57600080fd5b81356129c16129bc826134e2565b6134b5565b915080825260208301602083018583830111156129dd57600080fd5b6129e883828461363d565b50505092915050565b600081359050612a00816136c1565b92915050565b600060208284031215612a1857600080fd5b6000612a2684828501612914565b91505092915050565b600060208284031215612a4157600080fd5b6000612a4f84828501612929565b91505092915050565b600060208284031215612a6a57600080fd5b6000612a788482850161293e565b91505092915050565b60008060408385031215612a9457600080fd5b6000612aa285828601612929565b9250506020612ab385828601612929565b9150509250929050565b600080600060608486031215612ad257600080fd5b6000612ae086828701612929565b9350506020612af186828701612929565b925050604084013567ffffffffffffffff811115612b0e57600080fd5b612b1a8682870161299d565b9150509250925092565b600060208284031215612b3657600080fd5b6000612b44848285016129f1565b91505092915050565b60008060408385031215612b6057600080fd5b6000612b6e858286016129f1565b9250506020612b7f85828601612914565b9150509250929050565b600080600060608486031215612b9e57600080fd5b6000612bac868287016129f1565b9350506020612bbd86828701612929565b925050604084013567ffffffffffffffff811115612bda57600080fd5b612be68682870161299d565b9150509250925092565b60008060408385031215612c0357600080fd5b6000612c11858286016129f1565b9250506020612c22858286016129f1565b9150509250929050565b600080600080600080600060a0888a031215612c4757600080fd5b6000612c558a828b016129f1565b9750506020612c668a828b016129f1565b9650506040612c778a828b016129f1565b955050606088013567ffffffffffffffff811115612c9457600080fd5b612ca08a828b01612953565b9450945050608088013567ffffffffffffffff811115612cbf57600080fd5b612ccb8a828b01612953565b925092505092959891949750929550565b6000612ce88383612d0c565b60208301905092915050565b6000612d00838361317d565b60208301905092915050565b612d15816135b2565b82525050565b612d24816135b2565b82525050565b6000612d358261352e565b612d3f8185613569565b9350612d4a8361350e565b8060005b83811015612d7b578151612d628882612cdc565b9750612d6d8361354f565b925050600181019050612d4e565b5085935050505092915050565b6000612d9382613539565b612d9d818561357a565b9350612da88361351e565b8060005b83811015612dd9578151612dc08882612cf4565b9750612dcb8361355c565b925050600181019050612dac565b5085935050505092915050565b612def816135c4565b82525050565b612e06612e01826135d0565b61367f565b82525050565b612e15816135fc565b82525050565b612e2c612e27826135fc565b613689565b82525050565b6000612e3d82613544565b612e47818561358b565b9350612e5781856020860161364c565b80840191505092915050565b6000612e706004836135a7565b91507f766f7465000000000000000000000000000000000000000000000000000000006000830152600482019050919050565b6000612eb0602d83613596565b91507f537461727420626c6f636b206d7573742062652067726561746572207468616e60008301527f2063757272656e74207370616e000000000000000000000000000000000000006020830152604082019050919050565b6000612f16600f83613596565b91507f496e76616c6964207370616e20696400000000000000000000000000000000006000830152602082019050919050565b6000612f56601383613596565b91507f5370616e20616c726561647920657869737473000000000000000000000000006000830152602082019050919050565b6000612f96604583613596565b91507f446966666572656e6365206265747765656e20737461727420616e6420656e6460008301527f20626c6f636b206d75737420626520696e206d756c7469706c6573206f66207360208301527f7072696e740000000000000000000000000000000000000000000000000000006040830152606082019050919050565b6000613022602a83613596565b91507f456e6420626c6f636b206d7573742062652067726561746572207468616e207360008301527f7461727420626c6f636b000000000000000000000000000000000000000000006020830152604082019050919050565b6000613088601283613596565b91507f4e6f742053797374656d204164
646573732100000000000000000000000000006000830152602082019050919050565b60006130c86005836135a7565b91507f38303030310000000000000000000000000000000000000000000000000000006000830152600582019050919050565b6000613108600e836135a7565b91507f6865696d64616c6c2d38303030310000000000000000000000000000000000006000830152600e82019050919050565b606082016000820151613151600085018261317d565b506020820151613164602085018261317d565b5060408201516131776040850182612d0c565b50505050565b61318681613626565b82525050565b61319581613626565b82525050565b6131a481613630565b82525050565b60006131b68285612df5565b6001820191506131c68284612e1b565b6020820191508190509392505050565b60006131e28286612df5565b6001820191506131f28285612e1b565b6020820191506132028284612e1b565b602082019150819050949350505050565b600061321f8284612e32565b915081905092915050565b600061323582612e63565b9150819050919050565b600061324a826130bb565b9150819050919050565b600061325f826130fb565b9150819050919050565b600060208201905061327e6000830184612d1b565b92915050565b6000604082019050818103600083015261329e8185612d2a565b905081810360208301526132b28184612d88565b90509392505050565b60006020820190506132d06000830184612de6565b92915050565b60006020820190506132eb6000830184612e0c565b92915050565b60006080820190506133066000830187612e0c565b613313602083018661319b565b6133206040830185612e0c565b61332d6060830184612e0c565b95945050505050565b6000602082019050818103600083015261334f81612ea3565b9050919050565b6000602082019050818103600083015261336f81612f09565b9050919050565b6000602082019050818103600083015261338f81612f49565b9050919050565b600060208201905081810360008301526133af81612f89565b9050919050565b600060208201905081810360008301526133cf81613015565b9050919050565b600060208201905081810360008301526133ef8161307b565b9050919050565b600060608201905061340b600083018461313b565b92915050565b6000602082019050613426600083018461318c565b92915050565b6000606082019050613441600083018661318c565b61344e602083018561318c565b61345b6040830184612d1b565b949350505050565b6000606082019050613478600083018661318c565b613485602083018561318c565b613492604083018461318c565b949350505050565b60006020820190506134af600083018461319b565b92915050565b6000604051905081810181811067ffffffffffffffff821117156134d857600080fd5b8060405250919050565b600067ffffffffffffffff8211156134f957600080fd5b601f19601f8301169050602081019050919050565b6000819050602082019050919050565b6000819050602082019050919050565b600081519050919050565b600081519050919050565b600081519050919050565b6000602082019050919050565b6000602082019050919050565b600082825260208201905092915050565b600082825260208201905092915050565b600081905092915050565b600082825260208201905092915050565b600081905092915050565b60006135bd82613606565b9050919050565b60008115159050919050565b60007fff0000000000000000000000000000000000000000000000000000000000000082169050919050565b6000819050919050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000819050919050565b600060ff82169050919050565b82818337600083830152505050565b60005b8381101561366a57808201518184015260208101905061364f565b83811115613679576000848401525b50505050565b6000819050919050565b6000819050919050565b61369c816135b2565b81146136a757600080fd5b50565b6136b3816135fc565b81146136be57600080fd5b50565b6136ca81613626565b81146136d557600080fd5b5056fea365627a7a723158208f52ee07630ffe523cc6ad3e15f437f973dcfa36729cd697f9b0fc4a145a48f06c6578706572696d656e74616cf564736f6c634300050b0040" - }, - "0000000000000000000000000000000000001001": { - "balance": "0x0", - "code": 
"0x608060405234801561001057600080fd5b50600436106100415760003560e01c806319494a17146100465780633434735f146100e15780635407ca671461012b575b600080fd5b6100c76004803603604081101561005c57600080fd5b81019080803590602001909291908035906020019064010000000081111561008357600080fd5b82018360208201111561009557600080fd5b803590602001918460018302840111640100000000831117156100b757600080fd5b9091929391929390505050610149565b604051808215151515815260200191505060405180910390f35b6100e961047a565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b610133610492565b6040518082815260200191505060405180910390f35b600073fffffffffffffffffffffffffffffffffffffffe73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610200576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f4e6f742053797374656d2041646465737321000000000000000000000000000081525060200191505060405180910390fd5b606061025761025285858080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f82011690508083019250505050505050610498565b6104c6565b905060006102788260008151811061026b57fe5b60200260200101516105a3565b905080600160005401146102f4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601b8152602001807f537461746549647320617265206e6f742073657175656e7469616c000000000081525060200191505060405180910390fd5b600080815480929190600101919050555060006103248360018151811061031757fe5b6020026020010151610614565b905060606103458460028151811061033857fe5b6020026020010151610637565b9050610350826106c3565b1561046f576000624c4b409050606084836040516024018083815260200180602001828103825283818151815260200191508051906020019080838360005b838110156103aa57808201518184015260208101905061038f565b50505050905090810190601f1680156103d75780820380516001836020036101000a031916815260200191505b5093505050506040516020818303038152906040527f26c53bea000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff8381831617835250505050905060008082516020840160008887f1965050505b505050509392505050565b73fffffffffffffffffffffffffffffffffffffffe81565b60005481565b6104a0610943565b600060208301905060405180604001604052808451815260200182815250915050919050565b60606104d1826106dc565b6104da57600080fd5b60006104e58361072a565b905060608160405190808252806020026020018201604052801561052357816020015b61051061095d565b8152602001906001900390816105085790505b5090506000610535856020015161079b565b8560200151019050600080600090505b848110156105965761055683610824565b915060405180604001604052808381526020018481525084828151811061057957fe5b602002602001018190525081830192508080600101915050610545565b5082945050505050919050565b60008082600001511180156105bd57506021826000015111155b6105c657600080fd5b60006105d5836020015161079b565b9050600081846000015103905060008083866020015101905080519150602083101561060857826020036101000a820491505b81945050505050919050565b6000601582600001511461062757600080fd5b610630826105a3565b9050919050565b6060600082600001511161064a57600080fd5b6000610659836020015161079b565b905060008184600001510390506060816040519080825280601f01601f19166020018201604052801561069b5781602001600182028038833980820191505090505b50905060008160200190506106b78487602001510182856108dc565b81945050505050919050565b600080823b905060008163ffffffff1611915050919050565b600080826000015114156106f3576000905061
0725565b60008083602001519050805160001a915060c060ff168260ff16101561071e57600092505050610725565b6001925050505b919050565b600080826000015114156107415760009050610796565b60008090506000610755846020015161079b565b84602001510190506000846000015185602001510190505b8082101561078f5761077e82610824565b82019150828060010193505061076d565b8293505050505b919050565b600080825160001a9050608060ff168110156107bb57600091505061081f565b60b860ff168110806107e0575060c060ff1681101580156107df575060f860ff1681105b5b156107ef57600191505061081f565b60c060ff1681101561080f5760018060b80360ff1682030191505061081f565b60018060f80360ff168203019150505b919050565b6000806000835160001a9050608060ff1681101561084557600191506108d2565b60b860ff16811015610862576001608060ff1682030191506108d1565b60c060ff168110156108925760b78103600185019450806020036101000a855104600182018101935050506108d0565b60f860ff168110156108af57600160c060ff1682030191506108cf565b60f78103600185019450806020036101000a855104600182018101935050505b5b5b5b8192505050919050565b60008114156108ea5761093e565b5b602060ff16811061091a5782518252602060ff1683019250602060ff1682019150602060ff16810390506108eb565b6000600182602060ff16036101000a03905080198451168184511681811785525050505b505050565b604051806040016040528060008152602001600081525090565b60405180604001604052806000815260200160008152509056fea265627a7a7231582083fbdacb76f32b4112d0f7db9a596937925824798a0026ba0232322390b5263764736f6c634300050b0032" - }, - "0000000000000000000000000000000000001010": { - "balance": "0x204fcd4f31349d83b6e00000", - "code": "0x60806040526004361061019c5760003560e01c806377d32e94116100ec578063acd06cb31161008a578063e306f77911610064578063e306f77914610a7b578063e614d0d614610aa6578063f2fde38b14610ad1578063fc0c546a14610b225761019c565b8063acd06cb31461097a578063b789543c146109cd578063cc79f97b14610a505761019c565b80639025e64c116100c65780639025e64c146107c957806395d89b4114610859578063a9059cbb146108e9578063abceeba21461094f5761019c565b806377d32e94146106315780638da5cb5b146107435780638f32d59b1461079a5761019c565b806347e7ef24116101595780637019d41a116101335780637019d41a1461053357806370a082311461058a578063715018a6146105ef578063771282f6146106065761019c565b806347e7ef2414610410578063485cc9551461046b57806360f96a8f146104dc5761019c565b806306fdde03146101a15780631499c5921461023157806318160ddd1461028257806319d27d9c146102ad5780632e1a7d4d146103b1578063313ce567146103df575b600080fd5b3480156101ad57600080fd5b506101b6610b79565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101f65780820151818401526020810190506101db565b50505050905090810190601f1680156102235780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561023d57600080fd5b506102806004803603602081101561025457600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610bb6565b005b34801561028e57600080fd5b50610297610c24565b6040518082815260200191505060405180910390f35b3480156102b957600080fd5b5061036f600480360360a08110156102d057600080fd5b81019080803590602001906401000000008111156102ed57600080fd5b8201836020820111156102ff57600080fd5b8035906020019184600183028401116401000000008311171561032157600080fd5b9091929391929390803590602001909291908035906020019092919080359060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610c3a565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6103dd600480360360208110156103c757600080fd5b8101908080359060200190929190505050610e06565b005b3480156103eb57600080fd5b506103f4610f58565b604051808260ff1660
ff16815260200191505060405180910390f35b34801561041c57600080fd5b506104696004803603604081101561043357600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190505050610f61565b005b34801561047757600080fd5b506104da6004803603604081101561048e57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff16906020019092919050505061111d565b005b3480156104e857600080fd5b506104f16111ec565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561053f57600080fd5b50610548611212565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561059657600080fd5b506105d9600480360360208110156105ad57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050611238565b6040518082815260200191505060405180910390f35b3480156105fb57600080fd5b50610604611259565b005b34801561061257600080fd5b5061061b611329565b6040518082815260200191505060405180910390f35b34801561063d57600080fd5b506107016004803603604081101561065457600080fd5b81019080803590602001909291908035906020019064010000000081111561067b57600080fd5b82018360208201111561068d57600080fd5b803590602001918460018302840111640100000000831117156106af57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f82011690508083019250505050505050919291929050505061132f565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561074f57600080fd5b506107586114b4565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b3480156107a657600080fd5b506107af6114dd565b604051808215151515815260200191505060405180910390f35b3480156107d557600080fd5b506107de611534565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561081e578082015181840152602081019050610803565b50505050905090810190601f16801561084b5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561086557600080fd5b5061086e61156d565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156108ae578082015181840152602081019050610893565b50505050905090810190601f1680156108db5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b610935600480360360408110156108ff57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803590602001909291905050506115aa565b604051808215151515815260200191505060405180910390f35b34801561095b57600080fd5b506109646115d0565b6040518082815260200191505060405180910390f35b34801561098657600080fd5b506109b36004803603602081101561099d57600080fd5b810190808035906020019092919050505061165d565b604051808215151515815260200191505060405180910390f35b3480156109d957600080fd5b50610a3a600480360360808110156109f057600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190803590602001909291908035906020019092919050505061167d565b6040518082815260200191505060405180910390f35b348015610a5c57600080fd5b50610a6561169d565b6040518082815260200191505060405180910390f35b348015610a8757600080fd5b50610a906116a4565b6040518082815260200191505060405180910390f35b348015610ab257600080fd5b50610abb6116aa565b6040518082815260200191505060405180910390f35b348015610add57600080fd5b50610b2060048036036020811015610af45
7600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050611737565b005b348015610b2e57600080fd5b50610b37611754565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b60606040518060400160405280600b81526020017f4d6174696320546f6b656e000000000000000000000000000000000000000000815250905090565b6040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260108152602001807f44697361626c656420666561747572650000000000000000000000000000000081525060200191505060405180910390fd5b6000601260ff16600a0a6402540be40002905090565b6000808511610c4857600080fd5b6000831480610c575750824311155b610cc9576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260148152602001807f5369676e6174757265206973206578706972656400000000000000000000000081525060200191505060405180910390fd5b6000610cd73387878761167d565b9050600015156005600083815260200190815260200160002060009054906101000a900460ff16151514610d73576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252600f8152602001807f536967206465616374697661746564000000000000000000000000000000000081525060200191505060405180910390fd5b60016005600083815260200190815260200160002060006101000a81548160ff021916908315150217905550610ded8189898080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f8201169050808301925050505050505061132f565b9150610dfa82848861177a565b50509695505050505050565b60003390506000610e1682611238565b9050610e2d83600654611b3790919063ffffffff16565b600681905550600083118015610e4257508234145b610eb4576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260138152602001807f496e73756666696369656e7420616d6f756e740000000000000000000000000081525060200191505060405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167febff2602b3f468259e1e99f613fed6691f3a6526effe6ef3e768ba7ae7a36c4f8584610f3087611238565b60405180848152602001838152602001828152602001935050505060405180910390a3505050565b60006012905090565b610f696114dd565b610f7257600080fd5b600081118015610faf5750600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614155b611004576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180611e636023913960400191505060405180910390fd5b600061100f83611238565b905060008390508073ffffffffffffffffffffffffffffffffffffffff166108fc849081150290604051600060405180830381858888f1935050505015801561105c573d6000803e3d6000fd5b5061107283600654611b5790919063ffffffff16565b6006819055508373ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f4e2ca0515ed1aef1395f66b5303bb5d6f1bf9d61a353fa53f73f8ac9973fa9f685856110f489611238565b60405180848152602001838152602001828152602001935050505060405180910390a350505050565b600760009054906101000a900460ff1615611183576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180611e406023913960400191505060405180910390fd5b6001600760006101000a81548160ff02191690831515021790555080600260006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555061
11e882611b76565b5050565b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600460009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60008173ffffffffffffffffffffffffffffffffffffffff16319050919050565b6112616114dd565b61126a57600080fd5b600073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a360008060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550565b60065481565b600080600080604185511461134a57600093505050506114ae565b602085015192506040850151915060ff6041860151169050601b8160ff16101561137557601b810190505b601b8160ff161415801561138d5750601c8160ff1614155b1561139e57600093505050506114ae565b60018682858560405160008152602001604052604051808581526020018460ff1660ff1681526020018381526020018281526020019450505050506020604051602081039080840390855afa1580156113fb573d6000803e3d6000fd5b505050602060405103519350600073ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff1614156114aa576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f4572726f7220696e2065637265636f766572000000000000000000000000000081525060200191505060405180910390fd5b5050505b92915050565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614905090565b6040518060400160405280600381526020017f013881000000000000000000000000000000000000000000000000000000000081525081565b60606040518060400160405280600581526020017f4d41544943000000000000000000000000000000000000000000000000000000815250905090565b60008134146115bc57600090506115ca565b6115c733848461177a565b90505b92915050565b6040518060800160405280605b8152602001611ed8605b91396040516020018082805190602001908083835b6020831061161f57805182526020820191506020810190506020830392506115fc565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b60056020528060005260406000206000915054906101000a900460ff1681565b600061169361168e86868686611c6e565b611d44565b9050949350505050565b6201388181565b60015481565b604051806080016040528060528152602001611e86605291396040516020018082805190602001908083835b602083106116f957805182526020820191506020810190506020830392506116d6565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b61173f6114dd565b61174857600080fd5b61175181611b76565b50565b600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000803073ffffffffffffffffffffffffffffffffffffffff166370a08231866040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b1580156117fa57600080fd5b505afa15801561180e573d6000803e3d6000fd5b505050506040513d602081101561182457600080fd5b8101908080519060200190929190505050905060003073ffffffffffffffffffffffffffffffffffffffff166370a08231866040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b1580156118b657600080fd5b505afa1580156118ca573d6000803e3d6000fd5b505050506040513
d60208110156118e057600080fd5b810190808051906020019092919050505090506118fe868686611d8e565b8473ffffffffffffffffffffffffffffffffffffffff168673ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167fe6497e3ee548a3372136af2fcb0696db31fc6cf20260707645068bd3fe97f3c48786863073ffffffffffffffffffffffffffffffffffffffff166370a082318e6040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b158015611a0657600080fd5b505afa158015611a1a573d6000803e3d6000fd5b505050506040513d6020811015611a3057600080fd5b81019080805190602001909291905050503073ffffffffffffffffffffffffffffffffffffffff166370a082318e6040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b158015611abe57600080fd5b505afa158015611ad2573d6000803e3d6000fd5b505050506040513d6020811015611ae857600080fd5b8101908080519060200190929190505050604051808681526020018581526020018481526020018381526020018281526020019550505050505060405180910390a46001925050509392505050565b600082821115611b4657600080fd5b600082840390508091505092915050565b600080828401905083811015611b6c57600080fd5b8091505092915050565b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff161415611bb057600080fd5b8073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a3806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b6000806040518060800160405280605b8152602001611ed8605b91396040516020018082805190602001908083835b60208310611cc05780518252602082019150602081019050602083039250611c9d565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405160208183030381529060405280519060200120905060405181815273ffffffffffffffffffffffffffffffffffffffff8716602082015285604082015284606082015283608082015260a0812092505081915050949350505050565b60008060015490506040517f190100000000000000000000000000000000000000000000000000000000000081528160028201528360228201526042812092505081915050919050565b8173ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050158015611dd4573d6000803e3d6000fd5b508173ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040518082815260200191505060405180910390a350505056fe54686520636f6e747261637420697320616c726561647920696e697469616c697a6564496e73756666696369656e7420616d6f756e74206f7220696e76616c69642075736572454950373132446f6d61696e28737472696e67206e616d652c737472696e672076657273696f6e2c75696e7432353620636861696e49642c6164647265737320766572696679696e67436f6e747261637429546f6b656e5472616e736665724f726465722861646472657373207370656e6465722c75696e7432353620746f6b656e49644f72416d6f756e742c6279746573333220646174612c75696e743235362065787069726174696f6e29a265627a7a723158208f81700133738d766ae3d68af591ad588b0125bd91449192179f460893f79f6b64736f6c634300050b0032" - }, - "C26880A0AF2EA0c7E8130e6EC47Af756465452E8": { - "balance": "0x3635c9adc5dea00000" - }, - "be188D6641E8b680743A4815dFA0f6208038960F": { - "balance": "0x3635c9adc5dea00000" - }, 
- "c275DC8bE39f50D12F66B6a63629C39dA5BAe5bd": { - "balance": "0x3635c9adc5dea00000" - }, - "F903ba9E006193c1527BfBe65fe2123704EA3F99": { - "balance": "0x3635c9adc5dea00000" - }, - "928Ed6A3e94437bbd316cCAD78479f1d163A6A8C": { - "balance": "0x3635c9adc5dea00000" - } - } - \ No newline at end of file diff --git a/core/blockchain.go b/core/blockchain.go index 1ab6392169f..0e46948aea6 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -534,6 +534,11 @@ func BlockPostValidation(gasUsed, blobGasUsed uint64, checkReceipts bool, receip return fmt.Errorf("receiptHash mismatch: %x != %x, headerNum=%d, %x", receiptHash, h.ReceiptHash, h.Number.Uint64(), h.Hash()) } + + lbloom := types.CreateBloom(receipts) + if lbloom != h.Bloom { + return fmt.Errorf("invalid bloom (remote: %x local: %x)", h.Bloom, lbloom) + } } return nil } diff --git a/core/chain_makers.go b/core/chain_makers.go index c38d4c92f8c..69ec7cc164e 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -22,6 +22,7 @@ package core import ( "context" "encoding/binary" + "errors" "fmt" "math/big" @@ -30,7 +31,6 @@ import ( "github.com/erigontech/erigon-lib/chain" libcommon "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/common/length" - "github.com/erigontech/erigon-lib/config3" "github.com/erigontech/erigon-lib/kv" libstate "github.com/erigontech/erigon-lib/state" "github.com/erigontech/erigon/consensus" @@ -317,7 +317,6 @@ func (cp *ChainPack) NumberOfPoWBlocks() int { // values. Inserting them into BlockChain requires use of FakePow or // a similar non-validating proof of work implementation. func GenerateChain(config *chain.Config, parent *types.Block, engine consensus.Engine, db kv.RwDB, n int, gen func(int, *BlockGen)) (*ChainPack, error) { - histV3 := config3.EnableHistoryV4InTest if config == nil { config = params.TestChainConfig } @@ -331,28 +330,21 @@ func GenerateChain(config *chain.Config, parent *types.Block, engine consensus.E defer tx.Rollback() logger := log.New("generate-chain", config.ChainName) - var stateReader state.StateReader - var stateWriter state.StateWriter - var domains *libstate.SharedDomains - if histV3 { - var err error - domains, err = libstate.NewSharedDomains(tx, logger) - if err != nil { - return nil, err - } - defer domains.Close() - stateReader = state.NewReaderV4(domains) - stateWriter = state.NewWriterV4(domains) + domains, err := libstate.NewSharedDomains(tx, logger) + if err != nil { + return nil, err } + defer domains.Close() + stateReader := state.NewReaderV4(domains) + stateWriter := state.NewWriterV4(domains) + txNum := -1 setBlockNum := func(blockNum uint64) { domains.SetBlockNum(blockNum) } txNumIncrement := func() { txNum++ - if histV3 { - domains.SetTxNum(uint64(txNum)) - } + domains.SetTxNum(uint64(txNum)) } genblock := func(i int, parent *types.Block, ibs *state.IntraBlockState, stateReader state.StateReader, stateWriter state.StateWriter) (*types.Block, types.Receipts, error) { @@ -393,24 +385,20 @@ func GenerateChain(config *chain.Config, parent *types.Block, engine consensus.E } var err error - if histV3 { - //To use `CalcHashRootForTests` need flush before, but to use `domains.ComputeCommitment` need flush after - //if err = domains.Flush(ctx, tx); err != nil { - // return nil, nil, err - //} - //b.header.Root, err = CalcHashRootForTests(tx, b.header, histV3, true) - stateRoot, err := domains.ComputeCommitment(ctx, true, b.header.Number.Uint64(), "") - if err != nil { - return nil, nil, fmt.Errorf("call to CalcTrieRoot: %w", err) - } - if 
err = domains.Flush(ctx, tx); err != nil { - return nil, nil, err - } - b.header.Root = libcommon.BytesToHash(stateRoot) - } else { - b.header.Root, err = CalcHashRootForTests(tx, b.header, histV3, false) + //To use `CalcHashRootForTests` need flush before, but to use `domains.ComputeCommitment` need flush after + //if err = domains.Flush(ctx, tx); err != nil { + // return nil, nil, err + //} + //b.header.Root, err = CalcHashRootForTests(tx, b.header, histV3, true) + stateRoot, err := domains.ComputeCommitment(ctx, true, b.header.Number.Uint64(), "") + if err != nil { + return nil, nil, fmt.Errorf("call to CalcTrieRoot: %w", err) + } + if err = domains.Flush(ctx, tx); err != nil { + return nil, nil, err } - _ = err + b.header.Root = libcommon.BytesToHash(stateRoot) + // Recreating block to make sure Root makes it into the header block := types.NewBlock(b.header, b.txs, b.uncles, b.receipts, nil /* withdrawals */, nil /*requests*/) if config.IsCancun(block.Number().Uint64(), block.Time()) { @@ -418,7 +406,7 @@ func GenerateChain(config *chain.Config, parent *types.Block, engine consensus.E } return block, b.receipts, nil } - return nil, nil, fmt.Errorf("no engine to generate blocks") + return nil, nil, errors.New("no engine to generate blocks") } for i := 0; i < n; i++ { diff --git a/core/evm.go b/core/evm.go index 19a6a9af169..34dcd500316 100644 --- a/core/evm.go +++ b/core/evm.go @@ -20,7 +20,6 @@ package core import ( - "fmt" "math/big" "github.com/holiman/uint256" @@ -49,7 +48,7 @@ func NewEVMBlockContext(header *types.Header, blockHashFunc func(n uint64) libco if header.BaseFee != nil { overflow := baseFee.SetFromBig(header.BaseFee) if overflow { - panic(fmt.Errorf("header.BaseFee higher than 2^256-1")) + panic("header.BaseFee higher than 2^256-1") } } diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go index d54e10cb6fe..4e1907f1415 100644 --- a/core/forkid/forkid_test.go +++ b/core/forkid/forkid_test.go @@ -138,19 +138,6 @@ func TestCreation(t *testing.T) { {10000000, 1800000000, ID{Hash: checksumToBytes(0x5fbc16bc), Next: 0}}, // Future Cancun block (mock) }, }, - // Mumbai test cases - { - params.MumbaiChainConfig, - params.MumbaiGenesisHash, - []testcase{ - {0, 0, ID{Hash: checksumToBytes(0xf6ef3fdf), Next: 2722000}}, - {2722000, 0, ID{Hash: checksumToBytes(0x8647df30), Next: 13996000}}, // First Istanbul block - {13996000, 0, ID{Hash: checksumToBytes(0x06cc1179), Next: 22640000}}, // First Berlin block - {22640000, 0, ID{Hash: checksumToBytes(0x9adf950e), Next: 41874000}}, // First London block - {41874000, 0, ID{Hash: checksumToBytes(0x0c015a91), Next: 45648608}}, // First Agra block - {45648608, 0, ID{Hash: checksumToBytes(0x0f2316c1), Next: 0}}, // First Napoli block - }, - }, // Amoy test cases { params.AmoyChainConfig, diff --git a/core/genesis_test.go b/core/genesis_test.go index 92f5d398a77..9462c595111 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -55,7 +55,7 @@ func TestGenesisBlockHashes(t *testing.T) { t.Fatal(err) } defer tx.Rollback() - _, block, err := core.WriteGenesisBlock(tx, genesis, nil, "", logger) + _, block, err := core.WriteGenesisBlock(tx, genesis, nil, datadir.New(t.TempDir()), logger) require.NoError(t, err) expect := params.GenesisHashByChainName(network) require.NotNil(t, expect, network) @@ -71,12 +71,12 @@ func TestGenesisBlockRoots(t *testing.T) { require := require.New(t) var err error - block, _, _ := core.GenesisToBlock(core.MainnetGenesisBlock(), "", log.Root()) + block, _, _ := 
core.GenesisToBlock(core.MainnetGenesisBlock(), datadir.New(t.TempDir()), log.Root()) if block.Hash() != params.MainnetGenesisHash { t.Errorf("wrong mainnet genesis hash, got %v, want %v", block.Hash(), params.MainnetGenesisHash) } - block, _, err = core.GenesisToBlock(core.GnosisGenesisBlock(), "", log.Root()) + block, _, err = core.GenesisToBlock(core.GnosisGenesisBlock(), datadir.New(t.TempDir()), log.Root()) require.NoError(err) if block.Root() != params.GnosisGenesisStateRoot { t.Errorf("wrong Gnosis Chain genesis state root, got %v, want %v", block.Root(), params.GnosisGenesisStateRoot) @@ -85,7 +85,7 @@ func TestGenesisBlockRoots(t *testing.T) { t.Errorf("wrong Gnosis Chain genesis hash, got %v, want %v", block.Hash(), params.GnosisGenesisHash) } - block, _, err = core.GenesisToBlock(core.ChiadoGenesisBlock(), "", log.Root()) + block, _, err = core.GenesisToBlock(core.ChiadoGenesisBlock(), datadir.New(t.TempDir()), log.Root()) require.NoError(err) if block.Root() != params.ChiadoGenesisStateRoot { t.Errorf("wrong Chiado genesis state root, got %v, want %v", block.Root(), params.ChiadoGenesisStateRoot) @@ -94,7 +94,7 @@ func TestGenesisBlockRoots(t *testing.T) { t.Errorf("wrong Chiado genesis hash, got %v, want %v", block.Hash(), params.ChiadoGenesisHash) } - block, _, err = core.GenesisToBlock(core.TestGenesisBlock(), "", log.Root()) + block, _, err = core.GenesisToBlock(core.TestGenesisBlock(), datadir.New(t.TempDir()), log.Root()) require.NoError(err) if block.Root() != params.TestGenesisStateRoot { t.Errorf("wrong Chiado genesis state root, got %v, want %v", block.Root(), params.TestGenesisStateRoot) @@ -113,13 +113,13 @@ func TestCommitGenesisIdempotency(t *testing.T) { defer tx.Rollback() genesis := core.GenesisBlockByChainName(networkname.MainnetChainName) - _, _, err = core.WriteGenesisBlock(tx, genesis, nil, "", logger) + _, _, err = core.WriteGenesisBlock(tx, genesis, nil, datadir.New(t.TempDir()), logger) require.NoError(t, err) seq, err := tx.ReadSequence(kv.EthTx) require.NoError(t, err) require.Equal(t, uint64(2), seq) - _, _, err = core.WriteGenesisBlock(tx, genesis, nil, "", logger) + _, _, err = core.WriteGenesisBlock(tx, genesis, nil, datadir.New(t.TempDir()), logger) require.NoError(t, err) seq, err = tx.ReadSequence(kv.EthTx) require.NoError(t, err) diff --git a/core/genesis_write.go b/core/genesis_write.go index 448a2131074..40d821a0534 100644 --- a/core/genesis_write.go +++ b/core/genesis_write.go @@ -24,9 +24,9 @@ import ( "crypto/ecdsa" "embed" "encoding/json" + "errors" "fmt" "math/big" - "os" "slices" "github.com/c2h5oh/datasize" @@ -74,17 +74,17 @@ var allocs embed.FS // error is a *params.ConfigCompatError and the new, unwritten config is returned. // // The returned chain configuration is never nil. 
-func CommitGenesisBlock(db kv.RwDB, genesis *types.Genesis, tmpDir string, logger log.Logger) (*chain.Config, *types.Block, error) { - return CommitGenesisBlockWithOverride(db, genesis, nil, tmpDir, logger) +func CommitGenesisBlock(db kv.RwDB, genesis *types.Genesis, dirs datadir.Dirs, logger log.Logger) (*chain.Config, *types.Block, error) { + return CommitGenesisBlockWithOverride(db, genesis, nil, dirs, logger) } -func CommitGenesisBlockWithOverride(db kv.RwDB, genesis *types.Genesis, overridePragueTime *big.Int, tmpDir string, logger log.Logger) (*chain.Config, *types.Block, error) { +func CommitGenesisBlockWithOverride(db kv.RwDB, genesis *types.Genesis, overridePragueTime *big.Int, dirs datadir.Dirs, logger log.Logger) (*chain.Config, *types.Block, error) { tx, err := db.BeginRw(context.Background()) if err != nil { return nil, nil, err } defer tx.Rollback() - c, b, err := WriteGenesisBlock(tx, genesis, overridePragueTime, tmpDir, logger) + c, b, err := WriteGenesisBlock(tx, genesis, overridePragueTime, dirs, logger) if err != nil { return c, b, err } @@ -95,11 +95,9 @@ func CommitGenesisBlockWithOverride(db kv.RwDB, genesis *types.Genesis, override return c, b, nil } -func WriteGenesisBlock(tx kv.RwTx, genesis *types.Genesis, overridePragueTime *big.Int, tmpDir string, logger log.Logger) (*chain.Config, *types.Block, error) { - if genesis != nil { - if err := rawdb.WriteGenesis(tx, genesis); err != nil { - return nil, nil, err - } +func WriteGenesisBlock(tx kv.RwTx, genesis *types.Genesis, overridePragueTime *big.Int, dirs datadir.Dirs, logger log.Logger) (*chain.Config, *types.Block, error) { + if err := rawdb.WriteGenesis(tx, genesis); err != nil { + return nil, nil, err } var storedBlock *types.Block @@ -126,7 +124,7 @@ func WriteGenesisBlock(tx kv.RwTx, genesis *types.Genesis, overridePragueTime *b custom = false } applyOverrides(genesis.Config) - block, _, err1 := write(tx, genesis, tmpDir, logger) + block, _, err1 := write(tx, genesis, dirs, logger) if err1 != nil { return genesis.Config, nil, err1 } @@ -138,7 +136,7 @@ func WriteGenesisBlock(tx kv.RwTx, genesis *types.Genesis, overridePragueTime *b // Check whether the genesis block is already written. 
if genesis != nil { - block, _, err1 := GenesisToBlock(genesis, tmpDir, logger) + block, _, err1 := GenesisToBlock(genesis, dirs, logger) if err1 != nil { return genesis.Config, nil, err1 } @@ -195,8 +193,8 @@ func WriteGenesisBlock(tx kv.RwTx, genesis *types.Genesis, overridePragueTime *b return newCfg, storedBlock, nil } -func WriteGenesisState(g *types.Genesis, tx kv.RwTx, tmpDir string, logger log.Logger) (*types.Block, *state.IntraBlockState, error) { - block, statedb, err := GenesisToBlock(g, tmpDir, logger) +func WriteGenesisState(g *types.Genesis, tx kv.RwTx, dirs datadir.Dirs, logger log.Logger) (*types.Block, *state.IntraBlockState, error) { + block, statedb, err := GenesisToBlock(g, dirs, logger) if err != nil { return nil, nil, err } @@ -205,7 +203,7 @@ func WriteGenesisState(g *types.Genesis, tx kv.RwTx, tmpDir string, logger log.L stateWriter = state.NewNoopWriter() if block.Number().Sign() != 0 { - return nil, statedb, fmt.Errorf("can't commit genesis block with number > 0") + return nil, statedb, errors.New("can't commit genesis block with number > 0") } if err := statedb.CommitBlock(&chain.Rules{}, stateWriter); err != nil { return nil, statedb, fmt.Errorf("cannot write state: %w", err) @@ -214,13 +212,13 @@ func WriteGenesisState(g *types.Genesis, tx kv.RwTx, tmpDir string, logger log.L return block, statedb, nil } -func MustCommitGenesis(g *types.Genesis, db kv.RwDB, tmpDir string, logger log.Logger) *types.Block { +func MustCommitGenesis(g *types.Genesis, db kv.RwDB, dirs datadir.Dirs, logger log.Logger) *types.Block { tx, err := db.BeginRw(context.Background()) if err != nil { panic(err) } defer tx.Rollback() - block, _, err := write(tx, g, tmpDir, logger) + block, _, err := write(tx, g, dirs, logger) if err != nil { panic(err) } @@ -233,8 +231,8 @@ func MustCommitGenesis(g *types.Genesis, db kv.RwDB, tmpDir string, logger log.L // Write writes the block and state of a genesis specification to the database. // The block is committed as the canonical head block. -func write(tx kv.RwTx, g *types.Genesis, tmpDir string, logger log.Logger) (*types.Block, *state.IntraBlockState, error) { - block, statedb, err2 := WriteGenesisState(g, tx, tmpDir, logger) +func write(tx kv.RwTx, g *types.Genesis, dirs datadir.Dirs, logger log.Logger) (*types.Block, *state.IntraBlockState, error) { + block, statedb, err2 := WriteGenesisState(g, tx, dirs, logger) if err2 != nil { return block, statedb, err2 } @@ -291,9 +289,9 @@ func write(tx kv.RwTx, g *types.Genesis, tmpDir string, logger log.Logger) (*typ } // GenesisBlockForTesting creates and writes a block in which addr has the given wei balance. 
-func GenesisBlockForTesting(db kv.RwDB, addr libcommon.Address, balance *big.Int, tmpDir string, logger log.Logger) *types.Block { +func GenesisBlockForTesting(db kv.RwDB, addr libcommon.Address, balance *big.Int, dirs datadir.Dirs, logger log.Logger) *types.Block { g := types.Genesis{Alloc: types.GenesisAlloc{addr: {Balance: balance}}, Config: params.TestChainConfig} - block := MustCommitGenesis(&g, db, tmpDir, logger) + block := MustCommitGenesis(&g, db, dirs, logger) return block } @@ -302,14 +300,14 @@ type GenAccount struct { Balance *big.Int } -func GenesisWithAccounts(db kv.RwDB, accs []GenAccount, tmpDir string, logger log.Logger) *types.Block { +func GenesisWithAccounts(db kv.RwDB, accs []GenAccount, dirs datadir.Dirs, logger log.Logger) *types.Block { g := types.Genesis{Config: params.TestChainConfig} allocs := make(map[libcommon.Address]types.GenesisAccount) for _, acc := range accs { allocs[acc.Addr] = types.GenesisAccount{Balance: acc.Balance} } g.Alloc = allocs - block := MustCommitGenesis(&g, db, tmpDir, logger) + block := MustCommitGenesis(&g, db, dirs, logger) return block } @@ -398,20 +396,6 @@ func RialtoGenesisBlock() *types.Genesis { } } -// MumbaiGenesisBlock returns the Amoy network genesis block. -func MumbaiGenesisBlock() *types.Genesis { - return &types.Genesis{ - Config: params.MumbaiChainConfig, - Nonce: 0, - Timestamp: 1558348305, - GasLimit: 10000000, - Difficulty: big.NewInt(1), - Mixhash: libcommon.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), - Coinbase: libcommon.HexToAddress("0x0000000000000000000000000000000000000000"), - Alloc: readPrealloc("allocs/mumbai.json"), - } -} - // AmoyGenesisBlock returns the Amoy network genesis block. func AmoyGenesisBlock() *types.Genesis { return &types.Genesis{ @@ -510,7 +494,10 @@ func DeveloperGenesisBlock(period uint64, faucet libcommon.Address) *types.Genes // ToBlock creates the genesis block and writes state of a genesis specification // to the given database (or discards it if nil). -func GenesisToBlock(g *types.Genesis, tmpDir string, logger log.Logger) (*types.Block, *state.IntraBlockState, error) { +func GenesisToBlock(g *types.Genesis, dirs datadir.Dirs, logger log.Logger) (*types.Block, *state.IntraBlockState, error) { + if dirs.SnapDomain == "" { + panic("empty `dirs` variable") + } _ = g.Alloc //nil-check head := &types.Header{ @@ -591,15 +578,12 @@ func GenesisToBlock(g *types.Genesis, tmpDir string, logger log.Logger) (*types. 
wg, ctx := errgroup.WithContext(ctx) // we may run inside write tx, can't open 2nd write tx in same goroutine wg.Go(func() error { - if tmpDir == "" { - tmpDir = os.TempDir() - } // some users creaing > 1Gb custome genesis by `erigon init` - genesisTmpDB := mdbx.NewMDBX(logger).InMem(tmpDir).MapSize(2 * datasize.GB).GrowthStep(1 * datasize.MB).MustOpen() + genesisTmpDB := mdbx.NewMDBX(logger).InMem(dirs.DataDir).MapSize(2 * datasize.GB).GrowthStep(1 * datasize.MB).MustOpen() defer genesisTmpDB.Close() cr := rawdb.NewCanonicalReader() - agg, err := state2.NewAggregator(context.Background(), datadir.New(tmpDir), config3.HistoryV3AggregationStep, genesisTmpDB, cr, logger) + agg, err := state2.NewAggregator(context.Background(), dirs, config3.HistoryV3AggregationStep, genesisTmpDB, cr, logger) if err != nil { return err } @@ -729,8 +713,6 @@ func GenesisBlockByChainName(chain string) *types.Genesis { return ChapelGenesisBlock() case networkname.RialtoChainName: return RialtoGenesisBlock() - case networkname.MumbaiChainName: - return MumbaiGenesisBlock() case networkname.AmoyChainName: return AmoyGenesisBlock() case networkname.BorMainnetChainName: diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index b5603c81129..9be7e938bee 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -88,19 +88,6 @@ func TruncateCanonicalHash(tx kv.RwTx, blockFrom uint64, markChainAsBad bool) er return nil } -// IsCanonicalHashDeprecated determines whether a header with the given hash is on the canonical chain. -func IsCanonicalHashDeprecated(db kv.Getter, hash common.Hash) (bool, *uint64, error) { - number := ReadHeaderNumber(db, hash) - if number == nil { - return false, nil, nil - } - canonicalHash, err := ReadCanonicalHash(db, *number) - if err != nil { - return false, nil, err - } - return canonicalHash != (common.Hash{}) && canonicalHash == hash, number, nil -} - func IsCanonicalHash(db kv.Getter, hash common.Hash, number uint64) (bool, error) { canonicalHash, err := ReadCanonicalHash(db, number) if err != nil { @@ -1226,7 +1213,7 @@ func Transitioned(db kv.Getter, blockNum uint64, terminalTotalDifficulty *big.In return false, nil } - if terminalTotalDifficulty.Cmp(common.Big0) == 0 { + if terminalTotalDifficulty.Sign() == 0 { return true, nil } header := ReadHeaderByNumber(db, blockNum) @@ -1234,7 +1221,7 @@ func Transitioned(db kv.Getter, blockNum uint64, terminalTotalDifficulty *big.In return false, nil } - if header.Difficulty.Cmp(common.Big0) == 0 { + if header.Difficulty.Sign() == 0 { return true, nil } @@ -1256,7 +1243,7 @@ func IsPosBlock(db kv.Getter, blockHash common.Hash) (trans bool, err error) { return false, nil } - return header.Difficulty.Cmp(common.Big0) == 0, nil + return header.Difficulty.Sign() == 0, nil } var SnapshotsKey = []byte("snapshots") diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go index a66fff468dd..6abd0cef448 100644 --- a/core/rawdb/accessors_chain_test.go +++ b/core/rawdb/accessors_chain_test.go @@ -257,7 +257,7 @@ func TestBlockStorage(t *testing.T) { } // prune: [1: N) - deleted := 0 + var deleted int deleted, err = bw.PruneBlocks(ctx, tx, 0, 1) require.NoError(err) require.Equal(0, deleted) @@ -732,7 +732,7 @@ func TestBlockWithdrawalsStorage(t *testing.T) { t.Fatalf("Could not write block: %v", err) } // prune: [1: N) - deleted := 0 + var deleted int deleted, err = bw.PruneBlocks(ctx, tx, 0, 1) require.NoError(err) require.Equal(0, deleted) diff --git 
a/core/rawdb/rawdbreset/reset_stages.go b/core/rawdb/rawdbreset/reset_stages.go index 9970e4360b9..067f78297a8 100644 --- a/core/rawdb/rawdbreset/reset_stages.go +++ b/core/rawdb/rawdbreset/reset_stages.go @@ -19,13 +19,14 @@ package rawdbreset import ( "context" "fmt" + "github.com/erigontech/erigon/consensus" + "github.com/erigontech/erigon-lib/chain" "github.com/erigontech/erigon-lib/common/datadir" "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon-lib/kv/backup" "github.com/erigontech/erigon-lib/log/v3" "github.com/erigontech/erigon-lib/state" - "github.com/erigontech/erigon/consensus" "github.com/erigontech/erigon/core/rawdb" "github.com/erigontech/erigon/core/rawdb/blockio" "github.com/erigontech/erigon/eth/stagedsync" @@ -86,7 +87,7 @@ func ResetBlocks(tx kv.RwTx, db kv.RoDB, agg *state.Aggregator, br services.Full return err } - if br.FreezingCfg().Enabled && br.FrozenBlocks() > 0 { + if br.FrozenBlocks() > 0 { logger.Info("filling db from snapshots", "blocks", br.FrozenBlocks()) if err := stagedsync.FillDBFromSnapshots("filling_db_from_snapshots", context.Background(), tx, dirs, br, agg, cc, engine, logger); err != nil { return err @@ -136,7 +137,7 @@ func ResetExec(ctx context.Context, db kv.RwDB, chain string, tmpDir string, log cleanupList = append(cleanupList, stateV3Buckets...) return db.Update(ctx, func(tx kv.RwTx) error { - if err := clearStageProgress(tx, stages.Execution, stages.HashState, stages.IntermediateHashes); err != nil { + if err := clearStageProgress(tx, stages.Execution); err != nil { return err } @@ -162,10 +163,8 @@ func ResetTxLookup(tx kv.RwTx) error { } var Tables = map[stages.SyncStage][]string{ - stages.HashState: {kv.HashedAccounts, kv.HashedStorage, kv.ContractCode}, - stages.IntermediateHashes: {kv.TrieOfAccounts, kv.TrieOfStorage}, - stages.CustomTrace: {}, - stages.Finish: {}, + stages.CustomTrace: {}, + stages.Finish: {}, } var stateBuckets = []string{ kv.Epoch, kv.PendingEpoch, kv.BorReceipts, diff --git a/core/snaptype/block_types.go b/core/snaptype/block_types.go index 595b3a09a9b..a698d37fdc7 100644 --- a/core/snaptype/block_types.go +++ b/core/snaptype/block_types.go @@ -51,6 +51,7 @@ func init() { snapcfg.RegisterKnownTypes(networkname.ChiadoChainName, ethereumTypes) snapcfg.RegisterKnownTypes(networkname.BSCChainName, ethereumTypes) snapcfg.RegisterKnownTypes(networkname.ChapelChainName, ethereumTypes) + snapcfg.RegisterKnownTypes(networkname.HoleskyChainName, ethereumTypes) } var Enums = struct { @@ -258,7 +259,7 @@ var ( // TODO review this code, test pass with lhs+1 <= baseTxnID.U64()+ti for body.BaseTxnID.LastSystemTx(body.TxCount) < baseTxnID.U64()+ti { // skip empty blocks; ti here is not transaction index in one block, but total transaction index counter if !bodyGetter.HasNext() { - return fmt.Errorf("not enough bodies") + return errors.New("not enough bodies") } bodyBuf, _ = bodyGetter.Next(bodyBuf[:0]) diff --git a/core/state/cached_reader.go b/core/state/cached_reader.go index 22972493a2b..133571a6751 100644 --- a/core/state/cached_reader.go +++ b/core/state/cached_reader.go @@ -17,8 +17,6 @@ package state import ( - "bytes" - "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon/core/types/accounts" @@ -76,7 +74,7 @@ func (cr *CachedReader) ReadAccountStorage(address common.Address, incarnation u // ReadAccountCode is called when code of an account needs to be fetched from the state // Usually, one of (address;incarnation) or codeHash is enough to uniquely identify the code func (cr 
*CachedReader) ReadAccountCode(address common.Address, incarnation uint64, codeHash common.Hash) ([]byte, error) { - if bytes.Equal(codeHash[:], emptyCodeHash) { + if codeHash == emptyCodeHashH { return nil, nil } if c, ok := cr.cache.GetCode(address.Bytes(), incarnation); ok { diff --git a/core/state/intra_block_state.go b/core/state/intra_block_state.go index d28c36ae598..48e4cb69db5 100644 --- a/core/state/intra_block_state.go +++ b/core/state/intra_block_state.go @@ -21,6 +21,7 @@ package state import ( + "errors" "fmt" "sort" @@ -195,7 +196,7 @@ func (sdb *IntraBlockState) AddRefund(gas uint64) { func (sdb *IntraBlockState) SubRefund(gas uint64) { sdb.journal.append(refundChange{prev: sdb.refund}) if gas > sdb.refund { - sdb.setErrorUnsafe(fmt.Errorf("refund counter below zero")) + sdb.setErrorUnsafe(errors.New("refund counter below zero")) } sdb.refund -= gas } @@ -276,7 +277,7 @@ func (sdb *IntraBlockState) GetCodeHash(addr libcommon.Address) libcommon.Hash { if stateObject == nil || stateObject.deleted { return libcommon.Hash{} } - return libcommon.BytesToHash(stateObject.CodeHash()) + return stateObject.data.CodeHash } // GetState retrieves a value from the given account's storage trie. @@ -664,7 +665,7 @@ func printAccount(EIP161Enabled bool, addr libcommon.Address, stateObject *state if isDirty && (stateObject.createdContract || !stateObject.selfdestructed) && !emptyRemoval { // Write any contract code associated with the state object if stateObject.code != nil && stateObject.dirtyCode { - fmt.Printf("UpdateCode: %x,%x\n", addr, stateObject.CodeHash()) + fmt.Printf("UpdateCode: %x,%x\n", addr, stateObject.data.CodeHash) } if stateObject.createdContract { fmt.Printf("CreateContract: %x\n", addr) diff --git a/core/state/state_object.go b/core/state/state_object.go index 3fda0a464e7..cccba589c96 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -20,7 +20,6 @@ package state import ( - "bytes" "fmt" "github.com/erigontech/erigon-lib/common/hexutil" "io" @@ -96,7 +95,7 @@ type stateObject struct { // empty returns whether the account is considered empty. func (so *stateObject) empty() bool { - return so.data.Nonce == 0 && so.data.Balance.IsZero() && bytes.Equal(so.data.CodeHash[:], emptyCodeHash) + return so.data.Nonce == 0 && so.data.Balance.IsZero() && (so.data.CodeHash == emptyCodeHashH) } // newObject creates a state object. 
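// Note on the core/state hunks above: they swap bytes.Equal(so.data.CodeHash[:], emptyCodeHash)
// for a direct == comparison against a fixed-size hash value (emptyCodeHashH).
// A minimal, self-contained sketch of why the two forms are equivalent, with
// stdlib sha256 standing in for keccak256 and illustrative names (not Erigon's):
// [32]byte values in Go are comparable with ==, which avoids building slice
// headers and calling bytes.Equal on every check.
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

func main() {
	emptyCodeHash := sha256.Sum256(nil) // hash of empty code, as a [32]byte value
	candidate := sha256.Sum256(nil)

	fmt.Println(bytes.Equal(candidate[:], emptyCodeHash[:])) // true, via slice compare
	fmt.Println(candidate == emptyCodeHash)                  // true, direct array compare
}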
@@ -487,12 +486,12 @@ func (so *stateObject) Code() []byte { if so.code != nil { return so.code } - if bytes.Equal(so.CodeHash(), emptyCodeHash) { + if so.data.CodeHash == emptyCodeHashH { return nil } - code, err := so.db.StateReader.ReadAccountCode(so.Address(), so.data.Incarnation, libcommon.BytesToHash(so.CodeHash())) + code, err := so.db.StateReader.ReadAccountCode(so.Address(), so.data.Incarnation, so.data.CodeHash) if err != nil { - so.setError(fmt.Errorf("can't load code hash %x: %w", so.CodeHash(), err)) + so.setError(fmt.Errorf("can't load code hash %x: %w", so.data.CodeHash, err)) } so.code = code return code @@ -526,10 +525,6 @@ func (so *stateObject) setNonce(nonce uint64) { so.data.Nonce = nonce } -func (so *stateObject) CodeHash() []byte { - return so.data.CodeHash[:] -} - func (so *stateObject) Balance() *uint256.Int { return &so.data.Balance } diff --git a/core/state/state_test.go b/core/state/state_test.go index d857cd2727b..c0e62cd8ab2 100644 --- a/core/state/state_test.go +++ b/core/state/state_test.go @@ -339,8 +339,8 @@ func compareStateObjects(so0, so1 *stateObject, t *testing.T) { if so0.data.Root != so1.data.Root { t.Errorf("Root mismatch: have %x, want %x", so0.data.Root[:], so1.data.Root[:]) } - if !bytes.Equal(so0.CodeHash(), so1.CodeHash()) { - t.Fatalf("CodeHash mismatch: have %v, want %v", so0.CodeHash(), so1.CodeHash()) + if so0.data.CodeHash != so1.data.CodeHash { + t.Fatalf("CodeHash mismatch: have %v, want %v", so0.data.CodeHash, so1.data.CodeHash) } if !bytes.Equal(so0.code, so1.code) { t.Fatalf("Code mismatch: have %v, want %v", so0.code, so1.code) diff --git a/core/system_contract_lookup.go b/core/system_contract_lookup.go index 7f37b88da17..77876d86bba 100644 --- a/core/system_contract_lookup.go +++ b/core/system_contract_lookup.go @@ -22,8 +22,6 @@ import ( "github.com/erigontech/erigon-lib/chain/networkname" libcommon "github.com/erigontech/erigon-lib/common" - _ "github.com/erigontech/erigon-lib/common/hexutility" - _ "github.com/erigontech/erigon/polygon/bor/borcfg" "github.com/erigontech/erigon/core/systemcontracts" "github.com/erigontech/erigon/core/types" @@ -32,7 +30,7 @@ import ( func init() { // Initialise SystemContractCodeLookup - for _, chainName := range []string{networkname.BSCChainName, networkname.ChapelChainName, networkname.RialtoChainName, networkname.BorMainnetChainName, networkname.MumbaiChainName, networkname.BorDevnetChainName} { + for _, chainName := range []string{networkname.BSCChainName, networkname.ChapelChainName, networkname.RialtoChainName, networkname.BorMainnetChainName, networkname.BorDevnetChainName} { byChain := map[libcommon.Address][]libcommon.CodeRecord{} systemcontracts.SystemContractCodeLookup[chainName] = byChain // Apply genesis with the block number 0 diff --git a/core/test/domains_restart_test.go b/core/test/domains_restart_test.go index fb9486d9933..9f40772a506 100644 --- a/core/test/domains_restart_test.go +++ b/core/test/domains_restart_test.go @@ -202,7 +202,6 @@ func Test_AggregatorV3_RestartOnDatadir_WithoutDB(t *testing.T) { domains.Close() agg.Close() db.Close() - db = nil // ======== delete DB, reset domains ======== ffs := os.DirFS(datadir) @@ -393,7 +392,6 @@ func Test_AggregatorV3_RestartOnDatadir_WithoutAnything(t *testing.T) { domains.Close() agg.Close() db.Close() - db = nil // ======== delete datadir and restart domains ======== err = os.RemoveAll(datadir) diff --git a/core/types/authorization.go b/core/types/authorization.go index 3cd545e59a4..93b1ce11062 100644 --- 
a/core/types/authorization.go +++ b/core/types/authorization.go @@ -100,7 +100,7 @@ func (ath *Authorization) RecoverSigner(data *bytes.Buffer, b []byte) (*libcommo } if !crypto.ValidateSignatureValues(sig[64], &ath.R, &ath.S, false) { - return nil, fmt.Errorf("invalid signature") + return nil, errors.New("invalid signature") } pubkey, err := crypto.Ecrecover(hash.Bytes(), sig[:]) diff --git a/core/types/blob_tx.go b/core/types/blob_tx.go index fad937dc37b..a2f4f78a660 100644 --- a/core/types/blob_tx.go +++ b/core/types/blob_tx.go @@ -82,7 +82,7 @@ func (stx *BlobTx) AsMessage(s Signer, baseFee *big.Int, rules *chain.Rules) (Me if baseFee != nil { overflow := msg.gasPrice.SetFromBig(baseFee) if overflow { - return msg, fmt.Errorf("gasPrice higher than 2^256-1") + return msg, errors.New("gasPrice higher than 2^256-1") } } msg.gasPrice.Add(&msg.gasPrice, stx.Tip) @@ -361,7 +361,7 @@ func (stx *BlobTx) DecodeRLP(s *rlp.Stream) error { return err } if len(stx.BlobVersionedHashes) == 0 { - return fmt.Errorf("a blob stx must contain at least one blob") + return errors.New("a blob stx must contain at least one blob") } // decode V if b, err = s.Uint256Bytes(); err != nil { diff --git a/core/types/blob_tx_wrapper.go b/core/types/blob_tx_wrapper.go index 458a8bde853..03264539187 100644 --- a/core/types/blob_tx_wrapper.go +++ b/core/types/blob_tx_wrapper.go @@ -17,6 +17,7 @@ package types import ( + "errors" "fmt" rlp2 "github.com/erigontech/erigon-lib/rlp" "github.com/ethereum/go-ethereum/common/hexutil" @@ -414,7 +415,7 @@ func (txw *BlobTxWrapper) ValidateBlobTransactionWrapper() error { blobTx := txw.Tx l1 := len(blobTx.BlobVersionedHashes) if l1 == 0 { - return fmt.Errorf("a blob txn must contain at least one blob") + return errors.New("a blob txn must contain at least one blob") } l2 := len(txw.Commitments) l3 := len(txw.Blobs) diff --git a/core/types/consolidation_request.go b/core/types/consolidation_request.go index c2e73b9dd67..a0b76fd2524 100644 --- a/core/types/consolidation_request.go +++ b/core/types/consolidation_request.go @@ -18,9 +18,13 @@ package types import ( "bytes" + "encoding/json" + "errors" "io" libcommon "github.com/erigontech/erigon-lib/common" + "github.com/erigontech/erigon-lib/common/hexutil" + "github.com/erigontech/erigon-lib/common/hexutility" rlp2 "github.com/erigontech/erigon-lib/rlp" "github.com/erigontech/erigon/rlp" ) @@ -32,6 +36,12 @@ type ConsolidationRequest struct { TargetPubKey [BLSPubKeyLen]byte } +type ConsolidationRequestJson struct { + SourceAddress libcommon.Address `json:"sourceAddress"` + SourcePubKey string `json:"sourcePubkey"` + TargetPubKey string `json:"targetPubkey"` +} + func (w *ConsolidationRequest) RequestType() byte { return ConsolidationRequestType } @@ -68,6 +78,42 @@ func (w *ConsolidationRequest) EncodeRLP(b io.Writer) (err error) { return } +func (d *ConsolidationRequest) MarshalJSON() ([]byte, error) { + tt := ConsolidationRequestJson{ + SourceAddress: d.SourceAddress, + SourcePubKey: hexutility.Encode(d.SourcePubKey[:]), + TargetPubKey: hexutility.Encode(d.TargetPubKey[:]), + } + return json.Marshal(tt) +} + +func (d *ConsolidationRequest) UnmarshalJSON(input []byte) error { + tt := ConsolidationRequestJson{} + err := json.Unmarshal(input, &tt) + if err != nil { + return err + } + sourceKey, err := hexutil.Decode(tt.SourcePubKey) + if err != nil { + return err + } + if len(sourceKey) != BLSPubKeyLen { + return errors.New("ConsolidationRequest SourcePubKey len not equal to BLSPubKeyLen after UnmarshalJSON") + + } + targetKey, err :=
hexutil.Decode(tt.TargetPubKey) + if err != nil { + return err + } + if len(targetKey) != BLSPubKeyLen { + return errors.New("ConsolidationRequest TargetPubKey len not equal to BLSPubKeyLen after UnmarshalJSON") + } + d.SourceAddress = tt.SourceAddress + d.SourcePubKey = [BLSPubKeyLen]byte(sourceKey) + d.TargetPubKey = [BLSPubKeyLen]byte(targetKey) + return nil +} + func (w *ConsolidationRequest) DecodeRLP(input []byte) error { return rlp.DecodeBytes(input[1:], w) } func (w *ConsolidationRequest) copy() Request { return &ConsolidationRequest{ diff --git a/core/types/deposit_request.go b/core/types/deposit_request.go index d8e3d339137..33638921322 100644 --- a/core/types/deposit_request.go +++ b/core/types/deposit_request.go @@ -19,10 +19,15 @@ package types import ( "bytes" "encoding/binary" + "encoding/json" + "errors" "fmt" "io" libcommon "github.com/erigontech/erigon-lib/common" + "github.com/erigontech/erigon-lib/common/hexutil" + "github.com/erigontech/erigon-lib/common/hexutility" + rlp2 "github.com/erigontech/erigon-lib/rlp" "github.com/erigontech/erigon/accounts/abi" "github.com/erigontech/erigon/rlp" @@ -48,11 +53,19 @@ var ( ) type DepositRequest struct { - Pubkey [BLSPubKeyLen]byte `json:"pubkey"` // public key of validator - WithdrawalCredentials libcommon.Hash `json:"withdrawalCredentials"` // beneficiary of the validator - Amount uint64 `json:"amount"` // deposit size in Gwei - Signature [BLSSigLen]byte `json:"signature"` // signature over deposit msg - Index uint64 `json:"index"` // deposit count value + Pubkey [BLSPubKeyLen]byte // public key of validator + WithdrawalCredentials libcommon.Hash // beneficiary of the validator + Amount uint64 // deposit size in Gwei + Signature [BLSSigLen]byte // signature over deposit msg + Index uint64 // deposit count value +} + +type DepositRequestJson struct { + Pubkey string `json:"pubkey"` + WithdrawalCredentials libcommon.Hash `json:"withdrawalCredentials"` + Amount hexutil.Uint64 `json:"amount"` + Signature string `json:"signature"` + Index hexutil.Uint64 `json:"index"` } func (d *DepositRequest) RequestType() byte { return DepositRequestType } @@ -110,6 +123,46 @@ func (d *DepositRequest) EncodingSize() (encodingSize int) { return } +func (d *DepositRequest) MarshalJSON() ([]byte, error) { + tt := DepositRequestJson{ + Pubkey: hexutility.Encode(d.Pubkey[:]), + WithdrawalCredentials: d.WithdrawalCredentials, + Amount: hexutil.Uint64(d.Amount), + Signature: hexutility.Encode(d.Signature[:]), + Index: hexutil.Uint64(d.Index), + } + return json.Marshal(tt) +} + +func (d *DepositRequest) UnmarshalJSON(input []byte) error { + tt := DepositRequestJson{} + err := json.Unmarshal(input, &tt) + if err != nil { + return err + } + pubkey, err := hexutil.Decode(tt.Pubkey) + if err != nil { + return err + } + if len(pubkey) != BLSPubKeyLen { + return errors.New("DepositRequest Pubkey len not equal to BLSPubKeyLen after UnmarshalJSON") + } + sig, err := hexutil.Decode(tt.Signature) + if err != nil { + return err + } + if len(sig) != BLSSigLen { + return errors.New("DepositRequest Signature len not equal to BLSSigLen after UnmarshalJSON") + } + + d.Pubkey = [BLSPubKeyLen]byte(pubkey) + d.Signature = [BLSSigLen]byte(sig) + d.WithdrawalCredentials = tt.WithdrawalCredentials + d.Amount = tt.Amount.Uint64() + d.Index = tt.Index.Uint64() + return nil +} + // field type overrides for abi unpacking type depositUnpacking struct { Pubkey []byte diff --git a/core/types/dynamic_fee_tx.go b/core/types/dynamic_fee_tx.go index 7cd85881e08..38b787bd7e6 100644 ---
a/core/types/dynamic_fee_tx.go +++ b/core/types/dynamic_fee_tx.go @@ -358,7 +358,7 @@ func (tx *DynamicFeeTransaction) AsMessage(s Signer, baseFee *big.Int, rules *ch if baseFee != nil { overflow := msg.gasPrice.SetFromBig(baseFee) if overflow { - return msg, fmt.Errorf("gasPrice higher than 2^256-1") + return msg, errors.New("gasPrice higher than 2^256-1") } } msg.gasPrice.Add(&msg.gasPrice, tx.Tip) diff --git a/core/types/log_test.go b/core/types/log_test.go index 1ea5275c671..e90c9b693c0 100644 --- a/core/types/log_test.go +++ b/core/types/log_test.go @@ -21,7 +21,7 @@ package types import ( "encoding/json" - "fmt" + "errors" "reflect" "testing" @@ -104,7 +104,7 @@ var unmarshalLogTests = map[string]struct { }, "missing data": { input: `{"address":"0xecf8f87f810ecf450940c9f60066b4a7a501d6a7","blockHash":"0x656c34545f90a730a19008c0e7a7cd4fb3895064b48d6d69761bd5abad681056","blockNumber":"0x1ecfa4","timestamp":"0x57a53d3a","logIndex":"0x2","topics":["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef","0x00000000000000000000000080b2c9d7cbbf30a1b0fc8983c647d754c6525615","0x000000000000000000000000f9dff387dcb5cc4cca5b91adb07a95f54e9f1bb6"],"transactionHash":"0x3b198bfd5d2907285af009e9ae84a0ecd63677110d89d7e030251acb87f6487e","transactionIndex":"0x3"}`, - wantError: fmt.Errorf("missing required field 'data' for Log"), + wantError: errors.New("missing required field 'data' for Log"), }, } diff --git a/core/types/receipt.go b/core/types/receipt.go index bfbfb0701a3..ed88a6df26e 100644 --- a/core/types/receipt.go +++ b/core/types/receipt.go @@ -456,7 +456,7 @@ func (r *Receipt) DeriveFieldsV3ForSingleReceipt(txnIdx int, blockHash libcommon sender, ok := txn.cachedSender() if !ok { - return fmt.Errorf("tx must have cached sender") + return errors.New("tx must have cached sender") } blockNumber := new(big.Int).SetUint64(blockNum) diff --git a/core/types/request.go b/core/types/request.go index c34aae02183..c8d7cfa6c22 100644 --- a/core/types/request.go +++ b/core/types/request.go @@ -42,7 +42,7 @@ type Request interface { func decode(data []byte) (Request, error) { if len(data) <= 1 { - return nil, fmt.Errorf("error: too short type request") + return nil, errors.New("error: too short type request") } var req Request switch data[0] { @@ -81,9 +81,9 @@ func (r *Requests) DecodeRLP(s *rlp.Stream) (err error) { } switch kind { case rlp.List: - return fmt.Errorf("error: untyped request (unexpected lit)") + return errors.New("error: untyped request (unexpected list)") case rlp.Byte: - return fmt.Errorf("error: too short request") + return errors.New("error: too short request") default: var buf []byte if buf, err = s.Bytes(); err != nil { diff --git a/core/types/transaction.go b/core/types/transaction.go index 0b7a413d552..a5be3bef561 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -175,7 +175,7 @@ func DecodeTransaction(data []byte) (Transaction, error) { return nil, err } if s.Remaining() != 0 { - return nil, fmt.Errorf("trailing bytes after rlp encoded transaction") + return nil, errors.New("trailing bytes after rlp encoded transaction") } return tx, nil } @@ -211,7 +211,7 @@ func UnmarshalTransactionFromBinary(data []byte, blobTxnsAreWrappedWithBlobs boo return nil, err } if s.Remaining() != 0 { - return nil, fmt.Errorf("trailing bytes after rlp encoded transaction") + return nil, errors.New("trailing bytes after rlp encoded transaction") } return t, nil } diff --git a/core/types/transaction_marshalling.go b/core/types/transaction_marshalling.go index
ccd4263ef3c..d95cca228e1 100644 --- a/core/types/transaction_marshalling.go +++ b/core/types/transaction_marshalling.go @@ -296,21 +296,21 @@ func (tx *LegacyTx) UnmarshalJSON(input []byte) error { } overflow = tx.V.SetFromBig(dec.V.ToInt()) if overflow { - return fmt.Errorf("dec.V higher than 2^256-1") + return errors.New("dec.V higher than 2^256-1") } if dec.R == nil { return errors.New("missing required field 'r' in transaction") } overflow = tx.R.SetFromBig(dec.R.ToInt()) if overflow { - return fmt.Errorf("dec.R higher than 2^256-1") + return errors.New("dec.R higher than 2^256-1") } if dec.S == nil { return errors.New("missing required field 's' in transaction") } overflow = tx.S.SetFromBig(dec.S.ToInt()) if overflow { - return fmt.Errorf("dec.S higher than 2^256-1") + return errors.New("dec.S higher than 2^256-1") } if overflow { return errors.New("'s' in transaction does not fit in 256 bits") @@ -375,21 +375,21 @@ func (tx *AccessListTx) UnmarshalJSON(input []byte) error { } overflow = tx.V.SetFromBig(dec.V.ToInt()) if overflow { - return fmt.Errorf("dec.V higher than 2^256-1") + return errors.New("dec.V higher than 2^256-1") } if dec.R == nil { return errors.New("missing required field 'r' in transaction") } overflow = tx.R.SetFromBig(dec.R.ToInt()) if overflow { - return fmt.Errorf("dec.R higher than 2^256-1") + return errors.New("dec.R higher than 2^256-1") } if dec.S == nil { return errors.New("missing required field 's' in transaction") } overflow = tx.S.SetFromBig(dec.S.ToInt()) if overflow { - return fmt.Errorf("dec.S higher than 2^256-1") + return errors.New("dec.S higher than 2^256-1") } withSignature := !tx.V.IsZero() || !tx.R.IsZero() || !tx.S.IsZero() if withSignature { @@ -451,21 +451,21 @@ func (tx *DynamicFeeTransaction) unmarshalJson(dec txJSON) error { } overflow = tx.V.SetFromBig(dec.V.ToInt()) if overflow { - return fmt.Errorf("dec.V higher than 2^256-1") + return errors.New("dec.V higher than 2^256-1") } if dec.R == nil { return errors.New("missing required field 'r' in transaction") } overflow = tx.R.SetFromBig(dec.R.ToInt()) if overflow { - return fmt.Errorf("dec.R higher than 2^256-1") + return errors.New("dec.R higher than 2^256-1") } if dec.S == nil { return errors.New("missing required field 's' in transaction") } overflow = tx.S.SetFromBig(dec.S.ToInt()) if overflow { - return fmt.Errorf("dec.S higher than 2^256-1") + return errors.New("dec.S higher than 2^256-1") } if overflow { return errors.New("'s' in transaction does not fit in 256 bits") @@ -581,21 +581,21 @@ func UnmarshalBlobTxJSON(input []byte) (Transaction, error) { } overflow = tx.V.SetFromBig(dec.V.ToInt()) if overflow { - return nil, fmt.Errorf("dec.V higher than 2^256-1") + return nil, errors.New("dec.V higher than 2^256-1") } if dec.R == nil { return nil, errors.New("missing required field 'r' in transaction") } overflow = tx.R.SetFromBig(dec.R.ToInt()) if overflow { - return nil, fmt.Errorf("dec.R higher than 2^256-1") + return nil, errors.New("dec.R higher than 2^256-1") } if dec.S == nil { return nil, errors.New("missing required field 's' in transaction") } overflow = tx.S.SetFromBig(dec.S.ToInt()) if overflow { - return nil, fmt.Errorf("dec.S higher than 2^256-1") + return nil, errors.New("dec.S higher than 2^256-1") } withSignature := !tx.V.IsZero() || !tx.R.IsZero() || !tx.S.IsZero() diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go index ee65bdb1fef..8cc8e225cd2 100644 --- a/core/types/transaction_signing.go +++ b/core/types/transaction_signing.go @@ 
-45,7 +45,7 @@ func MakeSigner(config *chain.Config, blockNumber uint64, blockTime uint64) *Sig if config.ChainID != nil { overflow := chainId.SetFromBig(config.ChainID) if overflow { - panic(fmt.Errorf("chainID higher than 2^256-1")) + panic("chainID higher than 2^256-1") } } signer.unprotected = true @@ -108,7 +108,7 @@ func LatestSigner(config *chain.Config) *Signer { signer.unprotected = true chainId, overflow := uint256.FromBig(config.ChainID) if overflow { - panic(fmt.Errorf("chainID higher than 2^256-1")) + panic("chainID higher than 2^256-1") } signer.chainID.Set(chainId) signer.chainIDMul.Mul(chainId, u256.Num2) @@ -147,7 +147,7 @@ func LatestSignerForChainID(chainID *big.Int) *Signer { } chainId, overflow := uint256.FromBig(chainID) if overflow { - panic(fmt.Errorf("chainID higher than 2^256-1")) + panic("chainID higher than 2^256-1") } signer.chainID.Set(chainId) signer.chainIDMul.Mul(chainId, u256.Num2) diff --git a/core/types/withdrawal_request.go b/core/types/withdrawal_request.go index e8e491811ea..5113f4450d7 100644 --- a/core/types/withdrawal_request.go +++ b/core/types/withdrawal_request.go @@ -18,10 +18,13 @@ package types import ( "bytes" - // "fmt" + "encoding/json" + "errors" "io" libcommon "github.com/erigontech/erigon-lib/common" + "github.com/erigontech/erigon-lib/common/hexutil" + "github.com/erigontech/erigon-lib/common/hexutility" rlp2 "github.com/erigontech/erigon-lib/rlp" "github.com/erigontech/erigon/rlp" ) @@ -33,6 +36,12 @@ type WithdrawalRequest struct { Amount uint64 } +type WithdrawalRequestJson struct { + SourceAddress libcommon.Address `json:"sourceAddress"` + ValidatorPubkey string `json:"validatorPubkey"` + Amount hexutil.Uint64 `json:"amount"` +} + func (w *WithdrawalRequest) RequestType() byte { return WithdrawalRequestType } @@ -81,6 +90,36 @@ func (w *WithdrawalRequest) copy() Request { } } +func (w *WithdrawalRequest) MarshalJSON() ([]byte, error) { + tt := WithdrawalRequestJson{ + SourceAddress: w.SourceAddress, + ValidatorPubkey: hexutility.Encode(w.ValidatorPubkey[:]), + Amount: hexutil.Uint64(w.Amount), + } + return json.Marshal(tt) +} + +func (w *WithdrawalRequest) UnmarshalJSON(input []byte) error { + tt := WithdrawalRequestJson{} + err := json.Unmarshal(input, &tt) + if err != nil { + return err + } + + validatorKey, err := hexutil.Decode(tt.ValidatorPubkey) + if err != nil { + return err + } + if len(validatorKey) != BLSPubKeyLen { + return errors.New("WithdrawalRequest ValidatorPubkey len after UnmarshalJSON doesn't match BLSPubKeyLen") + } + + w.ValidatorPubkey = [BLSPubKeyLen]byte(validatorKey) + w.Amount = tt.Amount.Uint64() + w.SourceAddress = tt.SourceAddress + return nil +} + type WithdrawalRequests []*WithdrawalRequest // Len returns the length of s.
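// The MarshalJSON/UnmarshalJSON pairs added above (WithdrawalRequest here,
// ConsolidationRequest and DepositRequest earlier) all follow one pattern:
// marshal through a "shadow" struct whose fixed-size byte arrays become hex
// strings, then validate the decoded length before copying back into the
// array. A self-contained sketch of that pattern under assumed, illustrative
// names (pubKeyLen, request, requestJSON are not Erigon's):
package main

import (
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"strings"
)

const pubKeyLen = 48 // stand-in for BLSPubKeyLen

type request struct {
	Pubkey [pubKeyLen]byte
	Amount uint64
}

type requestJSON struct {
	Pubkey string `json:"pubkey"`
	Amount uint64 `json:"amount"`
}

func (r *request) MarshalJSON() ([]byte, error) {
	return json.Marshal(requestJSON{Pubkey: "0x" + hex.EncodeToString(r.Pubkey[:]), Amount: r.Amount})
}

func (r *request) UnmarshalJSON(input []byte) error {
	var tt requestJSON
	if err := json.Unmarshal(input, &tt); err != nil {
		return err
	}
	// hexutil.Decode requires the 0x prefix; plain hex.DecodeString does not.
	key, err := hex.DecodeString(strings.TrimPrefix(tt.Pubkey, "0x"))
	if err != nil {
		return err
	}
	// Length check before the slice-to-array conversion, mirroring the diff.
	if len(key) != pubKeyLen {
		return errors.New("pubkey len not equal to pubKeyLen after UnmarshalJSON")
	}
	r.Pubkey = [pubKeyLen]byte(key) // conversion needs Go 1.20+
	r.Amount = tt.Amount
	return nil
}

func main() {
	in := request{Amount: 32}
	raw, err := json.Marshal(&in)
	if err != nil {
		panic(err)
	}
	var out request
	if err := json.Unmarshal(raw, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Amount, out.Pubkey == in.Pubkey) // 32 true
}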
diff --git a/core/vm/absint_cfg.go b/core/vm/absint_cfg.go index 43eb4681496..d2a4c3cb183 100644 --- a/core/vm/absint_cfg.go +++ b/core/vm/absint_cfg.go @@ -291,7 +291,7 @@ func (state *astate) String(abbrev bool) string { if len(values) > 1 { e = fmt.Sprintf("{%v}", strings.Join(elm, ",")) } else { - e = fmt.Sprintf("%v", strings.Join(elm, ",")) + e = strings.Join(elm, ",") } elms = append(elms, e) } diff --git a/core/vm/eips.go b/core/vm/eips.go index 4ed10b4cf0d..ea0903297c8 100644 --- a/core/vm/eips.go +++ b/core/vm/eips.go @@ -22,6 +22,7 @@ package vm import ( "fmt" "sort" + "strconv" "github.com/holiman/uint256" @@ -66,7 +67,7 @@ func ValidEip(eipNum int) bool { func ActivateableEips() []string { var nums []string //nolint:prealloc for k := range activators { - nums = append(nums, fmt.Sprintf("%d", k)) + nums = append(nums, strconv.Itoa(k)) } sort.Strings(nums) return nums diff --git a/core/vm/evm.go b/core/vm/evm.go index bb3901ac230..7881b226e98 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -32,11 +32,10 @@ import ( "github.com/erigontech/erigon/core/vm/evmtypes" "github.com/erigontech/erigon/crypto" "github.com/erigontech/erigon/params" + "github.com/erigontech/erigon/turbo/trie" ) -// emptyCodeHash is used by create to ensure deployment is disallowed to already -// deployed contract addresses (relevant after the account abstraction). -var emptyCodeHash = crypto.Keccak256Hash(nil) +var emptyHash = libcommon.Hash{} func (evm *EVM) precompile(addr libcommon.Address) (PrecompiledContract, bool) { var precompiles map[libcommon.Address]PrecompiledContract @@ -363,7 +362,7 @@ func NewCodeAndHash(code []byte) *codeAndHash { } func (c *codeAndHash) Hash() libcommon.Hash { - if c.hash == (libcommon.Hash{}) { + if c.hash == emptyHash { c.hash = crypto.Keccak256Hash(c.code) } return c.hash @@ -421,7 +420,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gasRemainin } // Ensure there's no existing contract already at the designated address contractHash := evm.intraBlockState.GetCodeHash(address) - if evm.intraBlockState.GetNonce(address) != 0 || (contractHash != (libcommon.Hash{}) && contractHash != emptyCodeHash) { + if evm.intraBlockState.GetNonce(address) != 0 || (contractHash != (libcommon.Hash{}) && contractHash != trie.EmptyCodeHash) { err = ErrContractAddressCollision return nil, libcommon.Address{}, 0, err } diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 247788fb0e2..02df0a3b41e 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -20,6 +20,7 @@ package vm import ( + "errors" "fmt" "math" @@ -205,21 +206,13 @@ func opByte(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt func opAddmod(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { x, y, z := scope.Stack.Pop(), scope.Stack.Pop(), scope.Stack.Peek() - if z.IsZero() { - z.Clear() - } else { - z.AddMod(&x, &y, z) - } + z.AddMod(&x, &y, z) return nil, nil } func opMulmod(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { x, y, z := scope.Stack.Pop(), scope.Stack.Pop(), scope.Stack.Peek() - if z.IsZero() { - z.Clear() - } else { - z.MulMod(&x, &y, z) - } + z.MulMod(&x, &y, z) return nil, nil } @@ -518,7 +511,7 @@ func opDifficulty(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) var overflow bool v, overflow = uint256.FromBig(interpreter.evm.Context.Difficulty) if overflow { - return nil, fmt.Errorf("interpreter.evm.Context.Difficulty higher than 2^256-1") + return nil, 
errors.New("interpreter.evm.Context.Difficulty higher than 2^256-1") } } scope.Stack.Push(v) diff --git a/crypto/crypto.go b/crypto/crypto.go index a0e8ebc4e0e..01cd6c06ae4 100644 --- a/crypto/crypto.go +++ b/crypto/crypto.go @@ -157,11 +157,11 @@ func toECDSA(d []byte, strict bool) (*ecdsa.PrivateKey, error) { // The priv.D must < N if priv.D.Cmp(secp256k1NBig) >= 0 { - return nil, fmt.Errorf("invalid private key, >=N") + return nil, errors.New("invalid private key, >=N") } // The priv.D must not be zero or negative. if priv.D.Sign() <= 0 { - return nil, fmt.Errorf("invalid private key, zero or negative") + return nil, errors.New("invalid private key, zero or negative") } priv.PublicKey.X, priv.PublicKey.Y = priv.PublicKey.Curve.ScalarBaseMult(d) @@ -248,7 +248,7 @@ func LoadECDSA(file string) (*ecdsa.PrivateKey, error) { if err != nil { return nil, err } else if n != len(buf) { - return nil, fmt.Errorf("key file too short, want 64 hex characters") + return nil, errors.New("key file too short, want 64 hex characters") } if err := checkKeyFileEnd(r); err != nil { return nil, err diff --git a/crypto/ecies/ecies.go b/crypto/ecies/ecies.go index abb1c7ffa0e..40383f98eff 100644 --- a/crypto/ecies/ecies.go +++ b/crypto/ecies/ecies.go @@ -36,7 +36,7 @@ import ( "crypto/hmac" "crypto/subtle" "encoding/binary" - "fmt" + "errors" "hash" "io" "math/big" @@ -45,11 +45,11 @@ import ( ) var ( - ErrImport = fmt.Errorf("ecies: failed to import key") - ErrInvalidCurve = fmt.Errorf("ecies: invalid elliptic curve") - ErrInvalidPublicKey = fmt.Errorf("ecies: invalid public key") - ErrSharedKeyIsPointAtInfinity = fmt.Errorf("ecies: shared key is point at infinity") - ErrSharedKeyTooBig = fmt.Errorf("ecies: shared key params are too big") + ErrImport = errors.New("ecies: failed to import key") + ErrInvalidCurve = errors.New("ecies: invalid elliptic curve") + ErrInvalidPublicKey = errors.New("ecies: invalid public key") + ErrSharedKeyIsPointAtInfinity = errors.New("ecies: shared key is point at infinity") + ErrSharedKeyTooBig = errors.New("ecies: shared key params are too big") ) // PublicKey is a representation of an elliptic curve public key. @@ -140,8 +140,8 @@ func (prv *PrivateKey) GenerateShared(pub *PublicKey, skLen, macLen int) (sk []b } var ( - ErrSharedTooLong = fmt.Errorf("ecies: shared secret is too long") - ErrInvalidMessage = fmt.Errorf("ecies: invalid message") + ErrSharedTooLong = errors.New("ecies: shared secret is too long") + ErrInvalidMessage = errors.New("ecies: invalid message") ) // NIST SP 800-56 Concatenation Key Derivation Function (see section 5.8.1). diff --git a/crypto/ecies/ecies_test.go b/crypto/ecies/ecies_test.go index a0780f2a637..c907e6ccb68 100644 --- a/crypto/ecies/ecies_test.go +++ b/crypto/ecies/ecies_test.go @@ -35,8 +35,8 @@ import ( "crypto/rand" "crypto/sha256" "encoding/hex" + "errors" "flag" - "fmt" "math/big" "os" "testing" @@ -74,7 +74,7 @@ func TestKDF(t *testing.T) { } } -var ErrBadSharedKeys = fmt.Errorf("ecies: shared keys don't match") +var ErrBadSharedKeys = errors.New("ecies: shared keys don't match") // cmpParams compares a set of ECIES parameters. We assume, as per the // docs, that AES is the only supported symmetric encryption algorithm. 
diff --git a/crypto/ecies/params.go b/crypto/ecies/params.go index 756f5343be4..5e69fa5e6bb 100644 --- a/crypto/ecies/params.go +++ b/crypto/ecies/params.go @@ -39,6 +39,7 @@ import ( "crypto/elliptic" "crypto/sha256" "crypto/sha512" + "errors" "fmt" "hash" @@ -47,8 +48,8 @@ import ( var ( DefaultCurve = ethcrypto.S256() - ErrUnsupportedECDHAlgorithm = fmt.Errorf("ecies: unsupported ECDH algorithm") - ErrUnsupportedECIESParameters = fmt.Errorf("ecies: unsupported ECIES parameters") + ErrUnsupportedECDHAlgorithm = errors.New("ecies: unsupported ECDH algorithm") + ErrUnsupportedECIESParameters = errors.New("ecies: unsupported ECIES parameters") ErrInvalidKeyLen = fmt.Errorf("ecies: invalid key size (> %d) in ECIESParams", maxKeyLen) ) diff --git a/crypto/signature_cgo.go b/crypto/signature_cgo.go index f45d44d2e06..e818aeb3680 100644 --- a/crypto/signature_cgo.go +++ b/crypto/signature_cgo.go @@ -24,6 +24,7 @@ package crypto import ( "crypto/ecdsa" "crypto/elliptic" + "errors" "fmt" "github.com/erigontech/secp256k1" @@ -80,7 +81,7 @@ func VerifySignature(pubkey, digestHash, signature []byte) bool { func DecompressPubkey(pubkey []byte) (*ecdsa.PublicKey, error) { x, y := secp256k1.DecompressPubkey(pubkey) if x == nil { - return nil, fmt.Errorf("invalid public key") + return nil, errors.New("invalid public key") } return &ecdsa.PublicKey{X: x, Y: y, Curve: S256()}, nil } diff --git a/crypto/signature_nocgo.go b/crypto/signature_nocgo.go index 5708dff64fd..4b7f042681b 100644 --- a/crypto/signature_nocgo.go +++ b/crypto/signature_nocgo.go @@ -75,12 +75,12 @@ func Sign(hash []byte, prv *ecdsa.PrivateKey) ([]byte, error) { return nil, fmt.Errorf("hash is required to be exactly 32 bytes (%d)", len(hash)) } if prv.Curve != btcec.S256() { - return nil, fmt.Errorf("private key curve is not secp256k1") + return nil, errors.New("private key curve is not secp256k1") } // ecdsa.PrivateKey -> btcec.PrivateKey var priv btcec.PrivateKey if overflow := priv.Key.SetByteSlice(prv.D.Bytes()); overflow || priv.Key.IsZero() { - return nil, fmt.Errorf("invalid private key") + return nil, errors.New("invalid private key") } defer priv.Zero() sig, err := btc_ecdsa.SignCompact(&priv, hash, false) // ref uncompressed pubkey diff --git a/diagnostics/logs.go b/diagnostics/logs.go index 8cb090020e5..b2765ac5f04 100644 --- a/diagnostics/logs.go +++ b/diagnostics/logs.go @@ -119,7 +119,7 @@ func writeLogsRead(w http.ResponseWriter, r *http.Request, dirPath string) { } if fileInfo.IsDir() { - http.Error(w, fmt.Sprintf("%s is a directory, needs to be a file", file), http.StatusInternalServerError) + http.Error(w, file+" is a directory, needs to be a file", http.StatusInternalServerError) return } diff --git a/diagnostics/setup.go b/diagnostics/setup.go index 5958f81f009..9518e8c4e81 100644 --- a/diagnostics/setup.go +++ b/diagnostics/setup.go @@ -133,4 +133,5 @@ func SetupEndpoints(ctx *cli.Context, node *node.ErigonNode, diagMux *http.Serve SetupMemAccess(diagMux) SetupHeadersAccess(diagMux, diagnostic) SetupBodiesAccess(diagMux, diagnostic) + SetupSysInfoAccess(diagMux, diagnostic) } diff --git a/diagnostics/snapshot_sync.go b/diagnostics/snapshot_sync.go index a85b24eeaf6..b06a4457666 100644 --- a/diagnostics/snapshot_sync.go +++ b/diagnostics/snapshot_sync.go @@ -39,12 +39,6 @@ func SetupStagesAccess(metricsMux *http.ServeMux, diag *diaglib.DiagnosticClient writeFilesList(w, diag) }) - metricsMux.HandleFunc("/hardware-info", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", 
"*") - w.Header().Set("Content-Type", "application/json") - writeHardwareInfo(w, diag) - }) - metricsMux.HandleFunc("/resources-usage", func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Content-Type", "application/json") @@ -80,10 +74,6 @@ func writeFilesList(w http.ResponseWriter, diag *diaglib.DiagnosticClient) { diag.SnapshotFilesListJson(w) } -func writeHardwareInfo(w http.ResponseWriter, diag *diaglib.DiagnosticClient) { - diag.HardwareInfoJson(w) -} - func writeSyncStages(w http.ResponseWriter, diag *diaglib.DiagnosticClient) { diag.SyncStagesJson(w) } diff --git a/diagnostics/sysinfo.go b/diagnostics/sysinfo.go new file mode 100644 index 00000000000..4133a3ee610 --- /dev/null +++ b/diagnostics/sysinfo.go @@ -0,0 +1,39 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . + +package diagnostics + +import ( + "net/http" + + diaglib "github.com/erigontech/erigon-lib/diagnostics" +) + +func SetupSysInfoAccess(metricsMux *http.ServeMux, diag *diaglib.DiagnosticClient) { + if metricsMux == nil { + return + } + + metricsMux.HandleFunc("/hardware-info", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Content-Type", "application/json") + writeHardwareInfo(w, diag) + }) +} + +func writeHardwareInfo(w http.ResponseWriter, diag *diaglib.DiagnosticClient) { + diag.HardwareInfoJson(w) +} diff --git a/docs/programmers_guide/dupsort.md b/docs/programmers_guide/dupsort.md index aa6086e96bd..d24bebc799f 100644 --- a/docs/programmers_guide/dupsort.md +++ b/docs/programmers_guide/dupsort.md @@ -154,7 +154,7 @@ This article target is to show tricky concepts on examples. Future reading [here](./db_walkthrough.MD#table-history-of-accounts) Erigon supports multiple typed cursors, see the [KV -Readme.md](https://github.com/erigontech/erigon-lib/tree/main/kv) +Readme.md](https://github.com/erigontech/erigon/tree/main/erigon-lib/kv) diff --git a/docs/programmers_guide/guide.md b/docs/programmers_guide/guide.md index 19606453f65..88dbf41049d 100644 --- a/docs/programmers_guide/guide.md +++ b/docs/programmers_guide/guide.md @@ -359,7 +359,7 @@ neighbours are 1 and 3. Therefore, this key will emit the opcode prefix group). The following, optional, part of the step only happens if the common prefix of the current key and the preceding key is -longer or equal than the common prefix of the current key and the succeeding key, in other words, if at least one prefix +longer than the common prefix of the current key and the succeeding key, in other words, if at least one prefix group needs to be "closed". Closing a prefix group means first emitting opcode `BRANCH` or `BRANCHHASH`. The value for the operand is taken from the item in the `groups` slice, which corresponds to the length of the prefix for this group. 
Once value is taken, `groups` slice is trimmed to remove the used item. Secondly, closing a prefix group means invoking diff --git a/erigon-lib/chain/chain_config.go b/erigon-lib/chain/chain_config.go index a2a033b27d0..15a7bfa76f7 100644 --- a/erigon-lib/chain/chain_config.go +++ b/erigon-lib/chain/chain_config.go @@ -101,7 +101,7 @@ type Config struct { // (Optional) deposit contract of PoS chains // See also EIP-6110: Supply validator deposits on chain - DepositContract common.Address `json:"depositContract,omitempty"` + DepositContract common.Address `json:"depositContractAddress,omitempty"` // Various consensus engines Ethash *EthashConfig `json:"ethash,omitempty"` diff --git a/erigon-lib/chain/networkname/network_name.go b/erigon-lib/chain/networkname/network_name.go index ff25b57ba22..670ac88ae27 100644 --- a/erigon-lib/chain/networkname/network_name.go +++ b/erigon-lib/chain/networkname/network_name.go @@ -21,7 +21,6 @@ const ( HoleskyChainName = "holesky" SepoliaChainName = "sepolia" DevChainName = "dev" - MumbaiChainName = "mumbai" AmoyChainName = "amoy" BorMainnetChainName = "bor-mainnet" BorDevnetChainName = "bor-devnet" @@ -39,7 +38,6 @@ var All = []string{ MainnetChainName, HoleskyChainName, SepoliaChainName, - MumbaiChainName, AmoyChainName, BorMainnetChainName, BorDevnetChainName, diff --git a/erigon-lib/chain/snapcfg/util.go b/erigon-lib/chain/snapcfg/util.go index 8d029930d01..4c920d9db2b 100644 --- a/erigon-lib/chain/snapcfg/util.go +++ b/erigon-lib/chain/snapcfg/util.go @@ -38,7 +38,6 @@ import ( var ( Mainnet = fromToml(snapshothashes.Mainnet) Sepolia = fromToml(snapshothashes.Sepolia) - Mumbai = fromToml(snapshothashes.Mumbai) Amoy = fromToml(snapshothashes.Amoy) BorMainnet = fromToml(snapshothashes.BorMainnet) Gnosis = fromToml(snapshothashes.Gnosis) @@ -360,7 +359,6 @@ func (c Cfg) MergeLimit(t snaptype.Enum, fromBlock uint64) uint64 { var knownPreverified = map[string]Preverified{ networkname.MainnetChainName: Mainnet, networkname.SepoliaChainName: Sepolia, - networkname.MumbaiChainName: Mumbai, networkname.AmoyChainName: Amoy, networkname.BorMainnetChainName: BorMainnet, networkname.GnosisChainName: Gnosis, @@ -377,7 +375,7 @@ var knownTypes = map[string][]snaptype.Type{} func Seedable(networkName string, info snaptype.FileInfo) bool { if networkName == "" { - panic("empty network name") + return false } return KnownCfg(networkName).Seedable(info) } @@ -434,7 +432,6 @@ func VersionedCfg(networkName string, preferred snaptype.Version, min snaptype.V var KnownWebseeds = map[string][]string{ networkname.MainnetChainName: webseedsParse(webseed.Mainnet), networkname.SepoliaChainName: webseedsParse(webseed.Sepolia), - networkname.MumbaiChainName: webseedsParse(webseed.Mumbai), networkname.AmoyChainName: webseedsParse(webseed.Amoy), networkname.BorMainnetChainName: webseedsParse(webseed.BorMainnet), networkname.GnosisChainName: webseedsParse(webseed.Gnosis), @@ -454,3 +451,70 @@ func webseedsParse(in []byte) (res []string) { slices.Sort(res) return res } + +func LoadRemotePreverified() bool { + couldFetch := snapshothashes.LoadSnapshots() + + // Re-load the preverified hashes + Mainnet = fromToml(snapshothashes.Mainnet) + Sepolia = fromToml(snapshothashes.Sepolia) + Amoy = fromToml(snapshothashes.Amoy) + BorMainnet = fromToml(snapshothashes.BorMainnet) + Gnosis = fromToml(snapshothashes.Gnosis) + Chiado = fromToml(snapshothashes.Chiado) + Bsc = fromToml(snapshothashes.Bsc) + Chapel = fromToml(snapshothashes.Chapel) + // Update the known preverified hashes +
KnownWebseeds = map[string][]string{ + networkname.MainnetChainName: webseedsParse(webseed.Mainnet), + networkname.SepoliaChainName: webseedsParse(webseed.Sepolia), + networkname.AmoyChainName: webseedsParse(webseed.Amoy), + networkname.BorMainnetChainName: webseedsParse(webseed.BorMainnet), + networkname.GnosisChainName: webseedsParse(webseed.Gnosis), + networkname.ChiadoChainName: webseedsParse(webseed.Chiado), + networkname.BSCChainName: webseedsParse(webseed.Bsc), + networkname.ChapelChainName: webseedsParse(webseed.Chapel), + } + + knownPreverified = map[string]Preverified{ + networkname.MainnetChainName: Mainnet, + networkname.SepoliaChainName: Sepolia, + networkname.AmoyChainName: Amoy, + networkname.BorMainnetChainName: BorMainnet, + networkname.GnosisChainName: Gnosis, + networkname.ChiadoChainName: Chiado, + networkname.BSCChainName: Bsc, + networkname.ChapelChainName: Chapel, + } + return couldFetch +} + +func SetToml(networkName string, toml []byte) { + if _, ok := knownPreverified[networkName]; !ok { + return + } + knownPreverified[networkName] = fromToml(toml) +} + +func GetToml(networkName string) []byte { + switch networkName { + case networkname.MainnetChainName: + return snapshothashes.Mainnet + case networkname.SepoliaChainName: + return snapshothashes.Sepolia + case networkname.AmoyChainName: + return snapshothashes.Amoy + case networkname.BorMainnetChainName: + return snapshothashes.BorMainnet + case networkname.GnosisChainName: + return snapshothashes.Gnosis + case networkname.ChiadoChainName: + return snapshothashes.Chiado + case networkname.BSCChainName: + return snapshothashes.Bsc + case networkname.ChapelChainName: + return snapshothashes.Chapel + default: + return nil + } +} diff --git a/erigon-lib/commitment/bin_patricia_hashed.go b/erigon-lib/commitment/bin_patricia_hashed.go index 2057234acd7..70aee49894f 100644 --- a/erigon-lib/commitment/bin_patricia_hashed.go +++ b/erigon-lib/commitment/bin_patricia_hashed.go @@ -16,1826 +16,1805 @@ package commitment -import ( - "bytes" - "context" - "encoding/binary" - "encoding/hex" - "fmt" - "io" - "math/bits" - "path/filepath" - "sort" - - "github.com/holiman/uint256" - "golang.org/x/crypto/sha3" - - "github.com/erigontech/erigon-lib/common" - "github.com/erigontech/erigon-lib/common/length" - "github.com/erigontech/erigon-lib/etl" - "github.com/erigontech/erigon-lib/log/v3" - "github.com/erigontech/erigon-lib/rlp" -) - -const ( - maxKeySize = 512 - halfKeySize = maxKeySize / 2 - maxChild = 2 -) - -type bitstring []uint8 - -// converts slice of nibbles (lowest 4 bits of each byte) to bitstring -func hexToBin(hex []byte) bitstring { - bin := make([]byte, 4*len(hex)) - for i := range bin { - if hex[i/4]&(1<<(3-i%4)) != 0 { - bin[i] = 1 - } - } - return bin -} - -// encodes bitstring to its compact representation -func binToCompact(bin []byte) []byte { - compact := make([]byte, 2+common.BitLenToByteLen(len(bin))) - binary.BigEndian.PutUint16(compact, uint16(len(bin))) - for i := 0; i < len(bin); i++ { - if bin[i] != 0 { - compact[2+i/8] |= byte(1) << (i % 8) - } - } - return compact -} - -// decodes compact bitstring representation into actual bitstring -func compactToBin(compact []byte) []byte { - bin := make([]byte, binary.BigEndian.Uint16(compact)) - for i := 0; i < len(bin); i++ { - if compact[2+i/8]&(byte(1)<<(i%8)) == 0 { - bin[i] = 0 - } else { - bin[i] = 1 - } - } - return bin -} - -// BinHashed implements commitment based on patricia merkle tree with radix 16, -// with keys pre-hashed by keccak256 -type 
BinPatriciaHashed struct { - root BinaryCell // Root cell of the tree - // Rows of the grid correspond to the level of depth in the patricia tree - // Columns of the grid correspond to pointers to the nodes further from the root - grid [maxKeySize][maxChild]BinaryCell // First halfKeySize rows of this grid are for account trie, and next halfKeySize rows are for storage trie - // How many rows (starting from row 0) are currently active and have corresponding selected columns - // Last active row does not have selected column - activeRows int - // Length of the key that reflects current positioning of the grid. It maybe larger than number of active rows, - // if a account leaf cell represents multiple nibbles in the key - currentKeyLen int - currentKey [maxKeySize]byte // For each row indicates which column is currently selected - depths [maxKeySize]int // For each row, the depth of cells in that row - rootChecked bool // Set to false if it is not known whether the root is empty, set to true if it is checked - rootTouched bool - rootPresent bool - branchBefore [maxKeySize]bool // For each row, whether there was a branch node in the database loaded in unfold - touchMap [maxKeySize]uint16 // For each row, bitmap of cells that were either present before modification, or modified or deleted - afterMap [maxKeySize]uint16 // For each row, bitmap of cells that were present after modification - keccak keccakState - keccak2 keccakState - accountKeyLen int - trace bool - hashAuxBuffer [maxKeySize]byte // buffer to compute cell hash or write hash-related things - auxBuffer *bytes.Buffer // auxiliary buffer used during branch updates encoding - - branchEncoder *BranchEncoder - ctx PatriciaContext - - // Function used to fetch account with given plain key - accountFn func(plainKey []byte, cell *BinaryCell) error - // Function used to fetch account with given plain key - storageFn func(plainKey []byte, cell *BinaryCell) error -} - -func NewBinPatriciaHashed(accountKeyLen int, ctx PatriciaContext, tmpdir string) *BinPatriciaHashed { - bph := &BinPatriciaHashed{ - keccak: sha3.NewLegacyKeccak256().(keccakState), - keccak2: sha3.NewLegacyKeccak256().(keccakState), - accountKeyLen: accountKeyLen, - accountFn: wrapAccountStorageFn(ctx.GetAccount), - storageFn: wrapAccountStorageFn(ctx.GetStorage), - auxBuffer: bytes.NewBuffer(make([]byte, 8192)), - ctx: ctx, - } - bph.branchEncoder = NewBranchEncoder(1024, filepath.Join(tmpdir, "branch-encoder")) - - return bph - -} - -type BinaryCell struct { - h [length.Hash]byte // cell hash - hl int // Length of the hash (or embedded) - apk [length.Addr]byte // account plain key - apl int // length of account plain key - spk [length.Addr + length.Hash]byte // storage plain key - spl int // length of the storage plain key - downHashedKey [maxKeySize]byte - downHashedLen int - extension [halfKeySize]byte - extLen int - Nonce uint64 - Balance uint256.Int - CodeHash [length.Hash]byte // hash of the bytecode - Storage [length.Hash]byte - StorageLen int - Delete bool -} - -func (cell *BinaryCell) unwrapToHexCell() (cl *Cell) { - cl = new(Cell) - cl.Balance = *cell.Balance.Clone() - cl.Nonce = cell.Nonce - cl.StorageLen = cell.StorageLen - cl.accountPlainKeyLen = cell.apl - cl.storagePlainKeyLen = cell.spl - cl.HashLen = cell.hl - - copy(cl.accountPlainKey[:], cell.apk[:]) - copy(cl.storagePlainKey[:], cell.spk[:]) - copy(cl.hash[:], cell.h[:]) - - if cell.extLen > 0 { - compactedExt := binToCompact(cell.extension[:cell.extLen]) - copy(cl.extension[:], compactedExt) - cl.extLen = 
len(compactedExt) - } - if cell.downHashedLen > 0 { - compactedDHK := binToCompact(cell.downHashedKey[:cell.downHashedLen]) - copy(cl.downHashedKey[:], compactedDHK) - cl.downHashedLen = len(compactedDHK) - } - - copy(cl.CodeHash[:], cell.CodeHash[:]) - copy(cl.Storage[:], cell.Storage[:]) - cl.Delete = cell.Delete - return cl -} - -var ( // TODO REEAVL - EmptyBinRootHash, _ = hex.DecodeString("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - EmptyBinCodeHash, _ = hex.DecodeString("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470") -) - -func (cell *BinaryCell) fillEmpty() { - cell.apl = 0 - cell.spl = 0 - cell.downHashedLen = 0 - cell.extLen = 0 - cell.hl = 0 - cell.Nonce = 0 - cell.Balance.Clear() - copy(cell.CodeHash[:], EmptyCodeHash) - cell.StorageLen = 0 - cell.Delete = false -} - -func (cell *BinaryCell) fillFromUpperCell(upBinaryCell *BinaryCell, depth, depthIncrement int) { - if upBinaryCell.downHashedLen >= depthIncrement { - cell.downHashedLen = upBinaryCell.downHashedLen - depthIncrement - } else { - cell.downHashedLen = 0 - } - if upBinaryCell.downHashedLen > depthIncrement { - copy(cell.downHashedKey[:], upBinaryCell.downHashedKey[depthIncrement:upBinaryCell.downHashedLen]) - } - if upBinaryCell.extLen >= depthIncrement { - cell.extLen = upBinaryCell.extLen - depthIncrement - } else { - cell.extLen = 0 - } - if upBinaryCell.extLen > depthIncrement { - copy(cell.extension[:], upBinaryCell.extension[depthIncrement:upBinaryCell.extLen]) - } - if depth <= halfKeySize { - cell.apl = upBinaryCell.apl - if upBinaryCell.apl > 0 { - copy(cell.apk[:], upBinaryCell.apk[:cell.apl]) - cell.Balance.Set(&upBinaryCell.Balance) - cell.Nonce = upBinaryCell.Nonce - copy(cell.CodeHash[:], upBinaryCell.CodeHash[:]) - cell.extLen = upBinaryCell.extLen - if upBinaryCell.extLen > 0 { - copy(cell.extension[:], upBinaryCell.extension[:upBinaryCell.extLen]) - } - } - } else { - cell.apl = 0 - } - cell.spl = upBinaryCell.spl - if upBinaryCell.spl > 0 { - copy(cell.spk[:], upBinaryCell.spk[:upBinaryCell.spl]) - cell.StorageLen = upBinaryCell.StorageLen - if upBinaryCell.StorageLen > 0 { - copy(cell.Storage[:], upBinaryCell.Storage[:upBinaryCell.StorageLen]) - } - } - cell.hl = upBinaryCell.hl - if upBinaryCell.hl > 0 { - copy(cell.h[:], upBinaryCell.h[:upBinaryCell.hl]) - } -} - -func (cell *BinaryCell) fillFromLowerBinaryCell(lowBinaryCell *BinaryCell, lowDepth int, preExtension []byte, nibble int) { - if lowBinaryCell.apl > 0 || lowDepth < halfKeySize { - cell.apl = lowBinaryCell.apl - } - if lowBinaryCell.apl > 0 { - copy(cell.apk[:], lowBinaryCell.apk[:cell.apl]) - cell.Balance.Set(&lowBinaryCell.Balance) - cell.Nonce = lowBinaryCell.Nonce - copy(cell.CodeHash[:], lowBinaryCell.CodeHash[:]) - } - cell.spl = lowBinaryCell.spl - if lowBinaryCell.spl > 0 { - copy(cell.spk[:], lowBinaryCell.spk[:cell.spl]) - cell.StorageLen = lowBinaryCell.StorageLen - if lowBinaryCell.StorageLen > 0 { - copy(cell.Storage[:], lowBinaryCell.Storage[:lowBinaryCell.StorageLen]) - } - } - if lowBinaryCell.hl > 0 { - if (lowBinaryCell.apl == 0 && lowDepth < halfKeySize) || (lowBinaryCell.spl == 0 && lowDepth > halfKeySize) { - // Extension is related to either accounts branch node, or storage branch node, we prepend it by preExtension | nibble - if len(preExtension) > 0 { - copy(cell.extension[:], preExtension) - } - cell.extension[len(preExtension)] = byte(nibble) - if lowBinaryCell.extLen > 0 { - copy(cell.extension[1+len(preExtension):], lowBinaryCell.extension[:lowBinaryCell.extLen]) - 
} - cell.extLen = lowBinaryCell.extLen + 1 + len(preExtension) - } else { - // Extension is related to a storage branch node, so we copy it upwards as is - cell.extLen = lowBinaryCell.extLen - if lowBinaryCell.extLen > 0 { - copy(cell.extension[:], lowBinaryCell.extension[:lowBinaryCell.extLen]) - } - } - } - cell.hl = lowBinaryCell.hl - if lowBinaryCell.hl > 0 { - copy(cell.h[:], lowBinaryCell.h[:lowBinaryCell.hl]) - } -} - -func (cell *BinaryCell) deriveHashedKeys(depth int, keccak keccakState, accountKeyLen int) error { - extraLen := 0 - if cell.apl > 0 { - if depth > halfKeySize { - return fmt.Errorf("deriveHashedKeys accountPlainKey present at depth > halfKeySize") - } - extraLen = halfKeySize - depth - } - if cell.spl > 0 { - if depth >= halfKeySize { - extraLen = maxKeySize - depth - } else { - extraLen += halfKeySize - } - } - if extraLen > 0 { - if cell.downHashedLen > 0 { - copy(cell.downHashedKey[extraLen:], cell.downHashedKey[:cell.downHashedLen]) - } - cell.downHashedLen += extraLen - var hashedKeyOffset, downOffset int - if cell.apl > 0 { - if err := binHashKey(keccak, cell.apk[:cell.apl], cell.downHashedKey[:], depth); err != nil { - return err - } - downOffset = halfKeySize - depth - } - if cell.spl > 0 { - if depth >= halfKeySize { - hashedKeyOffset = depth - halfKeySize - } - if err := binHashKey(keccak, cell.spk[accountKeyLen:cell.spl], cell.downHashedKey[downOffset:], hashedKeyOffset); err != nil { - return err - } - } - } - return nil -} - -func (cell *BinaryCell) fillFromFields(data []byte, pos int, fieldBits PartFlags) (int, error) { - if fieldBits&HashedKeyPart != 0 { - l, n := binary.Uvarint(data[pos:]) - if n == 0 { - return 0, fmt.Errorf("fillFromFields buffer too small for hashedKey len") - } else if n < 0 { - return 0, fmt.Errorf("fillFromFields value overflow for hashedKey len") - } - pos += n - if len(data) < pos+int(l) { - return 0, fmt.Errorf("fillFromFields buffer too small for hashedKey exp %d got %d", pos+int(l), len(data)) - } - cell.downHashedLen = int(l) - cell.extLen = int(l) - if l > 0 { - copy(cell.downHashedKey[:], data[pos:pos+int(l)]) - copy(cell.extension[:], data[pos:pos+int(l)]) - pos += int(l) - } - } else { - cell.downHashedLen = 0 - cell.extLen = 0 - } - if fieldBits&AccountPlainPart != 0 { - l, n := binary.Uvarint(data[pos:]) - if n == 0 { - return 0, fmt.Errorf("fillFromFields buffer too small for accountPlainKey len") - } else if n < 0 { - return 0, fmt.Errorf("fillFromFields value overflow for accountPlainKey len") - } - pos += n - if len(data) < pos+int(l) { - return 0, fmt.Errorf("fillFromFields buffer too small for accountPlainKey") - } - cell.apl = int(l) - if l > 0 { - copy(cell.apk[:], data[pos:pos+int(l)]) - pos += int(l) - } - } else { - cell.apl = 0 - } - if fieldBits&StoragePlainPart != 0 { - l, n := binary.Uvarint(data[pos:]) - if n == 0 { - return 0, fmt.Errorf("fillFromFields buffer too small for storagePlainKey len") - } else if n < 0 { - return 0, fmt.Errorf("fillFromFields value overflow for storagePlainKey len") - } - pos += n - if len(data) < pos+int(l) { - return 0, fmt.Errorf("fillFromFields buffer too small for storagePlainKey") - } - cell.spl = int(l) - if l > 0 { - copy(cell.spk[:], data[pos:pos+int(l)]) - pos += int(l) - } - } else { - cell.spl = 0 - } - if fieldBits&HashPart != 0 { - l, n := binary.Uvarint(data[pos:]) - if n == 0 { - return 0, fmt.Errorf("fillFromFields buffer too small for hash len") - } else if n < 0 { - return 0, fmt.Errorf("fillFromFields value overflow for hash len") - } - pos += n - if 
len(data) < pos+int(l) { - return 0, fmt.Errorf("fillFromFields buffer too small for hash") - } - cell.hl = int(l) - if l > 0 { - copy(cell.h[:], data[pos:pos+int(l)]) - pos += int(l) - } - } else { - cell.hl = 0 - } - return pos, nil -} - -func (cell *BinaryCell) setStorage(value []byte) { - cell.StorageLen = len(value) - if len(value) > 0 { - copy(cell.Storage[:], value) - } -} - -func (cell *BinaryCell) setAccountFields(codeHash []byte, balance *uint256.Int, nonce uint64) { - copy(cell.CodeHash[:], codeHash) - - cell.Balance.SetBytes(balance.Bytes()) - cell.Nonce = nonce -} - -func (cell *BinaryCell) accountForHashing(buffer []byte, storageRootHash [length.Hash]byte) int { - balanceBytes := 0 - if !cell.Balance.LtUint64(128) { - balanceBytes = cell.Balance.ByteLen() - } - - var nonceBytes int - if cell.Nonce < 128 && cell.Nonce != 0 { - nonceBytes = 0 - } else { - nonceBytes = common.BitLenToByteLen(bits.Len64(cell.Nonce)) - } - - var structLength = uint(balanceBytes + nonceBytes + 2) - structLength += 66 // Two 32-byte arrays + 2 prefixes - - var pos int - if structLength < 56 { - buffer[0] = byte(192 + structLength) - pos = 1 - } else { - lengthBytes := common.BitLenToByteLen(bits.Len(structLength)) - buffer[0] = byte(247 + lengthBytes) - - for i := lengthBytes; i > 0; i-- { - buffer[i] = byte(structLength) - structLength >>= 8 - } - - pos = lengthBytes + 1 - } - - // Encoding nonce - if cell.Nonce < 128 && cell.Nonce != 0 { - buffer[pos] = byte(cell.Nonce) - } else { - buffer[pos] = byte(128 + nonceBytes) - var nonce = cell.Nonce - for i := nonceBytes; i > 0; i-- { - buffer[pos+i] = byte(nonce) - nonce >>= 8 - } - } - pos += 1 + nonceBytes - - // Encoding balance - if cell.Balance.LtUint64(128) && !cell.Balance.IsZero() { - buffer[pos] = byte(cell.Balance.Uint64()) - pos++ - } else { - buffer[pos] = byte(128 + balanceBytes) - pos++ - cell.Balance.WriteToSlice(buffer[pos : pos+balanceBytes]) - pos += balanceBytes - } - - // Encoding Root and CodeHash - buffer[pos] = 128 + 32 - pos++ - copy(buffer[pos:], storageRootHash[:]) - pos += 32 - buffer[pos] = 128 + 32 - pos++ - copy(buffer[pos:], cell.CodeHash[:]) - pos += 32 - return pos -} - -func (bph *BinPatriciaHashed) ResetContext(ctx PatriciaContext) {} - -func (bph *BinPatriciaHashed) completeLeafHash(buf, keyPrefix []byte, kp, kl, compactLen int, key []byte, compact0 byte, ni int, val rlp.RlpSerializable, singleton bool) ([]byte, error) { - totalLen := kp + kl + val.DoubleRLPLen() - var lenPrefix [4]byte - pt := rlp.GenerateStructLen(lenPrefix[:], totalLen) - embedded := !singleton && totalLen+pt < length.Hash - var writer io.Writer - if embedded { - //bph.byteArrayWriter.Setup(buf) - bph.auxBuffer.Reset() - writer = bph.auxBuffer - } else { - bph.keccak.Reset() - writer = bph.keccak - } - if _, err := writer.Write(lenPrefix[:pt]); err != nil { - return nil, err - } - if _, err := writer.Write(keyPrefix[:kp]); err != nil { - return nil, err - } - var b [1]byte - b[0] = compact0 - if _, err := writer.Write(b[:]); err != nil { - return nil, err - } - for i := 1; i < compactLen; i++ { - b[0] = key[ni]*16 + key[ni+1] - if _, err := writer.Write(b[:]); err != nil { - return nil, err - } - ni += 2 - } - var prefixBuf [8]byte - if err := val.ToDoubleRLP(writer, prefixBuf[:]); err != nil { - return nil, err - } - if embedded { - buf = bph.auxBuffer.Bytes() - } else { - var hashBuf [33]byte - hashBuf[0] = 0x80 + length.Hash - if _, err := bph.keccak.Read(hashBuf[1:]); err != nil { - return nil, err - } - buf = append(buf, hashBuf[:]...) 
- } - return buf, nil -} - -func (bph *BinPatriciaHashed) leafHashWithKeyVal(buf, key []byte, val rlp.RlpSerializableBytes, singleton bool) ([]byte, error) { - // Compute the total length of binary representation - var kp, kl int - // Write key - var compactLen int - var ni int - var compact0 byte - compactLen = (len(key)-1)/2 + 1 - if len(key)&1 == 0 { - compact0 = 0x30 + key[0] // Odd: (3<<4) + first nibble - ni = 1 - } else { - compact0 = 0x20 - } - var keyPrefix [1]byte - if compactLen > 1 { - keyPrefix[0] = 0x80 + byte(compactLen) - kp = 1 - kl = compactLen - } else { - kl = 1 - } - return bph.completeLeafHash(buf, keyPrefix[:], kp, kl, compactLen, key, compact0, ni, val, singleton) -} - -func (bph *BinPatriciaHashed) accountLeafHashWithKey(buf, key []byte, val rlp.RlpSerializable) ([]byte, error) { - // Compute the total length of binary representation - var kp, kl int - // Write key - var compactLen int - var ni int - var compact0 byte - if hasTerm(key) { - compactLen = (len(key)-1)/2 + 1 - if len(key)&1 == 0 { - compact0 = 48 + key[0] // Odd (1<<4) + first nibble - ni = 1 - } else { - compact0 = 32 - } - } else { - compactLen = len(key)/2 + 1 - if len(key)&1 == 1 { - compact0 = 16 + key[0] // Odd (1<<4) + first nibble - ni = 1 - } - } - var keyPrefix [1]byte - if compactLen > 1 { - keyPrefix[0] = byte(128 + compactLen) - kp = 1 - kl = compactLen - } else { - kl = 1 - } - return bph.completeLeafHash(buf, keyPrefix[:], kp, kl, compactLen, key, compact0, ni, val, true) -} - -func (bph *BinPatriciaHashed) extensionHash(key []byte, hash []byte) ([length.Hash]byte, error) { - var hashBuf [length.Hash]byte - - // Compute the total length of binary representation - var kp, kl int - // Write key - var compactLen int - var ni int - var compact0 byte - if hasTerm(key) { - compactLen = (len(key)-1)/2 + 1 - if len(key)&1 == 0 { - compact0 = 0x30 + key[0] // Odd: (3<<4) + first nibble - ni = 1 - } else { - compact0 = 0x20 - } - } else { - compactLen = len(key)/2 + 1 - if len(key)&1 == 1 { - compact0 = 0x10 + key[0] // Odd: (1<<4) + first nibble - ni = 1 - } - } - var keyPrefix [1]byte - if compactLen > 1 { - keyPrefix[0] = 0x80 + byte(compactLen) - kp = 1 - kl = compactLen - } else { - kl = 1 - } - totalLen := kp + kl + 33 - var lenPrefix [4]byte - pt := rlp.GenerateStructLen(lenPrefix[:], totalLen) - bph.keccak.Reset() - if _, err := bph.keccak.Write(lenPrefix[:pt]); err != nil { - return hashBuf, err - } - if _, err := bph.keccak.Write(keyPrefix[:kp]); err != nil { - return hashBuf, err - } - var b [1]byte - b[0] = compact0 - if _, err := bph.keccak.Write(b[:]); err != nil { - return hashBuf, err - } - for i := 1; i < compactLen; i++ { - b[0] = key[ni]*16 + key[ni+1] - if _, err := bph.keccak.Write(b[:]); err != nil { - return hashBuf, err - } - ni += 2 - } - b[0] = 0x80 + length.Hash - if _, err := bph.keccak.Write(b[:]); err != nil { - return hashBuf, err - } - if _, err := bph.keccak.Write(hash); err != nil { - return hashBuf, err - } - // Replace previous hash with the new one - if _, err := bph.keccak.Read(hashBuf[:]); err != nil { - return hashBuf, err - } - return hashBuf, nil -} - -func (bph *BinPatriciaHashed) computeBinaryCellHashLen(cell *BinaryCell, depth int) int { - if cell.spl > 0 && depth >= halfKeySize { - keyLen := 128 - depth + 1 // Length of hex key with terminator character - var kp, kl int - compactLen := (keyLen-1)/2 + 1 - if compactLen > 1 { - kp = 1 - kl = compactLen - } else { - kl = 1 - } - val := rlp.RlpSerializableBytes(cell.Storage[:cell.StorageLen]) - totalLen := 
kp + kl + val.DoubleRLPLen() - var lenPrefix [4]byte - pt := rlp.GenerateStructLen(lenPrefix[:], totalLen) - if totalLen+pt < length.Hash { - return totalLen + pt - } - } - return length.Hash + 1 -} - -func (bph *BinPatriciaHashed) computeBinaryCellHash(cell *BinaryCell, depth int, buf []byte) ([]byte, error) { - var err error - var storageRootHash [length.Hash]byte - storageRootHashIsSet := false - if cell.spl > 0 { - var hashedKeyOffset int - if depth >= halfKeySize { - hashedKeyOffset = depth - halfKeySize - } - singleton := depth <= halfKeySize - if err := binHashKey(bph.keccak, cell.spk[bph.accountKeyLen:cell.spl], cell.downHashedKey[:], hashedKeyOffset); err != nil { - return nil, err - } - cell.downHashedKey[halfKeySize-hashedKeyOffset] = 16 // Add terminator - if singleton { - if bph.trace { - fmt.Printf("leafHashWithKeyVal(singleton) for [%x]=>[%x]\n", cell.downHashedKey[:halfKeySize-hashedKeyOffset+1], cell.Storage[:cell.StorageLen]) - } - aux := make([]byte, 0, 33) - if aux, err = bph.leafHashWithKeyVal(aux, cell.downHashedKey[:halfKeySize-hashedKeyOffset+1], cell.Storage[:cell.StorageLen], true); err != nil { - return nil, err - } - storageRootHash = *(*[length.Hash]byte)(aux[1:]) - storageRootHashIsSet = true - } else { - if bph.trace { - fmt.Printf("leafHashWithKeyVal for [%x]=>[%x]\n", cell.downHashedKey[:halfKeySize-hashedKeyOffset+1], cell.Storage[:cell.StorageLen]) - } - return bph.leafHashWithKeyVal(buf, cell.downHashedKey[:halfKeySize-hashedKeyOffset+1], cell.Storage[:cell.StorageLen], false) - } - } - if cell.apl > 0 { - if err := binHashKey(bph.keccak, cell.apk[:cell.apl], cell.downHashedKey[:], depth); err != nil { - return nil, err - } - cell.downHashedKey[halfKeySize-depth] = 16 // Add terminator - if !storageRootHashIsSet { - if cell.extLen > 0 { - // Extension - if cell.hl > 0 { - if bph.trace { - fmt.Printf("extensionHash for [%x]=>[%x]\n", cell.extension[:cell.extLen], cell.h[:cell.hl]) - } - if storageRootHash, err = bph.extensionHash(cell.extension[:cell.extLen], cell.h[:cell.hl]); err != nil { - return nil, err - } - } else { - return nil, fmt.Errorf("computeBinaryCellHash extension without hash") - } - } else if cell.hl > 0 { - storageRootHash = cell.h - } else { - storageRootHash = *(*[length.Hash]byte)(EmptyRootHash) - } - } - var valBuf [128]byte - valLen := cell.accountForHashing(valBuf[:], storageRootHash) - if bph.trace { - fmt.Printf("accountLeafHashWithKey for [%x]=>[%x]\n", cell.downHashedKey[:halfKeySize+1-depth], rlp.RlpEncodedBytes(valBuf[:valLen])) - } - return bph.accountLeafHashWithKey(buf, cell.downHashedKey[:halfKeySize+1-depth], rlp.RlpEncodedBytes(valBuf[:valLen])) - } - buf = append(buf, 0x80+32) - if cell.extLen > 0 { - // Extension - if cell.hl > 0 { - if bph.trace { - fmt.Printf("extensionHash for [%x]=>[%x]\n", cell.extension[:cell.extLen], cell.h[:cell.hl]) - } - var hash [length.Hash]byte - if hash, err = bph.extensionHash(cell.extension[:cell.extLen], cell.h[:cell.hl]); err != nil { - return nil, err - } - buf = append(buf, hash[:]...) - } else { - return nil, fmt.Errorf("computeBinaryCellHash extension without hash") - } - } else if cell.hl > 0 { - buf = append(buf, cell.h[:cell.hl]...) - } else { - buf = append(buf, EmptyRootHash...) 
- } - return buf, nil -} - -func (bph *BinPatriciaHashed) needUnfolding(hashedKey []byte) int { - var cell *BinaryCell - var depth int - if bph.activeRows == 0 { - if bph.trace { - fmt.Printf("needUnfolding root, rootChecked = %t\n", bph.rootChecked) - } - if bph.rootChecked && bph.root.downHashedLen == 0 && bph.root.hl == 0 { - // Previously checked, empty root, no unfolding needed - return 0 - } - cell = &bph.root - if cell.downHashedLen == 0 && cell.hl == 0 && !bph.rootChecked { - // Need to attempt to unfold the root - return 1 - } - } else { - col := int(hashedKey[bph.currentKeyLen]) - cell = &bph.grid[bph.activeRows-1][col] - depth = bph.depths[bph.activeRows-1] - if bph.trace { - fmt.Printf("needUnfolding cell (%d, %x), currentKey=[%x], depth=%d, cell.h=[%x]\n", bph.activeRows-1, col, bph.currentKey[:bph.currentKeyLen], depth, cell.h[:cell.hl]) - } - } - if len(hashedKey) <= depth { - return 0 - } - if cell.downHashedLen == 0 { - if cell.hl == 0 { - // cell is empty, no need to unfold further - return 0 - } - // unfold branch node - return 1 - } - cpl := commonPrefixLen(hashedKey[depth:], cell.downHashedKey[:cell.downHashedLen-1]) - if bph.trace { - fmt.Printf("cpl=%d, cell.downHashedKey=[%x], depth=%d, hashedKey[depth:]=[%x]\n", cpl, cell.downHashedKey[:cell.downHashedLen], depth, hashedKey[depth:]) - } - unfolding := cpl + 1 - if depth < halfKeySize && depth+unfolding > halfKeySize { - // This is to make sure that unfolding always breaks at the level where storage subtrees start - unfolding = halfKeySize - depth - if bph.trace { - fmt.Printf("adjusted unfolding=%d\n", unfolding) - } - } - return unfolding -} - -// unfoldBranchNode returns true if unfolding has been done -func (bph *BinPatriciaHashed) unfoldBranchNode(row int, deleted bool, depth int) (bool, error) { - branchData, _, err := bph.ctx.GetBranch(binToCompact(bph.currentKey[:bph.currentKeyLen])) - if err != nil { - return false, err - } - if len(branchData) >= 2 { - branchData = branchData[2:] // skip touch map and hold aftermap and rest - } - if !bph.rootChecked && bph.currentKeyLen == 0 && len(branchData) == 0 { - // Special case - empty or deleted root - bph.rootChecked = true - return false, nil - } - if len(branchData) == 0 { - log.Warn("got empty branch data during unfold", "row", row, "depth", depth, "deleted", deleted) - } - bph.branchBefore[row] = true - bitmap := binary.BigEndian.Uint16(branchData[0:]) - pos := 2 - if deleted { - // All cells come as deleted (touched but not present after) - bph.afterMap[row] = 0 - bph.touchMap[row] = bitmap - } else { - bph.afterMap[row] = bitmap - bph.touchMap[row] = 0 - } - //fmt.Printf("unfoldBranchNode [%x], afterMap = [%016b], touchMap = [%016b]\n", branchData, bph.afterMap[row], bph.touchMap[row]) - // Loop iterating over the set bits of modMask - for bitset, j := bitmap, 0; bitset != 0; j++ { - bit := bitset & -bitset - nibble := bits.TrailingZeros16(bit) - cell := &bph.grid[row][nibble] - fieldBits := branchData[pos] - pos++ - var err error - if pos, err = cell.fillFromFields(branchData, pos, PartFlags(fieldBits)); err != nil { - return false, fmt.Errorf("prefix [%x], branchData[%x]: %w", bph.currentKey[:bph.currentKeyLen], branchData, err) - } - if bph.trace { - fmt.Printf("cell (%d, %x) depth=%d, hash=[%x], a=[%x], s=[%x], ex=[%x]\n", row, nibble, depth, cell.h[:cell.hl], cell.apk[:cell.apl], cell.spk[:cell.spl], cell.extension[:cell.extLen]) - } - if cell.apl > 0 { - if err := bph.accountFn(cell.apk[:cell.apl], cell); err != nil { - return false, err - } - if 
bph.trace {
- fmt.Printf("GetAccount[%x] return balance=%d, nonce=%d code=%x\n", cell.apk[:cell.apl], &cell.Balance, cell.Nonce, cell.CodeHash[:])
- }
- }
- if cell.spl > 0 {
- if err := bph.storageFn(cell.spk[:cell.spl], cell); err != nil {
- return false, err
- }
- }
- if err = cell.deriveHashedKeys(depth, bph.keccak, bph.accountKeyLen); err != nil {
- return false, err
- }
- bitset ^= bit
- }
- return true, nil
-}
-
-func (bph *BinPatriciaHashed) unfold(hashedKey []byte, unfolding int) error {
- if bph.trace {
- fmt.Printf("unfold %d: activeRows: %d\n", unfolding, bph.activeRows)
- }
- var upCell *BinaryCell
- var touched, present bool
- var col byte
- var upDepth, depth int
- if bph.activeRows == 0 {
- if bph.rootChecked && bph.root.hl == 0 && bph.root.downHashedLen == 0 {
- // No unfolding for empty root
- return nil
- }
- upCell = &bph.root
- touched = bph.rootTouched
- present = bph.rootPresent
- if bph.trace {
- fmt.Printf("unfold root, touched %t, present %t, column %d\n", touched, present, col)
- }
- } else {
- upDepth = bph.depths[bph.activeRows-1]
- col = hashedKey[upDepth-1]
- upCell = &bph.grid[bph.activeRows-1][col]
- touched = bph.touchMap[bph.activeRows-1]&(uint16(1)<<col) != 0
- present = bph.afterMap[bph.activeRows-1]&(uint16(1)<<col) != 0
- if bph.trace {
- fmt.Printf("upCell (%d, %x), touched %t, present %t\n", bph.activeRows-1, col, touched, present)
- }
- bph.currentKey[bph.currentKeyLen] = col
- bph.currentKeyLen++
- }
- row := bph.activeRows
- for i := 0; i < maxChild; i++ {
- bph.grid[row][i].fillEmpty()
- }
- bph.touchMap[row] = 0
- bph.afterMap[row] = 0
- bph.branchBefore[row] = false
- if upCell.downHashedLen == 0 {
- depth = upDepth + 1
- if unfolded, err := bph.unfoldBranchNode(row, touched && !present /* deleted */, depth); err != nil {
- return err
- } else if !unfolded {
- // Return here to prevent activeRow from being incremented
- return nil
- }
- } else if upCell.downHashedLen >= unfolding {
- depth = upDepth + unfolding
- nibble := upCell.downHashedKey[unfolding-1]
- if touched {
- bph.touchMap[row] = uint16(1) << nibble
- }
- if present {
- bph.afterMap[row] = uint16(1) << nibble
- }
- cell := &bph.grid[row][nibble]
- cell.fillFromUpperCell(upCell, depth, unfolding)
- if bph.trace {
- fmt.Printf("cell (%d, %x) depth=%d\n", row, nibble, depth)
- }
- if row >= halfKeySize {
- cell.apl = 0
- }
- if unfolding > 1 {
- copy(bph.currentKey[bph.currentKeyLen:], upCell.downHashedKey[:unfolding-1])
- }
- bph.currentKeyLen += unfolding - 1
- } else {
- // upCell.downHashedLen < unfolding
- depth = upDepth + upCell.downHashedLen
- nibble := upCell.downHashedKey[upCell.downHashedLen-1]
- if touched {
- bph.touchMap[row] = uint16(1) << nibble
- }
- if present {
- bph.afterMap[row] = uint16(1) << nibble
- }
- cell := &bph.grid[row][nibble]
- cell.fillFromUpperCell(upCell, depth, upCell.downHashedLen)
- if bph.trace {
- fmt.Printf("cell (%d, %x) depth=%d\n", row, nibble, depth)
- }
- if row >= halfKeySize {
- cell.apl = 0
- }
- if upCell.downHashedLen > 1 {
- copy(bph.currentKey[bph.currentKeyLen:], upCell.downHashedKey[:upCell.downHashedLen-1])
- }
- bph.currentKeyLen += upCell.downHashedLen - 1
- }
- bph.depths[bph.activeRows] = depth
- bph.activeRows++
- return nil
-}
-
-func (bph *BinPatriciaHashed) needFolding(hashedKey []byte) bool {
- return !bytes.HasPrefix(hashedKey, bph.currentKey[:bph.currentKeyLen])
-}
-
-// The purpose of fold is to reduce hph.currentKey[:hph.currentKeyLen].
It should be invoked -// until that current key becomes a prefix of hashedKey that we will proccess next -// (in other words until the needFolding function returns 0) -func (bph *BinPatriciaHashed) fold() (err error) { - updateKeyLen := bph.currentKeyLen - if bph.activeRows == 0 { - return fmt.Errorf("cannot fold - no active rows") - } - if bph.trace { - fmt.Printf("fold: activeRows: %d, currentKey: [%x], touchMap: %016b, afterMap: %016b\n", bph.activeRows, bph.currentKey[:bph.currentKeyLen], bph.touchMap[bph.activeRows-1], bph.afterMap[bph.activeRows-1]) - } - // Move information to the row above - row := bph.activeRows - 1 - var upBinaryCell *BinaryCell - var col int - var upDepth int - if bph.activeRows == 1 { - if bph.trace { - fmt.Printf("upcell is root\n") - } - upBinaryCell = &bph.root - } else { - upDepth = bph.depths[bph.activeRows-2] - col = int(bph.currentKey[upDepth-1]) - if bph.trace { - fmt.Printf("upcell is (%d x %x), upDepth=%d\n", row-1, col, upDepth) - } - upBinaryCell = &bph.grid[row-1][col] - } - - depth := bph.depths[bph.activeRows-1] - updateKey := binToCompact(bph.currentKey[:updateKeyLen]) - partsCount := bits.OnesCount16(bph.afterMap[row]) - - if bph.trace { - fmt.Printf("touchMap[%d]=%016b, afterMap[%d]=%016b\n", row, bph.touchMap[row], row, bph.afterMap[row]) - } - switch partsCount { - case 0: - // Everything deleted - if bph.touchMap[row] != 0 { - if row == 0 { - // Root is deleted because the tree is empty - bph.rootTouched = true - bph.rootPresent = false - } else if upDepth == halfKeySize { - // Special case - all storage items of an account have been deleted, but it does not automatically delete the account, just makes it empty storage - // Therefore we are not propagating deletion upwards, but turn it into a modification - bph.touchMap[row-1] |= uint16(1) << col - } else { - // Deletion is propagated upwards - bph.touchMap[row-1] |= uint16(1) << col - bph.afterMap[row-1] &^= uint16(1) << col - } - } - upBinaryCell.hl = 0 - upBinaryCell.apl = 0 - upBinaryCell.spl = 0 - upBinaryCell.extLen = 0 - upBinaryCell.downHashedLen = 0 - if bph.branchBefore[row] { - _, err = bph.branchEncoder.CollectUpdate(bph.ctx, updateKey, 0, bph.touchMap[row], 0, RetrieveCellNoop) - if err != nil { - return fmt.Errorf("failed to encode leaf node update: %w", err) - } - } - bph.activeRows-- - if upDepth > 0 { - bph.currentKeyLen = upDepth - 1 - } else { - bph.currentKeyLen = 0 - } - case 1: - // Leaf or extension node - if bph.touchMap[row] != 0 { - // any modifications - if row == 0 { - bph.rootTouched = true - } else { - // Modifiction is propagated upwards - bph.touchMap[row-1] |= uint16(1) << col - } - } - nibble := bits.TrailingZeros16(bph.afterMap[row]) - cell := &bph.grid[row][nibble] - upBinaryCell.extLen = 0 - upBinaryCell.fillFromLowerBinaryCell(cell, depth, bph.currentKey[upDepth:bph.currentKeyLen], nibble) - // Delete if it existed - if bph.branchBefore[row] { - _, err = bph.branchEncoder.CollectUpdate(bph.ctx, updateKey, 0, bph.touchMap[row], 0, RetrieveCellNoop) - if err != nil { - return fmt.Errorf("failed to encode leaf node update: %w", err) - } - } - bph.activeRows-- - if upDepth > 0 { - bph.currentKeyLen = upDepth - 1 - } else { - bph.currentKeyLen = 0 - } - default: - // Branch node - if bph.touchMap[row] != 0 { - // any modifications - if row == 0 { - bph.rootTouched = true - } else { - // Modifiction is propagated upwards - bph.touchMap[row-1] |= uint16(1) << col - } - } - bitmap := bph.touchMap[row] & bph.afterMap[row] - if !bph.branchBefore[row] { - // There 
was no branch node before, so we need to touch even the singular child that existed - bph.touchMap[row] |= bph.afterMap[row] - bitmap |= bph.afterMap[row] - } - // Calculate total length of all hashes - totalBranchLen := 17 - partsCount // For every empty cell, one byte - for bitset, j := bph.afterMap[row], 0; bitset != 0; j++ { - bit := bitset & -bitset - nibble := bits.TrailingZeros16(bit) - cell := &bph.grid[row][nibble] - totalBranchLen += bph.computeBinaryCellHashLen(cell, depth) - bitset ^= bit - } - - bph.keccak2.Reset() - pt := rlp.GenerateStructLen(bph.hashAuxBuffer[:], totalBranchLen) - if _, err := bph.keccak2.Write(bph.hashAuxBuffer[:pt]); err != nil { - return err - } - - b := [...]byte{0x80} - cellGetter := func(nibble int, skip bool) (*Cell, error) { - if skip { - if _, err := bph.keccak2.Write(b[:]); err != nil { - return nil, fmt.Errorf("failed to write empty nibble to hash: %w", err) - } - if bph.trace { - fmt.Printf("%x: empty(%d,%x)\n", nibble, row, nibble) - } - return nil, nil - } - cell := &bph.grid[row][nibble] - cellHash, err := bph.computeBinaryCellHash(cell, depth, bph.hashAuxBuffer[:0]) - if err != nil { - return nil, err - } - if bph.trace { - fmt.Printf("%x: computeBinaryCellHash(%d,%x,depth=%d)=[%x]\n", nibble, row, nibble, depth, cellHash) - } - if _, err := bph.keccak2.Write(cellHash); err != nil { - return nil, err - } - - // TODO extension and downHashedKey should be encoded to hex format and vice versa, data loss due to array sizes - return cell.unwrapToHexCell(), nil - } - - var lastNibble int - var err error - _ = cellGetter - - lastNibble, err = bph.branchEncoder.CollectUpdate(bph.ctx, updateKey, bitmap, bph.touchMap[row], bph.afterMap[row], cellGetter) - if err != nil { - return fmt.Errorf("failed to encode branch update: %w", err) - } - for i := lastNibble; i <= maxChild; i++ { - if _, err := bph.keccak2.Write(b[:]); err != nil { - return err - } - if bph.trace { - fmt.Printf("%x: empty(%d,%x)\n", i, row, i) - } - } - upBinaryCell.extLen = depth - upDepth - 1 - upBinaryCell.downHashedLen = upBinaryCell.extLen - if upBinaryCell.extLen > 0 { - copy(upBinaryCell.extension[:], bph.currentKey[upDepth:bph.currentKeyLen]) - copy(upBinaryCell.downHashedKey[:], bph.currentKey[upDepth:bph.currentKeyLen]) - } - if depth < halfKeySize { - upBinaryCell.apl = 0 - } - upBinaryCell.spl = 0 - upBinaryCell.hl = 32 - if _, err := bph.keccak2.Read(upBinaryCell.h[:]); err != nil { - return err - } - if bph.trace { - fmt.Printf("} [%x]\n", upBinaryCell.h[:]) - } - bph.activeRows-- - if upDepth > 0 { - bph.currentKeyLen = upDepth - 1 - } else { - bph.currentKeyLen = 0 - } - } - return nil -} - -func (bph *BinPatriciaHashed) deleteBinaryCell(hashedKey []byte) { - if bph.trace { - fmt.Printf("deleteBinaryCell, activeRows = %d\n", bph.activeRows) - } - var cell *BinaryCell - if bph.activeRows == 0 { - // Remove the root - cell = &bph.root - bph.rootTouched = true - bph.rootPresent = false - } else { - row := bph.activeRows - 1 - if bph.depths[row] < len(hashedKey) { - if bph.trace { - fmt.Printf("deleteBinaryCell skipping spurious delete depth=%d, len(hashedKey)=%d\n", bph.depths[row], len(hashedKey)) - } - return - } - col := int(hashedKey[bph.currentKeyLen]) - cell = &bph.grid[row][col] - if bph.afterMap[row]&(uint16(1)< 0; unfolding = bph.needUnfolding(hashedKey) { - if err := bph.unfold(hashedKey, unfolding); err != nil { - return nil, fmt.Errorf("unfold: %w", err) - } - } - - // Update the cell - stagedBinaryCell.fillEmpty() - if len(plainKey) == bph.accountKeyLen { - 
if err := bph.accountFn(plainKey, stagedBinaryCell); err != nil { - return nil, fmt.Errorf("GetAccount for key %x failed: %w", plainKey, err) - } - if !stagedBinaryCell.Delete { - cell := bph.updateBinaryCell(plainKey, hashedKey) - cell.setAccountFields(stagedBinaryCell.CodeHash[:], &stagedBinaryCell.Balance, stagedBinaryCell.Nonce) - - if bph.trace { - fmt.Printf("GetAccount reading key %x => balance=%d nonce=%v codeHash=%x\n", cell.apk, &cell.Balance, cell.Nonce, cell.CodeHash) - } - } - } else { - if err = bph.storageFn(plainKey, stagedBinaryCell); err != nil { - return nil, fmt.Errorf("GetStorage for key %x failed: %w", plainKey, err) - } - if !stagedBinaryCell.Delete { - bph.updateBinaryCell(plainKey, hashedKey).setStorage(stagedBinaryCell.Storage[:stagedBinaryCell.StorageLen]) - if bph.trace { - fmt.Printf("GetStorage reading key %x => %x\n", plainKey, stagedBinaryCell.Storage[:stagedBinaryCell.StorageLen]) - } - } - } - - if stagedBinaryCell.Delete { - if bph.trace { - fmt.Printf("delete cell %x hash %x\n", plainKey, hashedKey) - } - bph.deleteBinaryCell(hashedKey) - } - } - // Folding everything up to the root - for bph.activeRows > 0 { - if err := bph.fold(); err != nil { - return nil, fmt.Errorf("final fold: %w", err) - } - } - - rootHash, err = bph.RootHash() - if err != nil { - return nil, fmt.Errorf("root hash evaluation failed: %w", err) - } - err = bph.branchEncoder.Load(bph.ctx, etl.TransformArgs{Quit: ctx.Done()}) - if err != nil { - return nil, fmt.Errorf("branch update failed: %w", err) - } - return rootHash, nil -} - -func (bph *BinPatriciaHashed) SetTrace(trace bool) { bph.trace = trace } - -func (bph *BinPatriciaHashed) Variant() TrieVariant { return VariantBinPatriciaTrie } - -// Reset allows BinPatriciaHashed instance to be reused for the new commitment calculation -func (bph *BinPatriciaHashed) Reset() { - bph.rootChecked = false - bph.root.hl = 0 - bph.root.downHashedLen = 0 - bph.root.apl = 0 - bph.root.spl = 0 - bph.root.extLen = 0 - copy(bph.root.CodeHash[:], EmptyCodeHash) - bph.root.StorageLen = 0 - bph.root.Balance.Clear() - bph.root.Nonce = 0 - bph.rootTouched = false - bph.rootPresent = true -} - -func (c *BinaryCell) bytes() []byte { - var pos = 1 - size := 1 + c.hl + 1 + c.apl + c.spl + 1 + c.downHashedLen + 1 + c.extLen + 1 // max size - buf := make([]byte, size) - - var flags uint8 - if c.hl != 0 { - flags |= 1 - buf[pos] = byte(c.hl) - pos++ - copy(buf[pos:pos+c.hl], c.h[:]) - pos += c.hl - } - if c.apl != 0 { - flags |= 2 - buf[pos] = byte(c.hl) - pos++ - copy(buf[pos:pos+c.apl], c.apk[:]) - pos += c.apl - } - if c.spl != 0 { - flags |= 4 - buf[pos] = byte(c.spl) - pos++ - copy(buf[pos:pos+c.spl], c.spk[:]) - pos += c.spl - } - if c.downHashedLen != 0 { - flags |= 8 - buf[pos] = byte(c.downHashedLen) - pos++ - copy(buf[pos:pos+c.downHashedLen], c.downHashedKey[:]) - pos += c.downHashedLen - } - if c.extLen != 0 { - flags |= 16 - buf[pos] = byte(c.extLen) - pos++ - copy(buf[pos:pos+c.downHashedLen], c.downHashedKey[:]) - //pos += c.downHashedLen - } - buf[0] = flags - return buf -} - -func (c *BinaryCell) decodeBytes(buf []byte) error { - if len(buf) < 1 { - return fmt.Errorf("invalid buffer size to contain BinaryCell (at least 1 byte expected)") - } - c.fillEmpty() - - var pos int - flags := buf[pos] - pos++ - - if flags&1 != 0 { - c.hl = int(buf[pos]) - pos++ - copy(c.h[:], buf[pos:pos+c.hl]) - pos += c.hl - } - if flags&2 != 0 { - c.apl = int(buf[pos]) - pos++ - copy(c.apk[:], buf[pos:pos+c.apl]) - pos += c.apl - } - if flags&4 != 0 { - c.spl = 
int(buf[pos]) - pos++ - copy(c.spk[:], buf[pos:pos+c.spl]) - pos += c.spl - } - if flags&8 != 0 { - c.downHashedLen = int(buf[pos]) - pos++ - copy(c.downHashedKey[:], buf[pos:pos+c.downHashedLen]) - pos += c.downHashedLen - } - if flags&16 != 0 { - c.extLen = int(buf[pos]) - pos++ - copy(c.extension[:], buf[pos:pos+c.extLen]) - //pos += c.extLen - } - return nil -} - -// Encode current state of hph into bytes -func (bph *BinPatriciaHashed) EncodeCurrentState(buf []byte) ([]byte, error) { - s := binState{ - CurrentKeyLen: int16(bph.currentKeyLen), - RootChecked: bph.rootChecked, - RootTouched: bph.rootTouched, - RootPresent: bph.rootPresent, - Root: make([]byte, 0), - } - - s.Root = bph.root.bytes() - copy(s.CurrentKey[:], bph.currentKey[:]) - copy(s.Depths[:], bph.depths[:]) - copy(s.BranchBefore[:], bph.branchBefore[:]) - copy(s.TouchMap[:], bph.touchMap[:]) - copy(s.AfterMap[:], bph.afterMap[:]) - - return s.Encode(buf) -} - -// buf expected to be encoded hph state. Decode state and set up hph to that state. -func (bph *BinPatriciaHashed) SetState(buf []byte) error { - if bph.activeRows != 0 { - return fmt.Errorf("has active rows, could not reset state") - } - - var s state - if err := s.Decode(buf); err != nil { - return err - } - - bph.Reset() - - if err := bph.root.decodeBytes(s.Root); err != nil { - return err - } - - bph.rootChecked = s.RootChecked - bph.rootTouched = s.RootTouched - bph.rootPresent = s.RootPresent - - copy(bph.depths[:], s.Depths[:]) - copy(bph.branchBefore[:], s.BranchBefore[:]) - copy(bph.touchMap[:], s.TouchMap[:]) - copy(bph.afterMap[:], s.AfterMap[:]) - - return nil -} - -func (bph *BinPatriciaHashed) ProcessTree(ctx context.Context, t *UpdateTree, lp string) (rootHash []byte, err error) { - panic("not implemented") -} - -func (bph *BinPatriciaHashed) ProcessUpdates(ctx context.Context, plainKeys [][]byte, updates []Update) (rootHash []byte, err error) { - for i, pk := range plainKeys { - updates[i].hashedKey = hexToBin(pk) - updates[i].plainKey = pk - } - - sort.Slice(updates, func(i, j int) bool { - return bytes.Compare(updates[i].hashedKey, updates[j].hashedKey) < 0 - }) - - for i, plainKey := range plainKeys { - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - update := updates[i] - if bph.trace { - fmt.Printf("plainKey=[%x], hashedKey=[%x], currentKey=[%x]\n", update.plainKey, update.hashedKey, bph.currentKey[:bph.currentKeyLen]) - } - // Keep folding until the currentKey is the prefix of the key we modify - for bph.needFolding(update.hashedKey) { - if err := bph.fold(); err != nil { - return nil, fmt.Errorf("fold: %w", err) - } - } - // Now unfold until we step on an empty cell - for unfolding := bph.needUnfolding(update.hashedKey); unfolding > 0; unfolding = bph.needUnfolding(update.hashedKey) { - if err := bph.unfold(update.hashedKey, unfolding); err != nil { - return nil, fmt.Errorf("unfold: %w", err) - } - } - - // Update the cell - if update.Flags == DeleteUpdate { - bph.deleteBinaryCell(update.hashedKey) - if bph.trace { - fmt.Printf("key %x deleted\n", update.plainKey) - } - } else { - cell := bph.updateBinaryCell(update.plainKey, update.hashedKey) - if bph.trace { - fmt.Printf("GetAccount updated key %x =>", plainKey) - } - if update.Flags&BalanceUpdate != 0 { - if bph.trace { - fmt.Printf(" balance=%d", &update.Balance) - } - cell.Balance.Set(&update.Balance) - } - if update.Flags&NonceUpdate != 0 { - if bph.trace { - fmt.Printf(" nonce=%d", update.Nonce) - } - cell.Nonce = update.Nonce - } - if update.Flags&CodeUpdate != 0 
{ - if bph.trace { - fmt.Printf(" codeHash=%x", update.CodeHashOrStorage) - } - copy(cell.CodeHash[:], update.CodeHashOrStorage[:]) - } - if bph.trace { - fmt.Printf("\n") - } - if update.Flags&StorageUpdate != 0 { - cell.setStorage(update.CodeHashOrStorage[:update.ValLength]) - if bph.trace { - fmt.Printf("GetStorage filled key %x => %x\n", plainKey, update.CodeHashOrStorage[:update.ValLength]) - } - } - } - } - // Folding everything up to the root - for bph.activeRows > 0 { - if err := bph.fold(); err != nil { - return nil, fmt.Errorf("final fold: %w", err) - } - } - - rootHash, err = bph.RootHash() - if err != nil { - return nil, fmt.Errorf("root hash evaluation failed: %w", err) - } - - err = bph.branchEncoder.Load(bph.ctx, etl.TransformArgs{Quit: ctx.Done()}) - if err != nil { - return nil, fmt.Errorf("branch update failed: %w", err) - } - - return rootHash, nil -} - -// Hashes provided key and expands resulting hash into nibbles (each byte split into two nibbles by 4 bits) -func (bph *BinPatriciaHashed) hashAndNibblizeKey2(key []byte) []byte { //nolint - hashedKey := make([]byte, length.Hash) - - bph.keccak.Reset() - bph.keccak.Write(key[:length.Addr]) - bph.keccak.Read(hashedKey[:length.Hash]) - - if len(key[length.Addr:]) > 0 { - hashedKey = append(hashedKey, make([]byte, length.Hash)...) - bph.keccak.Reset() - bph.keccak.Write(key[length.Addr:]) - bph.keccak.Read(hashedKey[length.Hash:]) - } - - nibblized := make([]byte, len(hashedKey)*2) - for i, b := range hashedKey { - nibblized[i*2] = (b >> 4) & 0xf - nibblized[i*2+1] = b & 0xf - } - return nibblized -} - -func binHashKey(keccak keccakState, plainKey []byte, dest []byte, hashedKeyOffset int) error { - keccak.Reset() - var hashBufBack [length.Hash]byte - hashBuf := hashBufBack[:] - if _, err := keccak.Write(plainKey); err != nil { - return err - } - if _, err := keccak.Read(hashBuf); err != nil { - return err - } - for k := hashedKeyOffset; k < 256; k++ { - if hashBuf[k/8]&(1<<(7-k%8)) == 0 { - dest[k-hashedKeyOffset] = 0 - } else { - dest[k-hashedKeyOffset] = 1 - } - } - return nil -} - -func wrapAccountStorageFn(fn func([]byte, *Cell) error) func(pk []byte, bc *BinaryCell) error { - return func(pk []byte, bc *BinaryCell) error { - cl := bc.unwrapToHexCell() - - if err := fn(pk, cl); err != nil { - return err - } - - bc.Balance = *cl.Balance.Clone() - bc.Nonce = cl.Nonce - bc.StorageLen = cl.StorageLen - bc.apl = cl.accountPlainKeyLen - bc.spl = cl.storagePlainKeyLen - bc.hl = cl.HashLen - copy(bc.apk[:], cl.accountPlainKey[:]) - copy(bc.spk[:], cl.storagePlainKey[:]) - copy(bc.h[:], cl.hash[:]) - - if cl.extLen > 0 { - binExt := compactToBin(cl.extension[:cl.extLen]) - copy(bc.extension[:], binExt) - bc.extLen = len(binExt) - } - if cl.downHashedLen > 0 { - bindhk := compactToBin(cl.downHashedKey[:cl.downHashedLen]) - copy(bc.downHashedKey[:], bindhk) - bc.downHashedLen = len(bindhk) - } - - copy(bc.CodeHash[:], cl.CodeHash[:]) - copy(bc.Storage[:], cl.Storage[:]) - bc.Delete = cl.Delete - return nil - } -} - -// represents state of the tree -type binState struct { - TouchMap [maxKeySize]uint16 // For each row, bitmap of cells that were either present before modification, or modified or deleted - AfterMap [maxKeySize]uint16 // For each row, bitmap of cells that were present after modification - CurrentKeyLen int16 - Root []byte // encoded root cell - RootChecked bool // Set to false if it is not known whether the root is empty, set to true if it is checked - RootTouched bool - RootPresent bool - BranchBefore [maxKeySize]bool 
// For each row, whether there was a branch node in the database loaded in unfold - CurrentKey [maxKeySize]byte // For each row indicates which column is currently selected - Depths [maxKeySize]int // For each row, the depth of cells in that row -} - -func (s *binState) Encode(buf []byte) ([]byte, error) { - var rootFlags stateRootFlag - if s.RootPresent { - rootFlags |= stateRootPresent - } - if s.RootChecked { - rootFlags |= stateRootChecked - } - if s.RootTouched { - rootFlags |= stateRootTouched - } - - ee := bytes.NewBuffer(buf) - if err := binary.Write(ee, binary.BigEndian, s.CurrentKeyLen); err != nil { - return nil, fmt.Errorf("encode currentKeyLen: %w", err) - } - if err := binary.Write(ee, binary.BigEndian, int8(rootFlags)); err != nil { - return nil, fmt.Errorf("encode rootFlags: %w", err) - } - if n, err := ee.Write(s.CurrentKey[:]); err != nil || n != len(s.CurrentKey) { - return nil, fmt.Errorf("encode currentKey: %w", err) - } - if err := binary.Write(ee, binary.BigEndian, uint16(len(s.Root))); err != nil { - return nil, fmt.Errorf("encode root len: %w", err) - } - if n, err := ee.Write(s.Root); err != nil || n != len(s.Root) { - return nil, fmt.Errorf("encode root: %w", err) - } - d := make([]byte, len(s.Depths)) - for i := 0; i < len(s.Depths); i++ { - d[i] = byte(s.Depths[i]) - } - if n, err := ee.Write(d); err != nil || n != len(s.Depths) { - return nil, fmt.Errorf("encode depths: %w", err) - } - if err := binary.Write(ee, binary.BigEndian, s.TouchMap); err != nil { - return nil, fmt.Errorf("encode touchMap: %w", err) - } - if err := binary.Write(ee, binary.BigEndian, s.AfterMap); err != nil { - return nil, fmt.Errorf("encode afterMap: %w", err) - } - - var before1, before2 uint64 - for i := 0; i < halfKeySize; i++ { - if s.BranchBefore[i] { - before1 |= 1 << i - } - } - for i, j := halfKeySize, 0; i < maxKeySize; i, j = i+1, j+1 { - if s.BranchBefore[i] { - before2 |= 1 << j - } - } - if err := binary.Write(ee, binary.BigEndian, before1); err != nil { - return nil, fmt.Errorf("encode branchBefore_1: %w", err) - } - if err := binary.Write(ee, binary.BigEndian, before2); err != nil { - return nil, fmt.Errorf("encode branchBefore_2: %w", err) - } - return ee.Bytes(), nil -} - -func (s *binState) Decode(buf []byte) error { - aux := bytes.NewBuffer(buf) - if err := binary.Read(aux, binary.BigEndian, &s.CurrentKeyLen); err != nil { - return fmt.Errorf("currentKeyLen: %w", err) - } - var rootFlags stateRootFlag - if err := binary.Read(aux, binary.BigEndian, &rootFlags); err != nil { - return fmt.Errorf("rootFlags: %w", err) - } - - if rootFlags&stateRootPresent != 0 { - s.RootPresent = true - } - if rootFlags&stateRootTouched != 0 { - s.RootTouched = true - } - if rootFlags&stateRootChecked != 0 { - s.RootChecked = true - } - if n, err := aux.Read(s.CurrentKey[:]); err != nil || n != maxKeySize { - return fmt.Errorf("currentKey: %w", err) - } - var rootSize uint16 - if err := binary.Read(aux, binary.BigEndian, &rootSize); err != nil { - return fmt.Errorf("root size: %w", err) - } - s.Root = make([]byte, rootSize) - if _, err := aux.Read(s.Root); err != nil { - return fmt.Errorf("root: %w", err) - } - d := make([]byte, len(s.Depths)) - if err := binary.Read(aux, binary.BigEndian, &d); err != nil { - return fmt.Errorf("depths: %w", err) - } - for i := 0; i < len(s.Depths); i++ { - s.Depths[i] = int(d[i]) - } - if err := binary.Read(aux, binary.BigEndian, &s.TouchMap); err != nil { - return fmt.Errorf("touchMap: %w", err) - } - if err := binary.Read(aux, binary.BigEndian, 
&s.AfterMap); err != nil { - return fmt.Errorf("afterMap: %w", err) - } - var branch1, branch2 uint64 - if err := binary.Read(aux, binary.BigEndian, &branch1); err != nil { - return fmt.Errorf("branchBefore1: %w", err) - } - if err := binary.Read(aux, binary.BigEndian, &branch2); err != nil { - return fmt.Errorf("branchBefore2: %w", err) - } - - // TODO invalid branch encode - for i := 0; i < halfKeySize; i++ { - if branch1&(1< 0 { +// compactedExt := binToCompact(cell.extension[:cell.extLen]) +// copy(cl.extension[:], compactedExt) +// cl.extLen = len(compactedExt) +// } +// if cell.downHashedLen > 0 { +// compactedDHK := binToCompact(cell.downHashedKey[:cell.downHashedLen]) +// copy(cl.downHashedKey[:], compactedDHK) +// cl.downHashedLen = len(compactedDHK) +// } +// +// copy(cl.CodeHash[:], cell.CodeHash[:]) +// copy(cl.Storage[:], cell.Storage[:]) +// cl.Delete = cell.Delete +// return cl +//} +// +//var ( // TODO REEAVL +// EmptyBinRootHash, _ = hex.DecodeString("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") +// EmptyBinCodeHash, _ = hex.DecodeString("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470") +//) +// +//func (cell *BinaryCell) fillEmpty() { +// cell.apl = 0 +// cell.spl = 0 +// cell.downHashedLen = 0 +// cell.extLen = 0 +// cell.hl = 0 +// cell.Nonce = 0 +// cell.Balance.Clear() +// copy(cell.CodeHash[:], EmptyCodeHash) +// cell.StorageLen = 0 +// cell.Delete = false +//} +// +//func (cell *BinaryCell) fillFromUpperCell(upBinaryCell *BinaryCell, depth, depthIncrement int) { +// if upBinaryCell.downHashedLen >= depthIncrement { +// cell.downHashedLen = upBinaryCell.downHashedLen - depthIncrement +// } else { +// cell.downHashedLen = 0 +// } +// if upBinaryCell.downHashedLen > depthIncrement { +// copy(cell.downHashedKey[:], upBinaryCell.downHashedKey[depthIncrement:upBinaryCell.downHashedLen]) +// } +// if upBinaryCell.extLen >= depthIncrement { +// cell.extLen = upBinaryCell.extLen - depthIncrement +// } else { +// cell.extLen = 0 +// } +// if upBinaryCell.extLen > depthIncrement { +// copy(cell.extension[:], upBinaryCell.extension[depthIncrement:upBinaryCell.extLen]) +// } +// if depth <= halfKeySize { +// cell.apl = upBinaryCell.apl +// if upBinaryCell.apl > 0 { +// copy(cell.apk[:], upBinaryCell.apk[:cell.apl]) +// cell.Balance.Set(&upBinaryCell.Balance) +// cell.Nonce = upBinaryCell.Nonce +// copy(cell.CodeHash[:], upBinaryCell.CodeHash[:]) +// cell.extLen = upBinaryCell.extLen +// if upBinaryCell.extLen > 0 { +// copy(cell.extension[:], upBinaryCell.extension[:upBinaryCell.extLen]) +// } +// } +// } else { +// cell.apl = 0 +// } +// cell.spl = upBinaryCell.spl +// if upBinaryCell.spl > 0 { +// copy(cell.spk[:], upBinaryCell.spk[:upBinaryCell.spl]) +// cell.StorageLen = upBinaryCell.StorageLen +// if upBinaryCell.StorageLen > 0 { +// copy(cell.Storage[:], upBinaryCell.Storage[:upBinaryCell.StorageLen]) +// } +// } +// cell.hl = upBinaryCell.hl +// if upBinaryCell.hl > 0 { +// copy(cell.h[:], upBinaryCell.h[:upBinaryCell.hl]) +// } +//} +// +//func (cell *BinaryCell) fillFromLowerBinaryCell(lowBinaryCell *BinaryCell, lowDepth int, preExtension []byte, nibble int) { +// if lowBinaryCell.apl > 0 || lowDepth < halfKeySize { +// cell.apl = lowBinaryCell.apl +// } +// if lowBinaryCell.apl > 0 { +// copy(cell.apk[:], lowBinaryCell.apk[:cell.apl]) +// cell.Balance.Set(&lowBinaryCell.Balance) +// cell.Nonce = lowBinaryCell.Nonce +// copy(cell.CodeHash[:], lowBinaryCell.CodeHash[:]) +// } +// cell.spl = lowBinaryCell.spl +// if 
lowBinaryCell.spl > 0 { +// copy(cell.spk[:], lowBinaryCell.spk[:cell.spl]) +// cell.StorageLen = lowBinaryCell.StorageLen +// if lowBinaryCell.StorageLen > 0 { +// copy(cell.Storage[:], lowBinaryCell.Storage[:lowBinaryCell.StorageLen]) +// } +// } +// if lowBinaryCell.hl > 0 { +// if (lowBinaryCell.apl == 0 && lowDepth < halfKeySize) || (lowBinaryCell.spl == 0 && lowDepth > halfKeySize) { +// // Extension is related to either accounts branch node, or storage branch node, we prepend it by preExtension | nibble +// if len(preExtension) > 0 { +// copy(cell.extension[:], preExtension) +// } +// cell.extension[len(preExtension)] = byte(nibble) +// if lowBinaryCell.extLen > 0 { +// copy(cell.extension[1+len(preExtension):], lowBinaryCell.extension[:lowBinaryCell.extLen]) +// } +// cell.extLen = lowBinaryCell.extLen + 1 + len(preExtension) +// } else { +// // Extension is related to a storage branch node, so we copy it upwards as is +// cell.extLen = lowBinaryCell.extLen +// if lowBinaryCell.extLen > 0 { +// copy(cell.extension[:], lowBinaryCell.extension[:lowBinaryCell.extLen]) +// } +// } +// } +// cell.hl = lowBinaryCell.hl +// if lowBinaryCell.hl > 0 { +// copy(cell.h[:], lowBinaryCell.h[:lowBinaryCell.hl]) +// } +//} +// +//func (cell *BinaryCell) deriveHashedKeys(depth int, keccak keccakState, accountKeyLen int) error { +// extraLen := 0 +// if cell.apl > 0 { +// if depth > halfKeySize { +// return errors.New("deriveHashedKeys accountPlainKey present at depth > halfKeySize") +// } +// extraLen = halfKeySize - depth +// } +// if cell.spl > 0 { +// if depth >= halfKeySize { +// extraLen = maxKeySize - depth +// } else { +// extraLen += halfKeySize +// } +// } +// if extraLen > 0 { +// if cell.downHashedLen > 0 { +// copy(cell.downHashedKey[extraLen:], cell.downHashedKey[:cell.downHashedLen]) +// } +// cell.downHashedLen += extraLen +// var hashedKeyOffset, downOffset int +// if cell.apl > 0 { +// if err := binHashKey(keccak, cell.apk[:cell.apl], cell.downHashedKey[:], depth); err != nil { +// return err +// } +// downOffset = halfKeySize - depth +// } +// if cell.spl > 0 { +// if depth >= halfKeySize { +// hashedKeyOffset = depth - halfKeySize +// } +// if err := binHashKey(keccak, cell.spk[accountKeyLen:cell.spl], cell.downHashedKey[downOffset:], hashedKeyOffset); err != nil { +// return err +// } +// } +// } +// return nil +//} +// +//func (cell *BinaryCell) fillFromFields(data []byte, pos int, fieldBits PartFlags) (int, error) { +// if fieldBits&HashedKeyPart != 0 { +// l, n := binary.Uvarint(data[pos:]) +// if n == 0 { +// return 0, errors.New("fillFromFields buffer too small for hashedKey len") +// } else if n < 0 { +// return 0, errors.New("fillFromFields value overflow for hashedKey len") +// } +// pos += n +// if len(data) < pos+int(l) { +// return 0, fmt.Errorf("fillFromFields buffer too small for hashedKey exp %d got %d", pos+int(l), len(data)) +// } +// cell.downHashedLen = int(l) +// cell.extLen = int(l) +// if l > 0 { +// copy(cell.downHashedKey[:], data[pos:pos+int(l)]) +// copy(cell.extension[:], data[pos:pos+int(l)]) +// pos += int(l) +// } +// } else { +// cell.downHashedLen = 0 +// cell.extLen = 0 +// } +// if fieldBits&AccountPlainPart != 0 { +// l, n := binary.Uvarint(data[pos:]) +// if n == 0 { +// return 0, errors.New("fillFromFields buffer too small for accountPlainKey len") +// } else if n < 0 { +// return 0, errors.New("fillFromFields value overflow for accountPlainKey len") +// } +// pos += n +// if len(data) < pos+int(l) { +// return 0, errors.New("fillFromFields 
buffer too small for accountPlainKey") +// } +// cell.apl = int(l) +// if l > 0 { +// copy(cell.apk[:], data[pos:pos+int(l)]) +// pos += int(l) +// } +// } else { +// cell.apl = 0 +// } +// if fieldBits&StoragePlainPart != 0 { +// l, n := binary.Uvarint(data[pos:]) +// if n == 0 { +// return 0, errors.New("fillFromFields buffer too small for storagePlainKey len") +// } else if n < 0 { +// return 0, errors.New("fillFromFields value overflow for storagePlainKey len") +// } +// pos += n +// if len(data) < pos+int(l) { +// return 0, errors.New("fillFromFields buffer too small for storagePlainKey") +// } +// cell.spl = int(l) +// if l > 0 { +// copy(cell.spk[:], data[pos:pos+int(l)]) +// pos += int(l) +// } +// } else { +// cell.spl = 0 +// } +// if fieldBits&HashPart != 0 { +// l, n := binary.Uvarint(data[pos:]) +// if n == 0 { +// return 0, errors.New("fillFromFields buffer too small for hash len") +// } else if n < 0 { +// return 0, errors.New("fillFromFields value overflow for hash len") +// } +// pos += n +// if len(data) < pos+int(l) { +// return 0, errors.New("fillFromFields buffer too small for hash") +// } +// cell.hl = int(l) +// if l > 0 { +// copy(cell.h[:], data[pos:pos+int(l)]) +// pos += int(l) +// } +// } else { +// cell.hl = 0 +// } +// return pos, nil +//} +// +//func (cell *BinaryCell) setStorage(value []byte) { +// cell.StorageLen = len(value) +// if len(value) > 0 { +// copy(cell.Storage[:], value) +// } +//} +// +//func (cell *BinaryCell) setAccountFields(codeHash []byte, balance *uint256.Int, nonce uint64) { +// copy(cell.CodeHash[:], codeHash) +// +// cell.Balance.SetBytes(balance.Bytes()) +// cell.Nonce = nonce +//} +// +//func (cell *BinaryCell) accountForHashing(buffer []byte, storageRootHash [length.Hash]byte) int { +// balanceBytes := 0 +// if !cell.Balance.LtUint64(128) { +// balanceBytes = cell.Balance.ByteLen() +// } +// +// var nonceBytes int +// if cell.Nonce < 128 && cell.Nonce != 0 { +// nonceBytes = 0 +// } else { +// nonceBytes = common.BitLenToByteLen(bits.Len64(cell.Nonce)) +// } +// +// var structLength = uint(balanceBytes + nonceBytes + 2) +// structLength += 66 // Two 32-byte arrays + 2 prefixes +// +// var pos int +// if structLength < 56 { +// buffer[0] = byte(192 + structLength) +// pos = 1 +// } else { +// lengthBytes := common.BitLenToByteLen(bits.Len(structLength)) +// buffer[0] = byte(247 + lengthBytes) +// +// for i := lengthBytes; i > 0; i-- { +// buffer[i] = byte(structLength) +// structLength >>= 8 +// } +// +// pos = lengthBytes + 1 +// } +// +// // Encoding nonce +// if cell.Nonce < 128 && cell.Nonce != 0 { +// buffer[pos] = byte(cell.Nonce) +// } else { +// buffer[pos] = byte(128 + nonceBytes) +// var nonce = cell.Nonce +// for i := nonceBytes; i > 0; i-- { +// buffer[pos+i] = byte(nonce) +// nonce >>= 8 +// } +// } +// pos += 1 + nonceBytes +// +// // Encoding balance +// if cell.Balance.LtUint64(128) && !cell.Balance.IsZero() { +// buffer[pos] = byte(cell.Balance.Uint64()) +// pos++ +// } else { +// buffer[pos] = byte(128 + balanceBytes) +// pos++ +// cell.Balance.WriteToSlice(buffer[pos : pos+balanceBytes]) +// pos += balanceBytes +// } +// +// // Encoding Root and CodeHash +// buffer[pos] = 128 + 32 +// pos++ +// copy(buffer[pos:], storageRootHash[:]) +// pos += 32 +// buffer[pos] = 128 + 32 +// pos++ +// copy(buffer[pos:], cell.CodeHash[:]) +// pos += 32 +// return pos +//} +// +//func (bph *BinPatriciaHashed) ResetContext(ctx PatriciaContext) {} +// +//func (bph *BinPatriciaHashed) completeLeafHash(buf, keyPrefix []byte, kp, kl, 
compactLen int, key []byte, compact0 byte, ni int, val rlp.RlpSerializable, singleton bool) ([]byte, error) { +// totalLen := kp + kl + val.DoubleRLPLen() +// var lenPrefix [4]byte +// pt := rlp.GenerateStructLen(lenPrefix[:], totalLen) +// embedded := !singleton && totalLen+pt < length.Hash +// var writer io.Writer +// if embedded { +// //bph.byteArrayWriter.Setup(buf) +// bph.auxBuffer.Reset() +// writer = bph.auxBuffer +// } else { +// bph.keccak.Reset() +// writer = bph.keccak +// } +// if _, err := writer.Write(lenPrefix[:pt]); err != nil { +// return nil, err +// } +// if _, err := writer.Write(keyPrefix[:kp]); err != nil { +// return nil, err +// } +// var b [1]byte +// b[0] = compact0 +// if _, err := writer.Write(b[:]); err != nil { +// return nil, err +// } +// for i := 1; i < compactLen; i++ { +// b[0] = key[ni]*16 + key[ni+1] +// if _, err := writer.Write(b[:]); err != nil { +// return nil, err +// } +// ni += 2 +// } +// var prefixBuf [8]byte +// if err := val.ToDoubleRLP(writer, prefixBuf[:]); err != nil { +// return nil, err +// } +// if embedded { +// buf = bph.auxBuffer.Bytes() +// } else { +// var hashBuf [33]byte +// hashBuf[0] = 0x80 + length.Hash +// if _, err := bph.keccak.Read(hashBuf[1:]); err != nil { +// return nil, err +// } +// buf = append(buf, hashBuf[:]...) +// } +// return buf, nil +//} +// +//func (bph *BinPatriciaHashed) leafHashWithKeyVal(buf, key []byte, val rlp.RlpSerializableBytes, singleton bool) ([]byte, error) { +// // Compute the total length of binary representation +// var kp, kl int +// // Write key +// var compactLen int +// var ni int +// var compact0 byte +// compactLen = (len(key)-1)/2 + 1 +// if len(key)&1 == 0 { +// compact0 = 0x30 + key[0] // Odd: (3<<4) + first nibble +// ni = 1 +// } else { +// compact0 = 0x20 +// } +// var keyPrefix [1]byte +// if compactLen > 1 { +// keyPrefix[0] = 0x80 + byte(compactLen) +// kp = 1 +// kl = compactLen +// } else { +// kl = 1 +// } +// return bph.completeLeafHash(buf, keyPrefix[:], kp, kl, compactLen, key, compact0, ni, val, singleton) +//} +// +//func (bph *BinPatriciaHashed) accountLeafHashWithKey(buf, key []byte, val rlp.RlpSerializable) ([]byte, error) { +// // Compute the total length of binary representation +// var kp, kl int +// // Write key +// var compactLen int +// var ni int +// var compact0 byte +// if hasTerm(key) { +// compactLen = (len(key)-1)/2 + 1 +// if len(key)&1 == 0 { +// compact0 = 48 + key[0] // Odd (1<<4) + first nibble +// ni = 1 +// } else { +// compact0 = 32 +// } +// } else { +// compactLen = len(key)/2 + 1 +// if len(key)&1 == 1 { +// compact0 = 16 + key[0] // Odd (1<<4) + first nibble +// ni = 1 +// } +// } +// var keyPrefix [1]byte +// if compactLen > 1 { +// keyPrefix[0] = byte(128 + compactLen) +// kp = 1 +// kl = compactLen +// } else { +// kl = 1 +// } +// return bph.completeLeafHash(buf, keyPrefix[:], kp, kl, compactLen, key, compact0, ni, val, true) +//} +// +//func (bph *BinPatriciaHashed) extensionHash(key []byte, hash []byte) ([length.Hash]byte, error) { +// var hashBuf [length.Hash]byte +// +// // Compute the total length of binary representation +// var kp, kl int +// // Write key +// var compactLen int +// var ni int +// var compact0 byte +// if hasTerm(key) { +// compactLen = (len(key)-1)/2 + 1 +// if len(key)&1 == 0 { +// compact0 = 0x30 + key[0] // Odd: (3<<4) + first nibble +// ni = 1 +// } else { +// compact0 = 0x20 +// } +// } else { +// compactLen = len(key)/2 + 1 +// if len(key)&1 == 1 { +// compact0 = 0x10 + key[0] // Odd: (1<<4) + first nibble +// 
ni = 1 +// } +// } +// var keyPrefix [1]byte +// if compactLen > 1 { +// keyPrefix[0] = 0x80 + byte(compactLen) +// kp = 1 +// kl = compactLen +// } else { +// kl = 1 +// } +// totalLen := kp + kl + 33 +// var lenPrefix [4]byte +// pt := rlp.GenerateStructLen(lenPrefix[:], totalLen) +// bph.keccak.Reset() +// if _, err := bph.keccak.Write(lenPrefix[:pt]); err != nil { +// return hashBuf, err +// } +// if _, err := bph.keccak.Write(keyPrefix[:kp]); err != nil { +// return hashBuf, err +// } +// var b [1]byte +// b[0] = compact0 +// if _, err := bph.keccak.Write(b[:]); err != nil { +// return hashBuf, err +// } +// for i := 1; i < compactLen; i++ { +// b[0] = key[ni]*16 + key[ni+1] +// if _, err := bph.keccak.Write(b[:]); err != nil { +// return hashBuf, err +// } +// ni += 2 +// } +// b[0] = 0x80 + length.Hash +// if _, err := bph.keccak.Write(b[:]); err != nil { +// return hashBuf, err +// } +// if _, err := bph.keccak.Write(hash); err != nil { +// return hashBuf, err +// } +// // Replace previous hash with the new one +// if _, err := bph.keccak.Read(hashBuf[:]); err != nil { +// return hashBuf, err +// } +// return hashBuf, nil +//} +// +//func (bph *BinPatriciaHashed) computeBinaryCellHashLen(cell *BinaryCell, depth int) int { +// if cell.spl > 0 && depth >= halfKeySize { +// keyLen := 128 - depth + 1 // Length of hex key with terminator character +// var kp, kl int +// compactLen := (keyLen-1)/2 + 1 +// if compactLen > 1 { +// kp = 1 +// kl = compactLen +// } else { +// kl = 1 +// } +// val := rlp.RlpSerializableBytes(cell.Storage[:cell.StorageLen]) +// totalLen := kp + kl + val.DoubleRLPLen() +// var lenPrefix [4]byte +// pt := rlp.GenerateStructLen(lenPrefix[:], totalLen) +// if totalLen+pt < length.Hash { +// return totalLen + pt +// } +// } +// return length.Hash + 1 +//} +// +//func (bph *BinPatriciaHashed) computeBinaryCellHash(cell *BinaryCell, depth int, buf []byte) ([]byte, error) { +// var err error +// var storageRootHash [length.Hash]byte +// storageRootHashIsSet := false +// if cell.spl > 0 { +// var hashedKeyOffset int +// if depth >= halfKeySize { +// hashedKeyOffset = depth - halfKeySize +// } +// singleton := depth <= halfKeySize +// if err := binHashKey(bph.keccak, cell.spk[bph.accountKeyLen:cell.spl], cell.downHashedKey[:], hashedKeyOffset); err != nil { +// return nil, err +// } +// cell.downHashedKey[halfKeySize-hashedKeyOffset] = 16 // Add terminator +// if singleton { +// if bph.trace { +// fmt.Printf("leafHashWithKeyVal(singleton) for [%x]=>[%x]\n", cell.downHashedKey[:halfKeySize-hashedKeyOffset+1], cell.Storage[:cell.StorageLen]) +// } +// aux := make([]byte, 0, 33) +// if aux, err = bph.leafHashWithKeyVal(aux, cell.downHashedKey[:halfKeySize-hashedKeyOffset+1], cell.Storage[:cell.StorageLen], true); err != nil { +// return nil, err +// } +// storageRootHash = *(*[length.Hash]byte)(aux[1:]) +// storageRootHashIsSet = true +// } else { +// if bph.trace { +// fmt.Printf("leafHashWithKeyVal for [%x]=>[%x]\n", cell.downHashedKey[:halfKeySize-hashedKeyOffset+1], cell.Storage[:cell.StorageLen]) +// } +// return bph.leafHashWithKeyVal(buf, cell.downHashedKey[:halfKeySize-hashedKeyOffset+1], cell.Storage[:cell.StorageLen], false) +// } +// } +// if cell.apl > 0 { +// if err := binHashKey(bph.keccak, cell.apk[:cell.apl], cell.downHashedKey[:], depth); err != nil { +// return nil, err +// } +// cell.downHashedKey[halfKeySize-depth] = 16 // Add terminator +// if !storageRootHashIsSet { +// if cell.extLen > 0 { +// // Extension +// if cell.hl > 0 { +// if bph.trace { +// 
fmt.Printf("extensionHash for [%x]=>[%x]\n", cell.extension[:cell.extLen], cell.h[:cell.hl]) +// } +// if storageRootHash, err = bph.extensionHash(cell.extension[:cell.extLen], cell.h[:cell.hl]); err != nil { +// return nil, err +// } +// } else { +// return nil, errors.New("computeBinaryCellHash extension without hash") +// } +// } else if cell.hl > 0 { +// storageRootHash = cell.h +// } else { +// storageRootHash = *(*[length.Hash]byte)(EmptyRootHash) +// } +// } +// var valBuf [128]byte +// valLen := cell.accountForHashing(valBuf[:], storageRootHash) +// if bph.trace { +// fmt.Printf("accountLeafHashWithKey for [%x]=>[%x]\n", cell.downHashedKey[:halfKeySize+1-depth], rlp.RlpEncodedBytes(valBuf[:valLen])) +// } +// return bph.accountLeafHashWithKey(buf, cell.downHashedKey[:halfKeySize+1-depth], rlp.RlpEncodedBytes(valBuf[:valLen])) +// } +// buf = append(buf, 0x80+32) +// if cell.extLen > 0 { +// // Extension +// if cell.hl > 0 { +// if bph.trace { +// fmt.Printf("extensionHash for [%x]=>[%x]\n", cell.extension[:cell.extLen], cell.h[:cell.hl]) +// } +// var hash [length.Hash]byte +// if hash, err = bph.extensionHash(cell.extension[:cell.extLen], cell.h[:cell.hl]); err != nil { +// return nil, err +// } +// buf = append(buf, hash[:]...) +// } else { +// return nil, errors.New("computeBinaryCellHash extension without hash") +// } +// } else if cell.hl > 0 { +// buf = append(buf, cell.h[:cell.hl]...) +// } else { +// buf = append(buf, EmptyRootHash...) +// } +// return buf, nil +//} +// +//func (bph *BinPatriciaHashed) needUnfolding(hashedKey []byte) int { +// var cell *BinaryCell +// var depth int +// if bph.activeRows == 0 { +// if bph.trace { +// fmt.Printf("needUnfolding root, rootChecked = %t\n", bph.rootChecked) +// } +// if bph.rootChecked && bph.root.downHashedLen == 0 && bph.root.hl == 0 { +// // Previously checked, empty root, no unfolding needed +// return 0 +// } +// cell = &bph.root +// if cell.downHashedLen == 0 && cell.hl == 0 && !bph.rootChecked { +// // Need to attempt to unfold the root +// return 1 +// } +// } else { +// col := int(hashedKey[bph.currentKeyLen]) +// cell = &bph.grid[bph.activeRows-1][col] +// depth = bph.depths[bph.activeRows-1] +// if bph.trace { +// fmt.Printf("needUnfolding cell (%d, %x), currentKey=[%x], depth=%d, cell.h=[%x]\n", bph.activeRows-1, col, bph.currentKey[:bph.currentKeyLen], depth, cell.h[:cell.hl]) +// } +// } +// if len(hashedKey) <= depth { +// return 0 +// } +// if cell.downHashedLen == 0 { +// if cell.hl == 0 { +// // cell is empty, no need to unfold further +// return 0 +// } +// // unfold branch node +// return 1 +// } +// cpl := commonPrefixLen(hashedKey[depth:], cell.downHashedKey[:cell.downHashedLen-1]) +// if bph.trace { +// fmt.Printf("cpl=%d, cell.downHashedKey=[%x], depth=%d, hashedKey[depth:]=[%x]\n", cpl, cell.downHashedKey[:cell.downHashedLen], depth, hashedKey[depth:]) +// } +// unfolding := cpl + 1 +// if depth < halfKeySize && depth+unfolding > halfKeySize { +// // This is to make sure that unfolding always breaks at the level where storage subtrees start +// unfolding = halfKeySize - depth +// if bph.trace { +// fmt.Printf("adjusted unfolding=%d\n", unfolding) +// } +// } +// return unfolding +//} +// +//// unfoldBranchNode returns true if unfolding has been done +//func (bph *BinPatriciaHashed) unfoldBranchNode(row int, deleted bool, depth int) (bool, error) { +// branchData, _, err := bph.ctx.GetBranch(binToCompact(bph.currentKey[:bph.currentKeyLen])) +// if err != nil { +// return false, err +// } +// if 
len(branchData) >= 2 {
+// branchData = branchData[2:] // skip touch map, keep after map and the rest
+// }
+// if !bph.rootChecked && bph.currentKeyLen == 0 && len(branchData) == 0 {
+// // Special case - empty or deleted root
+// bph.rootChecked = true
+// return false, nil
+// }
+// if len(branchData) == 0 {
+// log.Warn("got empty branch data during unfold", "row", row, "depth", depth, "deleted", deleted)
+// }
+// bph.branchBefore[row] = true
+// bitmap := binary.BigEndian.Uint16(branchData[0:])
+// pos := 2
+// if deleted {
+// // All cells come as deleted (touched but not present after)
+// bph.afterMap[row] = 0
+// bph.touchMap[row] = bitmap
+// } else {
+// bph.afterMap[row] = bitmap
+// bph.touchMap[row] = 0
+// }
+// //fmt.Printf("unfoldBranchNode [%x], afterMap = [%016b], touchMap = [%016b]\n", branchData, bph.afterMap[row], bph.touchMap[row])
+// // Loop iterating over the set bits of bitmap
+// for bitset, j := bitmap, 0; bitset != 0; j++ {
+// bit := bitset & -bitset
+// nibble := bits.TrailingZeros16(bit)
+// cell := &bph.grid[row][nibble]
+// fieldBits := branchData[pos]
+// pos++
+// var err error
+// if pos, err = cell.fillFromFields(branchData, pos, PartFlags(fieldBits)); err != nil {
+// return false, fmt.Errorf("prefix [%x], branchData[%x]: %w", bph.currentKey[:bph.currentKeyLen], branchData, err)
+// }
+// if bph.trace {
+// fmt.Printf("cell (%d, %x) depth=%d, hash=[%x], a=[%x], s=[%x], ex=[%x]\n", row, nibble, depth, cell.h[:cell.hl], cell.apk[:cell.apl], cell.spk[:cell.spl], cell.extension[:cell.extLen])
+// }
+// if cell.apl > 0 {
+// if err := bph.accountFn(cell.apk[:cell.apl], cell); err != nil {
+// return false, err
+// }
+// if bph.trace {
+// fmt.Printf("GetAccount[%x] return balance=%d, nonce=%d code=%x\n", cell.apk[:cell.apl], &cell.Balance, cell.Nonce, cell.CodeHash[:])
+// }
+// }
+// if cell.spl > 0 {
+// if err := bph.storageFn(cell.spk[:cell.spl], cell); err != nil {
+// return false, err
+// }
+// }
+// if err = cell.deriveHashedKeys(depth, bph.keccak, bph.accountKeyLen); err != nil {
+// return false, err
+// }
+// bitset ^= bit
+// }
+// return true, nil
+//}
+//
+//func (bph *BinPatriciaHashed) unfold(hashedKey []byte, unfolding int) error {
+// if bph.trace {
+// fmt.Printf("unfold %d: activeRows: %d\n", unfolding, bph.activeRows)
+// }
+// var upCell *BinaryCell
+// var touched, present bool
+// var col byte
+// var upDepth, depth int
+// if bph.activeRows == 0 {
+// if bph.rootChecked && bph.root.hl == 0 && bph.root.downHashedLen == 0 {
+// // No unfolding for empty root
+// return nil
+// }
+// upCell = &bph.root
+// touched = bph.rootTouched
+// present = bph.rootPresent
+// if bph.trace {
+// fmt.Printf("unfold root, touched %t, present %t, column %d\n", touched, present, col)
+// }
+// } else {
+// upDepth = bph.depths[bph.activeRows-1]
+// col = hashedKey[upDepth-1]
+// upCell = &bph.grid[bph.activeRows-1][col]
+// touched = bph.touchMap[bph.activeRows-1]&(uint16(1)<<col) != 0
+// present = bph.afterMap[bph.activeRows-1]&(uint16(1)<<col) != 0
+// if bph.trace {
+// fmt.Printf("unfold upCell (%d, %x), touched %t, present %t\n", bph.activeRows-1, col, touched, present)
+// }
+// bph.currentKey[bph.currentKeyLen] = col
+// bph.currentKeyLen++
+// }
+// row := bph.activeRows
+// for i := 0; i < maxChild; i++ {
+// bph.grid[row][i].fillEmpty()
+// }
+// bph.touchMap[row] = 0
+// bph.afterMap[row] = 0
+// bph.branchBefore[row] = false
+// if upCell.downHashedLen == 0 {
+// depth = upDepth + 1
+// if unfolded, err := bph.unfoldBranchNode(row, touched && !present, depth); err != nil {
+// return err
+// } else if !unfolded {
+// // Return here to prevent activeRow from being incremented
+// return nil
+// }
+// } else if upCell.downHashedLen >= unfolding {
+// depth = upDepth + unfolding
+// nibble := upCell.downHashedKey[unfolding-1]
+// if touched {
+// bph.touchMap[row] = uint16(1) << nibble
+// }
+// if present {
+// bph.afterMap[row] = uint16(1) << nibble
+// }
+// cell := &bph.grid[row][nibble]
+// cell.fillFromUpperCell(upCell, depth, unfolding)
+// if bph.trace {
+// fmt.Printf("cell (%d, %x) depth=%d\n", row, nibble, depth)
+// }
+// if row >= halfKeySize {
+// cell.apl = 0
+// }
+// if unfolding > 1 {
+// copy(bph.currentKey[bph.currentKeyLen:], upCell.downHashedKey[:unfolding-1])
+// }
+// bph.currentKeyLen += unfolding - 1
+// } else {
+// // upCell.downHashedLen < unfolding
+// depth = upDepth + upCell.downHashedLen
+// nibble := upCell.downHashedKey[upCell.downHashedLen-1]
+// if touched {
+// bph.touchMap[row] = uint16(1) << nibble
+// }
+// if present {
+// bph.afterMap[row] = uint16(1) << nibble
+// }
+// cell := &bph.grid[row][nibble]
+// cell.fillFromUpperCell(upCell, depth, upCell.downHashedLen)
+// if bph.trace {
+// fmt.Printf("cell (%d, %x) depth=%d\n", row, nibble, depth)
+// }
+// if row >= halfKeySize {
+// cell.apl = 0
+// }
+// if upCell.downHashedLen > 1 {
+// copy(bph.currentKey[bph.currentKeyLen:], upCell.downHashedKey[:upCell.downHashedLen-1])
+// }
+// bph.currentKeyLen += upCell.downHashedLen - 1
+// }
+// bph.depths[bph.activeRows] = depth
+// bph.activeRows++
+// return nil
+//}
+//
+//func (bph *BinPatriciaHashed) needFolding(hashedKey []byte) bool {
+// return !bytes.HasPrefix(hashedKey, bph.currentKey[:bph.currentKeyLen])
+//}
+//
+//// The purpose of fold is to reduce bph.currentKey[:bph.currentKeyLen]. It should be invoked
+//// until that current key becomes a prefix of the hashedKey that we will process next
+//// (in other words, until the needFolding function returns false)
+//func (bph *BinPatriciaHashed) fold() (err error) {
+// updateKeyLen := bph.currentKeyLen
+// if bph.activeRows == 0 {
+// return errors.New("cannot fold - no active rows")
+// }
+// if bph.trace {
+// fmt.Printf("fold: activeRows: %d, currentKey: [%x], touchMap: %016b, afterMap: %016b\n", bph.activeRows, bph.currentKey[:bph.currentKeyLen], bph.touchMap[bph.activeRows-1], bph.afterMap[bph.activeRows-1])
+// }
+// // Move information to the row above
+// row := bph.activeRows - 1
+// var upBinaryCell *BinaryCell
+// var col int
+// var upDepth int
+// if bph.activeRows == 1 {
+// if bph.trace {
+// fmt.Printf("upcell is root\n")
+// }
+// upBinaryCell = &bph.root
+// } else {
+// upDepth = bph.depths[bph.activeRows-2]
+// col = int(bph.currentKey[upDepth-1])
+// if bph.trace {
+// fmt.Printf("upcell is (%d x %x), upDepth=%d\n", row-1, col, upDepth)
+// }
+// upBinaryCell = &bph.grid[row-1][col]
+// }
+//
+// depth := bph.depths[bph.activeRows-1]
+// updateKey := binToCompact(bph.currentKey[:updateKeyLen])
+// partsCount := bits.OnesCount16(bph.afterMap[row])
+//
+// if bph.trace {
+// fmt.Printf("touchMap[%d]=%016b, afterMap[%d]=%016b\n", row, bph.touchMap[row], row, bph.afterMap[row])
+// }
+// switch partsCount {
+// case 0:
+// // Everything deleted
+// if bph.touchMap[row] != 0 {
+// if row == 0 {
+// // Root is deleted because the tree is empty
+// bph.rootTouched = true
+// bph.rootPresent = false
+// } else if upDepth == halfKeySize {
+// // Special case - all storage items of an account have been deleted, but it does not automatically delete the account, just makes it empty storage
+// // Therefore we are not propagating deletion upwards, but turn it into a modification
+// bph.touchMap[row-1] |= uint16(1) << col
+// } else {
+// // Deletion is propagated upwards
+// bph.touchMap[row-1] |= uint16(1) << col
+// bph.afterMap[row-1] &^= uint16(1) << col
+// }
+// }
+// upBinaryCell.hl = 0
+// upBinaryCell.apl = 0
+// upBinaryCell.spl = 0
+// upBinaryCell.extLen = 0
+// upBinaryCell.downHashedLen = 0
+// if bph.branchBefore[row] {
+// _, err = bph.branchEncoder.CollectUpdate(bph.ctx, updateKey, 0, bph.touchMap[row], 0, RetrieveCellNoop)
+// if err != nil {
+// return fmt.Errorf("failed to encode leaf node update: %w", err)
+// }
+// }
+// bph.activeRows--
+// if upDepth > 0 {
+// bph.currentKeyLen = upDepth - 1
+// } else {
+// bph.currentKeyLen = 0
+// }
+// case 1:
+// // Leaf or extension node
+// if bph.touchMap[row] != 0 {
+// // any modifications
+// if row == 0 {
+// bph.rootTouched = true
+// } else {
+// // Modification is propagated upwards
+// bph.touchMap[row-1] |= uint16(1) << col
+// }
+// }
+// nibble := bits.TrailingZeros16(bph.afterMap[row])
+// cell := &bph.grid[row][nibble]
+// upBinaryCell.extLen = 0
+// upBinaryCell.fillFromLowerBinaryCell(cell, depth, bph.currentKey[upDepth:bph.currentKeyLen], nibble)
+// // Delete if it existed
+// if bph.branchBefore[row] {
+// _, err = bph.branchEncoder.CollectUpdate(bph.ctx, updateKey, 0, bph.touchMap[row], 0, RetrieveCellNoop)
+// if err != nil {
+// return fmt.Errorf("failed to encode leaf node update: %w", err)
+// }
+// }
+// bph.activeRows--
+// if upDepth > 0 {
+// bph.currentKeyLen = upDepth - 1
+// } else {
+// bph.currentKeyLen = 0
+// }
+// default:
+// // Branch node
+// if bph.touchMap[row] != 0 {
+// // any modifications
+// if row == 0 {
+// bph.rootTouched = true
+// } else {
+// // Modification is propagated upwards
+// bph.touchMap[row-1] |= uint16(1) << col
+// }
+// }
+// bitmap := bph.touchMap[row] & bph.afterMap[row]
+// if !bph.branchBefore[row] {
+// // There was no branch node before, so we need to touch even the singular child that existed
+// bph.touchMap[row] |= bph.afterMap[row]
+// bitmap |= bph.afterMap[row]
+// }
+// // Calculate total length of all hashes
+// totalBranchLen := 17 - partsCount // For every empty cell, one byte
+// for bitset, j := bph.afterMap[row], 0; bitset != 0; j++ {
+// bit := bitset & -bitset
+// nibble := bits.TrailingZeros16(bit)
+// cell := &bph.grid[row][nibble]
+// totalBranchLen += bph.computeBinaryCellHashLen(cell, depth)
+// bitset ^= bit
+// }
+//
+// bph.keccak2.Reset()
+// pt := rlp.GenerateStructLen(bph.hashAuxBuffer[:], totalBranchLen)
+// if _, err := bph.keccak2.Write(bph.hashAuxBuffer[:pt]); err != nil {
+// return err
+// }
+//
+// b := [...]byte{0x80}
+// cellGetter := func(nibble int, skip bool) (*Cell, error) {
+// if skip {
+// if _, err := bph.keccak2.Write(b[:]); err != nil {
+// return nil, fmt.Errorf("failed to write empty nibble to hash: %w", err)
+// }
+// if bph.trace {
+// fmt.Printf("%x: empty(%d,%x)\n", nibble, row, nibble)
+// }
+// return nil, nil
+// }
+// cell := &bph.grid[row][nibble]
+// cellHash, err := bph.computeBinaryCellHash(cell, depth, bph.hashAuxBuffer[:0])
+// if err != nil {
+// return nil, err
+// }
+// if bph.trace {
+// fmt.Printf("%x: computeBinaryCellHash(%d,%x,depth=%d)=[%x]\n", nibble, row, nibble, depth, cellHash)
+// }
+// if _, err := bph.keccak2.Write(cellHash); err != nil {
+// return nil, err
+// }
+//
+// // TODO extension and downHashedKey should be encoded to hex format and vice versa, data loss due to array sizes
+// return cell.unwrapToHexCell(), nil
+// }
+//
+// var lastNibble int
+// var err error
+// _ = cellGetter
+//
+// lastNibble, err = bph.branchEncoder.CollectUpdate(bph.ctx, updateKey, bitmap, bph.touchMap[row], bph.afterMap[row], cellGetter)
+// if err != nil {
+// return fmt.Errorf("failed to encode branch update: %w", err)
+// }
+// for i := lastNibble; i <= maxChild; i++ {
+// if _, err := bph.keccak2.Write(b[:]); err != nil {
+// return err
+// }
+// if bph.trace {
+// fmt.Printf("%x: empty(%d,%x)\n", i, row, i)
+// }
+// }
+// upBinaryCell.extLen = depth - upDepth - 1
+// upBinaryCell.downHashedLen = upBinaryCell.extLen
+// if upBinaryCell.extLen > 0 {
+// 
copy(upBinaryCell.extension[:], bph.currentKey[upDepth:bph.currentKeyLen]) +// copy(upBinaryCell.downHashedKey[:], bph.currentKey[upDepth:bph.currentKeyLen]) +// } +// if depth < halfKeySize { +// upBinaryCell.apl = 0 +// } +// upBinaryCell.spl = 0 +// upBinaryCell.hl = 32 +// if _, err := bph.keccak2.Read(upBinaryCell.h[:]); err != nil { +// return err +// } +// if bph.trace { +// fmt.Printf("} [%x]\n", upBinaryCell.h[:]) +// } +// bph.activeRows-- +// if upDepth > 0 { +// bph.currentKeyLen = upDepth - 1 +// } else { +// bph.currentKeyLen = 0 +// } +// } +// return nil +//} +// +//func (bph *BinPatriciaHashed) deleteBinaryCell(hashedKey []byte) { +// if bph.trace { +// fmt.Printf("deleteBinaryCell, activeRows = %d\n", bph.activeRows) +// } +// var cell *BinaryCell +// if bph.activeRows == 0 { +// // Remove the root +// cell = &bph.root +// bph.rootTouched = true +// bph.rootPresent = false +// } else { +// row := bph.activeRows - 1 +// if bph.depths[row] < len(hashedKey) { +// if bph.trace { +// fmt.Printf("deleteBinaryCell skipping spurious delete depth=%d, len(hashedKey)=%d\n", bph.depths[row], len(hashedKey)) +// } +// return +// } +// col := int(hashedKey[bph.currentKeyLen]) +// cell = &bph.grid[row][col] +// if bph.afterMap[row]&(uint16(1)< 0; unfolding = bph.needUnfolding(hashedKey) { +// if err := bph.unfold(hashedKey, unfolding); err != nil { +// return nil, fmt.Errorf("unfold: %w", err) +// } +// } +// +// // Update the cell +// stagedBinaryCell.fillEmpty() +// if len(plainKey) == bph.accountKeyLen { +// if err := bph.accountFn(plainKey, stagedBinaryCell); err != nil { +// return nil, fmt.Errorf("GetAccount for key %x failed: %w", plainKey, err) +// } +// if !stagedBinaryCell.Delete { +// cell := bph.updateBinaryCell(plainKey, hashedKey) +// cell.setAccountFields(stagedBinaryCell.CodeHash[:], &stagedBinaryCell.Balance, stagedBinaryCell.Nonce) +// +// if bph.trace { +// fmt.Printf("GetAccount reading key %x => balance=%d nonce=%v codeHash=%x\n", cell.apk, &cell.Balance, cell.Nonce, cell.CodeHash) +// } +// } +// } else { +// if err = bph.storageFn(plainKey, stagedBinaryCell); err != nil { +// return nil, fmt.Errorf("GetStorage for key %x failed: %w", plainKey, err) +// } +// if !stagedBinaryCell.Delete { +// bph.updateBinaryCell(plainKey, hashedKey).setStorage(stagedBinaryCell.Storage[:stagedBinaryCell.StorageLen]) +// if bph.trace { +// fmt.Printf("GetStorage reading key %x => %x\n", plainKey, stagedBinaryCell.Storage[:stagedBinaryCell.StorageLen]) +// } +// } +// } +// +// if stagedBinaryCell.Delete { +// if bph.trace { +// fmt.Printf("delete cell %x hash %x\n", plainKey, hashedKey) +// } +// bph.deleteBinaryCell(hashedKey) +// } +// } +// // Folding everything up to the root +// for bph.activeRows > 0 { +// if err := bph.fold(); err != nil { +// return nil, fmt.Errorf("final fold: %w", err) +// } +// } +// +// rootHash, err = bph.RootHash() +// if err != nil { +// return nil, fmt.Errorf("root hash evaluation failed: %w", err) +// } +// err = bph.branchEncoder.Load(bph.ctx, etl.TransformArgs{Quit: ctx.Done()}) +// if err != nil { +// return nil, fmt.Errorf("branch update failed: %w", err) +// } +// return rootHash, nil +//} +// +//func (bph *BinPatriciaHashed) SetTrace(trace bool) { bph.trace = trace } +// +//func (bph *BinPatriciaHashed) Variant() TrieVariant { return VariantBinPatriciaTrie } +// +//// Reset allows BinPatriciaHashed instance to be reused for the new commitment calculation +//func (bph *BinPatriciaHashed) Reset() { +// bph.rootChecked = false +// bph.root.hl = 0 
+// bph.root.downHashedLen = 0
+// bph.root.apl = 0
+// bph.root.spl = 0
+// bph.root.extLen = 0
+// copy(bph.root.CodeHash[:], EmptyCodeHash)
+// bph.root.StorageLen = 0
+// bph.root.Balance.Clear()
+// bph.root.Nonce = 0
+// bph.rootTouched = false
+// bph.rootPresent = true
+//}
+//
+//func (c *BinaryCell) bytes() []byte {
+// var pos = 1
+// size := 1 + 1 + c.hl + 1 + c.apl + 1 + c.spl + 1 + c.downHashedLen + 1 + c.extLen // max size: flags byte plus one length byte per field
+// buf := make([]byte, size)
+//
+// var flags uint8
+// if c.hl != 0 {
+// flags |= 1
+// buf[pos] = byte(c.hl)
+// pos++
+// copy(buf[pos:pos+c.hl], c.h[:])
+// pos += c.hl
+// }
+// if c.apl != 0 {
+// flags |= 2
+// buf[pos] = byte(c.apl)
+// pos++
+// copy(buf[pos:pos+c.apl], c.apk[:])
+// pos += c.apl
+// }
+// if c.spl != 0 {
+// flags |= 4
+// buf[pos] = byte(c.spl)
+// pos++
+// copy(buf[pos:pos+c.spl], c.spk[:])
+// pos += c.spl
+// }
+// if c.downHashedLen != 0 {
+// flags |= 8
+// buf[pos] = byte(c.downHashedLen)
+// pos++
+// copy(buf[pos:pos+c.downHashedLen], c.downHashedKey[:])
+// pos += c.downHashedLen
+// }
+// if c.extLen != 0 {
+// flags |= 16
+// buf[pos] = byte(c.extLen)
+// pos++
+// copy(buf[pos:pos+c.extLen], c.extension[:])
+// //pos += c.extLen
+// }
+// buf[0] = flags
+// return buf
+//}
+//
+//func (c *BinaryCell) decodeBytes(buf []byte) error {
+// if len(buf) < 1 {
+// return errors.New("invalid buffer size to contain BinaryCell (at least 1 byte expected)")
+// }
+// c.fillEmpty()
+//
+// var pos int
+// flags := buf[pos]
+// pos++
+//
+// if flags&1 != 0 {
+// c.hl = int(buf[pos])
+// pos++
+// copy(c.h[:], buf[pos:pos+c.hl])
+// pos += c.hl
+// }
+// if flags&2 != 0 {
+// c.apl = int(buf[pos])
+// pos++
+// copy(c.apk[:], buf[pos:pos+c.apl])
+// pos += c.apl
+// }
+// if flags&4 != 0 {
+// c.spl = int(buf[pos])
+// pos++
+// copy(c.spk[:], buf[pos:pos+c.spl])
+// pos += c.spl
+// }
+// if flags&8 != 0 {
+// c.downHashedLen = int(buf[pos])
+// pos++
+// copy(c.downHashedKey[:], buf[pos:pos+c.downHashedLen])
+// pos += c.downHashedLen
+// }
+// if flags&16 != 0 {
+// c.extLen = int(buf[pos])
+// pos++
+// copy(c.extension[:], buf[pos:pos+c.extLen])
+// //pos += c.extLen
+// }
+// return nil
+//}
+//
+//// Encode current state of bph into bytes
+//func (bph *BinPatriciaHashed) EncodeCurrentState(buf []byte) ([]byte, error) {
+// s := binState{
+// CurrentKeyLen: int16(bph.currentKeyLen),
+// RootChecked: bph.rootChecked,
+// RootTouched: bph.rootTouched,
+// RootPresent: bph.rootPresent,
+// Root: make([]byte, 0),
+// }
+//
+// s.Root = bph.root.bytes()
+// copy(s.CurrentKey[:], bph.currentKey[:])
+// copy(s.Depths[:], bph.depths[:])
+// copy(s.BranchBefore[:], bph.branchBefore[:])
+// copy(s.TouchMap[:], bph.touchMap[:])
+// copy(s.AfterMap[:], bph.afterMap[:])
+//
+// return s.Encode(buf)
+//}
+//
+//// buf expected to be encoded bph state. Decode state and set up bph to that state.
+//func (bph *BinPatriciaHashed) SetState(buf []byte) error {
+// if bph.activeRows != 0 {
+// return errors.New("has active rows, could not reset state")
+// }
+//
+// var s binState
+// if err := s.Decode(buf); err != nil {
+// return err
+// }
+//
+// bph.Reset()
+//
+// if err := bph.root.decodeBytes(s.Root); err != nil {
+// return err
+// }
+//
+// bph.rootChecked = s.RootChecked
+// bph.rootTouched = s.RootTouched
+// bph.rootPresent = s.RootPresent
+//
+// copy(bph.depths[:], s.Depths[:])
+// copy(bph.branchBefore[:], s.BranchBefore[:])
+// copy(bph.touchMap[:], s.TouchMap[:])
+// copy(bph.afterMap[:], s.AfterMap[:])
+//
+// return nil
+//}
+//
+//func (bph *BinPatriciaHashed) ProcessTree(ctx context.Context, t *UpdateTree, lp string) (rootHash []byte, err error) {
+// panic("not implemented")
+//}
+//
+//func (bph *BinPatriciaHashed) ProcessUpdates(ctx context.Context, plainKeys [][]byte, updates []Update) (rootHash []byte, err error) {
+// for i, pk := range plainKeys {
+// updates[i].hashedKey = hexToBin(pk)
+// updates[i].plainKey = pk
+// }
+//
+// sort.Slice(updates, func(i, j int) bool {
+// return bytes.Compare(updates[i].hashedKey, updates[j].hashedKey) < 0
+// })
+//
+// for i, plainKey := range plainKeys {
+// select {
+// case <-ctx.Done():
+// return nil, ctx.Err()
+// default:
+// }
+// update := updates[i]
+// if bph.trace {
+// fmt.Printf("plainKey=[%x], hashedKey=[%x], currentKey=[%x]\n", update.plainKey, update.hashedKey, bph.currentKey[:bph.currentKeyLen])
+// }
+// // Keep folding until the currentKey is the prefix of the key we modify
+// for bph.needFolding(update.hashedKey) {
+// if err := bph.fold(); err != nil {
+// return nil, fmt.Errorf("fold: %w", err)
+// }
+// }
+// // Now unfold until we step on an empty cell
+// for unfolding := bph.needUnfolding(update.hashedKey); unfolding > 0; unfolding = bph.needUnfolding(update.hashedKey) {
+// if err := bph.unfold(update.hashedKey, unfolding); err != nil {
+// return nil, fmt.Errorf("unfold: %w", err)
+// }
+// }
+//
+// // Update the cell
+// if update.Flags == DeleteUpdate {
+// bph.deleteBinaryCell(update.hashedKey)
+// if bph.trace {
+// fmt.Printf("key %x deleted\n", update.plainKey)
+// }
+// } else {
+// cell := bph.updateBinaryCell(update.plainKey, update.hashedKey)
+// if bph.trace {
+// fmt.Printf("GetAccount updated key %x =>", plainKey)
+// }
+// if update.Flags&BalanceUpdate != 0 {
+// if bph.trace {
+// fmt.Printf(" balance=%d", &update.Balance)
+// }
+// cell.Balance.Set(&update.Balance)
+// }
+// if update.Flags&NonceUpdate != 0 {
+// if bph.trace {
+// fmt.Printf(" nonce=%d", update.Nonce)
+// }
+// cell.Nonce = update.Nonce
+// }
+// if update.Flags&CodeUpdate != 0 {
+// if bph.trace {
+// fmt.Printf(" codeHash=%x", update.CodeHash)
+// }
+// copy(cell.CodeHash[:], update.CodeHash[:])
+// }
+// if bph.trace {
+// fmt.Printf("\n")
+// }
+// if update.Flags&StorageUpdate != 0 {
+// cell.setStorage(update.Storage[:update.StorageLen])
+// if bph.trace {
+// fmt.Printf("GetStorage filled key %x => %x\n", plainKey, update.Storage[:update.StorageLen])
+// }
+// }
+// }
+// }
+// // Folding everything up to the root
+// for bph.activeRows > 0 {
+// if err := bph.fold(); err != nil {
+// return nil, fmt.Errorf("final fold: %w", err)
+// }
+// }
+//
+// rootHash, err = bph.RootHash()
+// if err != nil {
+// return nil, fmt.Errorf("root hash evaluation failed: %w", err)
+// }
+//
+// err = bph.branchEncoder.Load(bph.ctx, etl.TransformArgs{Quit: ctx.Done()})
+// if err != nil {
+// return nil,
fmt.Errorf("branch update failed: %w", err) +// } +// +// return rootHash, nil +//} +// +//// Hashes provided key and expands resulting hash into nibbles (each byte split into two nibbles by 4 bits) +//func (bph *BinPatriciaHashed) hashAndNibblizeKey2(key []byte) []byte { //nolint +// hashedKey := make([]byte, length.Hash) +// +// bph.keccak.Reset() +// bph.keccak.Write(key[:length.Addr]) +// bph.keccak.Read(hashedKey[:length.Hash]) +// +// if len(key[length.Addr:]) > 0 { +// hashedKey = append(hashedKey, make([]byte, length.Hash)...) +// bph.keccak.Reset() +// bph.keccak.Write(key[length.Addr:]) +// bph.keccak.Read(hashedKey[length.Hash:]) +// } +// +// nibblized := make([]byte, len(hashedKey)*2) +// for i, b := range hashedKey { +// nibblized[i*2] = (b >> 4) & 0xf +// nibblized[i*2+1] = b & 0xf +// } +// return nibblized +//} +// +//func binHashKey(keccak keccakState, plainKey []byte, dest []byte, hashedKeyOffset int) error { +// keccak.Reset() +// var hashBufBack [length.Hash]byte +// hashBuf := hashBufBack[:] +// if _, err := keccak.Write(plainKey); err != nil { +// return err +// } +// if _, err := keccak.Read(hashBuf); err != nil { +// return err +// } +// for k := hashedKeyOffset; k < 256; k++ { +// if hashBuf[k/8]&(1<<(7-k%8)) == 0 { +// dest[k-hashedKeyOffset] = 0 +// } else { +// dest[k-hashedKeyOffset] = 1 +// } +// } +// return nil +//} +// +//func wrapAccountStorageFn(fn func([]byte, *Cell) error) func(pk []byte, bc *BinaryCell) error { +// return func(pk []byte, bc *BinaryCell) error { +// cl := bc.unwrapToHexCell() +// +// if err := fn(pk, cl); err != nil { +// return err +// } +// +// bc.Balance = *cl.Balance.Clone() +// bc.Nonce = cl.Nonce +// bc.StorageLen = cl.StorageLen +// bc.apl = cl.accountPlainKeyLen +// bc.spl = cl.storagePlainKeyLen +// bc.hl = cl.hashLen +// copy(bc.apk[:], cl.accountPlainKey[:]) +// copy(bc.spk[:], cl.storagePlainKey[:]) +// copy(bc.h[:], cl.hash[:]) +// +// if cl.extLen > 0 { +// binExt := compactToBin(cl.extension[:cl.extLen]) +// copy(bc.extension[:], binExt) +// bc.extLen = len(binExt) +// } +// if cl.downHashedLen > 0 { +// bindhk := compactToBin(cl.downHashedKey[:cl.downHashedLen]) +// copy(bc.downHashedKey[:], bindhk) +// bc.downHashedLen = len(bindhk) +// } +// +// copy(bc.CodeHash[:], cl.CodeHash[:]) +// copy(bc.Storage[:], cl.Storage[:]) +// bc.Delete = cl.Delete +// return nil +// } +//} +// +//// represents state of the tree +//type binState struct { +// TouchMap [maxKeySize]uint16 // For each row, bitmap of cells that were either present before modification, or modified or deleted +// AfterMap [maxKeySize]uint16 // For each row, bitmap of cells that were present after modification +// CurrentKeyLen int16 +// Root []byte // encoded root cell +// RootChecked bool // Set to false if it is not known whether the root is empty, set to true if it is checked +// RootTouched bool +// RootPresent bool +// BranchBefore [maxKeySize]bool // For each row, whether there was a branch node in the database loaded in unfold +// CurrentKey [maxKeySize]byte // For each row indicates which column is currently selected +// Depths [maxKeySize]int // For each row, the depth of cells in that row +//} +// +//func (s *binState) Encode(buf []byte) ([]byte, error) { +// var rootFlags stateRootFlag +// if s.RootPresent { +// rootFlags |= stateRootPresent +// } +// if s.RootChecked { +// rootFlags |= stateRootChecked +// } +// if s.RootTouched { +// rootFlags |= stateRootTouched +// } +// +// ee := bytes.NewBuffer(buf) +// if err := binary.Write(ee, 
binary.BigEndian, s.CurrentKeyLen); err != nil { +// return nil, fmt.Errorf("encode currentKeyLen: %w", err) +// } +// if err := binary.Write(ee, binary.BigEndian, int8(rootFlags)); err != nil { +// return nil, fmt.Errorf("encode rootFlags: %w", err) +// } +// if n, err := ee.Write(s.CurrentKey[:]); err != nil || n != len(s.CurrentKey) { +// return nil, fmt.Errorf("encode currentKey: %w", err) +// } +// if err := binary.Write(ee, binary.BigEndian, uint16(len(s.Root))); err != nil { +// return nil, fmt.Errorf("encode root len: %w", err) +// } +// if n, err := ee.Write(s.Root); err != nil || n != len(s.Root) { +// return nil, fmt.Errorf("encode root: %w", err) +// } +// d := make([]byte, len(s.Depths)) +// for i := 0; i < len(s.Depths); i++ { +// d[i] = byte(s.Depths[i]) +// } +// if n, err := ee.Write(d); err != nil || n != len(s.Depths) { +// return nil, fmt.Errorf("encode depths: %w", err) +// } +// if err := binary.Write(ee, binary.BigEndian, s.TouchMap); err != nil { +// return nil, fmt.Errorf("encode touchMap: %w", err) +// } +// if err := binary.Write(ee, binary.BigEndian, s.AfterMap); err != nil { +// return nil, fmt.Errorf("encode afterMap: %w", err) +// } +// +// var before1, before2 uint64 +// for i := 0; i < halfKeySize; i++ { +// if s.BranchBefore[i] { +// before1 |= 1 << i +// } +// } +// for i, j := halfKeySize, 0; i < maxKeySize; i, j = i+1, j+1 { +// if s.BranchBefore[i] { +// before2 |= 1 << j +// } +// } +// if err := binary.Write(ee, binary.BigEndian, before1); err != nil { +// return nil, fmt.Errorf("encode branchBefore_1: %w", err) +// } +// if err := binary.Write(ee, binary.BigEndian, before2); err != nil { +// return nil, fmt.Errorf("encode branchBefore_2: %w", err) +// } +// return ee.Bytes(), nil +//} +// +//func (s *binState) Decode(buf []byte) error { +// aux := bytes.NewBuffer(buf) +// if err := binary.Read(aux, binary.BigEndian, &s.CurrentKeyLen); err != nil { +// return fmt.Errorf("currentKeyLen: %w", err) +// } +// var rootFlags stateRootFlag +// if err := binary.Read(aux, binary.BigEndian, &rootFlags); err != nil { +// return fmt.Errorf("rootFlags: %w", err) +// } +// +// if rootFlags&stateRootPresent != 0 { +// s.RootPresent = true +// } +// if rootFlags&stateRootTouched != 0 { +// s.RootTouched = true +// } +// if rootFlags&stateRootChecked != 0 { +// s.RootChecked = true +// } +// if n, err := aux.Read(s.CurrentKey[:]); err != nil || n != maxKeySize { +// return fmt.Errorf("currentKey: %w", err) +// } +// var rootSize uint16 +// if err := binary.Read(aux, binary.BigEndian, &rootSize); err != nil { +// return fmt.Errorf("root size: %w", err) +// } +// s.Root = make([]byte, rootSize) +// if _, err := aux.Read(s.Root); err != nil { +// return fmt.Errorf("root: %w", err) +// } +// d := make([]byte, len(s.Depths)) +// if err := binary.Read(aux, binary.BigEndian, &d); err != nil { +// return fmt.Errorf("depths: %w", err) +// } +// for i := 0; i < len(s.Depths); i++ { +// s.Depths[i] = int(d[i]) +// } +// if err := binary.Read(aux, binary.BigEndian, &s.TouchMap); err != nil { +// return fmt.Errorf("touchMap: %w", err) +// } +// if err := binary.Read(aux, binary.BigEndian, &s.AfterMap); err != nil { +// return fmt.Errorf("afterMap: %w", err) +// } +// var branch1, branch2 uint64 +// if err := binary.Read(aux, binary.BigEndian, &branch1); err != nil { +// return fmt.Errorf("branchBefore1: %w", err) +// } +// if err := binary.Read(aux, binary.BigEndian, &branch2); err != nil { +// return fmt.Errorf("branchBefore2: %w", err) +// } +// +// // TODO invalid branch 
encode +// for i := 0; i < halfKeySize; i++ { +// if branch1&(1< %s\n", CompactedKeyToHex([]byte(key)), branchNodeUpdate.String()) - } -} - -func Test_BinPatriciaHashed_UniqueRepresentation(t *testing.T) { - t.Skip() - ctx := context.Background() - - ms := NewMockState(t) - ms2 := NewMockState(t) - - plainKeys, updates := NewUpdateBuilder(). - Balance("f5", 4). - Balance("ff", 900234). - Balance("04", 1233). - Storage("04", "01", "0401"). - Balance("ba", 065606). - Balance("00", 4). - Balance("01", 5). - Balance("02", 6). - Balance("03", 7). - Storage("03", "56", "050505"). - Balance("05", 9). - Storage("03", "87", "060606"). - Balance("b9", 6). - Nonce("ff", 169356). - Storage("05", "02", "8989"). - Storage("f5", "04", "9898"). - Build() - - trieOne := NewBinPatriciaHashed(1, ms, ms.TempDir()) - trieTwo := NewBinPatriciaHashed(1, ms2, ms2.TempDir()) - - trieOne.SetTrace(true) - trieTwo.SetTrace(true) - - // single sequential update - roots := make([][]byte, 0) - // branchNodeUpdatesOne := make(map[string]BranchData) - fmt.Printf("1. Trie sequential update generated following branch updates\n") - for i := 0; i < len(updates); i++ { - if err := ms.applyPlainUpdates(plainKeys[i:i+1], updates[i:i+1]); err != nil { - t.Fatal(err) - } - - sequentialRoot, err := trieOne.ProcessKeys(ctx, plainKeys[i:i+1], "") - require.NoError(t, err) - roots = append(roots, sequentialRoot) - - //ms.applyBranchNodeUpdates(branchNodeUpdates) - //renderUpdates(branchNodeUpdates) - } - - err := ms2.applyPlainUpdates(plainKeys, updates) - require.NoError(t, err) - - fmt.Printf("\n2. Trie batch update generated following branch updates\n") - // batch update - batchRoot, err := trieTwo.ProcessKeys(ctx, plainKeys, "") - require.NoError(t, err) - //renderUpdates(branchNodeUpdatesTwo) - - fmt.Printf("\n sequential roots:\n") - for i, rh := range roots { - fmt.Printf("%2d %+v\n", i, hex.EncodeToString(rh)) - } - - //ms2.applyBranchNodeUpdates(branchNodeUpdatesTwo) - - require.EqualValues(t, batchRoot, roots[len(roots)-1], - "expected equal roots, got sequential [%v] != batch [%v]", hex.EncodeToString(roots[len(roots)-1]), hex.EncodeToString(batchRoot)) - require.Lenf(t, batchRoot, 32, "root hash length should be equal to 32 bytes") -} -func Test_BinPatriciaHashed_EmptyState(t *testing.T) { - ctx := context.Background() - ms := NewMockState(t) - hph := NewBinPatriciaHashed(1, ms, ms.TempDir()) - hph.SetTrace(false) - plainKeys, updates := NewUpdateBuilder(). - Balance("00", 4). - Balance("01", 5). - Balance("02", 6). - Balance("03", 7). - Balance("04", 8). - Storage("04", "01", "0401"). - Storage("03", "56", "050505"). - Storage("03", "57", "060606"). - Balance("05", 9). - Storage("05", "02", "8989"). - Storage("05", "04", "9898"). - Build() - - err := ms.applyPlainUpdates(plainKeys, updates) - require.NoError(t, err) - - firstRootHash, err := hph.ProcessKeys(ctx, plainKeys, "") - require.NoError(t, err) - - t.Logf("root hash %x\n", firstRootHash) - - //ms.applyBranchNodeUpdates(branchNodeUpdates) - - fmt.Printf("1. Generated updates\n") - //renderUpdates(branchNodeUpdates) - - // More updates - hph.Reset() - hph.SetTrace(false) - plainKeys, updates = NewUpdateBuilder(). - Storage("03", "58", "050505"). - Build() - err = ms.applyPlainUpdates(plainKeys, updates) - require.NoError(t, err) - - secondRootHash, err := hph.ProcessKeys(ctx, plainKeys, "") - require.NoError(t, err) - require.NotEqualValues(t, firstRootHash, secondRootHash) - - //ms.applyBranchNodeUpdates(branchNodeUpdates) - fmt.Printf("2. 
Generated single update\n") - //renderUpdates(branchNodeUpdates) - - // More updates - //hph.Reset() // one update - no need to reset - hph.SetTrace(false) - plainKeys, updates = NewUpdateBuilder(). - Storage("03", "58", "070807"). - Build() - err = ms.applyPlainUpdates(plainKeys, updates) - require.NoError(t, err) - - thirdRootHash, err := hph.ProcessKeys(ctx, plainKeys, "") - require.NoError(t, err) - require.NotEqualValues(t, secondRootHash, thirdRootHash) - - //ms.applyBranchNodeUpdates(branchNodeUpdates) - fmt.Printf("3. Generated single update\n") - //renderUpdates(branchNodeUpdates) -} - -func Test_BinPatriciaHashed_EmptyUpdateState(t *testing.T) { - ctx := context.Background() - ms := NewMockState(t) - hph := NewBinPatriciaHashed(1, ms, ms.TempDir()) - hph.SetTrace(false) - plainKeys, updates := NewUpdateBuilder(). - Balance("00", 4). - Nonce("00", 246462653). - Balance("01", 5). - CodeHash("03", "aaaaaaaaaaf7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a870"). - Delete("00"). - Storage("04", "01", "0401"). - Storage("03", "56", "050505"). - Build() - - err := ms.applyPlainUpdates(plainKeys, updates) - require.NoError(t, err) - - hashBeforeEmptyUpdate, err := hph.ProcessKeys(ctx, plainKeys, "") - require.NoError(t, err) - require.NotEmpty(t, hashBeforeEmptyUpdate) - - //ms.applyBranchNodeUpdates(branchNodeUpdates) - - fmt.Println("1. Updates applied") - //renderUpdates(branchNodeUpdates) - - // generate empty updates and do NOT reset tree - hph.SetTrace(true) - - plainKeys, updates = NewUpdateBuilder().Build() - - err = ms.applyPlainUpdates(plainKeys, updates) - require.NoError(t, err) - - hashAfterEmptyUpdate, err := hph.ProcessKeys(ctx, plainKeys, "") - require.NoError(t, err) - - //ms.applyBranchNodeUpdates(branchNodeUpdates) - fmt.Println("2. Empty updates applied without state reset") - - require.EqualValues(t, hashBeforeEmptyUpdate, hashAfterEmptyUpdate) -} +//import ( +// "context" +// "encoding/hex" +// "fmt" +// "slices" +// "testing" +// +// "github.com/stretchr/testify/require" +// +// "github.com/erigontech/erigon-lib/common/length" +//) +// +//func Test_BinPatriciaTrie_UniqueRepresentation(t *testing.T) { +// t.Skip() +// ctx := context.Background() +// +// ms := NewMockState(t) +// ms2 := NewMockState(t) +// +// trie := NewBinPatriciaHashed(length.Addr, ms, ms.TempDir()) +// trieBatch := NewBinPatriciaHashed(length.Addr, ms2, ms2.TempDir()) +// +// plainKeys, updates := NewUpdateBuilder(). +// Balance("e25652aaa6b9417973d325f9a1246b48ff9420bf", 12). +// Balance("cdd0a12034e978f7eccda72bd1bd89a8142b704e", 120000). +// Balance("5bb6abae12c87592b940458437526cb6cad60d50", 170). +// Nonce("5bb6abae12c87592b940458437526cb6cad60d50", 152512). +// Balance("2fcb355beb0ea2b5fcf3b62a24e2faaff1c8d0c0", 100000). +// Balance("463510be61a7ccde354509c0ab813e599ee3fc8a", 200000). +// Balance("cd3e804beea486038609f88f399140dfbe059ef3", 200000). +// Storage("cd3e804beea486038609f88f399140dfbe059ef3", "01023402", "98"). +// Balance("82c88c189d5deeba0ad11463b80b44139bd519c1", 300000). +// Balance("0647e43e8f9ba3fb8b14ad30796b7553d667c858", 400000). +// Delete("cdd0a12034e978f7eccda72bd1bd89a8142b704e"). +// Balance("06548d648c23b12f2e9bfd1bae274b658be208f4", 500000). +// Balance("e5417f49640cf8a0b1d6e38f9dfdc00196e99e8b", 600000). +// Nonce("825ac9fa5d015ec7c6b4cbbc50f78d619d255ea7", 184). +// Build() +// +// ms.applyPlainUpdates(plainKeys, updates) +// ms2.applyPlainUpdates(plainKeys, updates) +// +// fmt.Println("1. 
Running sequential updates over the bin trie") +// var seqHash []byte +// for i := 0; i < len(updates); i++ { +// sh, err := trie.ProcessKeys(ctx, plainKeys[i:i+1], "") +// require.NoError(t, err) +// require.Len(t, sh, length.Hash) +// // WARN! provided sequential branch updates are incorrect - lead to deletion of prefixes (afterMap is zero) +// // while root hashes are equal +// //renderUpdates(branchNodeUpdates) +// +// fmt.Printf("h=%x\n", sh) +// seqHash = sh +// } +// +// fmt.Println("2. Running batch updates over the bin trie") +// +// batchHash, err := trieBatch.ProcessKeys(ctx, plainKeys, "") +// require.NoError(t, err) +// //ms2.applyBranchNodeUpdates(branchBatchUpdates) +// +// //renderUpdates(branchBatchUpdates) +// +// require.EqualValues(t, seqHash, batchHash) +// // require.EqualValues(t, seqHash, batchHash) +// +// // expectedHash, _ := hex.DecodeString("3ed2b89c0f9c6ebc7fa11a181baac21aa0236b12bb4492c708562cb3e40c7c9e") +// // require.EqualValues(t, expectedHash, seqHash) +//} +// +//func renderUpdates(branchNodeUpdates map[string]BranchData) { +// keys := make([]string, 0, len(branchNodeUpdates)) +// for key := range branchNodeUpdates { +// keys = append(keys, key) +// } +// slices.Sort(keys) +// for _, key := range keys { +// branchNodeUpdate := branchNodeUpdates[key] +// fmt.Printf("%x => %s\n", CompactedKeyToHex([]byte(key)), branchNodeUpdate.String()) +// } +//} +// +//func Test_BinPatriciaHashed_UniqueRepresentation(t *testing.T) { +// t.Skip() +// ctx := context.Background() +// +// ms := NewMockState(t) +// ms2 := NewMockState(t) +// +// plainKeys, updates := NewUpdateBuilder(). +// Balance("f5", 4). +// Balance("ff", 900234). +// Balance("04", 1233). +// Storage("04", "01", "0401"). +// Balance("ba", 065606). +// Balance("00", 4). +// Balance("01", 5). +// Balance("02", 6). +// Balance("03", 7). +// Storage("03", "56", "050505"). +// Balance("05", 9). +// Storage("03", "87", "060606"). +// Balance("b9", 6). +// Nonce("ff", 169356). +// Storage("05", "02", "8989"). +// Storage("f5", "04", "9898"). +// Build() +// +// trieOne := NewBinPatriciaHashed(1, ms, ms.TempDir()) +// trieTwo := NewBinPatriciaHashed(1, ms2, ms2.TempDir()) +// +// trieOne.SetTrace(true) +// trieTwo.SetTrace(true) +// +// // single sequential update +// roots := make([][]byte, 0) +// // branchNodeUpdatesOne := make(map[string]BranchData) +// fmt.Printf("1. Trie sequential update generated following branch updates\n") +// for i := 0; i < len(updates); i++ { +// if err := ms.applyPlainUpdates(plainKeys[i:i+1], updates[i:i+1]); err != nil { +// t.Fatal(err) +// } +// +// sequentialRoot, err := trieOne.ProcessKeys(ctx, plainKeys[i:i+1], "") +// require.NoError(t, err) +// roots = append(roots, sequentialRoot) +// +// //ms.applyBranchNodeUpdates(branchNodeUpdates) +// //renderUpdates(branchNodeUpdates) +// } +// +// err := ms2.applyPlainUpdates(plainKeys, updates) +// require.NoError(t, err) +// +// fmt.Printf("\n2. 
Trie batch update generated following branch updates\n") +// // batch update +// batchRoot, err := trieTwo.ProcessKeys(ctx, plainKeys, "") +// require.NoError(t, err) +// //renderUpdates(branchNodeUpdatesTwo) +// +// fmt.Printf("\n sequential roots:\n") +// for i, rh := range roots { +// fmt.Printf("%2d %+v\n", i, hex.EncodeToString(rh)) +// } +// +// //ms2.applyBranchNodeUpdates(branchNodeUpdatesTwo) +// +// require.EqualValues(t, batchRoot, roots[len(roots)-1], +// "expected equal roots, got sequential [%v] != batch [%v]", hex.EncodeToString(roots[len(roots)-1]), hex.EncodeToString(batchRoot)) +// require.Lenf(t, batchRoot, 32, "root hash length should be equal to 32 bytes") +//} +//func Test_BinPatriciaHashed_EmptyState(t *testing.T) { +// ctx := context.Background() +// ms := NewMockState(t) +// hph := NewBinPatriciaHashed(1, ms, ms.TempDir()) +// hph.SetTrace(false) +// plainKeys, updates := NewUpdateBuilder(). +// Balance("00", 4). +// Balance("01", 5). +// Balance("02", 6). +// Balance("03", 7). +// Balance("04", 8). +// Storage("04", "01", "0401"). +// Storage("03", "56", "050505"). +// Storage("03", "57", "060606"). +// Balance("05", 9). +// Storage("05", "02", "8989"). +// Storage("05", "04", "9898"). +// Build() +// +// err := ms.applyPlainUpdates(plainKeys, updates) +// require.NoError(t, err) +// +// firstRootHash, err := hph.ProcessKeys(ctx, plainKeys, "") +// require.NoError(t, err) +// +// t.Logf("root hash %x\n", firstRootHash) +// +// //ms.applyBranchNodeUpdates(branchNodeUpdates) +// +// fmt.Printf("1. Generated updates\n") +// //renderUpdates(branchNodeUpdates) +// +// // More updates +// hph.Reset() +// hph.SetTrace(false) +// plainKeys, updates = NewUpdateBuilder(). +// Storage("03", "58", "050505"). +// Build() +// err = ms.applyPlainUpdates(plainKeys, updates) +// require.NoError(t, err) +// +// secondRootHash, err := hph.ProcessKeys(ctx, plainKeys, "") +// require.NoError(t, err) +// require.NotEqualValues(t, firstRootHash, secondRootHash) +// +// //ms.applyBranchNodeUpdates(branchNodeUpdates) +// fmt.Printf("2. Generated single update\n") +// //renderUpdates(branchNodeUpdates) +// +// // More updates +// //hph.Reset() // one update - no need to reset +// hph.SetTrace(false) +// plainKeys, updates = NewUpdateBuilder(). +// Storage("03", "58", "070807"). +// Build() +// err = ms.applyPlainUpdates(plainKeys, updates) +// require.NoError(t, err) +// +// thirdRootHash, err := hph.ProcessKeys(ctx, plainKeys, "") +// require.NoError(t, err) +// require.NotEqualValues(t, secondRootHash, thirdRootHash) +// +// //ms.applyBranchNodeUpdates(branchNodeUpdates) +// fmt.Printf("3. Generated single update\n") +// //renderUpdates(branchNodeUpdates) +//} +// +//func Test_BinPatriciaHashed_EmptyUpdateState(t *testing.T) { +// ctx := context.Background() +// ms := NewMockState(t) +// hph := NewBinPatriciaHashed(1, ms, ms.TempDir()) +// hph.SetTrace(false) +// plainKeys, updates := NewUpdateBuilder(). +// Balance("00", 4). +// Nonce("00", 246462653). +// Balance("01", 5). +// CodeHash("03", "aaaaaaaaaaf7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a870"). +// Delete("00"). +// Storage("04", "01", "0401"). +// Storage("03", "56", "050505"). +// Build() +// +// err := ms.applyPlainUpdates(plainKeys, updates) +// require.NoError(t, err) +// +// hashBeforeEmptyUpdate, err := hph.ProcessKeys(ctx, plainKeys, "") +// require.NoError(t, err) +// require.NotEmpty(t, hashBeforeEmptyUpdate) +// +// //ms.applyBranchNodeUpdates(branchNodeUpdates) +// +// fmt.Println("1. 
Updates applied") +// //renderUpdates(branchNodeUpdates) +// +// // generate empty updates and do NOT reset tree +// hph.SetTrace(true) +// +// plainKeys, updates = NewUpdateBuilder().Build() +// +// err = ms.applyPlainUpdates(plainKeys, updates) +// require.NoError(t, err) +// +// hashAfterEmptyUpdate, err := hph.ProcessKeys(ctx, plainKeys, "") +// require.NoError(t, err) +// +// //ms.applyBranchNodeUpdates(branchNodeUpdates) +// fmt.Println("2. Empty updates applied without state reset") +// +// require.EqualValues(t, hashBeforeEmptyUpdate, hashAfterEmptyUpdate) +//} diff --git a/erigon-lib/commitment/commitment.go b/erigon-lib/commitment/commitment.go index 57aa6039828..8e1d7e4a96d 100644 --- a/erigon-lib/commitment/commitment.go +++ b/erigon-lib/commitment/commitment.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "encoding/binary" + "errors" "fmt" "math/bits" "strings" @@ -59,7 +60,7 @@ type Trie interface { // Set context for state IO ResetContext(ctx PatriciaContext) - ProcessTree(ctx context.Context, tree *UpdateTree, logPrefix string) (rootHash []byte, err error) + ProcessTree(ctx context.Context, tree *Updates, logPrefix string) (rootHash []byte, err error) // Reads updates from storage ProcessKeys(ctx context.Context, pk [][]byte, logPrefix string) (rootHash []byte, err error) @@ -73,12 +74,12 @@ type PatriciaContext interface { // For each cell, it sets the cell type, clears the modified flag, fills the hash, // and for the extension, account, and leaf type, the `l` and `k` GetBranch(prefix []byte) ([]byte, uint64, error) - // fetch account with given plain key - GetAccount(plainKey []byte, cell *Cell) error - // fetch storage with given plain key - GetStorage(plainKey []byte, cell *Cell) error // store branch data PutBranch(prefix []byte, data []byte, prevData []byte, prevStep uint64) error + // fetch account with given plain key + GetAccount(plainKey []byte) (*Update, error) + // fetch storage with given plain key + GetStorage(plainKey []byte) (*Update, error) } type TrieVariant string @@ -90,19 +91,20 @@ const ( VariantBinPatriciaTrie TrieVariant = "bin-patricia-hashed" ) -func InitializeTrieAndUpdateTree(tv TrieVariant, mode Mode, tmpdir string) (Trie, *UpdateTree) { +func InitializeTrieAndUpdates(tv TrieVariant, mode Mode, tmpdir string) (Trie, *Updates) { switch tv { case VariantBinPatriciaTrie: - trie := NewBinPatriciaHashed(length.Addr, nil, tmpdir) - fn := func(key []byte) []byte { return hexToBin(key) } - tree := NewUpdateTree(mode, tmpdir, fn) - return trie, tree + //trie := NewBinPatriciaHashed(length.Addr, nil, tmpdir) + //fn := func(key []byte) []byte { return hexToBin(key) } + //tree := NewUpdateTree(mode, tmpdir, fn) + //return trie, tree + panic("omg its not supported") case VariantHexPatriciaTrie: fallthrough default: trie := NewHexPatriciaHashed(length.Addr, nil, tmpdir) - tree := NewUpdateTree(mode, tmpdir, trie.hashAndNibblizeKey) + tree := NewUpdates(mode, tmpdir, trie.hashAndNibblizeKey) return trie, tree } } @@ -156,8 +158,8 @@ func (branchData BranchData) String() string { fmt.Fprintf(&sb, "%sstoragePlainKey=[%x]", comma, cell.storagePlainKey[:cell.storagePlainKeyLen]) comma = "," } - if cell.HashLen > 0 { - fmt.Fprintf(&sb, "%shash=[%x]", comma, cell.hash[:cell.HashLen]) + if cell.hashLen > 0 { + fmt.Fprintf(&sb, "%shash=[%x]", comma, cell.hash[:cell.hashLen]) } sb.WriteString("}\n") } @@ -180,16 +182,24 @@ func NewBranchEncoder(sz uint64, tmpdir string) *BranchEncoder { tmpdir: tmpdir, merger: NewHexBranchMerger(sz / 2), } - be.initCollector() + 
//be.initCollector() return be } func (be *BranchEncoder) initCollector() { + if be.updates != nil { + be.updates.Close() + } be.updates = etl.NewCollector("commitment.BranchEncoder", be.tmpdir, etl.NewOldestEntryBuffer(etl.BufferOptimalSize/2), log.Root().New("branch-encoder")) be.updates.LogLvl(log.LvlDebug) } func (be *BranchEncoder) Load(pc PatriciaContext, args etl.TransformArgs) error { + // do not collect them at least now. Write them at CollectUpdate into pc + if be.updates == nil { + return nil + } + if err := be.updates.Load(nil, "", func(prefix, update []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { stateValue, stateStep, err := pc.GetBranch(prefix) if err != nil { @@ -237,9 +247,11 @@ func (be *BranchEncoder) CollectUpdate( } } //fmt.Printf("collectBranchUpdate [%x] -> [%x]\n", prefix, update) - if err = be.updates.Collect(prefix, update); err != nil { + // has to copy :( + if err = ctx.PutBranch(common.Copy(prefix), common.Copy(update), prev, prevStep); err != nil { return 0, err } + mxBranchUpdatesApplied.Inc() return lastNibble, nil } @@ -261,14 +273,14 @@ func (be *BranchEncoder) EncodeBranch(bitmap, touchMap, afterMap uint16, readCel return err } if n != wn { - return fmt.Errorf("n != wn size") + return errors.New("n != wn size") } wn, err = be.buf.Write(val) if err != nil { return err } if len(val) != wn { - return fmt.Errorf("wn != value size") + return errors.New("wn != value size") } return nil } @@ -300,7 +312,7 @@ func (be *BranchEncoder) EncodeBranch(bitmap, touchMap, afterMap uint16, readCel if cell.storagePlainKeyLen > 0 { fieldBits |= StoragePlainPart } - if cell.HashLen > 0 { + if cell.hashLen > 0 { fieldBits |= HashPart } if err := be.buf.WriteByte(byte(fieldBits)); err != nil { @@ -322,7 +334,7 @@ func (be *BranchEncoder) EncodeBranch(bitmap, touchMap, afterMap uint16, readCel } } if fieldBits&HashPart != 0 { - if err := putUvarAndVal(uint64(cell.HashLen), cell.hash[:cell.HashLen]); err != nil { + if err := putUvarAndVal(uint64(cell.hashLen), cell.hash[:cell.hashLen]); err != nil { return nil, 0, err } } @@ -357,14 +369,14 @@ func (branchData BranchData) ReplacePlainKeys(newData []byte, fn func(key []byte if fieldBits&HashedKeyPart != 0 { l, n := binary.Uvarint(branchData[pos:]) if n == 0 { - return nil, fmt.Errorf("replacePlainKeys buffer too small for hashedKey len") + return nil, errors.New("replacePlainKeys buffer too small for hashedKey len") } else if n < 0 { - return nil, fmt.Errorf("replacePlainKeys value overflow for hashedKey len") + return nil, errors.New("replacePlainKeys value overflow for hashedKey len") } newData = append(newData, branchData[pos:pos+n]...) pos += n if len(branchData) < pos+int(l) { - return nil, fmt.Errorf("replacePlainKeys buffer too small for hashedKey") + return nil, errors.New("replacePlainKeys buffer too small for hashedKey") } if l > 0 { newData = append(newData, branchData[pos:pos+int(l)]...) 
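The hunks above all lean on one recurring wire format: each branch field is written as a uvarint length followed by the raw bytes (`putUvarAndVal` on the encode side, the `binary.Uvarint` plus bounds-check sequence in `ReplacePlainKeys` on the decode side). A minimal, self-contained sketch of that round trip, with hypothetical helper names that are not part of this diff:

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// appendUvarAndVal mirrors the putUvarAndVal pattern: uvarint length, then payload.
func appendUvarAndVal(dst, val []byte) []byte {
	var numBuf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(numBuf[:], uint64(len(val)))
	dst = append(dst, numBuf[:n]...)
	return append(dst, val...)
}

// readUvarAndVal is the inverse, with the same three checks ReplacePlainKeys performs:
// empty buffer (n == 0), varint overflow (n < 0), and truncated payload.
func readUvarAndVal(src []byte, pos int) ([]byte, int, error) {
	l, n := binary.Uvarint(src[pos:])
	if n == 0 {
		return nil, 0, errors.New("buffer too small for field len")
	} else if n < 0 {
		return nil, 0, errors.New("value overflow for field len")
	}
	pos += n
	if len(src) < pos+int(l) {
		return nil, 0, errors.New("buffer too small for field")
	}
	return src[pos : pos+int(l)], pos + int(l), nil
}

func main() {
	buf := appendUvarAndVal(nil, []byte("plainKey"))
	val, next, err := readUvarAndVal(buf, 0)
	fmt.Println(string(val), next, err) // plainKey 9 <nil>
}
```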
@@ -374,13 +386,13 @@ func (branchData BranchData) ReplacePlainKeys(newData []byte, fn func(key []byte if fieldBits&AccountPlainPart != 0 { l, n := binary.Uvarint(branchData[pos:]) if n == 0 { - return nil, fmt.Errorf("replacePlainKeys buffer too small for accountPlainKey len") + return nil, errors.New("replacePlainKeys buffer too small for accountPlainKey len") } else if n < 0 { - return nil, fmt.Errorf("replacePlainKeys value overflow for accountPlainKey len") + return nil, errors.New("replacePlainKeys value overflow for accountPlainKey len") } pos += n if len(branchData) < pos+int(l) { - return nil, fmt.Errorf("replacePlainKeys buffer too small for accountPlainKey") + return nil, errors.New("replacePlainKeys buffer too small for accountPlainKey") } if l > 0 { pos += int(l) @@ -407,13 +419,13 @@ func (branchData BranchData) ReplacePlainKeys(newData []byte, fn func(key []byte if fieldBits&StoragePlainPart != 0 { l, n := binary.Uvarint(branchData[pos:]) if n == 0 { - return nil, fmt.Errorf("replacePlainKeys buffer too small for storagePlainKey len") + return nil, errors.New("replacePlainKeys buffer too small for storagePlainKey len") } else if n < 0 { - return nil, fmt.Errorf("replacePlainKeys value overflow for storagePlainKey len") + return nil, errors.New("replacePlainKeys value overflow for storagePlainKey len") } pos += n if len(branchData) < pos+int(l) { - return nil, fmt.Errorf("replacePlainKeys buffer too small for storagePlainKey") + return nil, errors.New("replacePlainKeys buffer too small for storagePlainKey") } if l > 0 { pos += int(l) @@ -440,14 +452,14 @@ func (branchData BranchData) ReplacePlainKeys(newData []byte, fn func(key []byte if fieldBits&HashPart != 0 { l, n := binary.Uvarint(branchData[pos:]) if n == 0 { - return nil, fmt.Errorf("replacePlainKeys buffer too small for hash len") + return nil, errors.New("replacePlainKeys buffer too small for hash len") } else if n < 0 { - return nil, fmt.Errorf("replacePlainKeys value overflow for hash len") + return nil, errors.New("replacePlainKeys value overflow for hash len") } newData = append(newData, branchData[pos:pos+n]...) pos += n if len(branchData) < pos+int(l) { - return nil, fmt.Errorf("replacePlainKeys buffer too small for hash") + return nil, errors.New("replacePlainKeys buffer too small for hash") } if l > 0 { newData = append(newData, branchData[pos:pos+int(l)]...) @@ -501,14 +513,14 @@ func (branchData BranchData) MergeHexBranches(branchData2 BranchData, newData [] for i := 0; i < bits.OnesCount8(byte(fieldBits)); i++ { l, n := binary.Uvarint(branchData2[pos2:]) if n == 0 { - return nil, fmt.Errorf("MergeHexBranches buffer2 too small for field") + return nil, errors.New("MergeHexBranches buffer2 too small for field") } else if n < 0 { - return nil, fmt.Errorf("MergeHexBranches value2 overflow for field") + return nil, errors.New("MergeHexBranches value2 overflow for field") } newData = append(newData, branchData2[pos2:pos2+n]...) pos2 += n if len(branchData2) < pos2+int(l) { - return nil, fmt.Errorf("MergeHexBranches buffer2 too small for field") + return nil, errors.New("MergeHexBranches buffer2 too small for field") } if l > 0 { newData = append(newData, branchData2[pos2:pos2+int(l)]...) 
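Both the fold/unfold code earlier in this diff and the branch encoder walk 16-bit cell bitmaps with the lowest-set-bit idiom (`bit := bitset & -bitset`, position via `bits.TrailingZeros16`, clear with `bitset ^= bit`). A standalone illustration of that loop shape, with a made-up bitmap:

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// A row bitmap with cells 0, 3 and 7 set, as in touchMap/afterMap.
	bitmap := uint16(1)<<0 | uint16(1)<<3 | uint16(1)<<7

	// Visit every set bit from lowest to highest; each position is a cell column.
	for bitset := bitmap; bitset != 0; {
		bit := bitset & -bitset             // isolate the lowest set bit
		nibble := bits.TrailingZeros16(bit) // its index, i.e. the cell column
		fmt.Printf("visit cell %d\n", nibble)
		bitset ^= bit // clear the processed bit
	}
}
```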
@@ -526,16 +538,16 @@ func (branchData BranchData) MergeHexBranches(branchData2 BranchData, newData [] for i := 0; i < bits.OnesCount8(byte(fieldBits)); i++ { l, n := binary.Uvarint(branchData[pos1:]) if n == 0 { - return nil, fmt.Errorf("MergeHexBranches buffer1 too small for field") + return nil, errors.New("MergeHexBranches buffer1 too small for field") } else if n < 0 { - return nil, fmt.Errorf("MergeHexBranches value1 overflow for field") + return nil, errors.New("MergeHexBranches value1 overflow for field") } if add { newData = append(newData, branchData[pos1:pos1+n]...) } pos1 += n if len(branchData) < pos1+int(l) { - return nil, fmt.Errorf("MergeHexBranches buffer1 too small for field") + return nil, errors.New("MergeHexBranches buffer1 too small for field") } if l > 0 { if add { @@ -562,7 +574,7 @@ func (branchData BranchData) DecodeCells() (touchMap, afterMap uint16, row [16]* pos++ row[nibble] = new(Cell) if pos, err = row[nibble].fillFromFields(branchData, pos, fieldBits); err != nil { - err = fmt.Errorf("faield to fill cell at nibble %x: %w", nibble, err) + err = fmt.Errorf("failed to fill cell at nibble %x: %w", nibble, err) return } } @@ -616,9 +628,9 @@ func (m *BranchMerger) Merge(branch1 BranchData, branch2 BranchData) (BranchData for i := 0; i < bits.OnesCount8(byte(fieldBits)); i++ { l, n := binary.Uvarint(branch2[pos2:]) if n == 0 { - return nil, fmt.Errorf("MergeHexBranches branch2 is too small: expected node info size") + return nil, errors.New("MergeHexBranches branch2 is too small: expected node info size") } else if n < 0 { - return nil, fmt.Errorf("MergeHexBranches branch2: size overflow for length") + return nil, errors.New("MergeHexBranches branch2: size overflow for length") } m.buf = append(m.buf, branch2[pos2:pos2+n]...) 
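As the `binState` field comments earlier in the diff describe, `TouchMap` tracks cells touched by an update pass while `AfterMap` tracks cells still present afterwards, and `CollectUpdate` re-encodes `touchMap & afterMap`. A small demo of how the derived sets fall out of those two bitmaps; the values here are invented for illustration:

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	touchMap := uint16(0b1011) // cells 0, 1, 3 were touched in this pass
	afterMap := uint16(0b1110) // cells 1, 2, 3 are present after it

	toEncode := touchMap & afterMap   // touched and still present: re-encode these cells
	deleted := touchMap &^ afterMap   // touched but gone: dropped from the branch
	untouched := afterMap &^ touchMap // present but untouched: carried over as-is

	fmt.Printf("re-encode=%d deleted=%d untouched=%d\n",
		bits.OnesCount16(toEncode), bits.OnesCount16(deleted), bits.OnesCount16(untouched))
	// re-encode=2 deleted=1 untouched=1
}
```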
@@ -644,9 +656,9 @@ func (m *BranchMerger) Merge(branch1 BranchData, branch2 BranchData) (BranchData for i := 0; i < bits.OnesCount8(byte(fieldBits)); i++ { l, n := binary.Uvarint(branch1[pos1:]) if n == 0 { - return nil, fmt.Errorf("MergeHexBranches branch1 is too small: expected node info size") + return nil, errors.New("MergeHexBranches branch1 is too small: expected node info size") } else if n < 0 { - return nil, fmt.Errorf("MergeHexBranches branch1: size overflow for length") + return nil, errors.New("MergeHexBranches branch1: size overflow for length") } if add { @@ -757,8 +769,8 @@ func DecodeBranchAndCollectStat(key, branch []byte, tv TrieVariant) *BranchStat case c.storagePlainKeyLen > 0: stat.SPKSize += uint64(c.storagePlainKeyLen) stat.SPKCount++ - case c.HashLen > 0: - stat.HashSize += uint64(c.HashLen) + case c.hashLen > 0: + stat.HashSize += uint64(c.hashLen) stat.HashCount++ default: panic("no plain key" + fmt.Sprintf("#+%v", c)) @@ -813,10 +825,11 @@ func ParseCommitmentMode(s string) Mode { return mode } -type UpdateTree struct { +type Updates struct { keccak cryptozerocopy.KeccakState hasher keyHasher keys map[string]struct{} + etl *etl.Collector tree *btree.BTreeG[*KeyUpdate] mode Mode tmpdir string @@ -826,8 +839,8 @@ type keyHasher func(key []byte) []byte func keyHasherNoop(key []byte) []byte { return key } -func NewUpdateTree(m Mode, tmpdir string, hasher keyHasher) *UpdateTree { - t := &UpdateTree{ +func NewUpdates(m Mode, tmpdir string, hasher keyHasher) *Updates { + t := &Updates{ keccak: sha3.NewLegacyKeccak256().(cryptozerocopy.KeccakState), hasher: hasher, tmpdir: tmpdir, @@ -835,18 +848,42 @@ func NewUpdateTree(m Mode, tmpdir string, hasher keyHasher) *UpdateTree { } if t.mode == ModeDirect { t.keys = make(map[string]struct{}) + t.initCollector() } else if t.mode == ModeUpdate { t.tree = btree.NewG[*KeyUpdate](64, keyUpdateLessFn) } return t } +func (t *Updates) Mode() Mode { return t.mode } + +func (t *Updates) Size() (updates uint64) { + switch t.mode { + case ModeDirect: + return uint64(len(t.keys)) + case ModeUpdate: + return uint64(t.tree.Len()) + default: + return 0 + } +} + +func (t *Updates) initCollector() { + if t.etl != nil { + t.etl.Close() + t.etl = nil + } + t.etl = etl.NewCollector("commitment", t.tmpdir, etl.NewSortableBuffer(etl.BufferOptimalSize/2), log.Root().New("update-tree")) + t.etl.LogLvl(log.LvlDebug) + t.etl.SortAndFlushInBackground(true) +} + // TouchPlainKey marks plainKey as updated and applies different fn for different key types // (different behaviour for Code, Account and Storage key modifications). 
-func (t *UpdateTree) TouchPlainKey(key, val []byte, fn func(c *KeyUpdate, val []byte)) { +func (t *Updates) TouchPlainKey(key, val []byte, fn func(c *KeyUpdate, val []byte)) { switch t.mode { case ModeUpdate: - pivot, updated := &KeyUpdate{plainKey: key}, false + pivot, updated := &KeyUpdate{plainKey: key, update: new(Update)}, false t.tree.DescendLessOrEqual(pivot, func(item *KeyUpdate) bool { if bytes.Equal(item.plainKey, pivot.plainKey) { @@ -862,23 +899,17 @@ func (t *UpdateTree) TouchPlainKey(key, val []byte, fn func(c *KeyUpdate, val [] t.tree.ReplaceOrInsert(pivot) } case ModeDirect: - t.keys[string(key)] = struct{}{} - default: - } -} - -func (t *UpdateTree) Size() (updates uint64) { - switch t.mode { - case ModeDirect: - return uint64(len(t.keys)) - case ModeUpdate: - return uint64(t.tree.Len()) + if _, ok := t.keys[string(key)]; !ok { + if err := t.etl.Collect(t.hasher(key), key); err != nil { + log.Warn("failed to collect updated key", "key", key, "err", err) + } + t.keys[string(key)] = struct{}{} + } default: - return 0 } } -func (t *UpdateTree) TouchAccount(c *KeyUpdate, val []byte) { +func (t *Updates) TouchAccount(c *KeyUpdate, val []byte) { if len(val) == 0 { c.update.Flags = DeleteUpdate return @@ -895,44 +926,41 @@ func (t *UpdateTree) TouchAccount(c *KeyUpdate, val []byte) { c.update.Balance.Set(balance) c.update.Flags |= BalanceUpdate } - if !bytes.Equal(chash, c.update.CodeHashOrStorage[:]) { + if !bytes.Equal(chash, c.update.CodeHash[:]) { if len(chash) == 0 { - c.update.ValLength = length.Hash - copy(c.update.CodeHashOrStorage[:], EmptyCodeHash) + copy(c.update.CodeHash[:], EmptyCodeHash) } else { - copy(c.update.CodeHashOrStorage[:], chash) - c.update.ValLength = length.Hash c.update.Flags |= CodeUpdate + copy(c.update.CodeHash[:], chash) } } } -func (t *UpdateTree) TouchStorage(c *KeyUpdate, val []byte) { - c.update.ValLength = len(val) +func (t *Updates) TouchStorage(c *KeyUpdate, val []byte) { + c.update.StorageLen = len(val) if len(val) == 0 { c.update.Flags = DeleteUpdate } else { c.update.Flags |= StorageUpdate - copy(c.update.CodeHashOrStorage[:], val) + copy(c.update.Storage[:], val) } } -func (t *UpdateTree) TouchCode(c *KeyUpdate, val []byte) { - t.keccak.Reset() - t.keccak.Write(val) - t.keccak.Read(c.update.CodeHashOrStorage[:]) - if c.update.Flags == DeleteUpdate && len(val) == 0 { - c.update.Flags = DeleteUpdate - c.update.ValLength = 0 +func (t *Updates) TouchCode(c *KeyUpdate, val []byte) { + c.update.Flags |= CodeUpdate + if len(val) == 0 { + if c.update.Flags == 0 || c.update.Flags == DeleteUpdate { + c.update.Flags = DeleteUpdate + } + copy(c.update.CodeHash[:], EmptyCodeHash) return } - c.update.ValLength = length.Hash - if len(val) != 0 { - c.update.Flags |= CodeUpdate - } + t.keccak.Reset() + t.keccak.Write(val) + t.keccak.Read(c.update.CodeHash[:]) } -func (t *UpdateTree) Close() { +func (t *Updates) Close() { if t.keys != nil { clear(t.keys) } @@ -940,34 +968,25 @@ func (t *UpdateTree) Close() { t.tree.Clear(true) t.tree = nil } + if t.etl != nil { + t.etl.Close() + } } -func (t *UpdateTree) HashSort(ctx context.Context, fn func(hk, pk []byte) error) error { +// HashSort sorts and applies fn to each key-value pair in the order of hashed keys. 
+func (t *Updates) HashSort(ctx context.Context, fn func(hk, pk []byte) error) error { switch t.mode { case ModeDirect: - collector := etl.NewCollector("commitment", t.tmpdir, etl.NewSortableBuffer(etl.BufferOptimalSize/4), log.Root().New("update-tree")) - defer collector.Close() - collector.LogLvl(log.LvlDebug) - collector.SortAndFlushInBackground(true) - - for k := range t.keys { - select { - case <-ctx.Done(): - return nil - default: - } - if err := collector.Collect(t.hasher([]byte(k)), []byte(k)); err != nil { - return err - } - } clear(t.keys) - err := collector.Load(nil, "", func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { + err := t.etl.Load(nil, "", func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { return fn(k, v) }, etl.TransformArgs{Quit: ctx.Done()}) if err != nil { return err } + + t.initCollector() case ModeUpdate: t.tree.Ascend(func(item *KeyUpdate) bool { select { @@ -990,7 +1009,8 @@ func (t *UpdateTree) HashSort(ctx context.Context, fn func(hk, pk []byte) error) // Returns list of both plain and hashed keys. If .mode is ModeUpdate, updates also returned. // No ordering guarantees are provided. -func (t *UpdateTree) List(clear bool) ([][]byte, []Update) { +// TODO: replace with a Clear function; HashSort already dumps all keys. +func (t *Updates) List(clear bool) ([][]byte, []Update) { switch t.mode { case ModeDirect: plainKeys := make([][]byte, 0, len(t.keys)) @@ -1007,7 +1027,7 @@ func (t *UpdateTree) List(clear bool) ([][]byte, []Update) { updates := make([]Update, t.tree.Len()) i := 0 t.tree.Ascend(func(item *KeyUpdate) bool { - plainKeys[i], updates[i] = item.plainKey, item.update + plainKeys[i], updates[i] = item.plainKey, *item.update i++ return true }) @@ -1022,7 +1042,7 @@ func (t *UpdateTree) List(clear bool) ([][]byte, []Update) { type KeyUpdate struct { plainKey []byte - update Update + update *Update } func keyUpdateLessFn(i, j *KeyUpdate) bool { diff --git a/erigon-lib/commitment/commitment_test.go b/erigon-lib/commitment/commitment_test.go index cbf2ef54643..24121bda86b 100644 --- a/erigon-lib/commitment/commitment_test.go +++ b/erigon-lib/commitment/commitment_test.go @@ -37,10 +37,10 @@ func generateCellRow(tb testing.TB, size int) (row []*Cell, bitmap uint16) { var bm uint16 for i := 0; i < len(row); i++ { row[i] = new(Cell) - row[i].HashLen = 32 + row[i].hashLen = 32 n, err := rand.Read(row[i].hash[:]) require.NoError(tb, err) - require.EqualValues(tb, row[i].HashLen, n) + require.EqualValues(tb, row[i].hashLen, n) th := rand.Intn(120) switch { @@ -305,9 +305,9 @@ func TestBranchData_ReplacePlainKeys_WithEmpty(t *testing.T) { }) } -func TestNewUpdateTree(t *testing.T) { +func TestNewUpdates(t *testing.T) { t.Run("ModeUpdate", func(t *testing.T) { - ut := NewUpdateTree(ModeUpdate, t.TempDir(), keyHasherNoop) + ut := NewUpdates(ModeUpdate, t.TempDir(), keyHasherNoop) require.NotNil(t, ut.tree) require.NotNil(t, ut.keccak) @@ -316,7 +316,7 @@ func TestNewUpdateTree(t *testing.T) { }) t.Run("ModeDirect", func(t *testing.T) { - ut := NewUpdateTree(ModeDirect, t.TempDir(), keyHasherNoop) + ut := NewUpdates(ModeDirect, t.TempDir(), keyHasherNoop) require.NotNil(t, ut.keccak) require.NotNil(t, ut.keys) @@ -325,11 +325,11 @@ } -func TestUpdateTree_TouchPlainKey(t *testing.T) { - utUpdate := NewUpdateTree(ModeUpdate, t.TempDir(), keyHasherNoop) - utDirect := NewUpdateTree(ModeDirect, t.TempDir(), keyHasherNoop) - utUpdate1 := NewUpdateTree(ModeUpdate, t.TempDir(),
keyHasherNoop) - utDirect1 := NewUpdateTree(ModeDirect, t.TempDir(), keyHasherNoop) +func TestUpdates_TouchPlainKey(t *testing.T) { + utUpdate := NewUpdates(ModeUpdate, t.TempDir(), keyHasherNoop) + utDirect := NewUpdates(ModeDirect, t.TempDir(), keyHasherNoop) + utUpdate1 := NewUpdates(ModeUpdate, t.TempDir(), keyHasherNoop) + utDirect1 := NewUpdates(ModeDirect, t.TempDir(), keyHasherNoop) type tc struct { key []byte @@ -373,7 +373,7 @@ func TestUpdateTree_TouchPlainKey(t *testing.T) { for i := 0; i < len(sortedUniqUpds); i++ { require.EqualValues(t, sortedUniqUpds[i].key, pk[i]) - require.EqualValues(t, sortedUniqUpds[i].val, upd[i].CodeHashOrStorage[:upd[i].ValLength]) + require.EqualValues(t, sortedUniqUpds[i].val, upd[i].Storage[:upd[i].StorageLen]) } pk, upd = utDirect.List(true) diff --git a/erigon-lib/commitment/hex_patricia_hashed.go b/erigon-lib/commitment/hex_patricia_hashed.go index 5f29890e0e8..7f0d853d0d4 100644 --- a/erigon-lib/commitment/hex_patricia_hashed.go +++ b/erigon-lib/commitment/hex_patricia_hashed.go @@ -21,6 +21,7 @@ import ( "context" "encoding/binary" "encoding/hex" + "errors" "fmt" "hash" "io" @@ -98,22 +99,17 @@ func NewHexPatriciaHashed(accountKeyLen int, ctx PatriciaContext, tmpdir string) } type Cell struct { - Balance uint256.Int - Nonce uint64 - HashLen int // Length of the hash (or embedded) - StorageLen int - accountPlainKeyLen int // length of account plain key - storagePlainKeyLen int // length of the storage plain key - downHashedLen int - extLen int downHashedKey [128]byte extension [64]byte accountPlainKey [length.Addr]byte // account plain key storagePlainKey [length.Addr + length.Hash]byte // storage plain key hash [length.Hash]byte // cell hash - CodeHash [length.Hash]byte // hash of the bytecode - Storage [length.Hash]byte - Delete bool + hashLen int // Length of the hash (or embedded) + accountPlainKeyLen int // length of account plain key + storagePlainKeyLen int // length of the storage plain key + downHashedLen int + extLen int + Update } var ( @@ -127,14 +123,12 @@ func (cell *Cell) reset() { cell.storagePlainKeyLen = 0 cell.downHashedLen = 0 cell.extLen = 0 - cell.HashLen = 0 - cell.Nonce = 0 - cell.Balance.Clear() - copy(cell.CodeHash[:], EmptyCodeHash) - cell.StorageLen = 0 - cell.Delete = false + cell.hashLen = 0 + cell.Update.Reset() } +func (cell *Cell) setFromUpdate(update *Update) { cell.Update.Merge(update) } + func (cell *Cell) fillFromUpperCell(upCell *Cell, depth, depthIncrement int) { if upCell.downHashedLen >= depthIncrement { cell.downHashedLen = upCell.downHashedLen - depthIncrement @@ -175,9 +169,9 @@ func (cell *Cell) fillFromUpperCell(upCell *Cell, depth, depthIncrement int) { copy(cell.Storage[:], upCell.Storage[:upCell.StorageLen]) } } - cell.HashLen = upCell.HashLen - if upCell.HashLen > 0 { - copy(cell.hash[:], upCell.hash[:upCell.HashLen]) + cell.hashLen = upCell.hashLen + if upCell.hashLen > 0 { + copy(cell.hash[:], upCell.hash[:upCell.hashLen]) } } @@ -199,7 +193,7 @@ func (cell *Cell) fillFromLowerCell(lowCell *Cell, lowDepth int, preExtension [] copy(cell.Storage[:], lowCell.Storage[:lowCell.StorageLen]) } } - if lowCell.HashLen > 0 { + if lowCell.hashLen > 0 { if (lowCell.accountPlainKeyLen == 0 && lowDepth < 64) || (lowCell.storagePlainKeyLen == 0 && lowDepth > 64) { // Extension is related to either accounts branch node, or storage branch node, we prepend it by preExtension | nibble if len(preExtension) > 0 { @@ -218,9 +212,9 @@ func (cell *Cell) fillFromLowerCell(lowCell *Cell, lowDepth int, preExtension [] 
} } } - cell.HashLen = lowCell.HashLen - if lowCell.HashLen > 0 { - copy(cell.hash[:], lowCell.hash[:lowCell.HashLen]) + cell.hashLen = lowCell.hashLen + if lowCell.hashLen > 0 { + copy(cell.hash[:], lowCell.hash[:lowCell.hashLen]) } } @@ -261,7 +255,7 @@ func (cell *Cell) deriveHashedKeys(depth int, keccak keccakState, accountKeyLen extraLen := 0 if cell.accountPlainKeyLen > 0 { if depth > 64 { - return fmt.Errorf("deriveHashedKeys accountPlainKey present at depth > 64") + return errors.New("deriveHashedKeys accountPlainKey present at depth > 64") } extraLen = 64 - depth } @@ -303,9 +297,9 @@ func (cell *Cell) fillFromFields(data []byte, pos int, fieldBits PartFlags) (int if fieldBits&HashedKeyPart != 0 { l, n := binary.Uvarint(data[pos:]) if n == 0 { - return 0, fmt.Errorf("fillFromFields buffer too small for hashedKey len") + return 0, errors.New("fillFromFields buffer too small for hashedKey len") } else if n < 0 { - return 0, fmt.Errorf("fillFromFields value overflow for hashedKey len") + return 0, errors.New("fillFromFields value overflow for hashedKey len") } pos += n if len(data) < pos+int(l) { @@ -325,13 +319,13 @@ func (cell *Cell) fillFromFields(data []byte, pos int, fieldBits PartFlags) (int if fieldBits&AccountPlainPart != 0 { l, n := binary.Uvarint(data[pos:]) if n == 0 { - return 0, fmt.Errorf("fillFromFields buffer too small for accountPlainKey len") + return 0, errors.New("fillFromFields buffer too small for accountPlainKey len") } else if n < 0 { - return 0, fmt.Errorf("fillFromFields value overflow for accountPlainKey len") + return 0, errors.New("fillFromFields value overflow for accountPlainKey len") } pos += n if len(data) < pos+int(l) { - return 0, fmt.Errorf("fillFromFields buffer too small for accountPlainKey") + return 0, errors.New("fillFromFields buffer too small for accountPlainKey") } cell.accountPlainKeyLen = int(l) if l > 0 { @@ -344,13 +338,13 @@ func (cell *Cell) fillFromFields(data []byte, pos int, fieldBits PartFlags) (int if fieldBits&StoragePlainPart != 0 { l, n := binary.Uvarint(data[pos:]) if n == 0 { - return 0, fmt.Errorf("fillFromFields buffer too small for storagePlainKey len") + return 0, errors.New("fillFromFields buffer too small for storagePlainKey len") } else if n < 0 { - return 0, fmt.Errorf("fillFromFields value overflow for storagePlainKey len") + return 0, errors.New("fillFromFields value overflow for storagePlainKey len") } pos += n if len(data) < pos+int(l) { - return 0, fmt.Errorf("fillFromFields buffer too small for storagePlainKey") + return 0, errors.New("fillFromFields buffer too small for storagePlainKey") } cell.storagePlainKeyLen = int(l) if l > 0 { @@ -363,42 +357,25 @@ func (cell *Cell) fillFromFields(data []byte, pos int, fieldBits PartFlags) (int if fieldBits&HashPart != 0 { l, n := binary.Uvarint(data[pos:]) if n == 0 { - return 0, fmt.Errorf("fillFromFields buffer too small for hash len") + return 0, errors.New("fillFromFields buffer too small for hash len") } else if n < 0 { - return 0, fmt.Errorf("fillFromFields value overflow for hash len") + return 0, errors.New("fillFromFields value overflow for hash len") } pos += n if len(data) < pos+int(l) { - return 0, fmt.Errorf("fillFromFields buffer too small for hash") + return 0, errors.New("fillFromFields buffer too small for hash") } - cell.HashLen = int(l) + cell.hashLen = int(l) if l > 0 { copy(cell.hash[:], data[pos:pos+int(l)]) pos += int(l) } } else { - cell.HashLen = 0 + cell.hashLen = 0 } return pos, nil } -func (cell *Cell) setStorage(value []byte) { - 
cell.StorageLen = len(value) - if len(value) > 0 { - copy(cell.Storage[:], value) - } -} - -func (cell *Cell) setAccountFields(codeHash []byte, balance *uint256.Int, nonce uint64) { - if len(codeHash) == 0 { - codeHash = common.Copy(EmptyCodeHash) - } - copy(cell.CodeHash[:], codeHash) - - cell.Balance.SetBytes(balance.Bytes()) - cell.Nonce = nonce -} - func (cell *Cell) accountForHashing(buffer []byte, storageRootHash [length.Hash]byte) int { balanceBytes := 0 if !cell.Balance.LtUint64(128) { @@ -711,17 +688,17 @@ func (hph *HexPatriciaHashed) computeCellHash(cell *Cell, depth int, buf []byte) if !storageRootHashIsSet { if cell.extLen > 0 { // Extension - if cell.HashLen > 0 { + if cell.hashLen > 0 { if hph.trace { - fmt.Printf("extensionHash for [%x]=>[%x]\n", cell.extension[:cell.extLen], cell.hash[:cell.HashLen]) + fmt.Printf("extensionHash for [%x]=>[%x]\n", cell.extension[:cell.extLen], cell.hash[:cell.hashLen]) } - if storageRootHash, err = hph.extensionHash(cell.extension[:cell.extLen], cell.hash[:cell.HashLen]); err != nil { + if storageRootHash, err = hph.extensionHash(cell.extension[:cell.extLen], cell.hash[:cell.hashLen]); err != nil { return nil, err } } else { - return nil, fmt.Errorf("computeCellHash extension without hash") + return nil, errors.New("computeCellHash extension without hash") } - } else if cell.HashLen > 0 { + } else if cell.hashLen > 0 { storageRootHash = cell.hash } else { storageRootHash = *(*[length.Hash]byte)(EmptyRootHash) @@ -737,20 +714,20 @@ func (hph *HexPatriciaHashed) computeCellHash(cell *Cell, depth int, buf []byte) buf = append(buf, 0x80+32) if cell.extLen > 0 { // Extension - if cell.HashLen > 0 { + if cell.hashLen > 0 { if hph.trace { - fmt.Printf("extensionHash for [%x]=>[%x]\n", cell.extension[:cell.extLen], cell.hash[:cell.HashLen]) + fmt.Printf("extensionHash for [%x]=>[%x]\n", cell.extension[:cell.extLen], cell.hash[:cell.hashLen]) } var hash [length.Hash]byte - if hash, err = hph.extensionHash(cell.extension[:cell.extLen], cell.hash[:cell.HashLen]); err != nil { + if hash, err = hph.extensionHash(cell.extension[:cell.extLen], cell.hash[:cell.hashLen]); err != nil { return nil, err } buf = append(buf, hash[:]...) } else { - return nil, fmt.Errorf("computeCellHash extension without hash") + return nil, errors.New("computeCellHash extension without hash") } - } else if cell.HashLen > 0 { - buf = append(buf, cell.hash[:cell.HashLen]...) + } else if cell.hashLen > 0 { + buf = append(buf, cell.hash[:cell.hashLen]...) //} else if storageRootHashIsSet { // buf = append(buf, storageRootHash[:]...) 
// copy(cell.h[:], storageRootHash[:]) @@ -767,7 +744,7 @@ func (hph *HexPatriciaHashed) needUnfolding(hashedKey []byte) int { if hph.trace { fmt.Printf("needUnfolding root, rootChecked = %t\n", hph.rootChecked) } - if hph.root.downHashedLen == 0 && hph.root.HashLen == 0 { + if hph.root.downHashedLen == 0 && hph.root.hashLen == 0 { if hph.rootChecked { // Previously checked, empty root, no unfolding needed return 0 @@ -781,14 +758,14 @@ func (hph *HexPatriciaHashed) needUnfolding(hashedKey []byte) int { cell = &hph.grid[hph.activeRows-1][col] depth = hph.depths[hph.activeRows-1] if hph.trace { - fmt.Printf("needUnfolding cell (%d, %x), currentKey=[%x], depth=%d, cell.hash=[%x]\n", hph.activeRows-1, col, hph.currentKey[:hph.currentKeyLen], depth, cell.hash[:cell.HashLen]) + fmt.Printf("needUnfolding cell (%d, %x), currentKey=[%x], depth=%d, cell.hash=[%x]\n", hph.activeRows-1, col, hph.currentKey[:hph.currentKeyLen], depth, cell.hash[:cell.hashLen]) } } if len(hashedKey) <= depth { return 0 } if cell.downHashedLen == 0 { - if cell.HashLen == 0 { + if cell.hashLen == 0 { // cell is empty, no need to unfold further return 0 } @@ -861,20 +838,24 @@ func (hph *HexPatriciaHashed) unfoldBranchNode(row int, deleted bool, depth int) return false, fmt.Errorf("prefix [%x], branchData[%x]: %w", hph.currentKey[:hph.currentKeyLen], branchData, err) } if hph.trace { - fmt.Printf("cell (%d, %x) depth=%d, hash=[%x], accountPlainKey=[%x], storagePlainKey=[%x], extension=[%x]\n", row, nibble, depth, cell.hash[:cell.HashLen], cell.accountPlainKey[:cell.accountPlainKeyLen], cell.storagePlainKey[:cell.storagePlainKeyLen], cell.extension[:cell.extLen]) + fmt.Printf("cell (%d, %x) depth=%d, hash=[%x], accountPlainKey=[%x], storagePlainKey=[%x], extension=[%x]\n", row, nibble, depth, cell.hash[:cell.hashLen], cell.accountPlainKey[:cell.accountPlainKeyLen], cell.storagePlainKey[:cell.storagePlainKeyLen], cell.extension[:cell.extLen]) } if cell.accountPlainKeyLen > 0 { - if err = hph.ctx.GetAccount(cell.accountPlainKey[:cell.accountPlainKeyLen], cell); err != nil { + update, err := hph.ctx.GetAccount(cell.accountPlainKey[:cell.accountPlainKeyLen]) + if err != nil { return false, fmt.Errorf("unfoldBranchNode GetAccount: %w", err) } + cell.setFromUpdate(update) if hph.trace { fmt.Printf("GetAccount[%x] return balance=%d, nonce=%d code=%x\n", cell.accountPlainKey[:cell.accountPlainKeyLen], &cell.Balance, cell.Nonce, cell.CodeHash[:]) } } if cell.storagePlainKeyLen > 0 { - if err = hph.ctx.GetStorage(cell.storagePlainKey[:cell.storagePlainKeyLen], cell); err != nil { + update, err := hph.ctx.GetStorage(cell.storagePlainKey[:cell.storagePlainKeyLen]) + if err != nil { return false, fmt.Errorf("unfoldBranchNode GetStorage: %w", err) } + cell.setFromUpdate(update) } if err = cell.deriveHashedKeys(depth, hph.keccak, hph.accountKeyLen); err != nil { return false, err @@ -893,7 +874,7 @@ func (hph *HexPatriciaHashed) unfold(hashedKey []byte, unfolding int) error { var col byte var upDepth, depth int if hph.activeRows == 0 { - if hph.rootChecked && hph.root.HashLen == 0 && hph.root.downHashedLen == 0 { + if hph.rootChecked && hph.root.hashLen == 0 && hph.root.downHashedLen == 0 { // No unfolding for empty root return nil } @@ -991,7 +972,7 @@ func (hph *HexPatriciaHashed) needFolding(hashedKey []byte) bool { func (hph *HexPatriciaHashed) fold() (err error) { updateKeyLen := hph.currentKeyLen if hph.activeRows == 0 { - return fmt.Errorf("cannot fold - no active rows") + return errors.New("cannot fold - no active rows") } if
hph.trace { fmt.Printf("fold: activeRows: %d, currentKey: [%x], touchMap: %016b, afterMap: %016b\n", hph.activeRows, hph.currentKey[:hph.currentKeyLen], hph.touchMap[hph.activeRows-1], hph.afterMap[hph.activeRows-1]) @@ -1042,7 +1023,7 @@ func (hph *HexPatriciaHashed) fold() (err error) { hph.afterMap[row-1] &^= (uint16(1) << col) } } - upCell.HashLen = 0 + upCell.hashLen = 0 upCell.accountPlainKeyLen = 0 upCell.storagePlainKeyLen = 0 upCell.extLen = 0 @@ -1171,7 +1152,7 @@ func (hph *HexPatriciaHashed) fold() (err error) { upCell.accountPlainKeyLen = 0 } upCell.storagePlainKeyLen = 0 - upCell.HashLen = 32 + upCell.hashLen = 32 if _, err := hph.keccak2.Read(upCell.hash[:]); err != nil { return err } @@ -1193,11 +1174,9 @@ func (hph *HexPatriciaHashed) deleteCell(hashedKey []byte) { fmt.Printf("deleteCell, activeRows = %d\n", hph.activeRows) } var cell *Cell - if hph.activeRows == 0 { - // Remove the root + if hph.activeRows == 0 { // Remove the root cell = &hph.root - hph.rootTouched = true - hph.rootPresent = false + hph.rootTouched, hph.rootPresent = true, false } else { row := hph.activeRows - 1 if hph.depths[row] < len(hashedKey) { @@ -1206,18 +1185,19 @@ func (hph *HexPatriciaHashed) deleteCell(hashedKey []byte) { } return } - col := int(hashedKey[hph.currentKeyLen]) - cell = &hph.grid[row][col] - if hph.afterMap[row]&(uint16(1)<<col) == 0 { […] + if hph.trace { + fmt.Printf("updateCell %x => %s\n", plainKey, u.String()) + } return cell } @@ -1272,16 +1264,16 @@ func (hph *HexPatriciaHashed) RootHash() ([]byte, error) { return rootHash[1:], nil // first byte is 128+hash_len } -func (hph *HexPatriciaHashed) ProcessTree(ctx context.Context, tree *UpdateTree, logPrefix string) (rootHash []byte, err error) { +func (hph *HexPatriciaHashed) ProcessTree(ctx context.Context, tree *Updates, logPrefix string) (rootHash []byte, err error) { var ( - stagedCell = new(Cell) - logEvery = time.NewTicker(20 * time.Second) + m runtime.MemStats + ki uint64 + update *Update - m runtime.MemStats - ki uint64 + updatesCount = tree.Size() + logEvery = time.NewTicker(20 * time.Second) ) defer logEvery.Stop() - updatesCount := tree.Size() err = tree.HashSort(ctx, func(hashedKey, plainKey []byte) error { select { @@ -1309,37 +1301,19 @@ func (hph *HexPatriciaHashed) ProcessTree(ctx context.Context, tree *UpdateTree, } // Update the cell - stagedCell.reset() if len(plainKey) == hph.accountKeyLen { - if err := hph.ctx.GetAccount(plainKey, stagedCell); err != nil { + update, err = hph.ctx.GetAccount(plainKey) + if err != nil { return fmt.Errorf("GetAccount for key %x failed: %w", plainKey, err) } - if !stagedCell.Delete { - cell := hph.updateCell(plainKey, hashedKey) - cell.setAccountFields(stagedCell.CodeHash[:], &stagedCell.Balance, stagedCell.Nonce) - - if hph.trace { - fmt.Printf("GetAccount update key %x => balance=%d nonce=%v codeHash=%x\n", cell.accountPlainKey, &cell.Balance, cell.Nonce, cell.CodeHash) - } - } } else { - if err = hph.ctx.GetStorage(plainKey, stagedCell); err != nil { + update, err = hph.ctx.GetStorage(plainKey) + if err != nil { return fmt.Errorf("GetStorage for key %x failed: %w", plainKey, err) } - if !stagedCell.Delete { - hph.updateCell(plainKey, hashedKey).setStorage(stagedCell.Storage[:stagedCell.StorageLen]) - if hph.trace { - fmt.Printf("GetStorage reading key %x => %x\n", plainKey, stagedCell.Storage[:stagedCell.StorageLen]) - } - } } + hph.updateCell(plainKey, hashedKey, update) - if stagedCell.Delete { - if hph.trace { - fmt.Printf("delete cell %x hash %x\n", plainKey, hashedKey) - } - hph.deleteCell(hashedKey) - } mxKeys.Inc() ki++ return
nil @@ -1386,7 +1360,8 @@ func (hph *HexPatriciaHashed) ProcessKeys(ctx context.Context, plainKeys [][]byt defer logEvery.Stop() var m runtime.MemStats - stagedCell := new(Cell) + //stagedCell := new(Cell) + var update *Update for i, hashedKey := range hashedKeys { select { case <-ctx.Done(): @@ -1414,37 +1389,18 @@ func (hph *HexPatriciaHashed) ProcessKeys(ctx context.Context, plainKeys [][]byt } // Update the cell - stagedCell.reset() if len(plainKey) == hph.accountKeyLen { - if err := hph.ctx.GetAccount(plainKey, stagedCell); err != nil { + update, err = hph.ctx.GetAccount(plainKey) + if err != nil { return nil, fmt.Errorf("GetAccount for key %x failed: %w", plainKey, err) } - if !stagedCell.Delete { - cell := hph.updateCell(plainKey, hashedKey) - cell.setAccountFields(stagedCell.CodeHash[:], &stagedCell.Balance, stagedCell.Nonce) - - if hph.trace { - fmt.Printf("GetAccount update key %x => balance=%d nonce=%v codeHash=%x\n", cell.accountPlainKey, &cell.Balance, cell.Nonce, cell.CodeHash) - } - } } else { - if err = hph.ctx.GetStorage(plainKey, stagedCell); err != nil { + update, err = hph.ctx.GetStorage(plainKey) + if err != nil { return nil, fmt.Errorf("GetStorage for key %x failed: %w", plainKey, err) } - if !stagedCell.Delete { - hph.updateCell(plainKey, hashedKey).setStorage(stagedCell.Storage[:stagedCell.StorageLen]) - if hph.trace { - fmt.Printf("GetStorage reading key %x => %x\n", plainKey, stagedCell.Storage[:stagedCell.StorageLen]) - } - } - } - - if stagedCell.Delete { - if hph.trace { - fmt.Printf("delete cell %x hash %x\n", plainKey, hashedKey) - } - hph.deleteCell(hashedKey) } + hph.updateCell(plainKey, hashedKey, update) mxKeys.Inc() } // Folding everything up to the root @@ -1484,6 +1440,7 @@ func (hph *HexPatriciaHashed) ProcessUpdates(ctx context.Context, plainKeys [][] return nil, ctx.Err() default: } + if hph.trace { fmt.Printf("(%d/%d) key=[%x] %s hashedKey=[%x] currentKey=[%x]\n", i+1, len(updates), update.plainKey, update.String(), update.hashedKey, hph.currentKey[:hph.currentKeyLen]) @@ -1501,45 +1458,7 @@ func (hph *HexPatriciaHashed) ProcessUpdates(ctx context.Context, plainKeys [][] } } - // Update the cell - if update.Flags == DeleteUpdate { - hph.deleteCell(update.hashedKey) - if hph.trace { - fmt.Printf("delete cell %x hash %x\n", update.plainKey, update.hashedKey) - } - } else { - cell := hph.updateCell(update.plainKey, update.hashedKey) - if hph.trace && len(update.plainKey) == hph.accountKeyLen { - fmt.Printf("GetAccount updated key %x =>", update.plainKey) - } - if update.Flags&BalanceUpdate != 0 { - if hph.trace { - fmt.Printf(" balance=%d", &update.Balance) - } - cell.Balance.Set(&update.Balance) - } - if update.Flags&NonceUpdate != 0 { - if hph.trace { - fmt.Printf(" nonce=%d", update.Nonce) - } - cell.Nonce = update.Nonce - } - if update.Flags&CodeUpdate != 0 { - if hph.trace { - fmt.Printf(" codeHash=%x", update.CodeHashOrStorage) - } - copy(cell.CodeHash[:], update.CodeHashOrStorage[:update.ValLength]) - } - if hph.trace { - fmt.Printf("\n") - } - if update.Flags&StorageUpdate != 0 { - cell.setStorage(update.CodeHashOrStorage[:update.ValLength]) - if hph.trace { - fmt.Printf("\rstorage set %x => %x\n", update.plainKey, update.CodeHashOrStorage[:update.ValLength]) - } - } - } + hph.updateCell(update.plainKey, update.hashedKey, &updates[i]) mxKeys.Inc() } @@ -1567,15 +1486,7 @@ func (hph *HexPatriciaHashed) Variant() TrieVariant { return VariantHexPatriciaT // Reset allows HexPatriciaHashed instance to be reused for the new commitment calculation 
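// Since Cell now embeds Update, the single root.reset() below clears both the
// trie-structural fields (downHashedKey, extension, plain keys, hash lengths)
// and the embedded account state (balance, nonce, code hash) that the old code
// wiped field by field.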
func (hph *HexPatriciaHashed) Reset() { - hph.root.HashLen = 0 - hph.root.downHashedLen = 0 - hph.root.accountPlainKeyLen = 0 - hph.root.storagePlainKeyLen = 0 - hph.root.extLen = 0 - copy(hph.root.CodeHash[:], EmptyCodeHash) - hph.root.StorageLen = 0 - hph.root.Balance.Clear() - hph.root.Nonce = 0 + hph.root.reset() hph.rootTouched = false hph.rootChecked = false hph.rootPresent = true @@ -1722,16 +1633,16 @@ func (s *state) Decode(buf []byte) error { func (cell *Cell) Encode() []byte { var pos = 1 - size := pos + 5 + cell.HashLen + cell.accountPlainKeyLen + cell.storagePlainKeyLen + cell.downHashedLen + cell.extLen // max size + size := pos + 5 + cell.hashLen + cell.accountPlainKeyLen + cell.storagePlainKeyLen + cell.downHashedLen + cell.extLen // max size buf := make([]byte, size) var flags uint8 - if cell.HashLen != 0 { + if cell.hashLen != 0 { flags |= cellFlagHash - buf[pos] = byte(cell.HashLen) + buf[pos] = byte(cell.hashLen) pos++ - copy(buf[pos:pos+cell.HashLen], cell.hash[:]) - pos += cell.HashLen + copy(buf[pos:pos+cell.hashLen], cell.hash[:]) + pos += cell.hashLen } if cell.accountPlainKeyLen != 0 { flags |= cellFlagAccount @@ -1761,7 +1672,7 @@ func (cell *Cell) Encode() []byte { copy(buf[pos:pos+cell.extLen], cell.extension[:]) pos += cell.extLen //nolint } - if cell.Delete { + if cell.Deleted() { flags |= cellFlagDelete } buf[0] = flags @@ -1779,7 +1690,7 @@ const ( func (cell *Cell) Decode(buf []byte) error { if len(buf) < 1 { - return fmt.Errorf("invalid buffer size to contain Cell (at least 1 byte expected)") + return errors.New("invalid buffer size to contain Cell (at least 1 byte expected)") } cell.reset() @@ -1788,10 +1699,10 @@ func (cell *Cell) Decode(buf []byte) error { pos++ if flags&cellFlagHash != 0 { - cell.HashLen = int(buf[pos]) + cell.hashLen = int(buf[pos]) pos++ - copy(cell.hash[:], buf[pos:pos+cell.HashLen]) - pos += cell.HashLen + copy(cell.hash[:], buf[pos:pos+cell.hashLen]) + pos += cell.hashLen } if flags&cellFlagAccount != 0 { cell.accountPlainKeyLen = int(buf[pos]) @@ -1818,7 +1729,8 @@ func (cell *Cell) Decode(buf []byte) error { pos += cell.extLen //nolint } if flags&cellFlagDelete != 0 { - cell.Delete = true + log.Warn("deleted cell should not be encoded", "cell", cell.String()) + cell.Update.Flags = DeleteUpdate } return nil } @@ -1864,7 +1776,7 @@ func (hph *HexPatriciaHashed) SetState(buf []byte) error { return nil } if hph.activeRows != 0 { - return fmt.Errorf("target trie has active rows, could not reset state before fold") + return errors.New("target trie has active rows, could not reset state before fold") } var s state @@ -1888,32 +1800,37 @@ func (hph *HexPatriciaHashed) SetState(buf []byte) error { if hph.ctx == nil { panic("nil ctx") } - if err := hph.ctx.GetAccount(hph.root.accountPlainKey[:hph.root.accountPlainKeyLen], &hph.root); err != nil { + + update, err := hph.ctx.GetAccount(hph.root.accountPlainKey[:hph.root.accountPlainKeyLen]) + if err != nil { return err } + hph.root.setFromUpdate(update) } if hph.root.storagePlainKeyLen > 0 { if hph.ctx == nil { panic("nil ctx") } - if err := hph.ctx.GetStorage(hph.root.storagePlainKey[:hph.root.storagePlainKeyLen], &hph.root); err != nil { + update, err := hph.ctx.GetStorage(hph.root.storagePlainKey[:hph.root.storagePlainKeyLen]) + if err != nil { return err } + hph.root.setFromUpdate(update) //hph.root.deriveHashedKeys(0, hph.keccak, hph.accountKeyLen) } return nil } -func bytesToUint64(buf []byte) (x uint64) { - for i, b := range buf { - x = x<<8 + uint64(b) - if i == 7 { - return - } - 
} - return -} +//func bytesToUint64(buf []byte) (x uint64) { +// for i, b := range buf { +// x = x<<8 + uint64(b) +// if i == 7 { +// return +// } +// } +// return +//} func hexToCompact(key []byte) []byte { zeroByte, keyPos, keyLen := makeCompactZeroByte(key) @@ -2057,21 +1974,22 @@ func (uf UpdateFlags) String() string { } type Update struct { - hashedKey []byte - plainKey []byte - Flags UpdateFlags - Balance uint256.Int - Nonce uint64 - ValLength int - CodeHashOrStorage [length.Hash]byte + hashedKey []byte + plainKey []byte + CodeHash [length.Hash]byte + Storage [length.Hash]byte + StorageLen int + Flags UpdateFlags + Balance uint256.Int + Nonce uint64 } func (u *Update) Reset() { u.Flags = 0 u.Balance.Clear() u.Nonce = 0 - u.ValLength = 0 - copy(u.CodeHashOrStorage[:], EmptyCodeHash) + u.StorageLen = 0 + copy(u.CodeHash[:], EmptyCodeHash) } func (u *Update) Merge(b *Update) { @@ -2089,77 +2007,12 @@ func (u *Update) Merge(b *Update) { } if b.Flags&CodeUpdate != 0 { u.Flags |= CodeUpdate - copy(u.CodeHashOrStorage[:], b.CodeHashOrStorage[:]) - u.ValLength = b.ValLength + copy(u.CodeHash[:], b.CodeHash[:]) } if b.Flags&StorageUpdate != 0 { u.Flags |= StorageUpdate - copy(u.CodeHashOrStorage[:], b.CodeHashOrStorage[:]) - u.ValLength = b.ValLength - } -} - -func (u *Update) DecodeForStorage(enc []byte) { - //u.Reset() - - //balance := new(uint256.Int) - // - //if len(enc) > 0 { - // pos := 0 - // nonceBytes := int(enc[pos]) - // pos++ - // if nonceBytes > 0 { - // nonce := bytesToUint64(enc[pos : pos+nonceBytes]) - // if u.Nonce != nonce { - // u.Flags |= NonceUpdate - // } - // u.Nonce = nonce - // pos += nonceBytes - // } - // balanceBytes := int(enc[pos]) - // pos++ - // if balanceBytes > 0 { - // balance.SetBytes(enc[pos : pos+balanceBytes]) - // if u.Balance.Cmp(balance) != 0 { - // u.Flags |= BalanceUpdate - // } - // u.Balance.Set(balance) - // pos += balanceBytes - // } - // codeHashBytes := int(enc[pos]) - // pos++ - // - // if codeHashBytes > 0 { - // if !bytes.Equal(u.CodeHashOrStorage[:], enc[pos:pos+codeHashBytes]) { - // u.Flags |= CodeUpdate - // copy(u.CodeHashOrStorage[:], enc[pos:pos+codeHashBytes]) - // u.ValLength = length.Hash - // } - // } - //} - //return - - pos := 0 - nonceBytes := int(enc[pos]) - pos++ - if nonceBytes > 0 { - u.Nonce = bytesToUint64(enc[pos : pos+nonceBytes]) - u.Flags |= NonceUpdate - pos += nonceBytes - } - balanceBytes := int(enc[pos]) - pos++ - if balanceBytes > 0 { - u.Balance.SetBytes(enc[pos : pos+balanceBytes]) - u.Flags |= BalanceUpdate - pos += balanceBytes - } - codeHashBytes := int(enc[pos]) - pos++ - if codeHashBytes > 0 { - copy(u.CodeHashOrStorage[:], enc[pos:pos+codeHashBytes]) - u.ValLength = length.Hash - u.Flags |= CodeUpdate + copy(u.Storage[:], b.Storage[:b.StorageLen]) + u.StorageLen = b.StorageLen } } @@ -2174,32 +2027,38 @@ func (u *Update) Encode(buf []byte, numBuf []byte) []byte { buf = append(buf, numBuf[:n]...) } if u.Flags&CodeUpdate != 0 { - buf = append(buf, u.CodeHashOrStorage[:]...) + buf = append(buf, u.CodeHash[:]...) } if u.Flags&StorageUpdate != 0 { - n := binary.PutUvarint(numBuf, uint64(u.ValLength)) + n := binary.PutUvarint(numBuf, uint64(u.StorageLen)) buf = append(buf, numBuf[:n]...) - if u.ValLength > 0 { - buf = append(buf, u.CodeHashOrStorage[:u.ValLength]...) + if u.StorageLen > 0 { + buf = append(buf, u.Storage[:u.StorageLen]...) 
} } return buf } +func (u *Update) Deleted() bool { + return u.Flags == DeleteUpdate +} + func (u *Update) Decode(buf []byte, pos int) (int, error) { if len(buf) < pos+1 { - return 0, fmt.Errorf("decode Update: buffer too small for flags") + return 0, errors.New("decode Update: buffer too small for flags") } + u.Reset() + u.Flags = UpdateFlags(buf[pos]) pos++ if u.Flags&BalanceUpdate != 0 { if len(buf) < pos+1 { - return 0, fmt.Errorf("decode Update: buffer too small for balance len") + return 0, errors.New("decode Update: buffer too small for balance len") } balanceLen := int(buf[pos]) pos++ if len(buf) < pos+balanceLen { - return 0, fmt.Errorf("decode Update: buffer too small for balance") + return 0, errors.New("decode Update: buffer too small for balance") } u.Balance.SetBytes(buf[pos : pos+balanceLen]) pos += balanceLen @@ -2208,36 +2067,35 @@ func (u *Update) Decode(buf []byte, pos int) (int, error) { var n int u.Nonce, n = binary.Uvarint(buf[pos:]) if n == 0 { - return 0, fmt.Errorf("decode Update: buffer too small for nonce") + return 0, errors.New("decode Update: buffer too small for nonce") } if n < 0 { - return 0, fmt.Errorf("decode Update: nonce overflow") + return 0, errors.New("decode Update: nonce overflow") } pos += n } if u.Flags&CodeUpdate != 0 { if len(buf) < pos+length.Hash { - return 0, fmt.Errorf("decode Update: buffer too small for codeHash") + return 0, errors.New("decode Update: buffer too small for codeHash") } - copy(u.CodeHashOrStorage[:], buf[pos:pos+32]) + copy(u.CodeHash[:], buf[pos:pos+32]) pos += length.Hash - u.ValLength = length.Hash } if u.Flags&StorageUpdate != 0 { l, n := binary.Uvarint(buf[pos:]) if n == 0 { - return 0, fmt.Errorf("decode Update: buffer too small for storage len") + return 0, errors.New("decode Update: buffer too small for storage len") } if n < 0 { - return 0, fmt.Errorf("decode Update: storage pos overflow") + return 0, errors.New("decode Update: storage pos overflow") } pos += n if len(buf) < pos+int(l) { - return 0, fmt.Errorf("decode Update: buffer too small for storage") + return 0, errors.New("decode Update: buffer too small for storage") } - u.ValLength = int(l) - copy(u.CodeHashOrStorage[:], buf[pos:pos+int(l)]) - pos += int(l) + u.StorageLen = int(l) + copy(u.Storage[:], buf[pos:pos+u.StorageLen]) + pos += u.StorageLen } return pos, nil } @@ -2245,6 +2103,9 @@ func (u *Update) Decode(buf []byte, pos int) (int, error) { func (u *Update) String() string { var sb strings.Builder sb.WriteString(fmt.Sprintf("Flags: [%s]", u.Flags)) + if u.Deleted() { + sb.WriteString(", DELETED") + } if u.Flags&BalanceUpdate != 0 { sb.WriteString(fmt.Sprintf(", Balance: [%d]", &u.Balance)) } @@ -2252,10 +2113,10 @@ func (u *Update) String() string { sb.WriteString(fmt.Sprintf(", Nonce: [%d]", u.Nonce)) } if u.Flags&CodeUpdate != 0 { - sb.WriteString(fmt.Sprintf(", CodeHash: [%x]", u.CodeHashOrStorage)) + sb.WriteString(fmt.Sprintf(", CodeHash: [%x]", u.CodeHash)) } if u.Flags&StorageUpdate != 0 { - sb.WriteString(fmt.Sprintf(", Storage: [%x]", u.CodeHashOrStorage[:u.ValLength])) + sb.WriteString(fmt.Sprintf(", Storage: [%x]", u.Storage[:u.StorageLen])) } return sb.String() } diff --git a/erigon-lib/commitment/hex_patricia_hashed_test.go b/erigon-lib/commitment/hex_patricia_hashed_test.go index da27e2e09d4..9741f08c70f 100644 --- a/erigon-lib/commitment/hex_patricia_hashed_test.go +++ b/erigon-lib/commitment/hex_patricia_hashed_test.go @@ -503,9 +503,11 @@ func Test_HexPatriciaHashed_Sepolia(t *testing.T) { func Test_Cell_EncodeDecode(t 
*testing.T) { rnd := rand.New(rand.NewSource(time.Now().UnixMilli())) first := &Cell{ - Nonce: rnd.Uint64(), - HashLen: length.Hash, - StorageLen: rnd.Intn(33), + //Nonce: rnd.Uint64(), + //StorageLen: rnd.Intn(33), + //CodeHash: [32]byte{}, + //Storage: [32]byte{}, + hashLen: length.Hash, accountPlainKeyLen: length.Addr, storagePlainKeyLen: length.Addr + length.Hash, downHashedLen: rnd.Intn(129), @@ -514,8 +516,6 @@ func Test_Cell_EncodeDecode(t *testing.T) { extension: [64]byte{}, storagePlainKey: [52]byte{}, hash: [32]byte{}, - CodeHash: [32]byte{}, - Storage: [32]byte{}, accountPlainKey: [20]byte{}, } b := uint256.NewInt(rnd.Uint64()) @@ -528,9 +528,9 @@ func Test_Cell_EncodeDecode(t *testing.T) { rnd.Read(first.hash[:]) rnd.Read(first.CodeHash[:]) rnd.Read(first.Storage[:first.StorageLen]) - if rnd.Intn(100) > 50 { - first.Delete = true - } + //if rnd.Intn(100) > 50 { + // first.Delete = true + //} second := &Cell{} second.Decode(first.Encode()) @@ -539,13 +539,13 @@ func Test_Cell_EncodeDecode(t *testing.T) { require.EqualValues(t, first.downHashedKey[:], second.downHashedKey[:]) require.EqualValues(t, first.accountPlainKeyLen, second.accountPlainKeyLen) require.EqualValues(t, first.storagePlainKeyLen, second.storagePlainKeyLen) - require.EqualValues(t, first.HashLen, second.HashLen) + require.EqualValues(t, first.hashLen, second.hashLen) require.EqualValues(t, first.accountPlainKey[:], second.accountPlainKey[:]) require.EqualValues(t, first.storagePlainKey[:], second.storagePlainKey[:]) require.EqualValues(t, first.hash[:], second.hash[:]) require.EqualValues(t, first.extension[:first.extLen], second.extension[:second.extLen]) // encode doesn't code Nonce, Balance, CodeHash and Storage - require.EqualValues(t, first.Delete, second.Delete) + //require.EqualValues(t, first.Delete, second.Delete) } func Test_HexPatriciaHashed_StateEncode(t *testing.T) { @@ -966,3 +966,162 @@ func Test_HexPatriciaHashed_ProcessUpdates_UniqueRepresentationInTheMiddle(t *te "expected equal roots, got sequential [%v] != batch [%v]", hex.EncodeToString(roots[len(roots)-1]), hex.EncodeToString(batchRoot)) require.Lenf(t, batchRoot, 32, "root hash length should be equal to 32 bytes") } + +func TestUpdate_EncodeDecode(t *testing.T) { + updates := []Update{ + {Flags: BalanceUpdate, Balance: *uint256.NewInt(123), CodeHash: [32]byte(EmptyCodeHash)}, + {Flags: BalanceUpdate | NonceUpdate, Balance: *uint256.NewInt(45639015), Nonce: 123, CodeHash: [32]byte(EmptyCodeHash)}, + {Flags: BalanceUpdate | NonceUpdate | CodeUpdate, Balance: *uint256.NewInt(45639015), Nonce: 123, + CodeHash: [length.Hash]byte{ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20}}, + {Flags: StorageUpdate, Storage: [length.Hash]byte{0x21, 0x22, 0x23, 0x24}, StorageLen: 4, CodeHash: [32]byte(EmptyCodeHash)}, + {Flags: DeleteUpdate, CodeHash: [32]byte(EmptyCodeHash)}, + } + + var numBuf [10]byte + for i, update := range updates { + encoded := update.Encode(nil, numBuf[:]) + + decoded := Update{} + n, err := decoded.Decode(encoded, 0) + require.NoError(t, err, i) + require.Equal(t, len(encoded), n, i) + + require.Equal(t, update.Flags, decoded.Flags, i) + require.Equal(t, update.Balance, decoded.Balance, i) + require.Equal(t, update.Nonce, decoded.Nonce, i) + require.Equal(t, update.CodeHash, decoded.CodeHash, i) + require.Equal(t, update.Storage, decoded.Storage, i) + require.Equal(t, 
update.StorageLen, decoded.StorageLen, i) + } +} + +func TestUpdate_Merge(t *testing.T) { + type tcase struct { + a, b, e Update + } + + updates := []tcase{ + { + a: Update{Flags: BalanceUpdate, Balance: *uint256.NewInt(123), CodeHash: [32]byte(EmptyCodeHash)}, + b: Update{Flags: BalanceUpdate | NonceUpdate, Balance: *uint256.NewInt(45639015), Nonce: 123, CodeHash: [32]byte(EmptyCodeHash)}, + e: Update{Flags: BalanceUpdate | NonceUpdate, Balance: *uint256.NewInt(45639015), Nonce: 123, CodeHash: [32]byte(EmptyCodeHash)}, + }, + { + a: Update{Flags: BalanceUpdate | NonceUpdate, Balance: *uint256.NewInt(45639015), Nonce: 123, CodeHash: [32]byte(EmptyCodeHash)}, + b: Update{Flags: BalanceUpdate | NonceUpdate | CodeUpdate, Balance: *uint256.NewInt(1000000), Nonce: 547, CodeHash: [32]byte(EmptyCodeHash)}, + e: Update{Flags: BalanceUpdate | NonceUpdate | CodeUpdate, Balance: *uint256.NewInt(1000000), Nonce: 547, CodeHash: [32]byte(EmptyCodeHash)}, + }, + { + a: Update{Flags: BalanceUpdate | NonceUpdate | CodeUpdate, Balance: *uint256.NewInt(4568314), Nonce: 123, CodeHash: [32]byte(EmptyCodeHash)}, + b: Update{Flags: BalanceUpdate | NonceUpdate | CodeUpdate, Balance: *uint256.NewInt(45639015), Nonce: 124, + CodeHash: [length.Hash]byte{ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20}}, + e: Update{Flags: BalanceUpdate | NonceUpdate | CodeUpdate, Balance: *uint256.NewInt(45639015), Nonce: 124, CodeHash: [length.Hash]byte{ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20}}, + }, + { + a: Update{Flags: StorageUpdate, Storage: [length.Hash]byte{0x21, 0x22, 0x23, 0x24}, StorageLen: 4, CodeHash: [32]byte(EmptyCodeHash)}, + b: Update{Flags: DeleteUpdate, CodeHash: [32]byte(EmptyCodeHash)}, + e: Update{Flags: DeleteUpdate, CodeHash: [32]byte(EmptyCodeHash)}, + }, + } + + var numBuf [10]byte + for i, tc := range updates { + tc.a.Merge(&tc.b) + encA := tc.a.Encode(nil, numBuf[:]) + encE := tc.e.Encode(nil, numBuf[:]) + require.EqualValues(t, encE, encA, i) + } +} + +func TestCell_setFromUpdate(t *testing.T) { + rnd := rand.New(rand.NewSource(42)) + + b := uint256.NewInt(rnd.Uint64()) + update := Update{} + update.Reset() + + update.Balance = *b + update.Nonce = rand.Uint64() + rnd.Read(update.CodeHash[:]) + update.Flags = BalanceUpdate | NonceUpdate | CodeUpdate + + target := new(Cell) + target.setFromUpdate(&update) + require.True(t, update.Balance.Eq(&target.Balance)) + require.EqualValues(t, update.Nonce, target.Nonce) + require.EqualValues(t, update.CodeHash, target.CodeHash) + require.EqualValues(t, 0, target.StorageLen) + + update.Reset() + + update.Balance.SetUint64(0) + update.Nonce = rand.Uint64() + rnd.Read(update.CodeHash[:]) + update.Flags = NonceUpdate | CodeUpdate + + target.reset() + target.setFromUpdate(&update) + + require.True(t, update.Balance.Eq(&target.Balance)) + require.EqualValues(t, update.Nonce, target.Nonce) + require.EqualValues(t, update.CodeHash, target.CodeHash) + require.EqualValues(t, 0, target.StorageLen) + + update.Reset() + + update.Balance.SetUint64(rnd.Uint64() + rnd.Uint64()) + update.Nonce = rand.Uint64() + rnd.Read(update.Storage[:]) + update.StorageLen = len(update.Storage) + update.Flags = NonceUpdate | BalanceUpdate | StorageUpdate + + target.reset() + 
target.setFromUpdate(&update) + + require.True(t, update.Balance.Eq(&target.Balance)) + require.EqualValues(t, update.Nonce, target.Nonce) + require.EqualValues(t, update.CodeHash, target.CodeHash) + require.EqualValues(t, update.StorageLen, target.StorageLen) + require.EqualValues(t, update.Storage[:update.StorageLen], target.Storage[:target.StorageLen]) + + update.Reset() + + update.Balance.SetUint64(rnd.Uint64() + rnd.Uint64()) + update.Nonce = rand.Uint64() + rnd.Read(update.Storage[:rnd.Intn(len(update.Storage))]) + update.StorageLen = len(update.Storage) + update.Flags = NonceUpdate | BalanceUpdate | StorageUpdate + + target.reset() + target.setFromUpdate(&update) + + require.True(t, update.Balance.Eq(&target.Balance)) + require.EqualValues(t, update.Nonce, target.Nonce) + require.EqualValues(t, update.CodeHash, target.CodeHash) + require.EqualValues(t, EmptyCodeHashArray[:], target.CodeHash) + require.EqualValues(t, update.StorageLen, target.StorageLen) + require.EqualValues(t, update.Storage[:update.StorageLen], target.Storage[:target.StorageLen]) + + update.Reset() + update.Flags = DeleteUpdate + target.reset() + target.setFromUpdate(&update) + + require.True(t, update.Balance.Eq(&target.Balance)) + require.EqualValues(t, update.Nonce, target.Nonce) + require.EqualValues(t, EmptyCodeHashArray[:], target.CodeHash) + require.EqualValues(t, update.StorageLen, target.StorageLen) + require.EqualValues(t, update.Storage[:update.StorageLen], target.Storage[:target.StorageLen]) +} diff --git a/erigon-lib/commitment/patricia_state_mock_test.go b/erigon-lib/commitment/patricia_state_mock_test.go index 7004a6ffcb4..b7641ea1783 100644 --- a/erigon-lib/commitment/patricia_state_mock_test.go +++ b/erigon-lib/commitment/patricia_state_mock_test.go @@ -19,6 +19,7 @@ package commitment import ( "encoding/binary" "encoding/hex" + "errors" "fmt" "slices" "testing" @@ -65,90 +66,67 @@ func (ms *MockState) GetBranch(prefix []byte) ([]byte, uint64, error) { return nil, 0, nil } -func (ms *MockState) GetAccount(plainKey []byte, cell *Cell) error { +func (ms *MockState) GetAccount(plainKey []byte) (*Update, error) { exBytes, ok := ms.sm[string(plainKey[:])] if !ok { ms.t.Logf("GetAccount not found key [%x]", plainKey) - cell.Delete = true - return nil + u := new(Update) + u.Flags = DeleteUpdate + return u, nil } + var ex Update pos, err := ex.Decode(exBytes, 0) if err != nil { ms.t.Fatalf("GetAccount decode existing [%x], bytes: [%x]: %v", plainKey, exBytes, err) - return nil + return nil, nil } if pos != len(exBytes) { ms.t.Fatalf("GetAccount key [%x] leftover %d bytes in [%x], consumed %x", plainKey, len(exBytes)-pos, exBytes, pos) - return nil + return nil, nil } if ex.Flags&StorageUpdate != 0 { ms.t.Logf("GetAccount reading storage item for key [%x]", plainKey) - return fmt.Errorf("storage read by GetAccount") + return nil, errors.New("storage read by GetAccount") } if ex.Flags&DeleteUpdate != 0 { ms.t.Fatalf("GetAccount reading deleted account for key [%x]", plainKey) - return nil + return nil, nil } - if ex.Flags&BalanceUpdate != 0 { - cell.Balance.Set(&ex.Balance) - } else { - cell.Balance.Clear() - } - if ex.Flags&NonceUpdate != 0 { - cell.Nonce = ex.Nonce - } else { - cell.Nonce = 0 - } - if ex.Flags&CodeUpdate != 0 { - copy(cell.CodeHash[:], ex.CodeHashOrStorage[:]) - } else { - copy(cell.CodeHash[:], EmptyCodeHash) - } - return nil + return &ex, nil } -func (ms *MockState) GetStorage(plainKey []byte, cell *Cell) error { +func (ms *MockState) GetStorage(plainKey []byte) (*Update, error) {
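+	// Mirrors GetAccount above: under the reworked PatriciaContext interface a
+	// missing key is reported as an *Update flagged DeleteUpdate rather than by
+	// mutating a caller-supplied Cell; callers merge the result via setFromUpdate.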
exBytes, ok := ms.sm[string(plainKey[:])] if !ok { ms.t.Logf("GetStorage not found key [%x]", plainKey) - cell.Delete = true - return nil + u := new(Update) + u.Flags = DeleteUpdate + return u, nil } var ex Update pos, err := ex.Decode(exBytes, 0) if err != nil { ms.t.Fatalf("GetStorage decode existing [%x], bytes: [%x]: %v", plainKey, exBytes, err) - return nil + return nil, nil } if pos != len(exBytes) { ms.t.Fatalf("GetStorage key [%x] leftover bytes in [%x], consumed %x", plainKey, exBytes, pos) - return nil + return nil, nil } if ex.Flags&BalanceUpdate != 0 { ms.t.Logf("GetStorage reading balance for key [%x]", plainKey) - return nil + return nil, nil } if ex.Flags&NonceUpdate != 0 { ms.t.Fatalf("GetStorage reading nonce for key [%x]", plainKey) - return nil + return nil, nil } if ex.Flags&CodeUpdate != 0 { ms.t.Fatalf("GetStorage reading codeHash for key [%x]", plainKey) - return nil - } - if ex.Flags&DeleteUpdate != 0 { - ms.t.Fatalf("GetStorage reading deleted item for key [%x]", plainKey) - return nil + return nil, nil } - if ex.Flags&StorageUpdate != 0 { - copy(cell.Storage[:], ex.CodeHashOrStorage[:]) - cell.StorageLen = len(ex.CodeHashOrStorage) - } else { - cell.StorageLen = 0 - cell.Storage = [length.Hash]byte{} - } - return nil + return &ex, nil } func (ms *MockState) applyPlainUpdates(plainKeys [][]byte, updates []Update) error { @@ -176,12 +154,12 @@ func (ms *MockState) applyPlainUpdates(plainKeys [][]byte, updates []Update) err } if update.Flags&CodeUpdate != 0 { ex.Flags |= CodeUpdate - copy(ex.CodeHashOrStorage[:], update.CodeHashOrStorage[:]) + copy(ex.CodeHash[:], update.CodeHash[:]) } if update.Flags&StorageUpdate != 0 { ex.Flags |= StorageUpdate - copy(ex.CodeHashOrStorage[:], update.CodeHashOrStorage[:]) - ex.ValLength = update.ValLength + copy(ex.Storage[:], update.Storage[:]) + ex.StorageLen = update.StorageLen } ms.sm[string(key)] = ex.Encode(nil, ms.numBuf[:]) } else { @@ -421,7 +399,7 @@ func (ub *UpdateBuilder) Build() (plainKeys [][]byte, updates []Update) { } if codeHash, ok := ub.codeHashes[string(key)]; ok { u.Flags |= CodeUpdate - copy(u.CodeHashOrStorage[:], codeHash[:]) + copy(u.CodeHash[:], codeHash[:]) } if _, del := ub.deletes[string(key)]; del { u.Flags = DeleteUpdate @@ -437,9 +415,9 @@ func (ub *UpdateBuilder) Build() (plainKeys [][]byte, updates []Update) { if sm, ok1 := ub.storages[string(key)]; ok1 { if storage, ok2 := sm[string(key2)]; ok2 { u.Flags |= StorageUpdate - u.CodeHashOrStorage = [length.Hash]byte{} - u.ValLength = len(storage) - copy(u.CodeHashOrStorage[:], storage) + u.Storage = [length.Hash]byte{} + u.StorageLen = len(storage) + copy(u.Storage[:], storage) } } } diff --git a/erigon-lib/common/datadir/dirs.go b/erigon-lib/common/datadir/dirs.go index 037d8041100..d8020dd6b62 100644 --- a/erigon-lib/common/datadir/dirs.go +++ b/erigon-lib/common/datadir/dirs.go @@ -47,6 +47,7 @@ type Dirs struct { Nodes string CaplinBlobs string CaplinIndexing string + CaplinLatest string } func New(datadir string) Dirs { @@ -75,11 +76,12 @@ func New(datadir string) Dirs { Nodes: filepath.Join(datadir, "nodes"), CaplinBlobs: filepath.Join(datadir, "caplin", "blobs"), CaplinIndexing: filepath.Join(datadir, "caplin", "indexing"), + CaplinLatest: filepath.Join(datadir, "caplin", "latest"), } dir.MustExist(dirs.Chaindata, dirs.Tmp, dirs.SnapIdx, dirs.SnapHistory, dirs.SnapDomain, dirs.SnapAccessors, - dirs.Downloader, dirs.TxPool, dirs.Nodes, dirs.CaplinBlobs, dirs.CaplinIndexing) + dirs.Downloader, dirs.TxPool, dirs.Nodes, dirs.CaplinBlobs,
dirs.CaplinIndexing, dirs.CaplinLatest) return dirs } diff --git a/erigon-lib/common/dbg/experiments.go b/erigon-lib/common/dbg/experiments.go index 593fe7bc455..0250f70d41b 100644 --- a/erigon-lib/common/dbg/experiments.go +++ b/erigon-lib/common/dbg/experiments.go @@ -48,6 +48,9 @@ var ( // force skipping of any non-Erigon2 .torrent files DownloaderOnlyBlocks = EnvBool("DOWNLOADER_ONLY_BLOCKS", false) + // allows to collect reading metrics for kv by file level + KVReadLevelledMetrics = EnvBool("KV_READ_METRICS", false) + // run prune on flush with given timeout. If timeout is 0, no prune on flush will be performed PruneOnFlushTimeout = EnvDuration("PRUNE_ON_FLUSH_TIMEOUT", time.Duration(0)) diff --git a/erigon-lib/common/metrics/metrics_enabled.go b/erigon-lib/common/metrics/metrics_enabled.go index 460504e25a6..f0e20954f24 100644 --- a/erigon-lib/common/metrics/metrics_enabled.go +++ b/erigon-lib/common/metrics/metrics_enabled.go @@ -28,6 +28,6 @@ type Config struct { //nolint:maligned var DefaultConfig = Config{ Enabled: false, EnabledExpensive: false, - HTTP: "0.0.0.0", + HTTP: "127.0.0.1", Port: 6060, } diff --git a/erigon-lib/diagnostics/client.go b/erigon-lib/diagnostics/client.go index eac8cf3d907..a7fec652caa 100644 --- a/erigon-lib/diagnostics/client.go +++ b/erigon-lib/diagnostics/client.go @@ -245,7 +245,7 @@ func ReadSavedData(db kv.RoDB) (hinfo HardwareInfo, ssinfo []SyncStage, snpdwl S } var ramInfo RAMInfo - var cpuInfo CPUInfo + var cpuInfo []CPUInfo var diskInfo DiskInfo ParseData(ramBytes, &ramInfo) ParseData(cpuBytes, &cpuInfo) diff --git a/erigon-lib/diagnostics/entities.go b/erigon-lib/diagnostics/entities.go index 08bbeca5f20..dc850e2fb50 100644 --- a/erigon-lib/diagnostics/entities.go +++ b/erigon-lib/diagnostics/entities.go @@ -22,23 +22,6 @@ import ( "golang.org/x/exp/maps" ) -type SyncStageType string - -const ( - Snapshots SyncStageType = "Snapshots" - BlockHashes SyncStageType = "BlockHashes" - Senders SyncStageType = "Senders" - Execution SyncStageType = "Execution" - HashState SyncStageType = "HashState" - IntermediateHashes SyncStageType = "IntermediateHashes" - CallTraces SyncStageType = "CallTraces" - AccountHistoryIndex SyncStageType = "AccountHistoryIndex" - StorageHistoryIndex SyncStageType = "StorageHistoryIndex" - LogIndex SyncStageType = "LogIndex" - TxLookup SyncStageType = "TxLookup" - Finish SyncStageType = "Finish" -) - type PeerStatistics struct { PeerType string BytesIn uint64 @@ -148,10 +131,6 @@ type SnapshotSegmentIndexingStatistics struct { Sys uint64 `json:"sys"` } -type SnapshotSegmentIndexingFinishedUpdate struct { - SegmentName string `json:"segmentName"` -} - type SnapshotFillDBStatistics struct { Stages []SnapshotFillDBStage `json:"stages"` } @@ -172,26 +151,41 @@ type SnapshoFilesList struct { } type HardwareInfo struct { - Disk DiskInfo `json:"disk"` - RAM RAMInfo `json:"ram"` - CPU CPUInfo `json:"cpu"` + Disk DiskInfo `json:"disk"` + RAM RAMInfo `json:"ram"` + CPU []CPUInfo `json:"cpu"` } type RAMInfo struct { - Total uint64 `json:"total"` - Free uint64 `json:"free"` + Total uint64 `json:"total"` + Available uint64 `json:"available"` + Used uint64 `json:"used"` + UsedPercent float64 `json:"usedPercent"` } type DiskInfo struct { - FsType string `json:"fsType"` - Total uint64 `json:"total"` - Free uint64 `json:"free"` + FsType string `json:"fsType"` + Total uint64 `json:"total"` + Free uint64 `json:"free"` + MountPoint string `json:"mountPoint"` + Device string `json:"device"` + Details string `json:"details"` } type CPUInfo struct 
{ - Cores int `json:"cores"` - ModelName string `json:"modelName"` - Mhz float64 `json:"mhz"` + CPU int32 `json:"cpu"` + VendorID string `json:"vendorId"` + Family string `json:"family"` + Model string `json:"model"` + Stepping int32 `json:"stepping"` + PhysicalID string `json:"physicalId"` + CoreID string `json:"coreId"` + Cores int32 `json:"cores"` + ModelName string `json:"modelName"` + Mhz float64 `json:"mhz"` + CacheSize int32 `json:"cacheSize"` + Flags []string `json:"flags"` + Microcode string `json:"microcode"` } type BlockHeadersUpdate struct { @@ -331,10 +325,6 @@ func (ti SnapshotIndexingStatistics) Type() Type { return TypeOf(ti) } -func (ti SnapshotSegmentIndexingFinishedUpdate) Type() Type { - return TypeOf(ti) -} - func (ti PeerStatisticMsgUpdate) Type() Type { return TypeOf(ti) } diff --git a/erigon-lib/diagnostics/snapshots.go b/erigon-lib/diagnostics/snapshots.go index 78009e86327..7fb2a39bd32 100644 --- a/erigon-lib/diagnostics/snapshots.go +++ b/erigon-lib/diagnostics/snapshots.go @@ -37,92 +37,66 @@ var ( func (d *DiagnosticClient) setupSnapshotDiagnostics(rootCtx context.Context) { d.runSnapshotListener(rootCtx) d.runSegmentDownloadingListener(rootCtx) - d.runSegmentIndexingListener(rootCtx) - d.runSegmentIndexingFinishedListener(rootCtx) d.runSnapshotFilesListListener(rootCtx) + d.runSegmentIndexingListener(rootCtx) d.runFileDownloadedListener(rootCtx) d.runFillDBListener(rootCtx) } -func (d *DiagnosticClient) runSnapshotListener(rootCtx context.Context) { +func (d *DiagnosticClient) runFillDBListener(rootCtx context.Context) { go func() { - ctx, ch, closeChannel := Context[SnapshotDownloadStatistics](rootCtx, 1) + ctx, ch, closeChannel := Context[SnapshotFillDBStageUpdate](rootCtx, 1) defer closeChannel() - StartProviders(ctx, TypeOf(SnapshotDownloadStatistics{}), log.Root()) + StartProviders(ctx, TypeOf(SnapshotFillDBStageUpdate{}), log.Root()) for { select { case <-rootCtx.Done(): return case info := <-ch: + d.SetFillDBInfo(info.Stage) - d.mu.Lock() - d.syncStats.SnapshotDownload.Downloaded = info.Downloaded - d.syncStats.SnapshotDownload.Total = info.Total - d.syncStats.SnapshotDownload.TotalTime = info.TotalTime - d.syncStats.SnapshotDownload.DownloadRate = info.DownloadRate - d.syncStats.SnapshotDownload.UploadRate = info.UploadRate - d.syncStats.SnapshotDownload.Peers = info.Peers - d.syncStats.SnapshotDownload.Files = info.Files - d.syncStats.SnapshotDownload.Connections = info.Connections - d.syncStats.SnapshotDownload.Alloc = info.Alloc - d.syncStats.SnapshotDownload.Sys = info.Sys - d.syncStats.SnapshotDownload.DownloadFinished = info.DownloadFinished - d.syncStats.SnapshotDownload.TorrentMetadataReady = info.TorrentMetadataReady - d.mu.Unlock() - - downloadedPercent := GetShanpshotsPercentDownloaded(info.Downloaded, info.Total, info.TorrentMetadataReady, info.Files) - remainingBytes := info.Total - info.Downloaded - downloadTimeLeft := CalculateTime(remainingBytes, info.DownloadRate) - totalDownloadTimeString := time.Duration(info.TotalTime) * time.Second + totalTimeString := time.Duration(info.TimeElapsed) * time.Second d.UpdateSnapshotStageStats(SyncStageStats{ - TimeElapsed: totalDownloadTimeString.String(), - TimeLeft: downloadTimeLeft, - Progress: downloadedPercent, - }, "Downloading snapshots") - - if info.DownloadFinished { - d.SaveData() - return - } + TimeElapsed: totalTimeString.String(), + TimeLeft: "unknown", + Progress: fmt.Sprintf("%d%%", (info.Stage.Current*100)/info.Stage.Total), + }, "Fill DB from snapshots") + 
d.SaveSnapshotStageStatsToDB() } } }() } -func GetShanpshotsPercentDownloaded(downloaded uint64, total uint64, torrentMetadataReady int32, files int32) string { - if torrentMetadataReady < files { - return "calculating..." - } +func (d *DiagnosticClient) SetFillDBInfo(info SnapshotFillDBStage) { + d.mu.Lock() + defer d.mu.Unlock() - percent := float32(downloaded) / float32(total/100) + d.setFillDBInfo(info) +} - if percent > 100 { - percent = 100 - } +func (d *DiagnosticClient) setFillDBInfo(info SnapshotFillDBStage) { + if d.syncStats.SnapshotFillDB.Stages == nil { + d.syncStats.SnapshotFillDB.Stages = []SnapshotFillDBStage{info} + } else { - return fmt.Sprintf("%.2f%%", percent) + for idx, stg := range d.syncStats.SnapshotFillDB.Stages { + if stg.StageName == info.StageName { + d.syncStats.SnapshotFillDB.Stages[idx] = info + break + } + } + } } -func (d *DiagnosticClient) UpdateSnapshotStageStats(stats SyncStageStats, subStageInfo string) { +func (d *DiagnosticClient) SaveSnapshotStageStatsToDB() { d.mu.Lock() defer d.mu.Unlock() - d.updateSnapshotStageStats(stats, subStageInfo) + d.saveSnapshotStageStatsToDB() } -func (d *DiagnosticClient) updateSnapshotStageStats(stats SyncStageStats, subStageInfo string) { - idxs := d.getCurrentSyncIdxs() - if idxs.Stage == -1 || idxs.SubStage == -1 { - log.Debug("[Diagnostics] Can't find running stage or substage while updating Snapshots stage stats.", "stages:", d.syncStages, "stats:", stats, "subStageInfo:", subStageInfo) - return - } - - d.syncStages[idxs.Stage].SubStages[idxs.SubStage].Stats = stats -} func (d *DiagnosticClient) saveSnapshotStageStatsToDB() { - d.mu.Lock() - defer d.mu.Unlock() err := d.db.Update(d.ctx, func(tx kv.RwTx) error { err := SnapshotFillDBUpdater(d.syncStats.SnapshotFillDB)(tx) if err != nil { @@ -141,300 +115,18 @@ func (d *DiagnosticClient) saveSnapshotStageStatsToDB() { } } -func (d *DiagnosticClient) runSegmentDownloadingListener(rootCtx context.Context) { - go func() { - ctx, ch, closeChannel := Context[SegmentDownloadStatistics](rootCtx, 1) - defer closeChannel() - - StartProviders(ctx, TypeOf(SegmentDownloadStatistics{}), log.Root()) - for { - select { - case <-rootCtx.Done(): - return - case info := <-ch: - d.mu.Lock() - if d.syncStats.SnapshotDownload.SegmentsDownloading == nil { - d.syncStats.SnapshotDownload.SegmentsDownloading = map[string]SegmentDownloadStatistics{} - } - - if val, ok := d.syncStats.SnapshotDownload.SegmentsDownloading[info.Name]; ok { - val.TotalBytes = info.TotalBytes - val.DownloadedBytes = info.DownloadedBytes - val.Webseeds = info.Webseeds - val.Peers = info.Peers - - d.syncStats.SnapshotDownload.SegmentsDownloading[info.Name] = val - } else { - d.syncStats.SnapshotDownload.SegmentsDownloading[info.Name] = info - } - d.mu.Unlock() - } - } - }() -} - -func (d *DiagnosticClient) runSegmentIndexingListener(rootCtx context.Context) { - go func() { - ctx, ch, closeChannel := Context[SnapshotIndexingStatistics](rootCtx, 1) - defer closeChannel() - - StartProviders(ctx, TypeOf(SnapshotIndexingStatistics{}), log.Root()) - for { - select { - case <-rootCtx.Done(): - return - case info := <-ch: - d.addOrUpdateSegmentIndexingState(info) - indexingFinished := d.UpdateIndexingStatus() - if indexingFinished { - d.SaveData() - return - } - } - } - }() -} - -func (d *DiagnosticClient) runSegmentIndexingFinishedListener(rootCtx context.Context) { - go func() { - ctx, ch, closeChannel := Context[SnapshotSegmentIndexingFinishedUpdate](rootCtx, 1) - defer closeChannel() - - StartProviders(ctx, 
TypeOf(SnapshotSegmentIndexingFinishedUpdate{}), log.Root()) - for { - select { - case <-rootCtx.Done(): - return - case info := <-ch: - d.mu.Lock() - found := false - for i := range d.syncStats.SnapshotIndexing.Segments { - if d.syncStats.SnapshotIndexing.Segments[i].SegmentName == info.SegmentName { - found = true - d.syncStats.SnapshotIndexing.Segments[i].Percent = 100 - } - } - - if !found { - d.syncStats.SnapshotIndexing.Segments = append(d.syncStats.SnapshotIndexing.Segments, SnapshotSegmentIndexingStatistics{ - SegmentName: info.SegmentName, - Percent: 100, - Alloc: 0, - Sys: 0, - }) - } - - d.mu.Unlock() - - d.UpdateIndexingStatus() - } - } - }() -} - -func (d *DiagnosticClient) UpdateIndexingStatus() (indexingFinished bool) { - totalProgressPercent := 0 - d.mu.Lock() - defer d.mu.Unlock() - - for _, seg := range d.syncStats.SnapshotIndexing.Segments { - totalProgressPercent += seg.Percent - } - - totalProgress := totalProgressPercent / len(d.syncStats.SnapshotIndexing.Segments) - - d.updateSnapshotStageStats(SyncStageStats{ - TimeElapsed: SecondsToHHMMString(uint64(d.syncStats.SnapshotIndexing.TimeElapsed)), - TimeLeft: "unknown", - Progress: fmt.Sprintf("%d%%", totalProgress), - }, "Indexing snapshots") - - if totalProgress >= 100 { - d.syncStats.SnapshotIndexing.IndexingFinished = true - } - return d.syncStats.SnapshotIndexing.IndexingFinished -} - -func (d *DiagnosticClient) addOrUpdateSegmentIndexingState(upd SnapshotIndexingStatistics) { - d.mu.Lock() - defer d.mu.Unlock() - if d.syncStats.SnapshotIndexing.Segments == nil { - d.syncStats.SnapshotIndexing.Segments = []SnapshotSegmentIndexingStatistics{} - } - - for i := range upd.Segments { - found := false - for j := range d.syncStats.SnapshotIndexing.Segments { - if d.syncStats.SnapshotIndexing.Segments[j].SegmentName == upd.Segments[i].SegmentName { - d.syncStats.SnapshotIndexing.Segments[j].Percent = upd.Segments[i].Percent - d.syncStats.SnapshotIndexing.Segments[j].Alloc = upd.Segments[i].Alloc - d.syncStats.SnapshotIndexing.Segments[j].Sys = upd.Segments[i].Sys - found = true - break - } - } - - if !found { - d.syncStats.SnapshotIndexing.Segments = append(d.syncStats.SnapshotIndexing.Segments, upd.Segments[i]) - } - } - - d.syncStats.SnapshotIndexing.TimeElapsed = upd.TimeElapsed -} - -func (d *DiagnosticClient) runSnapshotFilesListListener(rootCtx context.Context) { - go func() { - ctx, ch, closeChannel := Context[SnapshoFilesList](rootCtx, 1) - defer closeChannel() - - StartProviders(ctx, TypeOf(SnapshoFilesList{}), log.Root()) - for { - select { - case <-rootCtx.Done(): - return - case info := <-ch: - d.mu.Lock() - d.snapshotFileList = info - d.mu.Unlock() - - if len(info.Files) > 0 { - return - } - } - } - }() -} - -func (d *DiagnosticClient) runFileDownloadedListener(rootCtx context.Context) { - go func() { - ctx, ch, closeChannel := Context[FileDownloadedStatisticsUpdate](rootCtx, 1) - defer closeChannel() - - StartProviders(ctx, TypeOf(FileDownloadedStatisticsUpdate{}), log.Root()) - for { - select { - case <-rootCtx.Done(): - return - case info := <-ch: - d.mu.Lock() - - if d.syncStats.SnapshotDownload.SegmentsDownloading == nil { - d.syncStats.SnapshotDownload.SegmentsDownloading = map[string]SegmentDownloadStatistics{} - } - - if val, ok := d.syncStats.SnapshotDownload.SegmentsDownloading[info.FileName]; ok { - val.DownloadedStats = FileDownloadedStatistics{ - TimeTook: info.TimeTook, - AverageRate: info.AverageRate, - } - - d.syncStats.SnapshotDownload.SegmentsDownloading[info.FileName] = val - } else { - 
d.syncStats.SnapshotDownload.SegmentsDownloading[info.FileName] = SegmentDownloadStatistics{ - Name: info.FileName, - TotalBytes: 0, - DownloadedBytes: 0, - Webseeds: nil, - Peers: nil, - DownloadedStats: FileDownloadedStatistics{ - TimeTook: info.TimeTook, - AverageRate: info.AverageRate, - }, - } - } - - d.mu.Unlock() - } - } - }() -} - -func (d *DiagnosticClient) UpdateFileDownloadedStatistics(downloadedInfo *FileDownloadedStatisticsUpdate, downloadingInfo *SegmentDownloadStatistics) { - d.mu.Lock() - defer d.mu.Unlock() - if d.syncStats.SnapshotDownload.SegmentsDownloading == nil { - d.syncStats.SnapshotDownload.SegmentsDownloading = map[string]SegmentDownloadStatistics{} - } - - if downloadedInfo != nil { - dwStats := FileDownloadedStatistics{ - TimeTook: downloadedInfo.TimeTook, - AverageRate: downloadedInfo.AverageRate, - } - if val, ok := d.syncStats.SnapshotDownload.SegmentsDownloading[downloadedInfo.FileName]; ok { - val.DownloadedStats = dwStats - - d.syncStats.SnapshotDownload.SegmentsDownloading[downloadedInfo.FileName] = val - } else { - d.syncStats.SnapshotDownload.SegmentsDownloading[downloadedInfo.FileName] = SegmentDownloadStatistics{ - Name: downloadedInfo.FileName, - TotalBytes: 0, - DownloadedBytes: 0, - Webseeds: make([]SegmentPeer, 0), - Peers: make([]SegmentPeer, 0), - DownloadedStats: dwStats, - } - } - } else { - if val, ok := d.syncStats.SnapshotDownload.SegmentsDownloading[downloadingInfo.Name]; ok { - val.TotalBytes = downloadingInfo.TotalBytes - val.DownloadedBytes = downloadingInfo.DownloadedBytes - val.Webseeds = downloadingInfo.Webseeds - val.Peers = downloadingInfo.Peers - - d.syncStats.SnapshotDownload.SegmentsDownloading[downloadingInfo.Name] = val - } else { - d.syncStats.SnapshotDownload.SegmentsDownloading[downloadingInfo.Name] = *downloadingInfo - } - } -} - -func (d *DiagnosticClient) runFillDBListener(rootCtx context.Context) { - go func() { - ctx, ch, closeChannel := Context[SnapshotFillDBStageUpdate](rootCtx, 1) - defer closeChannel() - - StartProviders(ctx, TypeOf(SnapshotFillDBStageUpdate{}), log.Root()) - for { - select { - case <-rootCtx.Done(): - return - case info := <-ch: - d.SetFillDBInfo(info.Stage) - - totalTimeString := time.Duration(info.TimeElapsed) * time.Second - - d.UpdateSnapshotStageStats(SyncStageStats{ - TimeElapsed: totalTimeString.String(), - TimeLeft: "unknown", - Progress: fmt.Sprintf("%d%%", (info.Stage.Current*100)/info.Stage.Total), - }, "Fill DB from snapshots") - d.saveSnapshotStageStatsToDB() - } - } - }() -} - -func (d *DiagnosticClient) SetFillDBInfo(info SnapshotFillDBStage) { - d.mu.Lock() - defer d.mu.Unlock() - - if d.syncStats.SnapshotFillDB.Stages == nil { - d.syncStats.SnapshotFillDB.Stages = []SnapshotFillDBStage{info} - } else { - - for idx, stg := range d.syncStats.SnapshotFillDB.Stages { - if stg.StageName == info.StageName { - d.syncStats.SnapshotFillDB.Stages[idx] = info - break - } - } - } -} - // Deprecated - it's not thread-safe and used only in tests. Need introduce another method or add special methods for Tests. 
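
The SyncStatistics hunk just below swaps the shallow `return d.syncStats` for a JSON marshal/unmarshal round trip, so callers receive a detached copy instead of aliasing the client's internal maps and slices. A minimal sketch of the same deep-copy-via-JSON pattern, using an illustrative stand-in type rather than the real diagnostics structs:

package main

import (
	"encoding/json"
	"fmt"
)

// Stats stands in for any stats struct whose fields are all JSON-serializable.
type Stats struct {
	Segments map[string]int `json:"segments"`
}

// deepCopy returns a detached copy: mutating the copy's map cannot
// corrupt the original, which is the point of the SyncStatistics change.
func deepCopy(s Stats) (Stats, error) {
	b, err := json.Marshal(s)
	if err != nil {
		return Stats{}, err
	}
	var out Stats
	if err := json.Unmarshal(b, &out); err != nil {
		return Stats{}, err
	}
	return out, nil
}

func main() {
	orig := Stats{Segments: map[string]int{"headers": 50}}
	cp, _ := deepCopy(orig)
	cp.Segments["headers"] = 100
	fmt.Println(orig.Segments["headers"]) // still 50
}

Note that the deprecation comment still applies: the marshal itself reads d.syncStats without taking the mutex, so the copy only protects callers from later mutation, not from a concurrent write during the read.
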
func (d *DiagnosticClient) SyncStatistics() SyncStatistics { - return d.syncStats + var newStats SyncStatistics + statsBytes, err := json.Marshal(d.syncStats) + if err != nil { + return SyncStatistics{} + } + err = json.Unmarshal(statsBytes, &newStats) + if err != nil { + return SyncStatistics{} + } + return newStats } func (d *DiagnosticClient) SyncStatsJson(w io.Writer) { diff --git a/erigon-lib/diagnostics/snapshots_download.go b/erigon-lib/diagnostics/snapshots_download.go new file mode 100644 index 00000000000..8745b836282 --- /dev/null +++ b/erigon-lib/diagnostics/snapshots_download.go @@ -0,0 +1,215 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see <http://www.gnu.org/licenses/>. + +package diagnostics + +import ( + "context" + + "github.com/erigontech/erigon-lib/log/v3" +) + +func (d *DiagnosticClient) runSnapshotListener(rootCtx context.Context) { + go func() { + ctx, ch, closeChannel := Context[SnapshotDownloadStatistics](rootCtx, 1) + defer closeChannel() + + StartProviders(ctx, TypeOf(SnapshotDownloadStatistics{}), log.Root()) + for { + select { + case <-rootCtx.Done(): + return + case info := <-ch: + d.SetSnapshotDownloadInfo(info) + d.UpdateSnapshotStageStats(CalculateSyncStageStats(info), "Downloading snapshots") + + if info.DownloadFinished { + d.SaveData() + return + } + } + } + }() +} + +func (d *DiagnosticClient) SetSnapshotDownloadInfo(info SnapshotDownloadStatistics) { + d.mu.Lock() + defer d.mu.Unlock() + d.setSnapshotDownloadInfo(info) +} + +func (d *DiagnosticClient) setSnapshotDownloadInfo(info SnapshotDownloadStatistics) { + d.syncStats.SnapshotDownload.Downloaded = info.Downloaded + d.syncStats.SnapshotDownload.Total = info.Total + d.syncStats.SnapshotDownload.TotalTime = info.TotalTime + d.syncStats.SnapshotDownload.DownloadRate = info.DownloadRate + d.syncStats.SnapshotDownload.UploadRate = info.UploadRate + d.syncStats.SnapshotDownload.Peers = info.Peers + d.syncStats.SnapshotDownload.Files = info.Files + d.syncStats.SnapshotDownload.Connections = info.Connections + d.syncStats.SnapshotDownload.Alloc = info.Alloc + d.syncStats.SnapshotDownload.Sys = info.Sys + d.syncStats.SnapshotDownload.DownloadFinished = info.DownloadFinished + d.syncStats.SnapshotDownload.TorrentMetadataReady = info.TorrentMetadataReady +} + +func (d *DiagnosticClient) runSegmentDownloadingListener(rootCtx context.Context) { + go func() { + ctx, ch, closeChannel := Context[SegmentDownloadStatistics](rootCtx, 1) + defer closeChannel() + + StartProviders(ctx, TypeOf(SegmentDownloadStatistics{}), log.Root()) + for { + select { + case <-rootCtx.Done(): + return + case info := <-ch: + d.SetDownloadSegments(info) + } + } + }() +} + +func (d *DiagnosticClient) SetDownloadSegments(info SegmentDownloadStatistics) { + d.mu.Lock() + defer d.mu.Unlock() + d.setDownloadSegments(info) +} + +func (d *DiagnosticClient) setDownloadSegments(info SegmentDownloadStatistics) { + if
d.syncStats.SnapshotDownload.SegmentsDownloading == nil { + d.syncStats.SnapshotDownload.SegmentsDownloading = map[string]SegmentDownloadStatistics{} + } + + if val, ok := d.syncStats.SnapshotDownload.SegmentsDownloading[info.Name]; ok { + val.TotalBytes = info.TotalBytes + val.DownloadedBytes = info.DownloadedBytes + val.Webseeds = info.Webseeds + val.Peers = info.Peers + + d.syncStats.SnapshotDownload.SegmentsDownloading[info.Name] = val + } else { + d.syncStats.SnapshotDownload.SegmentsDownloading[info.Name] = info + } +} + +func (d *DiagnosticClient) runSnapshotFilesListListener(rootCtx context.Context) { + go func() { + ctx, ch, closeChannel := Context[SnapshoFilesList](rootCtx, 1) + defer closeChannel() + + StartProviders(ctx, TypeOf(SnapshoFilesList{}), log.Root()) + for { + select { + case <-rootCtx.Done(): + return + case info := <-ch: + d.SetSnapshotFilesList(info) + + if len(info.Files) > 0 { + return + } + } + } + }() +} + +func (d *DiagnosticClient) SetSnapshotFilesList(info SnapshoFilesList) { + d.mu.Lock() + defer d.mu.Unlock() + d.setSnapshotFilesList(info) +} + +func (d *DiagnosticClient) setSnapshotFilesList(info SnapshoFilesList) { + d.snapshotFileList = info +} + +func (d *DiagnosticClient) runFileDownloadedListener(rootCtx context.Context) { + go func() { + ctx, ch, closeChannel := Context[FileDownloadedStatisticsUpdate](rootCtx, 1) + defer closeChannel() + + StartProviders(ctx, TypeOf(FileDownloadedStatisticsUpdate{}), log.Root()) + for { + select { + case <-rootCtx.Done(): + return + case info := <-ch: + d.UpdateFileDownloadedStatistics(&info, nil) + } + } + }() +} + +func (d *DiagnosticClient) UpdateFileDownloadedStatistics(downloadedInfo *FileDownloadedStatisticsUpdate, downloadingInfo *SegmentDownloadStatistics) { + d.mu.Lock() + defer d.mu.Unlock() + d.updateFileDownloadedStatistics(downloadedInfo, downloadingInfo) +} + +func (d *DiagnosticClient) updateFileDownloadedStatistics(downloadedInfo *FileDownloadedStatisticsUpdate, downloadingInfo *SegmentDownloadStatistics) { + if d.syncStats.SnapshotDownload.SegmentsDownloading == nil { + d.syncStats.SnapshotDownload.SegmentsDownloading = map[string]SegmentDownloadStatistics{} + } + + if downloadedInfo != nil { + dwStats := FileDownloadedStatistics{ + TimeTook: downloadedInfo.TimeTook, + AverageRate: downloadedInfo.AverageRate, + } + if val, ok := d.syncStats.SnapshotDownload.SegmentsDownloading[downloadedInfo.FileName]; ok { + val.DownloadedStats = dwStats + + d.syncStats.SnapshotDownload.SegmentsDownloading[downloadedInfo.FileName] = val + } else { + d.syncStats.SnapshotDownload.SegmentsDownloading[downloadedInfo.FileName] = SegmentDownloadStatistics{ + Name: downloadedInfo.FileName, + TotalBytes: 0, + DownloadedBytes: 0, + Webseeds: make([]SegmentPeer, 0), + Peers: make([]SegmentPeer, 0), + DownloadedStats: dwStats, + } + } + } else { + if val, ok := d.syncStats.SnapshotDownload.SegmentsDownloading[downloadingInfo.Name]; ok { + val.TotalBytes = downloadingInfo.TotalBytes + val.DownloadedBytes = downloadingInfo.DownloadedBytes + val.Webseeds = downloadingInfo.Webseeds + val.Peers = downloadingInfo.Peers + + d.syncStats.SnapshotDownload.SegmentsDownloading[downloadingInfo.Name] = val + } else { + d.syncStats.SnapshotDownload.SegmentsDownloading[downloadingInfo.Name] = *downloadingInfo + } + } +} + +func (d *DiagnosticClient) UpdateSnapshotStageStats(stats SyncStageStats, subStageInfo string) { + d.mu.Lock() + defer d.mu.Unlock() + d.updateSnapshotStageStats(stats, subStageInfo) +} + +func (d *DiagnosticClient) 
updateSnapshotStageStats(stats SyncStageStats, subStageInfo string) { + idxs := d.getCurrentSyncIdxs() + if idxs.Stage == -1 || idxs.SubStage == -1 { + log.Debug("[Diagnostics] Can't find running stage or substage while updating Snapshots stage stats.", "stages:", d.syncStages, "stats:", stats, "subStageInfo:", subStageInfo) + return + } + + d.syncStages[idxs.Stage].SubStages[idxs.SubStage].Stats = stats +} diff --git a/erigon-lib/diagnostics/snapshots_indexing.go b/erigon-lib/diagnostics/snapshots_indexing.go new file mode 100644 index 00000000000..4ed032fd380 --- /dev/null +++ b/erigon-lib/diagnostics/snapshots_indexing.go @@ -0,0 +1,108 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see <http://www.gnu.org/licenses/>. + +package diagnostics + +import ( + "context" + "fmt" + + "github.com/erigontech/erigon-lib/log/v3" +) + +func (d *DiagnosticClient) runSegmentIndexingListener(rootCtx context.Context) { + go func() { + ctx, ch, closeChannel := Context[SnapshotIndexingStatistics](rootCtx, 1) + defer closeChannel() + + StartProviders(ctx, TypeOf(SnapshotIndexingStatistics{}), log.Root()) + for { + select { + case <-rootCtx.Done(): + return + case info := <-ch: + d.AddOrUpdateSegmentIndexingState(info) + indexingFinished := d.UpdateIndexingStatus() + if indexingFinished { + d.SaveData() + return + } + } + } + }() +} + +func (d *DiagnosticClient) AddOrUpdateSegmentIndexingState(upd SnapshotIndexingStatistics) { + d.mu.Lock() + defer d.mu.Unlock() + d.addOrUpdateSegmentIndexingState(upd) +} + +func (d *DiagnosticClient) addOrUpdateSegmentIndexingState(upd SnapshotIndexingStatistics) { + if d.syncStats.SnapshotIndexing.Segments == nil { + d.syncStats.SnapshotIndexing.Segments = []SnapshotSegmentIndexingStatistics{} + } + + for i := range upd.Segments { + found := false + for j := range d.syncStats.SnapshotIndexing.Segments { + if d.syncStats.SnapshotIndexing.Segments[j].SegmentName == upd.Segments[i].SegmentName { + d.syncStats.SnapshotIndexing.Segments[j].Percent = upd.Segments[i].Percent + d.syncStats.SnapshotIndexing.Segments[j].Alloc = upd.Segments[i].Alloc + d.syncStats.SnapshotIndexing.Segments[j].Sys = upd.Segments[i].Sys + found = true + break + } + } + + if !found { + d.syncStats.SnapshotIndexing.Segments = append(d.syncStats.SnapshotIndexing.Segments, upd.Segments[i]) + } + } + + // If the elapsed time equals -1, it means indexing took less than one main-loop update interval, so we should not overwrite the stored value + if upd.TimeElapsed != -1 { + d.syncStats.SnapshotIndexing.TimeElapsed = upd.TimeElapsed + } +} + +func (d *DiagnosticClient) UpdateIndexingStatus() (indexingFinished bool) { + d.mu.Lock() + defer d.mu.Unlock() + + return d.updateIndexingStatus() +} + +func (d *DiagnosticClient) updateIndexingStatus() (indexingFinished bool) { + totalProgressPercent := 0 + for _, seg := range d.syncStats.SnapshotIndexing.Segments { + totalProgressPercent += seg.Percent + } + + totalProgress
:= totalProgressPercent / len(d.syncStats.SnapshotIndexing.Segments) + + d.updateSnapshotStageStats(SyncStageStats{ + TimeElapsed: SecondsToHHMMString(uint64(d.syncStats.SnapshotIndexing.TimeElapsed)), + TimeLeft: "unknown", + Progress: fmt.Sprintf("%d%%", totalProgress), + }, "Indexing snapshots") + + if totalProgress >= 100 { + d.syncStats.SnapshotIndexing.IndexingFinished = true + } + + return d.syncStats.SnapshotIndexing.IndexingFinished +} diff --git a/erigon-lib/diagnostics/snapshots_test.go b/erigon-lib/diagnostics/snapshots_test.go index c92126966ea..55cb5bd36d3 100644 --- a/erigon-lib/diagnostics/snapshots_test.go +++ b/erigon-lib/diagnostics/snapshots_test.go @@ -42,7 +42,9 @@ func TestUpdateFileDownloadingStats(t *testing.T) { d.UpdateFileDownloadedStatistics(&fileDownloadedUpdMock, nil) - require.Equal(t, sd["test"], diagnostics.SegmentDownloadStatistics{ + sd = d.SyncStatistics().SnapshotDownload.SegmentsDownloading + + toccompare := diagnostics.SegmentDownloadStatistics{ Name: "test", TotalBytes: 1, DownloadedBytes: 1, @@ -52,7 +54,8 @@ func TestUpdateFileDownloadingStats(t *testing.T) { TimeTook: 1.0, AverageRate: 1, }, - }) + } + require.Equal(t, sd["test"], toccompare) } var ( @@ -84,19 +87,19 @@ func TestPercentDiownloaded(t *testing.T) { //Test metadata ready progress = diagnostics.GetShanpshotsPercentDownloaded(downloaded, total, files, files) - require.Equal(t, progress, "10.00%") + require.Equal(t, progress, "10%") //Test 100 % progress = diagnostics.GetShanpshotsPercentDownloaded(total, total, files, files) - require.Equal(t, progress, "100.00%") + require.Equal(t, progress, "100%") //Test 0 % progress = diagnostics.GetShanpshotsPercentDownloaded(0, total, files, files) - require.Equal(t, progress, "0.00%") + require.Equal(t, progress, "0%") //Test more than 100 % progress = diagnostics.GetShanpshotsPercentDownloaded(total+1, total, files, files) - require.Equal(t, progress, "100.00%") + require.Equal(t, progress, "100%") } func TestFillDBFromSnapshots(t *testing.T) { @@ -108,3 +111,74 @@ func TestFillDBFromSnapshots(t *testing.T) { require.NotEmpty(t, stats.SnapshotFillDB.Stages) require.Equal(t, stats.SnapshotFillDB.Stages[0], diagnostics.SnapshotFillDBStage{StageName: "Headers", Current: 1, Total: 10}) } + +func TestAddOrUpdateSegmentIndexingState(t *testing.T) { + dts := []diagnostics.SnapshotSegmentIndexingStatistics{ + { + SegmentName: "test", + Percent: 50, + Alloc: 0, + Sys: 0, + }, + } + + d, err := NewTestDiagnosticClient() + require.NoError(t, err) + + d.AddOrUpdateSegmentIndexingState(diagnostics.SnapshotIndexingStatistics{ + Segments: dts, + TimeElapsed: -1, + }) + stats := d.SyncStatistics() + + require.NotEmpty(t, stats.SnapshotIndexing) + require.NotEmpty(t, stats.SnapshotIndexing.Segments) + require.Equal(t, stats.SnapshotIndexing.Segments[0], dts[0]) + require.True(t, stats.SnapshotIndexing.TimeElapsed == 0) + require.False(t, stats.SnapshotIndexing.IndexingFinished) + + dts = []diagnostics.SnapshotSegmentIndexingStatistics{ + { + SegmentName: "test", + Percent: 100, + Alloc: 0, + Sys: 0, + }, + { + SegmentName: "test2", + Percent: 10, + Alloc: 0, + Sys: 0, + }, + } + + d.AddOrUpdateSegmentIndexingState(diagnostics.SnapshotIndexingStatistics{ + Segments: dts, + TimeElapsed: 20, + }) + + stats = d.SyncStatistics() + require.Equal(t, stats.SnapshotIndexing.Segments[0].Percent, 100) + + finished := d.UpdateIndexingStatus() + require.False(t, finished) + + //test indexing finished + dts = []diagnostics.SnapshotSegmentIndexingStatistics{ + { + 
SegmentName: "test2", + Percent: 100, + Alloc: 0, + Sys: 0, + }, + } + d.AddOrUpdateSegmentIndexingState(diagnostics.SnapshotIndexingStatistics{ + Segments: dts, + TimeElapsed: 20, + }) + + finished = d.UpdateIndexingStatus() + require.True(t, finished) + stats = d.SyncStatistics() + require.True(t, stats.SnapshotIndexing.IndexingFinished) +} diff --git a/erigon-lib/diagnostics/sys_info.go b/erigon-lib/diagnostics/sys_info.go index f5831e79c2c..c32dfe3d48f 100644 --- a/erigon-lib/diagnostics/sys_info.go +++ b/erigon-lib/diagnostics/sys_info.go @@ -90,25 +90,30 @@ func GetSysInfo(dirPath string) HardwareInfo { } func GetRAMInfo() RAMInfo { - totalRAM := uint64(0) - freeRAM := uint64(0) + rmi := RAMInfo{ + Total: 0, + Available: 0, + Used: 0, + UsedPercent: 0, + } vmStat, err := mem.VirtualMemory() if err == nil { - totalRAM = vmStat.Total - freeRAM = vmStat.Free + rmi.Total = vmStat.Total + rmi.Available = vmStat.Available + rmi.Used = vmStat.Used + rmi.UsedPercent = vmStat.UsedPercent } - return RAMInfo{ - Total: totalRAM, - Free: freeRAM, - } + return rmi } func GetDiskInfo(nodeDisk string) DiskInfo { fsType := "" total := uint64(0) free := uint64(0) + mountPoint := "/" + device := "/" partitions, err := disk.Partitions(false) @@ -120,6 +125,8 @@ func GetDiskInfo(nodeDisk string) DiskInfo { fsType = partition.Fstype total = iocounters.Total free = iocounters.Free + mountPoint = partition.Mountpoint + device = partition.Device break } @@ -127,34 +134,36 @@ func GetDiskInfo(nodeDisk string) DiskInfo { } } + diskDetails, err := diskutils.DiskInfo(device) + if err != nil { + log.Debug("[diagnostics] Failed to get disk info", "err", err) + } + return DiskInfo{ - FsType: fsType, - Total: total, - Free: free, + FsType: fsType, + Total: total, + Free: free, + MountPoint: mountPoint, + Device: device, + Details: diskDetails, } } -func GetCPUInfo() CPUInfo { - modelName := "" - cores := 0 - mhz := float64(0) +func GetCPUInfo() []CPUInfo { + cpuinfo := make([]CPUInfo, 0) cpuInfo, err := cpu.Info() if err == nil { for _, info := range cpuInfo { - modelName = info.ModelName - cores = int(info.Cores) - mhz = info.Mhz - - break + cpuinfo = append(cpuinfo, CPUInfo{ + ModelName: info.ModelName, + Cores: info.Cores, + Mhz: info.Mhz, + }) } } - return CPUInfo{ - ModelName: modelName, - Cores: cores, - Mhz: mhz, - } + return cpuinfo } func ReadRAMInfoFromTx(tx kv.Tx) ([]byte, error) { @@ -188,7 +197,7 @@ func RAMInfoUpdater(info RAMInfo) func(tx kv.RwTx) error { return PutDataToTable(kv.DiagSystemInfo, SystemRamInfoKey, info) } -func CPUInfoUpdater(info CPUInfo) func(tx kv.RwTx) error { +func CPUInfoUpdater(info []CPUInfo) func(tx kv.RwTx) error { return PutDataToTable(kv.DiagSystemInfo, SystemCpuInfoKey, info) } diff --git a/erigon-lib/diagnostics/utils.go b/erigon-lib/diagnostics/utils.go index 8f9db4a16c1..0b97941315c 100644 --- a/erigon-lib/diagnostics/utils.go +++ b/erigon-lib/diagnostics/utils.go @@ -20,6 +20,7 @@ import ( "encoding/json" "fmt" "reflect" + "time" "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon-lib/log/v3" @@ -79,14 +80,21 @@ func InitSubStagesFromList(list []string) []SyncSubStage { func CalculateTime(amountLeft, rate uint64) string { if rate == 0 { - return "999hrs:99m" + return "999h:99m" } timeLeftInSeconds := amountLeft / rate hours := timeLeftInSeconds / 3600 minutes := (timeLeftInSeconds / 60) % 60 - return fmt.Sprintf("%dhrs:%dm", hours, minutes) + if hours == 0 && minutes == 0 { + return fmt.Sprintf("%ds", timeLeftInSeconds) + } else if hours == 0 { + //sec 
:= timeLeftInSeconds % 60 + return fmt.Sprintf("%dm:%ds", minutes, timeLeftInSeconds%60) + } + + return fmt.Sprintf("%dh:%dm", hours, minutes) } func SecondsToHHMMString(seconds uint64) string { @@ -110,3 +118,47 @@ func ParseData(data []byte, v interface{}) { log.Warn("[Diagnostics] Failed to parse data", "data", string(data), "type", reflect.TypeOf(v)) } } + +func CalculateSyncStageStats(info SnapshotDownloadStatistics) SyncStageStats { + downloadedPercent := GetShanpshotsPercentDownloaded(info.Downloaded, info.Total, info.TorrentMetadataReady, info.Files) + remainingBytes := info.Total - info.Downloaded + downloadTimeLeft := CalculateTime(remainingBytes, info.DownloadRate) + totalDownloadTimeString := time.Duration(info.TotalTime) * time.Second + + return SyncStageStats{ + TimeElapsed: totalDownloadTimeString.String(), + TimeLeft: downloadTimeLeft, + Progress: downloadedPercent, + } +} + +func GetShanpshotsPercentDownloaded(downloaded uint64, total uint64, torrentMetadataReady int32, files int32) string { + if torrentMetadataReady < files { + return "calculating..." + } + + if downloaded == 0 || total == 0 { + return "0%" + } + + fd := float32(downloaded) + t100 := float32(total) / 100 + percent := fd / t100 + + if percent > 100 { + percent = 100 + } + + // return a whole-number percentage when there is no fractional part + if percent == float32(int(percent)) { + return fmt.Sprintf("%.0f%%", percent) + } + + // return the percentage with one decimal place when only one decimal digit is significant (e.g. 50.5% or 23.7%) + if percent == float32(int(percent*10))/10 { + return fmt.Sprintf("%.1f%%", percent) + } + + return fmt.Sprintf("%.2f%%", percent) +} diff --git a/erigon-lib/diagnostics/utils_test.go b/erigon-lib/diagnostics/utils_test.go index 238ad2a7f6f..6393d0566d6 100644 --- a/erigon-lib/diagnostics/utils_test.go +++ b/erigon-lib/diagnostics/utils_test.go @@ -15,8 +15,10 @@ func TestParseData(t *testing.T) { require.Equal(t, diagnostics.RAMInfo{}, v) newv := diagnostics.RAMInfo{ - Total: 1, - Free: 2, + Total: 1, + Available: 2, + Used: 3, + UsedPercent: 4, } data, err := json.Marshal(newv) @@ -25,3 +27,46 @@ func TestParseData(t *testing.T) { diagnostics.ParseData(data, &v) require.Equal(t, newv, v) } + +// Testing the function CalculateSyncStageStats +func TestCalculateSyncStageStats(t *testing.T) { + sds := diagnostics.SnapshotDownloadStatistics{ + Downloaded: 100, + Total: 200, + TorrentMetadataReady: 10, + Files: 10, + DownloadRate: 10, + TotalTime: 1000, + } + + expected := diagnostics.SyncStageStats{ + TimeElapsed: "16m40s", + TimeLeft: "10s", + Progress: "50%", + } + + require.Equal(t, expected, diagnostics.CalculateSyncStageStats(sds)) +} + +// Test CalculateTime function +func TestCalculateTime(t *testing.T) { + require.Equal(t, "999h:99m", diagnostics.CalculateTime(0, 0)) + require.Equal(t, "999h:99m", diagnostics.CalculateTime(1, 0)) + require.Equal(t, "1s", diagnostics.CalculateTime(1, 1)) + require.Equal(t, "10s", diagnostics.CalculateTime(10, 1)) + require.Equal(t, "2m:40s", diagnostics.CalculateTime(160, 1)) + require.Equal(t, "1h:40m", diagnostics.CalculateTime(6000, 1)) +} + +// Test GetShanpshotsPercentDownloaded function +func TestGetShanpshotsPercentDownloaded(t *testing.T) { + require.Equal(t, "0%", diagnostics.GetShanpshotsPercentDownloaded(0, 0, 0, 0)) + require.Equal(t, "0%", diagnostics.GetShanpshotsPercentDownloaded(0, 1, 0, 0)) + require.Equal(t, "100%", diagnostics.GetShanpshotsPercentDownloaded(1, 1, 1, 1)) + require.Equal(t, "50%",
diagnostics.GetShanpshotsPercentDownloaded(1, 2, 1, 1)) + + require.Equal(t, "50.01%", diagnostics.GetShanpshotsPercentDownloaded(5001, 10000, 1, 1)) + require.Equal(t, "50.5%", diagnostics.GetShanpshotsPercentDownloaded(5050, 10000, 1, 1)) + + require.Equal(t, "calculating...", diagnostics.GetShanpshotsPercentDownloaded(10000, 10000, 0, 1)) +} diff --git a/erigon-lib/diskutils/diskutils.go b/erigon-lib/diskutils/diskutils.go index a1195d570c2..90d97749cbf 100644 --- a/erigon-lib/diskutils/diskutils.go +++ b/erigon-lib/diskutils/diskutils.go @@ -24,3 +24,8 @@ func MountPointForDirPath(dirPath string) string { log.Debug("[diskutils] Implemented only for darwin") return "/" } + +func DiskInfo(disk string) (string, error) { + log.Debug("[diskutils] Implemented only for darwin") + return "", nil +} diff --git a/erigon-lib/diskutils/diskutils_darwin.go b/erigon-lib/diskutils/diskutils_darwin.go index fcf92393776..9c938b15e27 100644 --- a/erigon-lib/diskutils/diskutils_darwin.go +++ b/erigon-lib/diskutils/diskutils_darwin.go @@ -19,7 +19,9 @@ package diskutils import ( + "bytes" "os" + "os/exec" "syscall" "github.com/erigontech/erigon-lib/log/v3" @@ -65,3 +67,16 @@ func SmlinkForDirPath(dirPath string) string { return dirPath } } + +func DiskInfo(disk string) (string, error) { + cmd := exec.Command("diskutil", "info", disk) + var out bytes.Buffer + cmd.Stdout = &out + err := cmd.Run() + if err != nil { + return "", err + } + + output := out.String() + return output, nil +} diff --git a/erigon-lib/downloader/downloader.go b/erigon-lib/downloader/downloader.go index ec32cdd0172..fbb6d8ed048 100644 --- a/erigon-lib/downloader/downloader.go +++ b/erigon-lib/downloader/downloader.go @@ -122,6 +122,7 @@ type AggStats struct { Progress float32 BytesCompleted, BytesTotal uint64 + CompletionRate uint64 DroppedCompleted, DroppedTotal uint64 BytesDownload, BytesUpload uint64 @@ -129,6 +130,9 @@ type AggStats struct { LocalFileHashes int LocalFileHashTime time.Duration + BytesHashed, BytesFlushed uint64 + HashRate, FlushRate uint64 + WebseedActiveTrips *atomic.Int64 WebseedMaxActiveTrips *atomic.Int64 WebseedTripCount *atomic.Int64 @@ -505,13 +509,12 @@ func initSnapshotLock(ctx context.Context, cfg *downloadercfg.Cfg, db kv.RoDB, s Chain: cfg.ChainName, } - files, err := SeedableFiles(cfg.Dirs, cfg.ChainName) + files, err := SeedableFiles(cfg.Dirs, cfg.ChainName, false) if err != nil { return nil, err } snapCfg := cfg.SnapshotConfig - if snapCfg == nil { snapCfg = snapcfg.KnownCfg(cfg.ChainName) } @@ -815,13 +818,14 @@ type seedHash struct { func (d *Downloader) mainLoop(silent bool) error { if d.webseedsDiscover { + // Corner case: no peers -> no announcements to trackers -> no magnet-link resolution (but the magnet link has the filename), // which means we can start adding webseeds without waiting for `<-t.GotInfo()` d.wg.Add(1) go func() { defer d.wg.Done() // webseeds.Discover may create new .torrent files on disk - d.webseeds.Discover(d.ctx, d.cfg.WebSeedFiles, d.cfg.Dirs.Snap) + d.webseeds.Discover(d.ctx, d.cfg.WebSeedFileProviders, d.cfg.Dirs.Snap) // apply webseeds to existing torrents if err := d.addTorrentFilesFromDisk(true); err != nil && !errors.Is(err, context.Canceled) { d.logger.Warn("[snapshots] addTorrentFilesFromDisk", "err", err) @@ -939,7 +943,7 @@ func (d *Downloader) mainLoop(silent bool) error { } else { downloadComplete <- downloadStatus{ name: fileInfo.Name(), - err: fmt.Errorf("hash check failed"), + err: errors.New("hash check failed"), } d.logger.Warn("[snapshots] Torrent hash does not match file",
"file", fileInfo.Name(), "torrent-hash", infoHash, "file-hash", hex.EncodeToString(fileHashBytes)) @@ -1172,7 +1176,7 @@ func (d *Downloader) mainLoop(silent bool) error { alist = append(alist, t.Name()) } - d.logger.Debug("[snapshot] download status", "pending", plist, "availible", alist, "downloading", dlist, "complete", clist, "failed", flist) + d.logger.Trace("[snapshot] download status", "pending", plist, "availible", alist, "downloading", dlist, "complete", clist, "failed", flist) } for _, t := range available { @@ -1605,7 +1609,7 @@ func (d *Downloader) torrentDownload(t *torrent.Torrent, statusChan chan downloa func (d *Downloader) webDownload(peerUrls []*url.URL, t *torrent.Torrent, i *webDownloadInfo, statusChan chan downloadStatus) (*RCloneSession, error) { if d.webDownloadClient == nil { - return nil, fmt.Errorf("webdownload client not enabled") + return nil, errors.New("webdownload client not enabled") } peerUrl, err := selectDownloadPeer(d.ctx, peerUrls, t) @@ -1739,7 +1743,7 @@ func (d *Downloader) webDownload(peerUrls []*url.URL, t *torrent.Torrent, i *web func selectDownloadPeer(ctx context.Context, peerUrls []*url.URL, t *torrent.Torrent) (string, error) { switch len(peerUrls) { case 0: - return "", fmt.Errorf("no download peers") + return "", errors.New("no download peers") case 1: downloadUrl := peerUrls[0].JoinPath(t.Name()) @@ -1771,7 +1775,7 @@ func selectDownloadPeer(ctx context.Context, peerUrls []*url.URL, t *torrent.Tor } } - return "", fmt.Errorf("can't find download peer") + return "", errors.New("can't find download peer") } func availableTorrents(ctx context.Context, pending []*torrent.Torrent, downloading map[string]*downloadInfo, fileSlots int, pieceSlots int) []*torrent.Torrent { @@ -1980,11 +1984,14 @@ func (d *Downloader) ReCalcStats(interval time.Duration) { stats.Completed = true stats.BytesDownload = uint64(connStats.BytesReadUsefulIntendedData.Int64()) stats.BytesUpload = uint64(connStats.BytesWrittenData.Int64()) + stats.BytesHashed = uint64(connStats.BytesHashed.Int64()) + stats.BytesFlushed = uint64(connStats.BytesFlushed.Int64()) + stats.BytesCompleted = uint64(connStats.BytesCompleted.Int64()) - stats.DroppedCompleted lastMetadataReady := stats.MetadataReady - stats.BytesTotal, stats.BytesCompleted, stats.ConnectionsTotal, stats.MetadataReady = - atomic.LoadUint64(&stats.DroppedTotal), atomic.LoadUint64(&stats.DroppedCompleted), 0, 0 + stats.BytesTotal, stats.ConnectionsTotal, stats.MetadataReady = + atomic.LoadUint64(&stats.DroppedTotal), 0, 0 var zeroProgress []string var noMetadata []string @@ -2040,7 +2047,7 @@ func (d *Downloader) ReCalcStats(interval time.Duration) { } } - stats.BytesCompleted += uint64(bytesCompleted) + //stats.BytesCompleted += uint64(bytesCompleted) stats.BytesTotal += uint64(tLen) for _, peer := range peersOfThisFile { @@ -2173,6 +2180,12 @@ func (d *Downloader) ReCalcStats(interval time.Duration) { "torrent", torrentInfo, "db", dbInfo, "t-complete", tComplete, + "hashed", common.ByteCount(stats.BytesHashed), + "hash-rate", fmt.Sprintf("%s/s", common.ByteCount(stats.HashRate)), + "completed", common.ByteCount(stats.BytesCompleted), + "completion-rate", fmt.Sprintf("%s/s", common.ByteCount(stats.CompletionRate)), + "flushed", common.ByteCount(stats.BytesFlushed), + "flush-rate", fmt.Sprintf("%s/s", common.ByteCount(stats.FlushRate)), "webseed-trips", stats.WebseedTripCount.Load(), "webseed-active", stats.WebseedActiveTrips.Load(), "webseed-max-active", stats.WebseedMaxActiveTrips.Load(), @@ -2243,16 +2256,51 @@ func (d 
*Downloader) ReCalcStats(interval time.Duration) { } } + decay := func(prev uint64) uint64 { + switch { + case prev < 1000: + return prev / 16 + case stats.FlushRate < 10000: + return prev / 8 + case stats.FlushRate < 100000: + return prev / 4 + default: + return prev / 2 + } + } + if stats.BytesDownload > prevStats.BytesDownload { stats.DownloadRate = (stats.BytesDownload - prevStats.BytesDownload) / uint64(interval.Seconds()) } else { - stats.DownloadRate = prevStats.DownloadRate / 2 + stats.DownloadRate = decay(prevStats.DownloadRate) + } + + if stats.BytesHashed > prevStats.BytesHashed { + stats.HashRate = (stats.BytesHashed - prevStats.BytesHashed) / uint64(interval.Seconds()) + } else { + stats.HashRate = decay(prevStats.HashRate) + } + + if stats.BytesCompleted > stats.BytesTotal { + stats.BytesCompleted = stats.BytesTotal + } + + if stats.BytesCompleted > prevStats.BytesCompleted { + stats.CompletionRate = (stats.BytesCompleted - prevStats.BytesCompleted) / uint64(interval.Seconds()) + } else { + stats.CompletionRate = decay(prevStats.CompletionRate) + } + + if stats.BytesFlushed > prevStats.BytesFlushed { + stats.FlushRate = (stats.BytesFlushed - prevStats.BytesFlushed) / uint64(interval.Seconds()) + } else { + stats.FlushRate = decay(prevStats.FlushRate) } if stats.BytesUpload > prevStats.BytesUpload { stats.UploadRate = (stats.BytesUpload - prevStats.BytesUpload) / uint64(interval.Seconds()) } else { - stats.UploadRate = prevStats.UploadRate / 2 + stats.UploadRate = decay(prevStats.UploadRate) } if stats.BytesTotal == 0 { @@ -2592,20 +2640,20 @@ func (d *Downloader) AddMagnetLink(ctx context.Context, infoHash metainfo.Hash, return nil } -func SeedableFiles(dirs datadir.Dirs, chainName string) ([]string, error) { - files, err := seedableSegmentFiles(dirs.Snap, chainName) +func SeedableFiles(dirs datadir.Dirs, chainName string, all bool) ([]string, error) { + files, err := seedableSegmentFiles(dirs.Snap, chainName, all) if err != nil { return nil, fmt.Errorf("seedableSegmentFiles: %w", err) } - l1, err := seedableStateFilesBySubDir(dirs.Snap, "idx") + l1, err := seedableStateFilesBySubDir(dirs.Snap, "idx", all) if err != nil { return nil, err } - l2, err := seedableStateFilesBySubDir(dirs.Snap, "history") + l2, err := seedableStateFilesBySubDir(dirs.Snap, "history", all) if err != nil { return nil, err } - l3, err := seedableStateFilesBySubDir(dirs.Snap, "domain") + l3, err := seedableStateFilesBySubDir(dirs.Snap, "domain", all) if err != nil { return nil, err } @@ -2701,7 +2749,7 @@ func (d *Downloader) addTorrentFilesFromDisk(quiet bool) error { return eg.Wait() } func (d *Downloader) BuildTorrentFilesIfNeed(ctx context.Context, chain string, ignore snapcfg.Preverified) error { - _, err := BuildTorrentFilesIfNeed(ctx, d.cfg.Dirs, d.torrentFS, chain, ignore) + _, err := BuildTorrentFilesIfNeed(ctx, d.cfg.Dirs, d.torrentFS, chain, ignore, false) return err } func (d *Downloader) Stats() AggStats { diff --git a/erigon-lib/downloader/downloader_grpc_server.go b/erigon-lib/downloader/downloader_grpc_server.go index 4b2ef0dc2eb..b48497f7806 100644 --- a/erigon-lib/downloader/downloader_grpc_server.go +++ b/erigon-lib/downloader/downloader_grpc_server.go @@ -18,6 +18,7 @@ package downloader import ( "context" + "errors" "fmt" "os" "path/filepath" @@ -60,7 +61,7 @@ func (s *GrpcServer) Add(ctx context.Context, request *proto_downloader.AddReque for i, it := range request.Items { if it.Path == "" { - return nil, fmt.Errorf("field 'path' is required") + return nil, errors.New("field 
'path' is required") } select { @@ -91,7 +92,7 @@ func (s *GrpcServer) Delete(ctx context.Context, request *proto_downloader.Delet torrents := s.d.torrentClient.Torrents() for _, name := range request.Paths { if name == "" { - return nil, fmt.Errorf("field 'path' is required") + return nil, errors.New("field 'path' is required") } for _, t := range torrents { select { @@ -136,6 +137,9 @@ func (s *GrpcServer) Stats(ctx context.Context, request *proto_downloader.StatsR BytesTotal: stats.BytesTotal, UploadRate: stats.UploadRate, DownloadRate: stats.DownloadRate, + HashRate: stats.HashRate, + FlushRate: stats.FlushRate, + CompletionRate: stats.CompletionRate, }, nil } diff --git a/erigon-lib/downloader/downloadercfg/downloadercfg.go b/erigon-lib/downloader/downloadercfg/downloadercfg.go index 865894951fd..3d6eb6697eb 100644 --- a/erigon-lib/downloader/downloadercfg/downloadercfg.go +++ b/erigon-lib/downloader/downloadercfg/downloadercfg.go @@ -52,7 +52,7 @@ type Cfg struct { DownloadSlots int WebSeedUrls []*url.URL - WebSeedFiles []string + WebSeedFileProviders []string SnapshotConfig *snapcfg.Cfg DownloadTorrentFilesFromWebseed bool AddTorrentsFromDisk bool @@ -220,7 +220,7 @@ func New(dirs datadir.Dirs, version string, verbosity lg.Level, downloadRate, up return &Cfg{Dirs: dirs, ChainName: chainName, ClientConfig: torrentConfig, DownloadSlots: downloadSlots, - WebSeedUrls: webseedHttpProviders, WebSeedFiles: webseedFileProviders, + WebSeedUrls: webseedHttpProviders, WebSeedFileProviders: webseedFileProviders, DownloadTorrentFilesFromWebseed: true, AddTorrentsFromDisk: true, SnapshotLock: lockSnapshots, SnapshotConfig: snapcfg.KnownCfg(chainName), MdbxWriteMap: mdbxWriteMap, diff --git a/erigon-lib/downloader/mdbx_piece_completion.go b/erigon-lib/downloader/mdbx_piece_completion.go index 86568a4e526..08317f3f746 100644 --- a/erigon-lib/downloader/mdbx_piece_completion.go +++ b/erigon-lib/downloader/mdbx_piece_completion.go @@ -128,12 +128,17 @@ func (m *mdbxPieceCompletion) Set(pk metainfo.PieceKey, b bool, awaitFlush bool) return nil } + // if we're awaiting flush update the DB immediately so it does not + // intefere with the timing of the background commit - may not be + // necessary - in which case the batch can be used if awaitFlush { return m.db.Update(context.Background(), func(tx kv.RwTx) error { return putCompletion(tx, pk.InfoHash, uint32(pk.Index), b) }) } + // batch updates for non flushed updated as they may happen in validation and + // there may be many if fast succession which can be slow if handled individually return m.db.Batch(func(tx kv.RwTx) error { return putCompletion(tx, pk.InfoHash, uint32(pk.Index), b) }) diff --git a/erigon-lib/downloader/rclone.go b/erigon-lib/downloader/rclone.go index 2957ba4f85b..caa09f7e9b7 100644 --- a/erigon-lib/downloader/rclone.go +++ b/erigon-lib/downloader/rclone.go @@ -110,7 +110,7 @@ func (c *RCloneClient) start(logger log.Logger) error { rclone, _ := exec.LookPath("rclone") if len(rclone) == 0 { - return fmt.Errorf("rclone not found in PATH") + return errors.New("rclone not found in PATH") } logger.Info("[downloader] rclone found in PATH: enhanced upload/download enabled") @@ -687,7 +687,7 @@ var ErrAccessDenied = errors.New("access denied") func (c *RCloneSession) ReadRemoteDir(ctx context.Context, refresh bool) ([]fs.DirEntry, error) { if len(c.remoteFs) == 0 { - return nil, fmt.Errorf("remote fs undefined") + return nil, errors.New("remote fs undefined") } c.oplock.Lock() @@ -871,7 +871,7 @@ func (c *RCloneSession) syncFiles(ctx 
context.Context) { if syncQueue != nil { syncQueue <- request } else { - request.cerr <- fmt.Errorf("no sync queue available") + request.cerr <- errors.New("no sync queue available") } } diff --git a/erigon-lib/downloader/snaptype/files.go b/erigon-lib/downloader/snaptype/files.go index 085a4c13428..4a0c769b086 100644 --- a/erigon-lib/downloader/snaptype/files.go +++ b/erigon-lib/downloader/snaptype/files.go @@ -34,7 +34,7 @@ import ( ) var ( - ErrInvalidFileName = fmt.Errorf("invalid compressed file name") + ErrInvalidFileName = errors.New("invalid compressed file name") ) func FileName(version Version, from, to uint64, fileType string) string { diff --git a/erigon-lib/downloader/snaptype/type.go b/erigon-lib/downloader/snaptype/type.go index f163674e4eb..e4f2fc69aa8 100644 --- a/erigon-lib/downloader/snaptype/type.go +++ b/erigon-lib/downloader/snaptype/type.go @@ -52,7 +52,7 @@ func ParseVersion(v string) (Version, error) { } if len(v) == 0 { - return 0, fmt.Errorf("invalid version: no prefix") + return 0, errors.New("invalid version: no prefix") } return 0, fmt.Errorf("invalid version prefix: %s", v[0:1]) diff --git a/erigon-lib/downloader/util.go b/erigon-lib/downloader/util.go index 80725a7548a..5269f33972b 100644 --- a/erigon-lib/downloader/util.go +++ b/erigon-lib/downloader/util.go @@ -21,6 +21,7 @@ import ( "context" "crypto/sha1" "encoding/json" + "errors" "fmt" "io" "os" @@ -74,7 +75,7 @@ type torrentInfo struct { Completed *time.Time `json:"completed,omitempty"` } -func seedableSegmentFiles(dir string, chainName string) ([]string, error) { +func seedableSegmentFiles(dir string, chainName string, skipSeedableCheck bool) ([]string, error) { files, err := dir2.ListFiles(dir, snaptype.SeedableV2Extensions()...) if err != nil { return nil, err @@ -90,7 +91,7 @@ func seedableSegmentFiles(dir string, chainName string) ([]string, error) { if !ok || isStateFile { continue } - if !snapcfg.Seedable(chainName, ff) { + if !skipSeedableCheck && !snapcfg.Seedable(chainName, ff) { continue } res = append(res, name) @@ -98,7 +99,7 @@ func seedableSegmentFiles(dir string, chainName string) ([]string, error) { return res, nil } -func seedableStateFilesBySubDir(dir, subDir string) ([]string, error) { +func seedableStateFilesBySubDir(dir, subDir string, skipSeedable bool) ([]string, error) { historyDir := filepath.Join(dir, subDir) dir2.MustExist(historyDir) files, err := dir2.ListFiles(historyDir, snaptype.SeedableV3Extensions()...) 
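
The seedableSegmentFiles and seedableStateFilesBySubDir hunks around this point thread a new boolean through the listing helpers so that callers such as BuildTorrentFilesIfNeed(..., all) can bypass the seedable whitelist. The filtering pattern, reduced to a self-contained sketch (isSeedable is a stand-in predicate, not the real snapcfg.Seedable):

package main

import (
	"fmt"
	"strings"
)

// listFiles mirrors the shape of the changed helpers: when skipCheck is
// true the seedable predicate is ignored and every file is returned.
func listFiles(names []string, skipCheck bool, isSeedable func(string) bool) []string {
	res := make([]string, 0, len(names))
	for _, name := range names {
		if !skipCheck && !isSeedable(name) {
			continue
		}
		res = append(res, name)
	}
	return res
}

func main() {
	files := []string{"v1-000100-000200-headers.seg", "v1-000100-000200-bodies.seg.tmp"}
	seedable := func(n string) bool { return strings.HasSuffix(n, ".seg") }

	fmt.Println(listFiles(files, false, seedable)) // only the whitelisted .seg file
	fmt.Println(listFiles(files, true, seedable))  // everything, check skipped
}
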
@@ -108,7 +109,7 @@ func seedableStateFilesBySubDir(dir, subDir string) ([]string, error) { res := make([]string, 0, len(files)) for _, fPath := range files { _, name := filepath.Split(fPath) - if !snaptype.E3Seedable(name) { + if !skipSeedable && !snaptype.E3Seedable(name) { continue } res = append(res, filepath.Join(subDir, name)) @@ -171,11 +172,11 @@ func BuildTorrentIfNeed(ctx context.Context, fName, root string, torrentFiles *A } // BuildTorrentFilesIfNeed - create .torrent files from .seg files (big IO) - if .seg files were added manually -func BuildTorrentFilesIfNeed(ctx context.Context, dirs datadir.Dirs, torrentFiles *AtomicTorrentFS, chain string, ignore snapcfg.Preverified) (int, error) { +func BuildTorrentFilesIfNeed(ctx context.Context, dirs datadir.Dirs, torrentFiles *AtomicTorrentFS, chain string, ignore snapcfg.Preverified, all bool) (int, error) { logEvery := time.NewTicker(20 * time.Second) defer logEvery.Stop() - files, err := SeedableFiles(dirs, chain) + files, err := SeedableFiles(dirs, chain, all) if err != nil { return 0, err } @@ -494,7 +495,7 @@ func ScheduleVerifyFile(ctx context.Context, t *torrent.Torrent, completePieces if change.Err != nil { err = change.Err } else { - err = fmt.Errorf("unexpected piece change error") + err = errors.New("unexpected piece change error") } cancel() diff --git a/erigon-lib/downloader/webseed.go b/erigon-lib/downloader/webseed.go index 0eadbe5105e..1863d42178a 100644 --- a/erigon-lib/downloader/webseed.go +++ b/erigon-lib/downloader/webseed.go @@ -445,8 +445,8 @@ func (d *WebSeeds) ByFileName(name string) (metainfo.UrlList, bool) { return v, ok } -var ErrInvalidEtag = fmt.Errorf("invalid etag") -var ErrEtagNotFound = fmt.Errorf("not found") +var ErrInvalidEtag = errors.New("invalid etag") +var ErrEtagNotFound = errors.New("not found") func (d *WebSeeds) retrieveFileEtag(ctx context.Context, file *url.URL) (string, error) { request, err := http.NewRequestWithContext(ctx, http.MethodHead, file.String(), nil) diff --git a/erigon-lib/etl/etl.go b/erigon-lib/etl/etl.go index 2ecd0fc20ac..366d09b88d0 100644 --- a/erigon-lib/etl/etl.go +++ b/erigon-lib/etl/etl.go @@ -18,6 +18,7 @@ package etl import ( "bytes" + "errors" "fmt" "reflect" "time" @@ -40,7 +41,7 @@ type ExtractFunc func(k []byte, v []byte, next ExtractNextFunc) error // for [0x01, 0x01, 0x01] it will generate [0x01, 0x01, 0x02], etc func NextKey(key []byte) ([]byte, error) { if len(key) == 0 { - return key, fmt.Errorf("could not apply NextKey for the empty key") + return key, errors.New("could not apply NextKey for the empty key") } nextKey := common.Copy(key) for i := len(key) - 1; i >= 0; i-- { @@ -53,7 +54,7 @@ func NextKey(key []byte) ([]byte, error) { nextKey[i] = 0 } } - return key, fmt.Errorf("overflow while applying NextKey") + return key, errors.New("overflow while applying NextKey") } // LoadCommitHandler is a callback called each time a new batch is being diff --git a/erigon-lib/go.mod b/erigon-lib/go.mod index 70d4a16946a..c9d9c8d7832 100644 --- a/erigon-lib/go.mod +++ b/erigon-lib/go.mod @@ -3,8 +3,8 @@ module github.com/erigontech/erigon-lib go 1.21.5 require ( - github.com/erigontech/erigon-snapshot v1.3.1-0.20240720122906-e073fcdeca33 - github.com/erigontech/interfaces v0.0.0-20240716134413-fc4152088ee6 + github.com/erigontech/erigon-snapshot v1.3.1-0.20240801141542-7a7b08ebd406 + github.com/erigontech/interfaces v0.0.0-20240723225543-c6b574058f8c github.com/erigontech/mdbx-go v0.38.4 github.com/erigontech/secp256k1 v1.1.0 github.com/rs/dnscache 
v0.0.0-20211102005908-e0241e321417 @@ -150,8 +150,8 @@ require ( ) replace ( - github.com/anacrolix/torrent => github.com/erigontech/torrent v1.54.2-alpha-32 - github.com/erigontech/erigon-snapshot => github.com/node-real/bsc-erigon-snapshot v1.0.1-0.20240805061542-ddf011a69761 + github.com/anacrolix/torrent => github.com/erigontech/torrent v1.54.2-alpha-33 + github.com/erigontech/erigon-snapshot => github.com/node-real/bsc-erigon-snapshot v1.0.1-0.20240807061100-d5a04db04e7a github.com/holiman/bloomfilter/v2 => github.com/AskAlexSharov/bloomfilter/v2 v2.0.8 github.com/tidwall/btree => github.com/AskAlexSharov/btree v1.6.2 ) diff --git a/erigon-lib/go.sum b/erigon-lib/go.sum index c3c8a79d8c0..65a71d65dbc 100644 --- a/erigon-lib/go.sum +++ b/erigon-lib/go.sum @@ -144,16 +144,16 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/erigontech/interfaces v0.0.0-20240716134413-fc4152088ee6 h1:R1AYJT2FeMEwvBcAuYw7QTezk8DpXTQjCN1y5o0YBvI= -github.com/erigontech/interfaces v0.0.0-20240716134413-fc4152088ee6/go.mod h1:N7OUkhkcagp9+7yb4ycHsG2VWCOmuJ1ONBecJshxtLE= +github.com/erigontech/interfaces v0.0.0-20240723225543-c6b574058f8c h1:KbcMdRKYRL+A4bJGK+fNOITk95wJzh9rUXFJ17wCyUY= +github.com/erigontech/interfaces v0.0.0-20240723225543-c6b574058f8c/go.mod h1:N7OUkhkcagp9+7yb4ycHsG2VWCOmuJ1ONBecJshxtLE= github.com/erigontech/mdbx-go v0.38.4 h1:S9T7mTe9KPcFe4dOoOtVdI6gPzht9y7wMnYfUBgrQLo= github.com/erigontech/mdbx-go v0.38.4/go.mod h1:IcOLQDPw3VM/asP6T5JVPPN4FHHgJtY16XfYjzWKVNI= github.com/erigontech/secp256k1 v1.1.0 h1:mO3YJMUSoASE15Ya//SoHiisptUhdXExuMUN1M0X9qY= github.com/erigontech/secp256k1 v1.1.0/go.mod h1:GokhPepsMB+EYDs7I5JZCprxHW6+yfOcJKaKtoZ+Fls= github.com/erigontech/speedtest v0.0.2 h1:W9Cvky/8AMUtUONwkLA/dZjeQ2XfkBdYfJzvhMZUO+U= github.com/erigontech/speedtest v0.0.2/go.mod h1:vulsRNiM51BmSTbVtch4FWxKxx53pS2D35lZTtao0bw= -github.com/erigontech/torrent v1.54.2-alpha-32 h1:Ly8W2JvD7r1o5TklXxKEV9D9Tr664tSrgj5OPpOrlWg= -github.com/erigontech/torrent v1.54.2-alpha-32/go.mod h1:QtK2WLdEz1Iy1Dh/325UltdHU0nA1xujh2rN6aov6y0= +github.com/erigontech/torrent v1.54.2-alpha-33 h1:CzTALQ+M4iYDirlP6rUFunxwz1MJXVoLOStmPyDP1ok= +github.com/erigontech/torrent v1.54.2-alpha-33/go.mod h1:QtK2WLdEz1Iy1Dh/325UltdHU0nA1xujh2rN6aov6y0= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/frankban/quicktest v1.9.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= @@ -307,8 +307,8 @@ github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOl github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= -github.com/node-real/bsc-erigon-snapshot v1.0.1-0.20240805061542-ddf011a69761 h1:Oz/Pa4dMUhGI3iAt47nrz55p6srdCOLwiWe7mGJVhbY= -github.com/node-real/bsc-erigon-snapshot v1.0.1-0.20240805061542-ddf011a69761/go.mod h1:ooHlCl+eEYzebiPu+FP6Q6SpPUeMADn8Jxabv3IKb9M= 
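
Most of the go.sum churn here simply mirrors the go.mod bumps: each dependency pin produces two entries, one hash for the module archive (h1:) and one for its go.mod file. Because the torrent and snapshot dependencies are routed through replace directives, bumping either fork is a one-line go.mod change. A minimal illustrative go.mod sketch of the same pattern (example module path, not this repository's full file):

// go.mod supports // comments; this sketch shows only the replace pattern.
module example.com/app

go 1.21

require github.com/anacrolix/torrent v1.54.2

// The build resolves anacrolix/torrent to the erigontech fork, so updating
// the pin below is matched by exactly two refreshed go.sum lines.
replace github.com/anacrolix/torrent => github.com/erigontech/torrent v1.54.2-alpha-33
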
+github.com/node-real/bsc-erigon-snapshot v1.0.1-0.20240807061100-d5a04db04e7a h1:UtunjNDwMvR6ySEC7uGYMPLVm5YkQt/9w/qaNlhz3y4= +github.com/node-real/bsc-erigon-snapshot v1.0.1-0.20240807061100-d5a04db04e7a/go.mod h1:ooHlCl+eEYzebiPu+FP6Q6SpPUeMADn8Jxabv3IKb9M= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= diff --git a/erigon-lib/gointerfaces/downloaderproto/downloader.pb.go b/erigon-lib/gointerfaces/downloaderproto/downloader.pb.go index 0e809121ef2..d9651c19df2 100644 --- a/erigon-lib/gointerfaces/downloaderproto/downloader.pb.go +++ b/erigon-lib/gointerfaces/downloaderproto/downloader.pb.go @@ -316,8 +316,11 @@ type StatsReply struct { Progress float32 `protobuf:"fixed32,7,opt,name=progress,proto3" json:"progress,omitempty"` BytesCompleted uint64 `protobuf:"varint,8,opt,name=bytes_completed,json=bytesCompleted,proto3" json:"bytes_completed,omitempty"` BytesTotal uint64 `protobuf:"varint,9,opt,name=bytes_total,json=bytesTotal,proto3" json:"bytes_total,omitempty"` - UploadRate uint64 `protobuf:"varint,10,opt,name=upload_rate,json=uploadRate,proto3" json:"upload_rate,omitempty"` // bytes/sec - DownloadRate uint64 `protobuf:"varint,11,opt,name=download_rate,json=downloadRate,proto3" json:"download_rate,omitempty"` // bytes/sec + UploadRate uint64 `protobuf:"varint,10,opt,name=upload_rate,json=uploadRate,proto3" json:"upload_rate,omitempty"` // bytes/sec + DownloadRate uint64 `protobuf:"varint,11,opt,name=download_rate,json=downloadRate,proto3" json:"download_rate,omitempty"` // bytes/sec + HashRate uint64 `protobuf:"varint,12,opt,name=hash_rate,json=hashRate,proto3" json:"hash_rate,omitempty"` // bytes/sec + CompletionRate uint64 `protobuf:"varint,13,opt,name=completion_rate,json=completionRate,proto3" json:"completion_rate,omitempty"` // bytes/sec + FlushRate uint64 `protobuf:"varint,14,opt,name=flush_rate,json=flushRate,proto3" json:"flush_rate,omitempty"` // bytes/sec } func (x *StatsReply) Reset() { @@ -422,6 +425,27 @@ func (x *StatsReply) GetDownloadRate() uint64 { return 0 } +func (x *StatsReply) GetHashRate() uint64 { + if x != nil { + return x.HashRate + } + return 0 +} + +func (x *StatsReply) GetCompletionRate() uint64 { + if x != nil { + return x.CompletionRate + } + return 0 +} + +func (x *StatsReply) GetFlushRate() uint64 { + if x != nil { + return x.FlushRate + } + return 0 +} + var File_downloader_downloader_proto protoreflect.FileDescriptor var file_downloader_downloader_proto_rawDesc = []byte{ @@ -446,7 +470,7 @@ var file_downloader_downloader_proto_rawDesc = []byte{ 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x31, 0x0a, 0x1b, 0x50, 0x72, 0x6f, 0x68, 0x69, 0x62, 0x69, 0x74, 0x4e, 0x65, 0x77, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xee, 0x02, 0x0a, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xd3, 0x03, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x61, @@ -469,32 +493,38 @@ var 
file_downloader_downloader_proto_rawDesc = []byte{ 0x64, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x61, 0x74, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x0c, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x61, 0x74, 0x65, 0x32, 0xdb, 0x02, - 0x0a, 0x0a, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x12, 0x59, 0x0a, 0x14, - 0x50, 0x72, 0x6f, 0x68, 0x69, 0x62, 0x69, 0x74, 0x4e, 0x65, 0x77, 0x44, 0x6f, 0x77, 0x6e, 0x6c, - 0x6f, 0x61, 0x64, 0x73, 0x12, 0x27, 0x2e, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, - 0x72, 0x2e, 0x50, 0x72, 0x6f, 0x68, 0x69, 0x62, 0x69, 0x74, 0x4e, 0x65, 0x77, 0x44, 0x6f, 0x77, - 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x37, 0x0a, 0x03, 0x41, 0x64, 0x64, 0x12, 0x16, - 0x2e, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x41, 0x64, 0x64, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, - 0x12, 0x3d, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x19, 0x2e, 0x64, 0x6f, 0x77, - 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, - 0x3d, 0x0a, 0x06, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x19, 0x2e, 0x64, 0x6f, 0x77, 0x6e, - 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x3b, - 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x18, 0x2e, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, - 0x61, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x16, 0x2e, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x42, 0x1e, 0x5a, 0x1c, 0x2e, - 0x2f, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x3b, 0x64, 0x6f, 0x77, 0x6e, - 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x0c, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x61, 0x74, 0x65, 0x12, 0x1b, 0x0a, + 0x09, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x08, 0x68, 0x61, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, + 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x0d, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x61, 0x74, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x72, 0x61, 0x74, + 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x52, 0x61, + 0x74, 0x65, 0x32, 0xdb, 0x02, 0x0a, 0x0a, 0x44, 0x6f, 
0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, + 0x72, 0x12, 0x59, 0x0a, 0x14, 0x50, 0x72, 0x6f, 0x68, 0x69, 0x62, 0x69, 0x74, 0x4e, 0x65, 0x77, + 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x12, 0x27, 0x2e, 0x64, 0x6f, 0x77, 0x6e, + 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x50, 0x72, 0x6f, 0x68, 0x69, 0x62, 0x69, 0x74, 0x4e, + 0x65, 0x77, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x37, 0x0a, 0x03, + 0x41, 0x64, 0x64, 0x12, 0x16, 0x2e, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, + 0x2e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, + 0x19, 0x2e, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x06, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x19, + 0x2e, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x56, 0x65, 0x72, 0x69, + 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x18, 0x2e, 0x64, + 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, + 0x64, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, + 0x42, 0x1e, 0x5a, 0x1c, 0x2e, 0x2f, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, + 0x3b, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/erigon-lib/gointerfaces/typesproto/types.pb.go b/erigon-lib/gointerfaces/typesproto/types.pb.go index a9b45d6d846..581584c5b4a 100644 --- a/erigon-lib/gointerfaces/typesproto/types.pb.go +++ b/erigon-lib/gointerfaces/typesproto/types.pb.go @@ -423,26 +423,27 @@ type ExecutionPayload struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Version uint32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` // v1 - no withdrawals, v2 - with withdrawals, v3 - with blob gas - ParentHash *H256 `protobuf:"bytes,2,opt,name=parent_hash,json=parentHash,proto3" json:"parent_hash,omitempty"` - Coinbase *H160 `protobuf:"bytes,3,opt,name=coinbase,proto3" json:"coinbase,omitempty"` - StateRoot *H256 `protobuf:"bytes,4,opt,name=state_root,json=stateRoot,proto3" json:"state_root,omitempty"` - ReceiptRoot *H256 `protobuf:"bytes,5,opt,name=receipt_root,json=receiptRoot,proto3" json:"receipt_root,omitempty"` - LogsBloom *H2048 `protobuf:"bytes,6,opt,name=logs_bloom,json=logsBloom,proto3" json:"logs_bloom,omitempty"` - PrevRandao *H256 `protobuf:"bytes,7,opt,name=prev_randao,json=prevRandao,proto3" json:"prev_randao,omitempty"` - BlockNumber uint64 
`protobuf:"varint,8,opt,name=block_number,json=blockNumber,proto3" json:"block_number,omitempty"` - GasLimit uint64 `protobuf:"varint,9,opt,name=gas_limit,json=gasLimit,proto3" json:"gas_limit,omitempty"` - GasUsed uint64 `protobuf:"varint,10,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` - Timestamp uint64 `protobuf:"varint,11,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - ExtraData []byte `protobuf:"bytes,12,opt,name=extra_data,json=extraData,proto3" json:"extra_data,omitempty"` - BaseFeePerGas *H256 `protobuf:"bytes,13,opt,name=base_fee_per_gas,json=baseFeePerGas,proto3" json:"base_fee_per_gas,omitempty"` - BlockHash *H256 `protobuf:"bytes,14,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"` - Transactions [][]byte `protobuf:"bytes,15,rep,name=transactions,proto3" json:"transactions,omitempty"` - Withdrawals []*Withdrawal `protobuf:"bytes,16,rep,name=withdrawals,proto3" json:"withdrawals,omitempty"` - BlobGasUsed *uint64 `protobuf:"varint,17,opt,name=blob_gas_used,json=blobGasUsed,proto3,oneof" json:"blob_gas_used,omitempty"` - ExcessBlobGas *uint64 `protobuf:"varint,18,opt,name=excess_blob_gas,json=excessBlobGas,proto3,oneof" json:"excess_blob_gas,omitempty"` - DepositRequests []*DepositRequest `protobuf:"bytes,19,rep,name=deposit_requests,json=depositRequests,proto3" json:"deposit_requests,omitempty"` - WithdrawalRequests []*WithdrawalRequest `protobuf:"bytes,20,rep,name=withdrawal_requests,json=withdrawalRequests,proto3" json:"withdrawal_requests,omitempty"` + Version uint32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` // v1 - no withdrawals, v2 - with withdrawals, v3 - with blob gas + ParentHash *H256 `protobuf:"bytes,2,opt,name=parent_hash,json=parentHash,proto3" json:"parent_hash,omitempty"` + Coinbase *H160 `protobuf:"bytes,3,opt,name=coinbase,proto3" json:"coinbase,omitempty"` + StateRoot *H256 `protobuf:"bytes,4,opt,name=state_root,json=stateRoot,proto3" json:"state_root,omitempty"` + ReceiptRoot *H256 `protobuf:"bytes,5,opt,name=receipt_root,json=receiptRoot,proto3" json:"receipt_root,omitempty"` + LogsBloom *H2048 `protobuf:"bytes,6,opt,name=logs_bloom,json=logsBloom,proto3" json:"logs_bloom,omitempty"` + PrevRandao *H256 `protobuf:"bytes,7,opt,name=prev_randao,json=prevRandao,proto3" json:"prev_randao,omitempty"` + BlockNumber uint64 `protobuf:"varint,8,opt,name=block_number,json=blockNumber,proto3" json:"block_number,omitempty"` + GasLimit uint64 `protobuf:"varint,9,opt,name=gas_limit,json=gasLimit,proto3" json:"gas_limit,omitempty"` + GasUsed uint64 `protobuf:"varint,10,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` + Timestamp uint64 `protobuf:"varint,11,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + ExtraData []byte `protobuf:"bytes,12,opt,name=extra_data,json=extraData,proto3" json:"extra_data,omitempty"` + BaseFeePerGas *H256 `protobuf:"bytes,13,opt,name=base_fee_per_gas,json=baseFeePerGas,proto3" json:"base_fee_per_gas,omitempty"` + BlockHash *H256 `protobuf:"bytes,14,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"` + Transactions [][]byte `protobuf:"bytes,15,rep,name=transactions,proto3" json:"transactions,omitempty"` + Withdrawals []*Withdrawal `protobuf:"bytes,16,rep,name=withdrawals,proto3" json:"withdrawals,omitempty"` + BlobGasUsed *uint64 `protobuf:"varint,17,opt,name=blob_gas_used,json=blobGasUsed,proto3,oneof" json:"blob_gas_used,omitempty"` + ExcessBlobGas *uint64 
`protobuf:"varint,18,opt,name=excess_blob_gas,json=excessBlobGas,proto3,oneof" json:"excess_blob_gas,omitempty"` + DepositRequests []*DepositRequest `protobuf:"bytes,19,rep,name=deposit_requests,json=depositRequests,proto3" json:"deposit_requests,omitempty"` + WithdrawalRequests []*WithdrawalRequest `protobuf:"bytes,20,rep,name=withdrawal_requests,json=withdrawalRequests,proto3" json:"withdrawal_requests,omitempty"` + ConsolidationRequests []*ConsolidationRequest `protobuf:"bytes,21,rep,name=consolidation_requests,json=consolidationRequests,proto3" json:"consolidation_requests,omitempty"` } func (x *ExecutionPayload) Reset() { @@ -617,6 +618,13 @@ func (x *ExecutionPayload) GetWithdrawalRequests() []*WithdrawalRequest { return nil } +func (x *ExecutionPayload) GetConsolidationRequests() []*ConsolidationRequest { + if x != nil { + return x.ConsolidationRequests + } + return nil +} + type DepositRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -759,6 +767,69 @@ func (x *WithdrawalRequest) GetAmount() uint64 { return 0 } +type ConsolidationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SourceAddress *H160 `protobuf:"bytes,1,opt,name=source_address,json=sourceAddress,proto3" json:"source_address,omitempty"` + SourcePubkey []byte `protobuf:"bytes,2,opt,name=source_pubkey,json=sourcePubkey,proto3" json:"source_pubkey,omitempty"` + TargetPubkey []byte `protobuf:"bytes,3,opt,name=target_pubkey,json=targetPubkey,proto3" json:"target_pubkey,omitempty"` +} + +func (x *ConsolidationRequest) Reset() { + *x = ConsolidationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_types_types_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConsolidationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConsolidationRequest) ProtoMessage() {} + +func (x *ConsolidationRequest) ProtoReflect() protoreflect.Message { + mi := &file_types_types_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConsolidationRequest.ProtoReflect.Descriptor instead. 
+func (*ConsolidationRequest) Descriptor() ([]byte, []int) { + return file_types_types_proto_rawDescGZIP(), []int{10} +} + +func (x *ConsolidationRequest) GetSourceAddress() *H160 { + if x != nil { + return x.SourceAddress + } + return nil +} + +func (x *ConsolidationRequest) GetSourcePubkey() []byte { + if x != nil { + return x.SourcePubkey + } + return nil +} + +func (x *ConsolidationRequest) GetTargetPubkey() []byte { + if x != nil { + return x.TargetPubkey + } + return nil +} + type Withdrawal struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -773,7 +844,7 @@ type Withdrawal struct { func (x *Withdrawal) Reset() { *x = Withdrawal{} if protoimpl.UnsafeEnabled { - mi := &file_types_types_proto_msgTypes[10] + mi := &file_types_types_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -786,7 +857,7 @@ func (x *Withdrawal) String() string { func (*Withdrawal) ProtoMessage() {} func (x *Withdrawal) ProtoReflect() protoreflect.Message { - mi := &file_types_types_proto_msgTypes[10] + mi := &file_types_types_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -799,7 +870,7 @@ func (x *Withdrawal) ProtoReflect() protoreflect.Message { // Deprecated: Use Withdrawal.ProtoReflect.Descriptor instead. func (*Withdrawal) Descriptor() ([]byte, []int) { - return file_types_types_proto_rawDescGZIP(), []int{10} + return file_types_types_proto_rawDescGZIP(), []int{11} } func (x *Withdrawal) GetIndex() uint64 { @@ -845,7 +916,7 @@ type BlobsBundleV1 struct { func (x *BlobsBundleV1) Reset() { *x = BlobsBundleV1{} if protoimpl.UnsafeEnabled { - mi := &file_types_types_proto_msgTypes[11] + mi := &file_types_types_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -858,7 +929,7 @@ func (x *BlobsBundleV1) String() string { func (*BlobsBundleV1) ProtoMessage() {} func (x *BlobsBundleV1) ProtoReflect() protoreflect.Message { - mi := &file_types_types_proto_msgTypes[11] + mi := &file_types_types_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -871,7 +942,7 @@ func (x *BlobsBundleV1) ProtoReflect() protoreflect.Message { // Deprecated: Use BlobsBundleV1.ProtoReflect.Descriptor instead. func (*BlobsBundleV1) Descriptor() ([]byte, []int) { - return file_types_types_proto_rawDescGZIP(), []int{11} + return file_types_types_proto_rawDescGZIP(), []int{12} } func (x *BlobsBundleV1) GetCommitments() [][]byte { @@ -907,7 +978,7 @@ type NodeInfoPorts struct { func (x *NodeInfoPorts) Reset() { *x = NodeInfoPorts{} if protoimpl.UnsafeEnabled { - mi := &file_types_types_proto_msgTypes[12] + mi := &file_types_types_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -920,7 +991,7 @@ func (x *NodeInfoPorts) String() string { func (*NodeInfoPorts) ProtoMessage() {} func (x *NodeInfoPorts) ProtoReflect() protoreflect.Message { - mi := &file_types_types_proto_msgTypes[12] + mi := &file_types_types_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -933,7 +1004,7 @@ func (x *NodeInfoPorts) ProtoReflect() protoreflect.Message { // Deprecated: Use NodeInfoPorts.ProtoReflect.Descriptor instead. 
func (*NodeInfoPorts) Descriptor() ([]byte, []int) { - return file_types_types_proto_rawDescGZIP(), []int{12} + return file_types_types_proto_rawDescGZIP(), []int{13} } func (x *NodeInfoPorts) GetDiscovery() uint32 { @@ -967,7 +1038,7 @@ type NodeInfoReply struct { func (x *NodeInfoReply) Reset() { *x = NodeInfoReply{} if protoimpl.UnsafeEnabled { - mi := &file_types_types_proto_msgTypes[13] + mi := &file_types_types_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -980,7 +1051,7 @@ func (x *NodeInfoReply) String() string { func (*NodeInfoReply) ProtoMessage() {} func (x *NodeInfoReply) ProtoReflect() protoreflect.Message { - mi := &file_types_types_proto_msgTypes[13] + mi := &file_types_types_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -993,7 +1064,7 @@ func (x *NodeInfoReply) ProtoReflect() protoreflect.Message { // Deprecated: Use NodeInfoReply.ProtoReflect.Descriptor instead. func (*NodeInfoReply) Descriptor() ([]byte, []int) { - return file_types_types_proto_rawDescGZIP(), []int{13} + return file_types_types_proto_rawDescGZIP(), []int{14} } func (x *NodeInfoReply) GetId() string { @@ -1065,7 +1136,7 @@ type PeerInfo struct { func (x *PeerInfo) Reset() { *x = PeerInfo{} if protoimpl.UnsafeEnabled { - mi := &file_types_types_proto_msgTypes[14] + mi := &file_types_types_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1078,7 +1149,7 @@ func (x *PeerInfo) String() string { func (*PeerInfo) ProtoMessage() {} func (x *PeerInfo) ProtoReflect() protoreflect.Message { - mi := &file_types_types_proto_msgTypes[14] + mi := &file_types_types_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1091,7 +1162,7 @@ func (x *PeerInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use PeerInfo.ProtoReflect.Descriptor instead. func (*PeerInfo) Descriptor() ([]byte, []int) { - return file_types_types_proto_rawDescGZIP(), []int{14} + return file_types_types_proto_rawDescGZIP(), []int{15} } func (x *PeerInfo) GetId() string { @@ -1176,7 +1247,7 @@ type ExecutionPayloadBodyV1 struct { func (x *ExecutionPayloadBodyV1) Reset() { *x = ExecutionPayloadBodyV1{} if protoimpl.UnsafeEnabled { - mi := &file_types_types_proto_msgTypes[15] + mi := &file_types_types_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1189,7 +1260,7 @@ func (x *ExecutionPayloadBodyV1) String() string { func (*ExecutionPayloadBodyV1) ProtoMessage() {} func (x *ExecutionPayloadBodyV1) ProtoReflect() protoreflect.Message { - mi := &file_types_types_proto_msgTypes[15] + mi := &file_types_types_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1202,7 +1273,7 @@ func (x *ExecutionPayloadBodyV1) ProtoReflect() protoreflect.Message { // Deprecated: Use ExecutionPayloadBodyV1.ProtoReflect.Descriptor instead. 
func (*ExecutionPayloadBodyV1) Descriptor() ([]byte, []int) { - return file_types_types_proto_rawDescGZIP(), []int{15} + return file_types_types_proto_rawDescGZIP(), []int{16} } func (x *ExecutionPayloadBodyV1) GetTransactions() [][]byte { @@ -1290,7 +1361,7 @@ var file_types_types_proto_rawDesc = []byte{ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x22, 0x96, 0x07, 0x0a, 0x10, 0x45, 0x78, 0x65, 0x63, + 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x22, 0xea, 0x07, 0x0a, 0x10, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, @@ -1345,106 +1416,121 @@ var file_types_types_proto_rawDesc = []byte{ 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x57, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x12, 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, - 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x62, - 0x6c, 0x6f, 0x62, 0x5f, 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x42, 0x12, 0x0a, 0x10, - 0x5f, 0x65, 0x78, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x67, 0x61, 0x73, - 0x22, 0xb8, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x12, 0x42, 0x0a, 0x16, 0x77, - 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x15, 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, - 0x61, 0x77, 0x61, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, - 0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x22, 0x8a, 0x01, 0x0a, 0x11, - 0x57, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x32, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2e, 0x48, 0x31, 0x36, 0x30, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x6f, 0x72, 0x5f, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x0f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 
0x72, 0x50, 0x75, 0x62, 0x6b, 0x65, 0x79, - 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x8a, 0x01, 0x0a, 0x0a, 0x57, 0x69, 0x74, - 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x27, 0x0a, - 0x0f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, - 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x25, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, - 0x48, 0x31, 0x36, 0x30, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, - 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x61, - 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x5f, 0x0a, 0x0d, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x42, 0x75, - 0x6e, 0x64, 0x6c, 0x65, 0x56, 0x31, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, - 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6d, - 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x62, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x12, 0x16, - 0x0a, 0x06, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, - 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x22, 0x49, 0x0a, 0x0d, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, - 0x66, 0x6f, 0x50, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x63, 0x6f, - 0x76, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x64, 0x69, 0x73, 0x63, - 0x6f, 0x76, 0x65, 0x72, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, - 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, - 0x72, 0x22, 0xca, 0x01, 0x0a, 0x0d, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6e, 0x6f, 0x64, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x10, 0x0a, - 0x03, 0x65, 0x6e, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x6e, 0x72, 0x12, - 0x2a, 0x0a, 0x05, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x50, - 0x6f, 0x72, 0x74, 0x73, 0x52, 0x05, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x6c, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, - 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x22, 0xb2, - 0x02, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 
0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x65, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x65, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x72, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x65, 0x6e, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x61, 0x70, 0x73, 0x18, - 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x63, 0x61, 0x70, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x63, - 0x6f, 0x6e, 0x6e, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x41, - 0x64, 0x64, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, - 0x6f, 0x6e, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x12, 0x26, 0x0a, - 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x49, 0x73, 0x49, 0x6e, - 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x26, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x69, 0x73, - 0x5f, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, - 0x63, 0x6f, 0x6e, 0x6e, 0x49, 0x73, 0x54, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x12, 0x24, 0x0a, - 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x69, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x6e, 0x49, 0x73, 0x53, 0x74, 0x61, - 0x74, 0x69, 0x63, 0x22, 0x71, 0x0a, 0x16, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x6f, 0x64, 0x79, 0x56, 0x31, 0x12, 0x22, 0x0a, - 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x33, 0x0a, 0x0b, 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x57, - 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x52, 0x0b, 0x77, 0x69, 0x74, 0x68, 0x64, - 0x72, 0x61, 0x77, 0x61, 0x6c, 0x73, 0x3a, 0x52, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xd1, 0x86, - 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4d, 0x61, - 0x6a, 0x6f, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x52, 0x0a, 0x15, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0xd2, 0x86, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4d, 0x69, 0x6e, 0x6f, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x52, - 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x63, 0x68, 0x5f, - 0x76, 
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xd3, 0x86, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x13, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x61, 0x74, 0x63, 0x68, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x42, 0x14, 0x5a, 0x12, 0x2e, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x52, 0x0a, 0x16, 0x63, 0x6f, + 0x6e, 0x73, 0x6f, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x73, 0x18, 0x15, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x15, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x42, 0x10, + 0x0a, 0x0e, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64, + 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x65, 0x78, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x62, 0x6c, 0x6f, 0x62, + 0x5f, 0x67, 0x61, 0x73, 0x22, 0xb8, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x75, 0x62, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x12, + 0x42, 0x0a, 0x16, 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x5f, 0x63, 0x72, + 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x32, 0x35, 0x36, 0x52, 0x15, 0x77, 0x69, + 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, + 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, + 0x65, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x22, + 0x8a, 0x01, 0x0a, 0x11, 0x57, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x32, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x31, 0x36, 0x30, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x50, 0x75, + 0x62, 0x6b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x94, 0x01, 0x0a, + 0x14, 0x43, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 
0x12, 0x32, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x31, 0x36, 0x30, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x12, 0x23, + 0x0a, 0x0d, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x50, 0x75, 0x62, + 0x6b, 0x65, 0x79, 0x22, 0x8a, 0x01, 0x0a, 0x0a, 0x57, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, + 0x61, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x27, 0x0a, 0x0f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x0e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x12, 0x25, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x31, 0x36, 0x30, 0x52, + 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, + 0x22, 0x5f, 0x0a, 0x0d, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x56, + 0x31, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0c, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x6f, + 0x6f, 0x66, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x72, 0x6f, 0x6f, 0x66, + 0x73, 0x22, 0x49, 0x0a, 0x0d, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x6f, 0x72, + 0x74, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, + 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x08, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x22, 0xca, 0x01, 0x0a, + 0x0d, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x65, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x72, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x6e, 0x72, 0x12, 0x2a, 0x0a, 0x05, 0x70, 0x6f, + 0x72, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x6f, 0x72, 0x74, 0x73, 0x52, + 0x05, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x23, 0x0a, 
0x0d, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x22, 0xb2, 0x02, 0x0a, 0x08, 0x50, 0x65, + 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6e, + 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6e, 0x6f, 0x64, 0x65, + 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, + 0x6e, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x61, 0x70, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x04, 0x63, 0x61, 0x70, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x12, 0x28, + 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x61, 0x64, + 0x64, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x52, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, + 0x5f, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x49, 0x73, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, + 0x12, 0x26, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x69, 0x73, 0x5f, 0x74, 0x72, 0x75, 0x73, + 0x74, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x49, + 0x73, 0x54, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, + 0x5f, 0x69, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x6e, 0x49, 0x73, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x22, 0x71, + 0x0a, 0x16, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x42, 0x6f, 0x64, 0x79, 0x56, 0x31, 0x12, 0x22, 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x0b, + 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x57, 0x69, 0x74, 0x68, 0x64, 0x72, + 0x61, 0x77, 0x61, 0x6c, 0x52, 0x0b, 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, + 0x73, 0x3a, 0x52, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6d, 0x61, 0x6a, + 0x6f, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xd1, 0x86, 0x03, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4d, 0x61, 0x6a, 0x6f, 0x72, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x52, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 
0x69, 0x63, 0x65, + 0x5f, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xd2, 0x86, 0x03, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4d, 0x69, 0x6e, + 0x6f, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x52, 0x0a, 0x15, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0xd3, 0x86, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x50, 0x61, 0x74, 0x63, 0x68, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x14, 0x5a, + 0x12, 0x2e, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x73, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1459,7 +1545,7 @@ func file_types_types_proto_rawDescGZIP() []byte { return file_types_types_proto_rawDescData } -var file_types_types_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_types_types_proto_msgTypes = make([]protoimpl.MessageInfo, 17) var file_types_types_proto_goTypes = []any{ (*H128)(nil), // 0: types.H128 (*H160)(nil), // 1: types.H160 @@ -1471,13 +1557,14 @@ var file_types_types_proto_goTypes = []any{ (*ExecutionPayload)(nil), // 7: types.ExecutionPayload (*DepositRequest)(nil), // 8: types.DepositRequest (*WithdrawalRequest)(nil), // 9: types.WithdrawalRequest - (*Withdrawal)(nil), // 10: types.Withdrawal - (*BlobsBundleV1)(nil), // 11: types.BlobsBundleV1 - (*NodeInfoPorts)(nil), // 12: types.NodeInfoPorts - (*NodeInfoReply)(nil), // 13: types.NodeInfoReply - (*PeerInfo)(nil), // 14: types.PeerInfo - (*ExecutionPayloadBodyV1)(nil), // 15: types.ExecutionPayloadBodyV1 - (*descriptorpb.FileOptions)(nil), // 16: google.protobuf.FileOptions + (*ConsolidationRequest)(nil), // 10: types.ConsolidationRequest + (*Withdrawal)(nil), // 11: types.Withdrawal + (*BlobsBundleV1)(nil), // 12: types.BlobsBundleV1 + (*NodeInfoPorts)(nil), // 13: types.NodeInfoPorts + (*NodeInfoReply)(nil), // 14: types.NodeInfoReply + (*PeerInfo)(nil), // 15: types.PeerInfo + (*ExecutionPayloadBodyV1)(nil), // 16: types.ExecutionPayloadBodyV1 + (*descriptorpb.FileOptions)(nil), // 17: google.protobuf.FileOptions } var file_types_types_proto_depIdxs = []int32{ 0, // 0: types.H160.hi:type_name -> types.H128 @@ -1497,22 +1584,24 @@ var file_types_types_proto_depIdxs = []int32{ 2, // 14: types.ExecutionPayload.prev_randao:type_name -> types.H256 2, // 15: types.ExecutionPayload.base_fee_per_gas:type_name -> types.H256 2, // 16: types.ExecutionPayload.block_hash:type_name -> types.H256 - 10, // 17: types.ExecutionPayload.withdrawals:type_name -> types.Withdrawal + 11, // 17: types.ExecutionPayload.withdrawals:type_name -> types.Withdrawal 8, // 18: types.ExecutionPayload.deposit_requests:type_name -> types.DepositRequest 9, // 19: types.ExecutionPayload.withdrawal_requests:type_name -> types.WithdrawalRequest - 2, // 20: types.DepositRequest.withdrawal_credentials:type_name -> types.H256 - 1, // 21: types.WithdrawalRequest.source_address:type_name -> types.H160 - 1, // 22: types.Withdrawal.address:type_name -> types.H160 - 12, // 23: 
types.NodeInfoReply.ports:type_name -> types.NodeInfoPorts - 10, // 24: types.ExecutionPayloadBodyV1.withdrawals:type_name -> types.Withdrawal - 16, // 25: types.service_major_version:extendee -> google.protobuf.FileOptions - 16, // 26: types.service_minor_version:extendee -> google.protobuf.FileOptions - 16, // 27: types.service_patch_version:extendee -> google.protobuf.FileOptions - 28, // [28:28] is the sub-list for method output_type - 28, // [28:28] is the sub-list for method input_type - 28, // [28:28] is the sub-list for extension type_name - 25, // [25:28] is the sub-list for extension extendee - 0, // [0:25] is the sub-list for field type_name + 10, // 20: types.ExecutionPayload.consolidation_requests:type_name -> types.ConsolidationRequest + 2, // 21: types.DepositRequest.withdrawal_credentials:type_name -> types.H256 + 1, // 22: types.WithdrawalRequest.source_address:type_name -> types.H160 + 1, // 23: types.ConsolidationRequest.source_address:type_name -> types.H160 + 1, // 24: types.Withdrawal.address:type_name -> types.H160 + 13, // 25: types.NodeInfoReply.ports:type_name -> types.NodeInfoPorts + 11, // 26: types.ExecutionPayloadBodyV1.withdrawals:type_name -> types.Withdrawal + 17, // 27: types.service_major_version:extendee -> google.protobuf.FileOptions + 17, // 28: types.service_minor_version:extendee -> google.protobuf.FileOptions + 17, // 29: types.service_patch_version:extendee -> google.protobuf.FileOptions + 30, // [30:30] is the sub-list for method output_type + 30, // [30:30] is the sub-list for method input_type + 30, // [30:30] is the sub-list for extension type_name + 27, // [27:30] is the sub-list for extension extendee + 0, // [0:27] is the sub-list for field type_name } func init() { file_types_types_proto_init() } @@ -1642,7 +1731,7 @@ func file_types_types_proto_init() { } } file_types_types_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*Withdrawal); i { + switch v := v.(*ConsolidationRequest); i { case 0: return &v.state case 1: @@ -1654,7 +1743,7 @@ func file_types_types_proto_init() { } } file_types_types_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*BlobsBundleV1); i { + switch v := v.(*Withdrawal); i { case 0: return &v.state case 1: @@ -1666,7 +1755,7 @@ func file_types_types_proto_init() { } } file_types_types_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*NodeInfoPorts); i { + switch v := v.(*BlobsBundleV1); i { case 0: return &v.state case 1: @@ -1678,7 +1767,7 @@ func file_types_types_proto_init() { } } file_types_types_proto_msgTypes[13].Exporter = func(v any, i int) any { - switch v := v.(*NodeInfoReply); i { + switch v := v.(*NodeInfoPorts); i { case 0: return &v.state case 1: @@ -1690,7 +1779,7 @@ func file_types_types_proto_init() { } } file_types_types_proto_msgTypes[14].Exporter = func(v any, i int) any { - switch v := v.(*PeerInfo); i { + switch v := v.(*NodeInfoReply); i { case 0: return &v.state case 1: @@ -1702,6 +1791,18 @@ func file_types_types_proto_init() { } } file_types_types_proto_msgTypes[15].Exporter = func(v any, i int) any { + switch v := v.(*PeerInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_types_types_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*ExecutionPayloadBodyV1); i { case 0: return &v.state @@ -1721,7 +1822,7 @@ func file_types_types_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: 
file_types_types_proto_rawDesc, NumEnums: 0, - NumMessages: 16, + NumMessages: 17, NumExtensions: 3, NumServices: 0, }, diff --git a/erigon-lib/kv/helpers.go b/erigon-lib/kv/helpers.go index 3e6c6361ceb..e73c7f6b18b 100644 --- a/erigon-lib/kv/helpers.go +++ b/erigon-lib/kv/helpers.go @@ -19,7 +19,7 @@ package kv import ( "context" "encoding/binary" - "fmt" + "errors" "os" "sync" "sync/atomic" @@ -106,7 +106,7 @@ func bytes2bool(in []byte) bool { return in[0] == 1 } -var ErrChanged = fmt.Errorf("key must not change") +var ErrChanged = errors.New("key must not change") // EnsureNotChangedBool - used to store immutable config flags in db. protects from human mistakes func EnsureNotChangedBool(tx GetPut, bucket string, k []byte, value bool) (ok, enabled bool, err error) { diff --git a/erigon-lib/kv/mdbx/kv_mdbx.go b/erigon-lib/kv/mdbx/kv_mdbx.go index 7aed4bfc5b4..81961d848c8 100644 --- a/erigon-lib/kv/mdbx/kv_mdbx.go +++ b/erigon-lib/kv/mdbx/kv_mdbx.go @@ -230,7 +230,7 @@ func PathDbMap() map[string]kv.RoDB { return maps.Clone(pathDbMap) } -var ErrDBDoesNotExists = fmt.Errorf("can't create database - because opening in `Accede` mode. probably another (main) process can create it") +var ErrDBDoesNotExists = errors.New("can't create database - because opening in `Accede` mode. probably another (main) process can create it") func (opts MdbxOpts) Open(ctx context.Context) (kv.RwDB, error) { opts = opts.WriteMap(dbg.WriteMap()) @@ -759,7 +759,7 @@ func (db *MdbxKV) BeginRo(ctx context.Context) (txn kv.Tx, err error) { } if !db.trackTxBegin() { - return nil, fmt.Errorf("db closed") + return nil, errors.New("db closed") } // will return nil err if context is cancelled (may appear to acquire the semaphore) @@ -806,7 +806,7 @@ func (db *MdbxKV) beginRw(ctx context.Context, flags uint) (txn kv.RwTx, err err } if !db.trackTxBegin() { - return nil, fmt.Errorf("db closed") + return nil, errors.New("db closed") } runtime.LockOSThread() @@ -1007,7 +1007,7 @@ func (tx *MdbxTx) CreateBucket(name string) error { flags ^= kv.DupSort } if flags != 0 { - return fmt.Errorf("some not supported flag provided for bucket") + return errors.New("some not supported flag provided for bucket") } dbi, err = tx.tx.OpenDBI(name, nativeFlags, nil, nil) @@ -1369,7 +1369,6 @@ func (tx *MdbxTx) CursorDupSort(bucket string) (kv.CursorDupSort, error) { // methods here help to see better pprof picture func (c *MdbxCursor) set(k []byte) ([]byte, []byte, error) { return c.c.Get(k, nil, mdbx.Set) } func (c *MdbxCursor) getCurrent() ([]byte, []byte, error) { return c.c.Get(nil, nil, mdbx.GetCurrent) } -func (c *MdbxCursor) first() ([]byte, []byte, error) { return c.c.Get(nil, nil, mdbx.First) } func (c *MdbxCursor) next() ([]byte, []byte, error) { return c.c.Get(nil, nil, mdbx.Next) } func (c *MdbxCursor) nextDup() ([]byte, []byte, error) { return c.c.Get(nil, nil, mdbx.NextDup) } func (c *MdbxCursor) nextNoDup() ([]byte, []byte, error) { return c.c.Get(nil, nil, mdbx.NextNoDup) } @@ -1386,21 +1385,10 @@ func (c *MdbxCursor) getBoth(k, v []byte) ([]byte, error) { _, v, err := c.c.Get(k, v, mdbx.GetBoth) return v, err } -func (c *MdbxCursor) setRange(k []byte) ([]byte, []byte, error) { - return c.c.Get(k, nil, mdbx.SetRange) -} func (c *MdbxCursor) getBothRange(k, v []byte) ([]byte, error) { _, v, err := c.c.Get(k, v, mdbx.GetBothRange) return v, err } -func (c *MdbxCursor) firstDup() ([]byte, error) { - _, v, err := c.c.Get(nil, nil, mdbx.FirstDup) - return v, err -} -func (c *MdbxCursor) lastDup() ([]byte, error) { - _, v, err := 
c.c.Get(nil, nil, mdbx.LastDup) - return v, err -} func (c *MdbxCursor) First() ([]byte, []byte, error) { return c.Seek(nil) @@ -1432,16 +1420,22 @@ func (c *MdbxCursor) Seek(seek []byte) (k, v []byte, err error) { } if len(seek) == 0 { - k, v, err = c.first() - } else { - k, v, err = c.setRange(seek) + k, v, err = c.c.Get(nil, nil, mdbx.First) + if err != nil { + if mdbx.IsNotFound(err) { + return nil, nil, nil + } + return []byte{}, nil, fmt.Errorf("cursor.First: %w, bucket: %s, key: %x", err, c.bucketName, seek) + } + return k, v, nil } + + k, v, err = c.c.Get(seek, nil, mdbx.SetRange) if err != nil { if mdbx.IsNotFound(err) { return nil, nil, nil } - err = fmt.Errorf("failed MdbxKV cursor.seekInFiles(): %w, bucket: %s, key: %x", err, c.bucketName, seek) - return []byte{}, nil, err + return []byte{}, nil, fmt.Errorf("cursor.SetRange: %w, bucket: %s, key: %x", err, c.bucketName, seek) } return k, v, nil @@ -1451,7 +1445,7 @@ func (c *MdbxCursor) seekDupSort(seek []byte) (k, v []byte, err error) { b := c.bucketCfg from, to := b.DupFromLen, b.DupToLen if len(seek) == 0 { - k, v, err = c.first() + k, v, err = c.c.Get(nil, nil, mdbx.First) if err != nil { if mdbx.IsNotFound(err) { return nil, nil, nil @@ -1474,7 +1468,7 @@ func (c *MdbxCursor) seekDupSort(seek []byte) (k, v []byte, err error) { } else { seek1 = seek } - k, v, err = c.setRange(seek1) + k, v, err = c.c.Get(seek1, nil, mdbx.SetRange) if err != nil { if mdbx.IsNotFound(err) { return nil, nil, nil @@ -1799,7 +1793,7 @@ func (c *MdbxDupSortCursor) SeekBothRange(key, value []byte) ([]byte, error) { } func (c *MdbxDupSortCursor) FirstDup() ([]byte, error) { - v, err := c.firstDup() + _, v, err := c.c.Get(nil, nil, mdbx.FirstDup) if err != nil { if mdbx.IsNotFound(err) { return nil, nil @@ -1856,7 +1850,7 @@ func (c *MdbxDupSortCursor) PrevNoDup() ([]byte, []byte, error) { } func (c *MdbxDupSortCursor) LastDup() ([]byte, error) { - v, err := c.lastDup() + _, v, err := c.c.Get(nil, nil, mdbx.LastDup) if err != nil { if mdbx.IsNotFound(err) { return nil, nil diff --git a/erigon-lib/kv/membatchwithdb/memory_mutation.go b/erigon-lib/kv/membatchwithdb/memory_mutation.go index 53112ff601f..49d1c8d45e4 100644 --- a/erigon-lib/kv/membatchwithdb/memory_mutation.go +++ b/erigon-lib/kv/membatchwithdb/memory_mutation.go @@ -723,10 +723,10 @@ func (m *MemoryMutation) CHandle() unsafe.Pointer { } type hasAggCtx interface { - AggTx() interface{} + AggTx() any } -func (m *MemoryMutation) AggTx() interface{} { +func (m *MemoryMutation) AggTx() any { return m.db.(hasAggCtx).AggTx() } diff --git a/erigon-lib/kv/membatchwithdb/memory_mutation_cursor.go b/erigon-lib/kv/membatchwithdb/memory_mutation_cursor.go index d75e165ffb2..ea33e49b175 100644 --- a/erigon-lib/kv/membatchwithdb/memory_mutation_cursor.go +++ b/erigon-lib/kv/membatchwithdb/memory_mutation_cursor.go @@ -18,7 +18,7 @@ package membatchwithdb import ( "bytes" - "fmt" + "errors" "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/kv" @@ -107,7 +107,7 @@ func (m *memoryMutationCursor) getNextOnDb(t NextType) (key []byte, value []byte return } default: - err = fmt.Errorf("invalid next type") + err = errors.New("invalid next type") return } @@ -129,7 +129,7 @@ func (m *memoryMutationCursor) getNextOnDb(t NextType) (key []byte, value []byte return } default: - err = fmt.Errorf("invalid next type") + err = errors.New("invalid next type") return } } diff --git a/erigon-lib/kv/remotedb/kv_remote.go b/erigon-lib/kv/remotedb/kv_remote.go index 86490b25b93..ab511ec51bc 100644 
--- a/erigon-lib/kv/remotedb/kv_remote.go +++ b/erigon-lib/kv/remotedb/kv_remote.go @@ -19,6 +19,7 @@ package remotedb import ( "bytes" "context" + "errors" "fmt" "runtime" "unsafe" @@ -191,16 +192,16 @@ func (db *DB) BeginTemporalRo(ctx context.Context) (kv.TemporalTx, error) { return t.(kv.TemporalTx), nil } func (db *DB) BeginRw(ctx context.Context) (kv.RwTx, error) { - return nil, fmt.Errorf("remote db provider doesn't support .BeginRw method") + return nil, errors.New("remote db provider doesn't support .BeginRw method") } func (db *DB) BeginRwNosync(ctx context.Context) (kv.RwTx, error) { - return nil, fmt.Errorf("remote db provider doesn't support .BeginRw method") + return nil, errors.New("remote db provider doesn't support .BeginRwNosync method") } func (db *DB) BeginTemporalRw(ctx context.Context) (kv.RwTx, error) { - return nil, fmt.Errorf("remote db provider doesn't support .BeginTemporalRw method") + return nil, errors.New("remote db provider doesn't support .BeginTemporalRw method") } func (db *DB) BeginTemporalRwNosync(ctx context.Context) (kv.RwTx, error) { - return nil, fmt.Errorf("remote db provider doesn't support .BeginTemporalRwNosync method") + return nil, errors.New("remote db provider doesn't support .BeginTemporalRwNosync method") } func (db *DB) View(ctx context.Context, f func(tx kv.Tx) error) (err error) { @@ -221,10 +222,10 @@ func (db *DB) ViewTemporal(ctx context.Context, f func(tx kv.TemporalTx) error) } func (db *DB) Update(ctx context.Context, f func(tx kv.RwTx) error) (err error) { - return fmt.Errorf("remote db provider doesn't support .Update method") + return errors.New("remote db provider doesn't support .Update method") } func (db *DB) UpdateNosync(ctx context.Context, f func(tx kv.RwTx) error) (err error) { - return fmt.Errorf("remote db provider doesn't support .UpdateNosync method") + return errors.New("remote db provider doesn't support .UpdateNosync method") } func (tx *tx) ViewID() uint64 { return tx.viewID } @@ -359,7 +360,7 @@ func (tx *tx) Cursor(bucket string) (kv.Cursor, error) { } func (tx *tx) ListBuckets() ([]string, error) { - return nil, fmt.Errorf("function ListBuckets is not implemented for remoteTx") + return nil, errors.New("function ListBuckets is not implemented for remoteTx") } // func (c *remoteCursor) Put(k []byte, v []byte) error { panic("not supported") } diff --git a/erigon-lib/kv/remotedbserver/remotedbserver.go b/erigon-lib/kv/remotedbserver/remotedbserver.go index be73a5112c4..b6ac49bc28b 100644 --- a/erigon-lib/kv/remotedbserver/remotedbserver.go +++ b/erigon-lib/kv/remotedbserver/remotedbserver.go @@ -535,7 +535,7 @@ func (s *KvServer) DomainGet(_ context.Context, req *remote.DomainGetReq) (reply if err := s.with(req.TxId, func(tx kv.Tx) error { ttx, ok := tx.(kv.TemporalTx) if !ok { - return fmt.Errorf("server DB doesn't implement kv.Temporal interface") + return errors.New("server DB doesn't implement kv.Temporal interface") } if req.Latest { reply.V, _, err = ttx.DomainGet(domainName, req.K, req.K2) @@ -559,7 +559,7 @@ func (s *KvServer) HistorySeek(_ context.Context, req *remote.HistorySeekReq) (r if err := s.with(req.TxId, func(tx kv.Tx) error { ttx, ok := tx.(kv.TemporalTx) if !ok { - return fmt.Errorf("server DB doesn't implement kv.Temporal interface") + return errors.New("server DB doesn't implement kv.Temporal interface") } reply.V, reply.Ok, err = ttx.HistorySeek(kv.History(req.Table), req.K, req.Ts) if err != nil { @@ -591,7 +591,7 @@ func (s *KvServer) IndexRange(_ context.Context, req *remote.IndexRangeReq)
(*re if err := s.with(req.TxId, func(tx kv.Tx) error { ttx, ok := tx.(kv.TemporalTx) if !ok { - return fmt.Errorf("server DB doesn't implement kv.Temporal interface") + return errors.New("server DB doesn't implement kv.Temporal interface") } it, err := ttx.IndexRange(kv.InvertedIdx(req.Table), req.K, from, int(req.ToTs), order.By(req.OrderAscend), limit) if err != nil { @@ -623,6 +623,37 @@ func (s *KvServer) IndexRange(_ context.Context, req *remote.IndexRangeReq) (*re return reply, nil } +func (s *KvServer) HistoryRange(_ context.Context, req *remote.HistoryRangeReq) (*remote.Pairs, error) { + reply := &remote.Pairs{} + fromTs, limit := int(req.FromTs), int(req.Limit) + if err := s.with(req.TxId, func(tx kv.Tx) error { + ttx, ok := tx.(kv.TemporalTx) + if !ok { + return errors.New("server DB doesn't implement kv.Temporal interface") + } + it, err := ttx.HistoryRange(kv.History(req.Table), fromTs, int(req.ToTs), order.By(req.OrderAscend), limit) + if err != nil { + return err + } + defer it.Close() + for it.HasNext() { + k, v, err := it.Next() + if err != nil { + return err + } + key := bytesCopy(k) + value := bytesCopy(v) + reply.Keys = append(reply.Keys, key) + reply.Values = append(reply.Values, value) + limit-- + } + return nil + }); err != nil { + return nil, err + } + return reply, nil +} + func (s *KvServer) Range(_ context.Context, req *remote.RangeReq) (*remote.Pairs, error) { from, limit := req.FromPrefix, int(req.Limit) if req.PageToken != "" { diff --git a/erigon-lib/kv/stream/stream_test.go b/erigon-lib/kv/stream/stream_test.go index a812e7ae6ba..bc9550390f8 100644 --- a/erigon-lib/kv/stream/stream_test.go +++ b/erigon-lib/kv/stream/stream_test.go @@ -19,7 +19,7 @@ package stream_test import ( "bytes" "context" - "fmt" + "errors" "testing" "github.com/erigontech/erigon-lib/kv" @@ -244,7 +244,7 @@ func TestPaginated(t *testing.T) { }) t.Run("error", func(t *testing.T) { i := 0 - testErr := fmt.Errorf("test") + testErr := errors.New("test") s1 := stream.Paginate[uint64](func(pageToken string) (arr []uint64, nextPageToken string, err error) { i++ switch i { @@ -310,7 +310,7 @@ func TestPaginatedDual(t *testing.T) { }) t.Run("error", func(t *testing.T) { i := 0 - testErr := fmt.Errorf("test") + testErr := errors.New("test") s1 := stream.PaginateKV(func(pageToken string) (keys, values [][]byte, nextPageToken string, err error) { i++ switch i { diff --git a/erigon-lib/kv/tables.go b/erigon-lib/kv/tables.go index b2f75a9fa6d..e0c537755c8 100644 --- a/erigon-lib/kv/tables.go +++ b/erigon-lib/kv/tables.go @@ -367,17 +367,18 @@ const ( StateCommitment = "StateCommitment" // BOR - BorReceipts = "BorReceipt" - BorFinality = "BorFinality" - BorTxLookup = "BlockBorTransactionLookup" // transaction_hash -> block_num_u64 - BorSeparate = "BorSeparate" // persisted snapshots of the Validator Sets, with their proposer priorities - BorEvents = "BorEvents" // event_id -> event_payload - BorEventNums = "BorEventNums" // block_num -> event_id (first event_id in that block) - BorSpans = "BorSpans" // span_id -> span (in JSON encoding) - BorMilestones = "BorMilestones" // milestone_id -> milestone (in JSON encoding) - BorMilestoneEnds = "BorMilestoneEnds" // start block_num -> milestone_id (first block of milestone) - BorCheckpoints = "BorCheckpoints" // checkpoint_id -> checkpoint (in JSON encoding) - BorCheckpointEnds = "BorCheckpointEnds" // start block_num -> checkpoint_id (first block of checkpoint) + BorReceipts = "BorReceipt" + BorFinality = "BorFinality" + BorTxLookup = 
"BlockBorTransactionLookup" // transaction_hash -> block_num_u64 + BorSeparate = "BorSeparate" // persisted snapshots of the Validator Sets, with their proposer priorities + BorEvents = "BorEvents" // event_id -> event_payload + BorEventNums = "BorEventNums" // block_num -> event_id (first event_id in that block) + BorSpans = "BorSpans" // span_id -> span (in JSON encoding) + BorMilestones = "BorMilestones" // milestone_id -> milestone (in JSON encoding) + BorMilestoneEnds = "BorMilestoneEnds" // start block_num -> milestone_id (first block of milestone) + BorCheckpoints = "BorCheckpoints" // checkpoint_id -> checkpoint (in JSON encoding) + BorCheckpointEnds = "BorCheckpointEnds" // start block_num -> checkpoint_id (first block of checkpoint) + BorProducerSelections = "BorProducerSelections" // span_id -> span selection with accumulated proposer priorities (in JSON encoding) // Downloader BittorrentCompletion = "BittorrentCompletion" @@ -627,6 +628,7 @@ var ChaindataTables = []string{ BorMilestoneEnds, BorCheckpoints, BorCheckpointEnds, + BorProducerSelections, TblAccountKeys, TblAccountVals, TblAccountHistoryKeys, @@ -848,16 +850,17 @@ var ChaindataTablesCfg = TableCfg{ } var BorTablesCfg = TableCfg{ - BorReceipts: {Flags: DupSort}, - BorFinality: {Flags: DupSort}, - BorTxLookup: {Flags: DupSort}, - BorEvents: {Flags: DupSort}, - BorEventNums: {Flags: DupSort}, - BorSpans: {Flags: DupSort}, - BorCheckpoints: {Flags: DupSort}, - BorCheckpointEnds: {Flags: DupSort}, - BorMilestones: {Flags: DupSort}, - BorMilestoneEnds: {Flags: DupSort}, + BorReceipts: {Flags: DupSort}, + BorFinality: {Flags: DupSort}, + BorTxLookup: {Flags: DupSort}, + BorEvents: {Flags: DupSort}, + BorEventNums: {Flags: DupSort}, + BorSpans: {Flags: DupSort}, + BorCheckpoints: {Flags: DupSort}, + BorCheckpointEnds: {Flags: DupSort}, + BorMilestones: {Flags: DupSort}, + BorMilestoneEnds: {Flags: DupSort}, + BorProducerSelections: {Flags: DupSort}, } var TxpoolTablesCfg = TableCfg{} @@ -1034,10 +1037,12 @@ func String2Domain(in string) (Domain, error) { case "commitment": return CommitmentDomain, nil default: - return 0, fmt.Errorf("unknown history name: %s", in) + return Domain(MaxUint16), fmt.Errorf("unknown history name: %s", in) } } +const MaxUint16 uint16 = 1<<16 - 1 + func (iip Appendable) String() string { switch iip { //case ReceiptsAppendable: @@ -1052,6 +1057,6 @@ func String2Appendable(in string) (Appendable, error) { //case "receipts": // return ReceiptsAppendable, nil default: - return 0, fmt.Errorf("unknown Appendable name: %s", in) + return Appendable(MaxUint16), fmt.Errorf("unknown Appendable name: %s", in) } } diff --git a/erigon-lib/kv/temporal/historyv2/account_changeset_test.go b/erigon-lib/kv/temporal/historyv2/account_changeset_test.go index c37a800c865..e86634d9d57 100644 --- a/erigon-lib/kv/temporal/historyv2/account_changeset_test.go +++ b/erigon-lib/kv/temporal/historyv2/account_changeset_test.go @@ -19,6 +19,7 @@ package historyv2 import ( "bytes" "encoding/hex" + "errors" "fmt" "reflect" "testing" @@ -36,7 +37,7 @@ func TestEncodingAccount(t *testing.T) { ch := m.New() // empty StorageChangeSset first err := m.Encode(1, ch, func(k, v []byte) error { - return fmt.Errorf("must never call") + return errors.New("must never call") }) assert.NoError(t, err) diff --git a/erigon-lib/kv/temporal/kv_temporal.go b/erigon-lib/kv/temporal/kv_temporal.go index 7ada8f7a313..adb5d8464a1 100644 --- a/erigon-lib/kv/temporal/kv_temporal.go +++ b/erigon-lib/kv/temporal/kv_temporal.go @@ -68,8 +68,8 @@ type DB struct 
{ func New(db kv.RwDB, agg *state.Aggregator) (*DB, error) { return &DB{RwDB: db, agg: agg}, nil } -func (db *DB) Agg() *state.Aggregator { return db.agg } -func (db *DB) InternalDB() kv.RwDB { return db.RwDB } +func (db *DB) Agg() any { return db.agg } +func (db *DB) InternalDB() kv.RwDB { return db.RwDB } func (db *DB) BeginTemporalRo(ctx context.Context) (kv.TemporalTx, error) { kvTx, err := db.RwDB.BeginRo(ctx) //nolint:gocritic @@ -168,7 +168,7 @@ func (tx *Tx) ForceReopenAggCtx() { func (tx *Tx) WarmupDB(force bool) error { return tx.MdbxTx.WarmupDB(force) } func (tx *Tx) LockDBInRam() error { return tx.MdbxTx.LockDBInRam() } -func (tx *Tx) AggTx() interface{} { return tx.filesTx } +func (tx *Tx) AggTx() any { return tx.filesTx } func (tx *Tx) Agg() *state.Aggregator { return tx.db.agg } func (tx *Tx) Rollback() { tx.autoClose() diff --git a/erigon-lib/metrics/parsing.go b/erigon-lib/metrics/parsing.go index 91db2f03222..a838697da34 100644 --- a/erigon-lib/metrics/parsing.go +++ b/erigon-lib/metrics/parsing.go @@ -17,6 +17,7 @@ package metrics import ( + "errors" "fmt" "regexp" "strings" @@ -26,7 +27,7 @@ import ( func parseMetric(s string) (string, prometheus.Labels, error) { if len(s) == 0 { - return "", nil, fmt.Errorf("metric cannot be empty") + return "", nil, errors.New("metric cannot be empty") } ident, rest, ok := strings.Cut(s, "{") diff --git a/erigon-lib/metrics/register.go b/erigon-lib/metrics/register.go index 92e563162e9..cbd07501271 100644 --- a/erigon-lib/metrics/register.go +++ b/erigon-lib/metrics/register.go @@ -209,12 +209,3 @@ func GetOrCreateHistogram(name string) Histogram { return &histogram{h} } - -func GetOrCreateHistogramWithBuckets(name string) Histogram { - h, err := defaultSet.GetOrCreateHistogram(name) - if err != nil { - panic(fmt.Errorf("could not get or create new histogram: %w", err)) - } - - return &histogram{h} -} diff --git a/erigon-lib/mmap/total_memory_cgroups.go b/erigon-lib/mmap/total_memory_cgroups.go index dbca502d02f..05a4b0be873 100644 --- a/erigon-lib/mmap/total_memory_cgroups.go +++ b/erigon-lib/mmap/total_memory_cgroups.go @@ -88,7 +88,7 @@ func cgroupsV1MemoryLimit() (uint64, error) { if stat, err := cgroup.Stat(); err != nil { return 0, fmt.Errorf("failed to load memory cgroup1 stats: %w", err) } else if stat.Memory == nil || stat.Memory.Usage == nil { - return 0, fmt.Errorf("cgroup1 memory stats are nil; aborting") + return 0, errors.New("cgroup1 memory stats are nil; aborting") } else { return stat.Memory.Usage.Limit, nil } @@ -111,7 +111,7 @@ func cgroupsV2MemoryLimit() (uint64, error) { if stat, err := cgroup.Stat(); err != nil { return 0, fmt.Errorf("failed to load cgroup2 memory stats: %w", err) } else if stat.Memory == nil { - return 0, fmt.Errorf("cgroup2 memory stats are nil; aborting") + return 0, errors.New("cgroup2 memory stats are nil; aborting") } else { return stat.Memory.UsageLimit, nil } diff --git a/erigon-lib/recsplit/golomb_rice.go b/erigon-lib/recsplit/golomb_rice.go index 1e0d5799e8a..4e292572b59 100644 --- a/erigon-lib/recsplit/golomb_rice.go +++ b/erigon-lib/recsplit/golomb_rice.go @@ -82,7 +82,7 @@ func (g *GolombRice) appendFixed(v uint64, log2golomb int) { g.bitCount += log2golomb } -// Bits returns currrent number of bits in the compact encoding of the hash function representation +// Bits returns current number of bits in the compact encoding of the hash function representation func (g *GolombRice) Bits() int { return g.bitCount } diff --git a/erigon-lib/recsplit/recsplit.go 
b/erigon-lib/recsplit/recsplit.go index 27c20ece508..239b57195ba 100644 --- a/erigon-lib/recsplit/recsplit.go +++ b/erigon-lib/recsplit/recsplit.go @@ -21,6 +21,7 @@ import ( "context" "crypto/rand" "encoding/binary" + "errors" "fmt" "io" "math" @@ -39,7 +40,7 @@ import ( "github.com/erigontech/erigon-lib/recsplit/eliasfano32" ) -var ErrCollision = fmt.Errorf("duplicate key") +var ErrCollision = errors.New("duplicate key") const RecSplitLogPrefix = "recsplit" @@ -349,7 +350,7 @@ func (rs *RecSplit) golombParam(m uint16) int { // the slice underlying key is not getting accessed by RecSplit after this invocation. func (rs *RecSplit) AddKey(key []byte, offset uint64) error { if rs.built { - return fmt.Errorf("cannot add keys after perfect hash function had been built") + return errors.New("cannot add keys after perfect hash function had been built") } rs.hasher.Reset() rs.hasher.Write(key) //nolint:errcheck @@ -582,7 +583,7 @@ func (rs *RecSplit) loadFuncOffset(k, _ []byte, _ etl.CurrentTableReader, _ etl. // of building the perfect hash function and writing index into a file func (rs *RecSplit) Build(ctx context.Context) error { if rs.built { - return fmt.Errorf("already built") + return errors.New("already built") } if rs.keysAdded != rs.keyExpectedCount { return fmt.Errorf("rs %s expected keys %d, got %d", rs.indexFileName, rs.keyExpectedCount, rs.keysAdded) diff --git a/erigon-lib/rlp/parse.go b/erigon-lib/rlp/parse.go index 5d53bc1b4f6..09545f4347e 100644 --- a/erigon-lib/rlp/parse.go +++ b/erigon-lib/rlp/parse.go @@ -26,7 +26,7 @@ import ( ) var ( - ErrBase = fmt.Errorf("rlp") + ErrBase = errors.New("rlp") ErrParse = fmt.Errorf("%w parse", ErrBase) ErrDecode = fmt.Errorf("%w decode", ErrBase) ) diff --git a/erigon-lib/rlp2/parse.go b/erigon-lib/rlp2/parse.go index b69e7de5a12..bda7935e15a 100644 --- a/erigon-lib/rlp2/parse.go +++ b/erigon-lib/rlp2/parse.go @@ -26,7 +26,7 @@ import ( ) var ( - ErrBase = fmt.Errorf("rlp") + ErrBase = errors.New("rlp") ErrParse = fmt.Errorf("%w parse", ErrBase) ErrDecode = fmt.Errorf("%w decode", ErrBase) ErrUnexpectedEOF = fmt.Errorf("%w EOF", ErrBase) diff --git a/erigon-lib/seg/compress_test.go b/erigon-lib/seg/compress_test.go index 2d86902b361..2d0cea183a2 100644 --- a/erigon-lib/seg/compress_test.go +++ b/erigon-lib/seg/compress_test.go @@ -125,9 +125,6 @@ func TestCompressDict1(t *testing.T) { require.True(t, g.MatchPrefix([]byte(""))) require.True(t, g.MatchPrefix([]byte{})) - require.Equal(t, 1, g.MatchPrefixCmp([]byte("long"))) - require.Equal(t, 0, g.MatchPrefixCmp([]byte(""))) - require.Equal(t, 0, g.MatchPrefixCmp([]byte{})) word, _ := g.Next(nil) require.NotNil(t, word) require.Zero(t, len(word)) @@ -139,11 +136,6 @@ func TestCompressDict1(t *testing.T) { require.False(t, g.MatchPrefix([]byte("longnotmatch"))) require.True(t, g.MatchPrefix([]byte{})) - require.Equal(t, 0, g.MatchPrefixCmp([]byte("long"))) - require.Equal(t, 1, g.MatchPrefixCmp([]byte("longlong"))) - require.Equal(t, 1, g.MatchPrefixCmp([]byte("wordnotmatch"))) - require.Equal(t, 1, g.MatchPrefixCmp([]byte("longnotmatch"))) - require.Equal(t, 0, g.MatchPrefixCmp([]byte{})) _, _ = g.Next(nil) // next word is `word` @@ -155,13 +147,6 @@ func TestCompressDict1(t *testing.T) { require.False(t, g.MatchPrefix([]byte("wordnotmatch"))) require.False(t, g.MatchPrefix([]byte("longnotmatch"))) - require.Equal(t, -1, g.MatchPrefixCmp([]byte("long"))) - require.Equal(t, -1, g.MatchPrefixCmp([]byte("longlong"))) - require.Equal(t, 0, g.MatchPrefixCmp([]byte("word"))) - require.Equal(t, 0, 
g.MatchPrefixCmp([]byte(""))) - require.Equal(t, 0, g.MatchPrefixCmp(nil)) - require.Equal(t, 1, g.MatchPrefixCmp([]byte("wordnotmatch"))) - require.Equal(t, -1, g.MatchPrefixCmp([]byte("longnotmatch"))) _, _ = g.Next(nil) // next word is `longlongword %d` @@ -175,13 +160,6 @@ func TestCompressDict1(t *testing.T) { require.False(t, g.MatchPrefix([]byte("longnotmatch"))) require.True(t, g.MatchPrefix([]byte{})) - require.Equal(t, 0, g.MatchPrefixCmp([]byte(fmt.Sprintf("%d", i)))) - require.Equal(t, 0, g.MatchPrefixCmp([]byte(expectPrefix))) - require.Equal(t, 0, g.MatchPrefixCmp([]byte(expectPrefix+"long"))) - require.Equal(t, 0, g.MatchPrefixCmp([]byte(expectPrefix+"longword "))) - require.Equal(t, 1, g.MatchPrefixCmp([]byte("wordnotmatch"))) - require.Equal(t, 1, g.MatchPrefixCmp([]byte("longnotmatch"))) - require.Equal(t, 0, g.MatchPrefixCmp([]byte{})) savePos := g.dataP word, nextPos := g.Next(nil) expected := fmt.Sprintf("%d longlongword %d", i, i) diff --git a/erigon-lib/seg/decompress.go b/erigon-lib/seg/decompress.go index 01a03b1eafc..1729a2fb925 100644 --- a/erigon-lib/seg/decompress.go +++ b/erigon-lib/seg/decompress.go @@ -555,14 +555,14 @@ func (g *Getter) nextPos(clean bool) (pos uint64) { g.dataP++ g.dataBit = 0 } - table := g.posDict - if table != nil && table.bitLen == 0 { + table, dataLen, data := g.posDict, len(g.data), g.data + if table.bitLen == 0 { return table.pos[0] } for l := byte(0); l == 0; { - code := uint16(g.data[g.dataP]) >> g.dataBit - if 8-g.dataBit < table.bitLen && int(g.dataP)+1 < len(g.data) { - code |= uint16(g.data[g.dataP+1]) << (8 - g.dataBit) + code := uint16(data[g.dataP]) >> g.dataBit + if 8-g.dataBit < table.bitLen && int(g.dataP)+1 < dataLen { + code |= uint16(data[g.dataP+1]) << (8 - g.dataBit) } code &= (uint16(1) << table.bitLen) - 1 l = table.lens[code] @@ -817,105 +817,6 @@ func (g *Getter) SkipUncompressed() (uint64, int) { return g.dataP, int(wordLen) } -// Match returns -// -// 1 if the word at current offset is greater than the buf -// -// -1 if it is less than the buf -// -// 0 if they are equal. 
-func (g *Getter) Match(buf []byte) int { - savePos := g.dataP - wordLen := g.nextPos(true) - wordLen-- // because when create huffman tree we do ++ , because 0 is terminator - lenBuf := len(buf) - if wordLen == 0 || int(wordLen) != lenBuf { - if g.dataBit > 0 { - g.dataP++ - g.dataBit = 0 - } - if lenBuf != 0 || lenBuf != int(wordLen) { - g.dataP, g.dataBit = savePos, 0 - } - if lenBuf == int(wordLen) { - return 0 - } - if lenBuf < int(wordLen) { - return -1 - } - if lenBuf > int(wordLen) { - return 1 - } - } - - var bufPos int - // In the first pass, we only check patterns - for pos := g.nextPos(false /* clean */); pos != 0; pos = g.nextPos(false) { - bufPos += int(pos) - 1 - pattern := g.nextPattern() - compared := bytes.Compare(buf[bufPos:bufPos+len(pattern)], pattern) - if compared != 0 { - g.dataP, g.dataBit = savePos, 0 - return compared - } - if lenBuf < bufPos+len(pattern) { - g.dataP, g.dataBit = savePos, 0 - return -1 - } - } - if g.dataBit > 0 { - g.dataP++ - g.dataBit = 0 - } - postLoopPos := g.dataP - g.dataP, g.dataBit = savePos, 0 - g.nextPos(true /* clean */) // Reset the state of huffman decoder - // Second pass - we check spaces not covered by the patterns - var lastUncovered int - bufPos = 0 - for pos := g.nextPos(false /* clean */); pos != 0; pos = g.nextPos(false) { - bufPos += int(pos) - 1 - if bufPos > lastUncovered { - dif := uint64(bufPos - lastUncovered) - compared := bytes.Compare(buf[lastUncovered:bufPos], g.data[postLoopPos:postLoopPos+dif]) - if compared != 0 { - g.dataP, g.dataBit = savePos, 0 - return compared - } - if lenBuf < bufPos { - g.dataP, g.dataBit = savePos, 0 - return -1 - } - postLoopPos += dif - } - lastUncovered = bufPos + len(g.nextPattern()) - } - if int(wordLen) > lastUncovered { - dif := wordLen - uint64(lastUncovered) - - compared := bytes.Compare(buf[lastUncovered:wordLen], g.data[postLoopPos:postLoopPos+dif]) - if compared != 0 { - g.dataP, g.dataBit = savePos, 0 - return compared - } - if lenBuf < int(wordLen) { - g.dataP, g.dataBit = savePos, 0 - return -1 - } - postLoopPos += dif - } - if lenBuf < int(wordLen) { - g.dataP, g.dataBit = savePos, 0 - return -1 - } - if lenBuf > int(wordLen) { - g.dataP, g.dataBit = savePos, 0 - return 1 - } - g.dataP, g.dataBit = postLoopPos, 0 - return 0 -} - // MatchPrefix only checks if the word at the current offset has a buf prefix. Does not move offset to the next word. func (g *Getter) MatchPrefix(prefix []byte) bool { savePos := g.dataP @@ -1060,9 +961,7 @@ func (g *Getter) MatchCmp(buf []byte) int { return cmp } -// MatchPrefixCmp lexicographically compares given prefix with the word at the current offset in the file. 
-// returns 0 if buf == word, -1 if buf < word, 1 if buf > word -func (g *Getter) MatchPrefixCmp(prefix []byte) int { +func (g *Getter) MatchPrefixUncompressed(prefix []byte) bool { savePos := g.dataP defer func() { g.dataP, g.dataBit = savePos, 0 @@ -1072,63 +971,18 @@ wordLen := g.nextPos(true /* clean */) wordLen-- // because when create huffman tree we do ++ , because 0 is terminator prefixLen := len(prefix) if wordLen == 0 && prefixLen != 0 { - return 1 + return false } if prefixLen == 0 { - return 0 + return true } - decoded := make([]byte, wordLen) - var bufPos int - // In the first pass, we only check patterns - // Only run this loop as far as the prefix goes, there is no need to check further - for pos := g.nextPos(false /* clean */); pos != 0; pos = g.nextPos(false) { - bufPos += int(pos) - 1 - if bufPos > prefixLen { - break - } - pattern := g.nextPattern() - copy(decoded[bufPos:], pattern) - } - - if g.dataBit > 0 { - g.dataP++ - g.dataBit = 0 - } - postLoopPos := g.dataP - g.dataP, g.dataBit = savePos, 0 - g.nextPos(true /* clean */) // Reset the state of huffman decoder - // Second pass - we check spaces not covered by the patterns - var lastUncovered int - bufPos = 0 - for pos := g.nextPos(false /* clean */); pos != 0 && lastUncovered < prefixLen; pos = g.nextPos(false) { - bufPos += int(pos) - 1 - if bufPos > lastUncovered { - dif := uint64(bufPos - lastUncovered) - copy(decoded[lastUncovered:bufPos], g.data[postLoopPos:postLoopPos+dif]) - postLoopPos += dif - } - lastUncovered = bufPos + len(g.nextPattern()) - } - if prefixLen > lastUncovered && int(wordLen) > lastUncovered { - dif := wordLen - uint64(lastUncovered) - copy(decoded[lastUncovered:wordLen], g.data[postLoopPos:postLoopPos+dif]) - // postLoopPos += dif - } - var cmp int - if prefixLen > int(wordLen) { - // TODO(racytech): handle this case - // e.g: prefix = 'aaacb' - // word = 'aaa' - cmp = bytes.Compare(prefix, decoded) - } else { - cmp = bytes.Compare(prefix, decoded[:prefixLen]) - } + g.nextPos(true) - return cmp + return bytes.HasPrefix(g.data[g.dataP:g.dataP+wordLen], prefix) } -func (g *Getter) MatchPrefixUncompressed(prefix []byte) int { +func (g *Getter) MatchCmpUncompressed(buf []byte) int { savePos := g.dataP defer func() { g.dataP, g.dataBit = savePos, 0 @@ -1136,23 +990,17 @@ wordLen := g.nextPos(true /* clean */) wordLen-- // because when create huffman tree we do ++ , because 0 is terminator - prefixLen := len(prefix) - if wordLen == 0 && prefixLen != 0 { + bufLen := len(buf) + if wordLen == 0 && bufLen != 0 { return 1 } - if prefixLen == 0 { - return 0 + if bufLen == 0 { + return -1 } g.nextPos(true) - // if prefixLen > int(wordLen) { - // // TODO(racytech): handle this case - // // e.g: prefix = 'aaacb' - // // word = 'aaa' - // } - - return bytes.Compare(prefix, g.data[g.dataP:g.dataP+wordLen]) + return bytes.Compare(buf, g.data[g.dataP:g.dataP+wordLen]) } // FastNext extracts a compressed word from current offset in the file diff --git a/erigon-lib/seg/decompress_bench_test.go b/erigon-lib/seg/decompress_bench_test.go index 2f524dc3f70..30e8cd85d85 100644 --- a/erigon-lib/seg/decompress_bench_test.go +++ b/erigon-lib/seg/decompress_bench_test.go @@ -65,16 +65,6 @@ } } -func BenchmarkDecompressMatch(b *testing.B) { - t := new(testing.T) - d := prepareDict(t) - defer d.Close() - g := d.MakeGetter() - for i := 0; i < b.N; i++ { - _ = g.Match([]byte("longlongword")) - } -} -
func BenchmarkDecompressMatchCmp(b *testing.B) { t := new(testing.T) d := prepareDict(t) @@ -99,17 +89,6 @@ func BenchmarkDecompressMatchPrefix(b *testing.B) { } } -func BenchmarkDecompressMatchPrefixCmp(b *testing.B) { - t := new(testing.T) - d := prepareDict(t) - defer d.Close() - g := d.MakeGetter() - - for i := 0; i < b.N; i++ { - _ = g.MatchPrefixCmp([]byte("longlongword")) - } -} - func BenchmarkDecompressTorrent(t *testing.B) { t.Skip() diff --git a/erigon-lib/seg/decompress_fuzz_test.go b/erigon-lib/seg/decompress_fuzz_test.go index a6f375a281e..645f237871a 100644 --- a/erigon-lib/seg/decompress_fuzz_test.go +++ b/erigon-lib/seg/decompress_fuzz_test.go @@ -83,7 +83,7 @@ func FuzzDecompressMatch(f *testing.F) { t.Fatalf("MatchCmp: expected match: %v\n", expected) } g.Reset(savePos) - ok := g.Match(expected) + ok := g.MatchCmp(expected) pos2 := g.dataP if ok != 0 { t.Fatalf("MatchBool: expected match: %v\n", expected) diff --git a/erigon-lib/seg/decompress_test.go b/erigon-lib/seg/decompress_test.go index 1b315df0390..9537c7c6760 100644 --- a/erigon-lib/seg/decompress_test.go +++ b/erigon-lib/seg/decompress_test.go @@ -88,7 +88,7 @@ func TestDecompressMatchOK(t *testing.T) { w := loremStrings[i] if i%2 != 0 { expected := fmt.Sprintf("%s %d", w, i) - cmp := g.Match([]byte(expected)) + cmp := g.MatchCmp([]byte(expected)) if cmp != 0 { t.Errorf("expexted match with %s", expected) } @@ -164,7 +164,7 @@ func TestDecompressMatchOKCondensed(t *testing.T) { for g.HasNext() { if i%2 != 0 { expected := fmt.Sprintf("word-%d", i) - cmp := g.Match([]byte(expected)) + cmp := g.MatchCmp([]byte(expected)) if cmp != 0 { t.Errorf("expexted match with %s", expected) } @@ -188,7 +188,7 @@ func TestDecompressMatchNotOK(t *testing.T) { for g.HasNext() { w := loremStrings[i] expected := fmt.Sprintf("%s %d", w, i+1) - cmp := g.Match([]byte(expected)) + cmp := g.MatchCmp([]byte(expected)) if cmp == 0 { t.Errorf("not expexted match with %s", expected) } else { @@ -241,47 +241,6 @@ func TestDecompressMatchPrefix(t *testing.T) { } } -func TestDecompressMatchPrefixCmp(t *testing.T) { - d := prepareLoremDict(t) - defer d.Close() - g := d.MakeGetter() - i := 0 - skipCount := 0 - for g.HasNext() { - w := loremStrings[i] - expected := []byte(fmt.Sprintf("%s %d", w, i+1)) - expected = expected[:len(expected)/2] - cmp := g.MatchPrefixCmp(expected) - if cmp != 0 { - t.Errorf("expexted match with %s", expected) - } - g.Skip() - skipCount++ - i++ - } - if skipCount != i { - t.Errorf("something wrong with match logic") - } - g.Reset(0) - skipCount = 0 - i = 0 - for g.HasNext() { - w := loremStrings[i] - expected := []byte(fmt.Sprintf("%s %d", w, i+1)) - expected = expected[:len(expected)/2] - if len(expected) > 0 { - expected[len(expected)-1]++ - cmp := g.MatchPrefixCmp(expected) - if cmp == 0 { - t.Errorf("not expexted match with %s", expected) - } - } - g.Skip() - skipCount++ - i++ - } -} - func prepareLoremDictUncompressed(t *testing.T) *Decompressor { t.Helper() logger := log.New() diff --git a/erigon-lib/state/aggregator.go b/erigon-lib/state/aggregator.go index 0fc52e5e285..831966a524d 100644 --- a/erigon-lib/state/aggregator.go +++ b/erigon-lib/state/aggregator.go @@ -164,7 +164,7 @@ func NewAggregator(ctx context.Context, dirs datadir.Dirs, aggregationStep uint6 }, restrictSubsetFileDeletions: a.commitmentValuesTransform, } - if a.d[kv.AccountsDomain], err = NewDomain(cfg, aggregationStep, kv.FileAccountDomain, kv.TblAccountVals, kv.TblAccountHistoryKeys, kv.TblAccountHistoryVals, kv.TblAccountIdx, integrityCheck, 
logger); err != nil { + if a.d[kv.AccountsDomain], err = NewDomain(cfg, aggregationStep, kv.AccountsDomain, kv.TblAccountVals, kv.TblAccountHistoryKeys, kv.TblAccountHistoryVals, kv.TblAccountIdx, integrityCheck, logger); err != nil { return nil, err } cfg = domainCfg{ @@ -174,7 +174,7 @@ func NewAggregator(ctx context.Context, dirs datadir.Dirs, aggregationStep uint6 }, restrictSubsetFileDeletions: a.commitmentValuesTransform, } - if a.d[kv.StorageDomain], err = NewDomain(cfg, aggregationStep, kv.FileStorageDomain, kv.TblStorageVals, kv.TblStorageHistoryKeys, kv.TblStorageHistoryVals, kv.TblStorageIdx, integrityCheck, logger); err != nil { + if a.d[kv.StorageDomain], err = NewDomain(cfg, aggregationStep, kv.StorageDomain, kv.TblStorageVals, kv.TblStorageHistoryKeys, kv.TblStorageHistoryVals, kv.TblStorageIdx, integrityCheck, logger); err != nil { return nil, err } cfg = domainCfg{ @@ -184,7 +184,7 @@ func NewAggregator(ctx context.Context, dirs datadir.Dirs, aggregationStep uint6 }, largeVals: true, } - if a.d[kv.CodeDomain], err = NewDomain(cfg, aggregationStep, kv.FileCodeDomain, kv.TblCodeVals, kv.TblCodeHistoryKeys, kv.TblCodeHistoryVals, kv.TblCodeIdx, integrityCheck, logger); err != nil { + if a.d[kv.CodeDomain], err = NewDomain(cfg, aggregationStep, kv.CodeDomain, kv.TblCodeVals, kv.TblCodeHistoryKeys, kv.TblCodeHistoryVals, kv.TblCodeIdx, integrityCheck, logger); err != nil { return nil, err } cfg = domainCfg{ @@ -197,7 +197,7 @@ func NewAggregator(ctx context.Context, dirs datadir.Dirs, aggregationStep uint6 restrictSubsetFileDeletions: a.commitmentValuesTransform, compress: CompressNone, } - if a.d[kv.CommitmentDomain], err = NewDomain(cfg, aggregationStep, kv.FileCommitmentDomain, kv.TblCommitmentVals, kv.TblCommitmentHistoryKeys, kv.TblCommitmentHistoryVals, kv.TblCommitmentIdx, integrityCheck, logger); err != nil { + if a.d[kv.CommitmentDomain], err = NewDomain(cfg, aggregationStep, kv.CommitmentDomain, kv.TblCommitmentVals, kv.TblCommitmentHistoryKeys, kv.TblCommitmentHistoryVals, kv.TblCommitmentIdx, integrityCheck, logger); err != nil { return nil, err } //aCfg := AppendableCfg{ diff --git a/erigon-lib/state/aggregator_bench_test.go b/erigon-lib/state/aggregator_bench_test.go index 2d0f69465b1..961ceee3654 100644 --- a/erigon-lib/state/aggregator_bench_test.go +++ b/erigon-lib/state/aggregator_bench_test.go @@ -60,7 +60,7 @@ type txWithCtx struct { } func WrapTxWithCtx(tx kv.Tx, ctx *AggregatorRoTx) *txWithCtx { return &txWithCtx{Tx: tx, ac: ctx} } -func (tx *txWithCtx) AggTx() interface{} { return tx.ac } +func (tx *txWithCtx) AggTx() any { return tx.ac } func BenchmarkAggregator_Processing(b *testing.B) { ctx, cancel := context.WithCancel(context.Background()) diff --git a/erigon-lib/state/appendable.go b/erigon-lib/state/appendable.go index ff2f34369be..945801f5c0f 100644 --- a/erigon-lib/state/appendable.go +++ b/erigon-lib/state/appendable.go @@ -69,7 +69,7 @@ type Appendable struct { // _visibleFiles - underscore in name means: don't use this field directly, use BeginFilesRo() // underlying array is immutable - means it's ready for zero-copy use - _visibleFiles []ctxItem + _visibleFiles []visibleFile table string // txnNum_u64 -> key (k+auto_increment) filenameBase string @@ -112,7 +112,7 @@ func NewAppendable(cfg AppendableCfg, aggregationStep uint64, filenameBase, tabl compression: CompressNone, //CompressKeys | CompressVals, } ap.indexList = withHashMap - ap._visibleFiles = []ctxItem{} + ap._visibleFiles = []visibleFile{} return &ap, nil } @@ -685,6 +685,7 @@ func 
(ap *Appendable) collate(ctx context.Context, step uint64, roTx kv.Tx) (App coll.Close() } }() + comp, err := seg.NewCompressor(ctx, "collate "+ap.filenameBase, coll.iiPath, ap.cfg.Dirs.Tmp, seg.MinPatternScore, ap.compressWorkers, log.LvlTrace, ap.logger) if err != nil { return coll, fmt.Errorf("create %s compressor: %w", ap.filenameBase, err) diff --git a/erigon-lib/state/archive.go b/erigon-lib/state/archive.go index dc39708cd06..73487dca47e 100644 --- a/erigon-lib/state/archive.go +++ b/erigon-lib/state/archive.go @@ -61,7 +61,7 @@ func (g *getter) MatchPrefix(prefix []byte) bool { if g.c&CompressKeys != 0 { return g.Getter.MatchPrefix(prefix) } - return g.Getter.MatchPrefixUncompressed(prefix) == 0 + return g.Getter.MatchPrefixUncompressed(prefix) } func (g *getter) Next(buf []byte) ([]byte, uint64) { diff --git a/erigon-lib/state/bps_tree.go b/erigon-lib/state/bps_tree.go index 3dc13db80cc..bd9fd0b9836 100644 --- a/erigon-lib/state/bps_tree.go +++ b/erigon-lib/state/bps_tree.go @@ -20,8 +20,12 @@ import ( "bytes" "errors" "fmt" + "unsafe" + + "github.com/c2h5oh/datasize" "github.com/erigontech/erigon-lib/common" + "github.com/erigontech/erigon-lib/log/v3" "github.com/erigontech/erigon-lib/recsplit/eliasfano32" ) @@ -53,9 +57,9 @@ func NewBpsTree(kv ArchiveGetter, offt *eliasfano32.EliasFano, M uint64, dataLoo } type BpsTree struct { - offt *eliasfano32.EliasFano - mx [][]Node - M uint64 + offt *eliasfano32.EliasFano // ef with offsets to key/vals + mx []Node + M uint64 // limit on amount of 'children' for node trace bool dataLookupFunc dataLookupFunc @@ -67,13 +71,14 @@ type BpsTreeIterator struct { i uint64 } +// Di returns ordinal number of current key in the tree func (it *BpsTreeIterator) Di() uint64 { return it.i } func (it *BpsTreeIterator) KVFromGetter(g ArchiveGetter) ([]byte, []byte, error) { if it == nil { - return nil, nil, fmt.Errorf("iterator is nil") + return nil, nil, errors.New("iterator is nil") } //fmt.Printf("kv from %p getter %p tree %p offt %d\n", it, g, it.t, it.i) k, v, err := it.t.dataLookupFunc(it.i, g) @@ -123,82 +128,66 @@ func (it *BpsTreeIterator) Next() bool { //} type Node struct { - off uint64 - di uint64 - prefix []byte + key []byte + off uint64 // offset in kv file to key + di uint64 // key ordinal number in kv } -func (b *BpsTree) traverse(g ArchiveGetter, mx [][]Node, n, di, i uint64) { - if i >= n { - return +func (b *BpsTree) WarmUp(kv ArchiveGetter) error { + N := b.offt.Count() + if N == 0 { + return nil } - - for j := uint64(1); j <= b.M; j += b.M / 2 { - ik := i*b.M + j - if ik >= n { - break - } - _, k, err := b.keyCmpFunc(nil, ik, g) - if err != nil { - panic(err) - } - if k != nil { - mx[di] = append(mx[di], Node{off: b.offt.Get(ik), prefix: common.Copy(k), di: ik}) - //fmt.Printf("d=%d k %x %d\n", di+1, k, offt) - } - b.traverse(g, mx, n, di, ik) + b.mx = make([]Node, 0, N/b.M) + if b.trace { + fmt.Printf("mx cap %d N=%d M=%d\n", cap(b.mx), N, b.M) } -} - -func (b *BpsTree) WarmUp(kv ArchiveGetter) error { - k := b.offt.Count() - d := logBase(k, b.M) - mx := make([][]Node, d+1) - _, key, err := b.keyCmpFunc(nil, 0, kv) - if err != nil { - return err - } - if key != nil { - mx[0] = append(mx[0], Node{off: b.offt.Get(0), prefix: common.Copy(key)}) - //fmt.Printf("d=%d k %x %d\n", di, k, offt) + step := b.M + if N < b.M { // cache all keys if less than M + step = 1 } - b.traverse(kv, mx, k, 0, 0) - if b.trace { - for i := 0; i < len(mx); i++ { - for j := 0; j < len(mx[i]); j++ { - fmt.Printf("mx[%d][%d] %x %d %d\n", i, j, mx[i][j].prefix, 
mx[i][j].off, mx[i][j].di) - } + // extremely stupid picking of needed nodes: + cachedBytes := uint64(0) + nsz := uint64(unsafe.Sizeof(Node{})) + for i := step; i < N; i += step { + di := i - 1 + _, key, err := b.keyCmpFunc(nil, di, kv) + if err != nil { + return err } + b.mx = append(b.mx, Node{off: b.offt.Get(di), key: common.Copy(key), di: di}) + cachedBytes += nsz + uint64(len(key)) } - b.mx = mx + + log.Root().Debug("WarmUp finished", "file", kv.FileName(), "M", b.M, "N", N, + "cached", fmt.Sprintf("%d %%%.5f", len(b.mx), float64(len(b.mx))/float64(N)*100), + "cacheSize", datasize.ByteSize(cachedBytes).HR(), "fileSize", datasize.ByteSize(kv.Size()).HR()) return nil } +// bs performs pre-search over warmed-up list of nodes to figure out left and right bounds on di for key func (b *BpsTree) bs(x []byte) (n Node, dl, dr uint64) { dr = b.offt.Count() - for d, row := range b.mx { - m, l, r := 0, 0, len(row) //nolint - for l < r { - m = (l + r) >> 1 - n = row[m] + m, l, r := 0, 0, len(b.mx) //nolint + for l < r { + m = (l + r) >> 1 + n = b.mx[m] - if b.trace { - fmt.Printf("bs[%d][%d] i=%d %x\n", d, m, n.di, n.prefix) - } - switch bytes.Compare(n.prefix, x) { - case 0: - return n, n.di, n.di - case 1: - r = m - dr = n.di - case -1: - l = m + 1 - dl = n.di - } + if b.trace { + fmt.Printf("bs di:%d k:%x\n", n.di, n.key) + } + switch bytes.Compare(n.key, x) { + case 0: + return n, n.di, n.di + case 1: + r = m + dr = n.di + case -1: + l = m + 1 + dl = n.di } - } return n, dl, dr } @@ -208,94 +197,100 @@ func (b *BpsTree) bs(x []byte) (n Node, dl, dr uint64) { // If key is nil, returns first key and found=true // If found item.key has a prefix of key, returns found=false and item.key // if key is greater than all keys, returns nil, found=false -func (b *BpsTree) Seek(g ArchiveGetter, key []byte) (skey []byte, di uint64, found bool, err error) { - if key == nil && b.offt.Count() > 0 { - //return &BpsTreeIterator{t: b, i: 0}, nil - var cmp int - cmp, skey, err = b.keyCmpFunc(key, 0, g) - if err != nil { - return nil, 0, false, err - } - return skey, 0, cmp == 0, nil - } - - l, r := uint64(0), b.offt.Count() +func (b *BpsTree) Seek(g ArchiveGetter, seekKey []byte) (key, value []byte, di uint64, found bool, err error) { + //b.trace = true if b.trace { - fmt.Printf("seek %x [%d %d]\n", key, l, r) + fmt.Printf("seek %x\n", seekKey) } - defer func() { - if b.trace { - fmt.Printf("found %x [%d %d]\n", key, l, r) + if len(seekKey) == 0 && b.offt.Count() > 0 { + key, value, err = b.dataLookupFunc(0, g) + if err != nil { + return nil, nil, 0, false, err } - }() + //return key, value, 0, bytes.Compare(key, seekKey) >= 0, nil + return key, value, 0, bytes.Equal(key, seekKey), nil + } - n, dl, dr := b.bs(key) + n, l, r := b.bs(seekKey) // l===r when key is found if b.trace { - fmt.Printf("pivot %d n %x [%d %d]\n", n.di, n.prefix, dl, dr) + fmt.Printf("pivot di:%d di(LR): [%d %d] k: %x found: %t\n", n.di, l, r, n.key, l == r) + defer func() { fmt.Printf("found=%t %x [%d %d]\n", bytes.Equal(key, seekKey), seekKey, l, r) }() } - l, r = dl, dr - var m uint64 var cmp int for l < r { + if r-l <= DefaultBtreeStartSkip { // found small range, faster to scan now + cmp, key, err = b.keyCmpFunc(seekKey, l, g) + if err != nil { + return nil, nil, 0, false, err + } + if b.trace { + fmt.Printf("fs di:[%d %d] k: %x\n", l, r, key) + } + //fmt.Printf("N %d l %d cmp %d (found %x want %x)\n", b.offt.Count(), l, cmp, key, seekKey) + if cmp == 0 { + r = l + break + } else if cmp < 0 { //found key is greater than seekKey + if l+1 < 
b.offt.Count() { + l++ + continue + } + } + r = l + break + } + m = (l + r) >> 1 - cmp, skey, err = b.keyCmpFunc(key, m, g) + cmp, key, err = b.keyCmpFunc(seekKey, m, g) if err != nil { - return nil, 0, false, err + return nil, nil, 0, false, err } if b.trace { - fmt.Printf("lr %x [%d %d]\n", skey, l, r) + fmt.Printf("fs di:[%d %d] k: %x\n", l, r, key) } - switch cmp { - case 0: - return skey, m, true, nil - //return &BpsTreeIterator{t: b, i: m}, nil - case 1: + if cmp == 0 { + l, r = m, m + break + } else if cmp > 0 { r = m - case -1: + } else { l = m + 1 } + } + if l == r { m = l - //return &BpsTreeIterator{t: b, i: l}, nil } - - cmp, skey, err = b.keyCmpFunc(key, m, g) + key, value, err = b.dataLookupFunc(m, g) if err != nil { - return nil, 0, false, err + return nil, nil, 0, false, err } - return skey, m, cmp == 0, nil + return key, value, l, bytes.Equal(key, seekKey), nil } // returns first key which is >= key. // If key is nil, returns first key // if key is greater than all keys, returns nil func (b *BpsTree) Get(g ArchiveGetter, key []byte) ([]byte, bool, uint64, error) { - if key == nil && b.offt.Count() > 0 { + if b.trace { + fmt.Printf("get %x\n", key) + } + if len(key) == 0 && b.offt.Count() > 0 { k0, v0, err := b.dataLookupFunc(0, g) if err != nil || k0 != nil { return nil, false, 0, err } return v0, true, 0, nil } - - l, r := uint64(0), b.offt.Count() + n, l, r := b.bs(key) // l===r when key is found if b.trace { - fmt.Printf("seek %x [%d %d]\n", key, l, r) + fmt.Printf("pivot di: %d di(LR): [%d %d] k: %x found: %t\n", n.di, l, r, n.key, l == r) + defer func() { fmt.Printf("found %x [%d %d]\n", key, l, r) }() } - defer func() { - if b.trace { - fmt.Printf("found %x [%d %d]\n", key, l, r) - } - }() - n, dl, dr := b.bs(key) - if b.trace { - fmt.Printf("pivot %d n %x [%d %d]\n", n.di, n.prefix, dl, dr) - } - l, r = dl, dr var m uint64 for l < r { m = (l + r) >> 1 @@ -304,7 +299,7 @@ func (b *BpsTree) Get(g ArchiveGetter, key []byte) ([]byte, bool, uint64, error) return nil, false, 0, err } if b.trace { - fmt.Printf("lr [%d %d]\n", l, r) + fmt.Printf("fs [%d %d]\n", l, r) } switch cmp { @@ -345,3 +340,8 @@ func (b *BpsTree) Distances() (map[int]int, error) { } return distances, nil } + +func (b *BpsTree) Close() { + b.mx = nil + b.offt = nil +} diff --git a/erigon-lib/state/bpstree_bench_test.go b/erigon-lib/state/bpstree_bench_test.go new file mode 100644 index 00000000000..81e6b7fa9af --- /dev/null +++ b/erigon-lib/state/bpstree_bench_test.go @@ -0,0 +1,50 @@ +package state + +import ( + "path" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/erigontech/erigon-lib/log/v3" +) + +func BenchmarkBpsTreeSeek(t *testing.B) { + tmp := t.TempDir() + logger := log.New() + keyCount, M := 12_000_000, 256 + t.Logf("N: %d, M: %d skip since shard <= %d", keyCount, M, DefaultBtreeStartSkip) + compressFlags := CompressKeys | CompressVals + + dataPath := generateKV(t, tmp, 52, 180, keyCount, logger, 0) + + indexPath := path.Join(tmp, filepath.Base(dataPath)+".bti") + buildBtreeIndex(t, dataPath, indexPath, compressFlags, 1, logger, true) + + kv, bt, err := OpenBtreeIndexAndDataFile(indexPath, dataPath, uint64(M), compressFlags, false) + require.NoError(t, err) + require.EqualValues(t, bt.KeyCount(), keyCount) + defer bt.Close() + defer kv.Close() + + var key []byte + + getter := NewArchiveGetter(kv.MakeGetter(), compressFlags) + getter.Reset(0) + + t.ResetTimer() + t.ReportAllocs() + //r := rand.New(rand.NewSource(0)) + for i := 0; i < t.N; i++ { + if 
!getter.HasNext() { + getter.Reset(0) + } + key, _ = getter.Next(key[:0]) + getter.Skip() + //_, err := bt.Seek(getter, keys[r.Intn(len(keys))]) + _, err := bt.Seek(getter, key) + require.NoError(t, err) + } + t.ReportAllocs() +} diff --git a/erigon-lib/state/btree_index.go b/erigon-lib/state/btree_index.go index a563c85098a..c42c5a89f53 100644 --- a/erigon-lib/state/btree_index.go +++ b/erigon-lib/state/btree_index.go @@ -51,6 +51,9 @@ const BtreeLogPrefix = "btree" // DefaultBtreeM - amount of keys on leaf of BTree // It will do log2(M) co-located-reads from data file - for binary-search inside leaf var DefaultBtreeM = uint64(256) + +const DefaultBtreeStartSkip = uint64(4) // defines smallest shard available for scan instead of binsearch + var ErrBtIndexLookupBounds = errors.New("BtIndex: lookup di bounds error") func logBase(n, base uint64) uint64 { @@ -611,7 +614,7 @@ func NewBtIndexWriter(args BtIndexWriterArgs, logger log.Logger) (*BtIndexWriter func (btw *BtIndexWriter) AddKey(key []byte, offset uint64) error { if btw.built { - return fmt.Errorf("cannot add keys after perfect hash function had been built") + return errors.New("cannot add keys after perfect hash function had been built") } binary.BigEndian.PutUint64(btw.numBuf[:], offset) @@ -643,7 +646,7 @@ func (btw *BtIndexWriter) loadFuncBucket(k, v []byte, _ etl.CurrentTableReader, // of building the perfect hash function and writing index into a file func (btw *BtIndexWriter) Build() error { if btw.built { - return fmt.Errorf("already built") + return errors.New("already built") } var err error if btw.indexF, err = os.Create(btw.tmpFilePath); err != nil { @@ -947,6 +950,9 @@ func (b *BtIndex) Close() { } b.file = nil } + if b.bplus != nil { + b.bplus.Close() + } } // Get - exact match of key. 
`k == nil` - means not found @@ -1010,24 +1016,22 @@ func (b *BtIndex) Seek(g ArchiveGetter, x []byte) (*Cursor, error) { if b.Empty() { return nil, nil } - - // defer func() { - // fmt.Printf("[Bindex][%s] seekInFiles '%x' -> '%x' di=%d\n", b.FileName(), x, cursor.Value(), cursor.d) - // }() - var ( - k []byte - dt uint64 - found bool - err error - ) - if UseBpsTree { - _, dt, found, err = b.bplus.Seek(g, x) - } else { - _, dt, found, err = b.alloc.Seek(g, x) + k, v, dt, _, err := b.bplus.Seek(g, x) + if err != nil /*|| !found*/ { + if errors.Is(err, ErrBtIndexLookupBounds) { + return nil, nil + } + return nil, err + } + if bytes.Compare(k, x) >= 0 { + return b.newCursor(context.Background(), k, v, dt, g), nil + } + return nil, nil } - _ = found - if err != nil /*|| !found*/ { + + _, dt, found, err := b.alloc.Seek(g, x) + if err != nil || !found { if errors.Is(err, ErrBtIndexLookupBounds) { return nil, nil } diff --git a/erigon-lib/state/btree_index_test.go b/erigon-lib/state/btree_index_test.go index 36f11d481df..6025ccf20cf 100644 --- a/erigon-lib/state/btree_index_test.go +++ b/erigon-lib/state/btree_index_test.go @@ -34,8 +34,8 @@ import ( ) func Test_BtreeIndex_Init2(t *testing.T) { - //mainnnet: storage.128-160.kv 110mil keys, 100mb bloomfilter of 0.01 (1%) miss-probability - //no much reason to merge bloomfilter - can merge them on starup + //mainnet: storage.128-160.kv 110mil keys, 100mb bloomfilter of 0.01 (1%) miss-probability + //no much reason to merge bloomfilter - can merge them on startup //1B keys: 1Gb sizes := []int{54, 74, 135, 139, 109, 105, 144} @@ -133,7 +133,7 @@ func Test_BtreeIndex_Seek(t *testing.T) { for i := 0; i < len(keys); i++ { cur, err := bt.Seek(getter, keys[i]) require.NoErrorf(t, err, "i=%d", i) - require.EqualValues(t, keys[i], cur.key) + require.EqualValuesf(t, keys[i], cur.key, "i=%d", i) require.NotEmptyf(t, cur.Value(), "i=%d", i) // require.EqualValues(t, uint64(i), cur.Value()) } @@ -175,6 +175,7 @@ func Test_BtreeIndex_Build(t *testing.T) { c, err := bt.Seek(getter, nil) require.NoError(t, err) + require.NotNil(t, c) for i := 0; i < len(keys); i++ { k := c.Key() if !bytes.Equal(keys[i], k) { @@ -302,7 +303,7 @@ func TestBpsTree_Seek(t *testing.T) { //tr := newTrie() ef := eliasfano32.NewEliasFano(uint64(keyCount), ps[len(ps)-1]) for i := 0; i < len(ps); i++ { - //tr.insert(Node{i: uint64(i), prefix: common.Copy(keys[i]), off: ps[i]}) + //tr.insert(Node{i: uint64(i), key: common.Copy(keys[i]), off: ps[i]}) ef.AddOffset(ps[i]) } ef.Build() @@ -315,7 +316,7 @@ func TestBpsTree_Seek(t *testing.T) { for i := 0; i < len(keys); i++ { sk := keys[i] - k, di, found, err := bp.Seek(g, sk[:len(sk)/2]) + k, _, di, found, err := bp.Seek(g, sk[:len(sk)/2]) _ = di _ = found require.NoError(t, err) diff --git a/erigon-lib/state/domain.go b/erigon-lib/state/domain.go index 8bf1d1488c9..29f2876a1f0 100644 --- a/erigon-lib/state/domain.go +++ b/erigon-lib/state/domain.go @@ -32,6 +32,8 @@ import ( "sync/atomic" "time" + "github.com/erigontech/erigon-lib/metrics" + btree2 "github.com/tidwall/btree" "golang.org/x/sync/errgroup" @@ -57,10 +59,10 @@ const StepsInColdFile = 64 var ( asserts = dbg.EnvBool("AGG_ASSERTS", false) traceFileLife = dbg.EnvString("AGG_TRACE_FILE_LIFE", "") - traceGetLatest = dbg.EnvString("AGG_TRACE_GET_LATEST", "") traceGetAsOf = dbg.EnvString("AGG_TRACE_GET_AS_OF", "") tracePutWithPrev = dbg.EnvString("AGG_TRACE_PUT_WITH_PREV", "") ) +var traceGetLatest, _ = kv.String2Domain(dbg.EnvString("AGG_TRACE_GET_LATEST", "")) // Domain is a part of the 
state (examples are Accounts, Storage, Code) // Domain should not have any go routines or locks @@ -72,6 +74,8 @@ var ( type Domain struct { *History + name kv.Domain + // dirtyFiles - list of ALL files - including: un-indexed-yet, garbage, merged-into-bigger-one, ... // thread-safe, but maybe need 1 RWLock for all trees in Aggregator // @@ -85,7 +89,7 @@ type Domain struct { // _visibleFiles - underscore in name means: don't use this field directly, use BeginFilesRo() // underlying array is immutable - means it's ready for zero-copy use - _visibleFiles []ctxItem + _visibleFiles []visibleFile integrityCheck func(name kv.Domain, fromStep, toStep uint64) bool @@ -111,11 +115,12 @@ type domainCfg struct { restrictSubsetFileDeletions bool } -func NewDomain(cfg domainCfg, aggregationStep uint64, filenameBase, valsTable, indexKeysTable, historyValsTable, indexTable string, integrityCheck func(name kv.Domain, fromStep, toStep uint64) bool, logger log.Logger) (*Domain, error) { +func NewDomain(cfg domainCfg, aggregationStep uint64, name kv.Domain, valsTable, indexKeysTable, historyValsTable, indexTable string, integrityCheck func(name kv.Domain, fromStep, toStep uint64) bool, logger log.Logger) (*Domain, error) { if cfg.hist.iiCfg.dirs.SnapDomain == "" { panic("empty `dirs` variable") } d := &Domain{ + name: name, valsTable: valsTable, compression: cfg.compress, dirtyFiles: btree2.NewBTreeGOptions[*filesItem](filesItemLess, btree2.Options{Degree: 128, NoLocks: false}), @@ -128,10 +133,10 @@ func NewDomain(cfg domainCfg, aggregationStep uint64, filenameBase, valsTable, i integrityCheck: integrityCheck, } - d._visibleFiles = []ctxItem{} + d._visibleFiles = []visibleFile{} var err error - if d.History, err = NewHistory(cfg.hist, aggregationStep, filenameBase, indexKeysTable, indexTable, historyValsTable, nil, logger); err != nil { + if d.History, err = NewHistory(cfg.hist, aggregationStep, name.String(), indexKeysTable, indexTable, historyValsTable, nil, logger); err != nil { return nil, err } @@ -489,7 +494,7 @@ func (dt *DomainRoTx) newWriter(tmpdir string, discard bool) *domainBufferedWrit aux: make([]byte, 0, 128), valsTable: dt.d.valsTable, largeVals: dt.d.largeVals, - values: etl.NewCollector(dt.d.filenameBase+"domain.flush", tmpdir, etl.NewSortableBuffer(WALCollectorRAM), dt.d.logger).LogLvl(log.LvlTrace), + values: etl.NewCollector(dt.name.String()+"domain.flush", tmpdir, etl.NewSortableBuffer(WALCollectorRAM), dt.d.logger).LogLvl(log.LvlTrace), h: dt.ht.newWriter(tmpdir, discardHistory), } @@ -692,6 +697,7 @@ func (ch *CursorHeap) Pop() interface{} { // DomainRoTx allows accesing the same domain from multiple go-routines type DomainRoTx struct { + name kv.Domain ht *HistoryRoTx d *Domain files visibleFiles @@ -705,25 +711,18 @@ type DomainRoTx struct { valsC kv.Cursor } -func (dt *DomainRoTx) getFromFileL0(i int, filekey []byte) ([]byte, bool, error) { - return dt.getFromFile(i, filekey) -} -func (dt *DomainRoTx) getFromFileL1(i int, filekey []byte) ([]byte, bool, error) { - return dt.getFromFile(i, filekey) -} -func (dt *DomainRoTx) getFromFileL2(i int, filekey []byte) ([]byte, bool, error) { - return dt.getFromFile(i, filekey) -} -func (dt *DomainRoTx) getFromFileL3(i int, filekey []byte) ([]byte, bool, error) { - return dt.getFromFile(i, filekey) -} -func (dt *DomainRoTx) getFromFileL4(i int, filekey []byte) ([]byte, bool, error) { - return dt.getFromFile(i, filekey) -} -func (dt *DomainRoTx) getFromFileLRecent(i int, filekey []byte) ([]byte, bool, error) { - return dt.getFromFile(i, 
filekey) +func domainReadMetric(name kv.Domain, level int) metrics.Summary { + if level > 4 { + level = 5 + } + return mxsKVGet[name][level] } + func (dt *DomainRoTx) getFromFile(i int, filekey []byte) ([]byte, bool, error) { + if dbg.KVReadLevelledMetrics { + defer domainReadMetric(dt.name, i).ObserveDuration(time.Now()) + } + g := dt.statelessGetter(i) if !(UseBtree || UseBpsTree) { reader := dt.statelessIdxReader(i) @@ -859,6 +858,7 @@ func (d *Domain) BeginFilesRo() *DomainRoTx { } } return &DomainRoTx{ + name: d.name, d: d, ht: d.History.BeginFilesRo(), files: files, @@ -1385,6 +1385,10 @@ var ( ) func (dt *DomainRoTx) getFromFiles(filekey []byte) (v []byte, found bool, fileStartTxNum uint64, fileEndTxNum uint64, err error) { + if len(dt.files) == 0 { + return nil, false, 0, 0, err + } + hi, _ := dt.ht.iit.hashKey(filekey) for i := len(dt.files) - 1; i >= 0; i-- { @@ -1394,55 +1398,42 @@ func (dt *DomainRoTx) getFromFiles(filekey []byte) (v []byte, found bool, fileSt //} if dt.files[i].src.existence != nil { if !dt.files[i].src.existence.ContainsHash(hi) { - if traceGetLatest == dt.d.filenameBase { + if traceGetLatest == dt.name { fmt.Printf("GetLatest(%s, %x) -> existence index %s -> false\n", dt.d.filenameBase, filekey, dt.files[i].src.existence.FileName) } continue } else { - if traceGetLatest == dt.d.filenameBase { + if traceGetLatest == dt.name { fmt.Printf("GetLatest(%s, %x) -> existence index %s -> true\n", dt.d.filenameBase, filekey, dt.files[i].src.existence.FileName) } } } else { - if traceGetLatest == dt.d.filenameBase { - fmt.Printf("GetLatest(%s, %x) -> existence index is nil %s\n", dt.d.filenameBase, filekey, dt.files[i].src.decompressor.FileName()) + if traceGetLatest == dt.name { + fmt.Printf("GetLatest(%s, %x) -> existence index is nil %s\n", dt.name.String(), filekey, dt.files[i].src.decompressor.FileName()) } } } //t := time.Now() - switch i { - case 0: - v, found, err = dt.getFromFileL0(i, filekey) - case 1: - v, found, err = dt.getFromFileL1(i, filekey) - case 2: - v, found, err = dt.getFromFileL2(i, filekey) - case 3: - v, found, err = dt.getFromFileL3(i, filekey) - case 4: - v, found, err = dt.getFromFileL4(i, filekey) - default: - v, found, err = dt.getFromFileLRecent(i, filekey) - } + v, found, err = dt.getFromFile(i, filekey) if err != nil { return nil, false, 0, 0, err } if !found { - if traceGetLatest == dt.d.filenameBase { - fmt.Printf("GetLatest(%s, %x) -> not found in file %s\n", dt.d.filenameBase, filekey, dt.files[i].src.decompressor.FileName()) + if traceGetLatest == dt.name { + fmt.Printf("GetLatest(%s, %x) -> not found in file %s\n", dt.name.String(), filekey, dt.files[i].src.decompressor.FileName()) } // LatestStateReadGrindNotFound.ObserveDuration(t) continue } - if traceGetLatest == dt.d.filenameBase { - fmt.Printf("GetLatest(%s, %x) -> found in file %s\n", dt.d.filenameBase, filekey, dt.files[i].src.decompressor.FileName()) + if traceGetLatest == dt.name { + fmt.Printf("GetLatest(%s, %x) -> found in file %s\n", dt.name.String(), filekey, dt.files[i].src.decompressor.FileName()) } //LatestStateReadGrind.ObserveDuration(t) return v, true, dt.files[i].startTxNum, dt.files[i].endTxNum, nil } - if traceGetLatest == dt.d.filenameBase { - fmt.Printf("GetLatest(%s, %x) -> not found in %d files\n", dt.d.filenameBase, filekey, len(dt.files)) + if traceGetLatest == dt.name { + fmt.Printf("GetLatest(%s, %x) -> not found in %d files\n", dt.name.String(), filekey, len(dt.files)) } return nil, false, 0, 0, nil @@ -1611,10 +1602,10 @@ func (dt *DomainRoTx) 
GetLatest(key1, key2 []byte, roTx kv.Tx) ([]byte, uint64, var found bool var err error - if traceGetLatest == dt.d.filenameBase { + if traceGetLatest == dt.name { defer func() { fmt.Printf("GetLatest(%s, '%x' -> '%x') (from db=%t; istep=%x stepInFiles=%d)\n", - dt.d.filenameBase, key, v, found, foundStep, dt.files.EndTxNum()/dt.d.aggregationStep) + dt.name.String(), key, v, found, foundStep, dt.files.EndTxNum()/dt.d.aggregationStep) }() } @@ -1783,18 +1774,18 @@ func (dt *DomainRoTx) Prune(ctx context.Context, rwTx kv.RwTx, step, txFrom, txT var valsCursor kv.RwCursor - ancientDomainValsCollector := etl.NewCollector(dt.d.filenameBase+".domain.collate", dt.d.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), dt.d.logger).LogLvl(log.LvlTrace) + ancientDomainValsCollector := etl.NewCollector(dt.name.String()+".domain.collate", dt.d.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), dt.d.logger).LogLvl(log.LvlTrace) defer ancientDomainValsCollector.Close() if dt.d.largeVals { valsCursor, err = rwTx.RwCursor(dt.d.valsTable) if err != nil { - return stat, fmt.Errorf("create %s domain values cursor: %w", dt.d.filenameBase, err) + return stat, fmt.Errorf("create %s domain values cursor: %w", dt.name.String(), err) } } else { valsCursor, err = rwTx.RwCursorDupSort(dt.d.valsTable) if err != nil { - return stat, fmt.Errorf("create %s domain values cursor: %w", dt.d.filenameBase, err) + return stat, fmt.Errorf("create %s domain values cursor: %w", dt.name.String(), err) } } defer valsCursor.Close() @@ -1808,7 +1799,7 @@ func (dt *DomainRoTx) Prune(ctx context.Context, rwTx kv.RwTx, step, txFrom, txT prunedKey, err := GetExecV3PruneProgress(rwTx, dt.d.valsTable) if err != nil { - dt.d.logger.Error("get domain pruning progress", "name", dt.d.filenameBase, "error", err) + dt.d.logger.Error("get domain pruning progress", "name", dt.name.String(), "error", err) } var k, v []byte @@ -1823,7 +1814,7 @@ func (dt *DomainRoTx) Prune(ctx context.Context, rwTx kv.RwTx, step, txFrom, txT var stepBytes []byte for ; k != nil; k, v, err = valsCursor.Next() { if err != nil { - return stat, fmt.Errorf("iterate over %s domain keys: %w", dt.d.filenameBase, err) + return stat, fmt.Errorf("iterate over %s domain keys: %w", dt.name.String(), err) } if dt.d.largeVals { @@ -1841,7 +1832,7 @@ func (dt *DomainRoTx) Prune(ctx context.Context, rwTx kv.RwTx, step, txFrom, txT return stat, fmt.Errorf("load domain values: %w", err) } if err := SaveExecV3PruneProgress(rwTx, dt.d.valsTable, k); err != nil { - return stat, fmt.Errorf("save domain pruning progress: %s, %w", dt.d.filenameBase, err) + return stat, fmt.Errorf("save domain pruning progress: %s, %w", dt.name.String(), err) } return stat, nil } @@ -1857,7 +1848,7 @@ func (dt *DomainRoTx) Prune(ctx context.Context, rwTx kv.RwTx, step, txFrom, txT // consider ctx exiting as incorrect outcome, error is returned return stat, ctx.Err() case <-logEvery.C: - dt.d.logger.Info("[snapshots] prune domain", "name", dt.d.filenameBase, + dt.d.logger.Info("[snapshots] prune domain", "name", dt.name.String(), "pruned keys", stat.Values, "steps", fmt.Sprintf("%.2f-%.2f", float64(txFrom)/float64(dt.d.aggregationStep), float64(txTo)/float64(dt.d.aggregationStep))) default: diff --git a/erigon-lib/state/domain_shared.go b/erigon-lib/state/domain_shared.go index e89b8af1318..ac3459cb844 100644 --- a/erigon-lib/state/domain_shared.go +++ b/erigon-lib/state/domain_shared.go @@ -48,7 +48,7 @@ import ( "github.com/erigontech/erigon-lib/types" ) -var ErrBehindCommitment = fmt.Errorf("behind 
commitment") +var ErrBehindCommitment = errors.New("behind commitment") // KvList sort.Interface to sort write list by keys type KvList struct { @@ -104,7 +104,10 @@ type SharedDomains struct { } type HasAggTx interface { - AggTx() interface{} + AggTx() any +} +type HasAgg interface { + Agg() any } func NewSharedDomains(tx kv.Tx, logger log.Logger) (*SharedDomains, error) { @@ -176,7 +179,7 @@ func (sd *SharedDomains) GetDiffset(tx kv.RwTx, blockHash common.Hash, blockNumb return ReadDiffSet(tx, blockNumber, blockHash) } -func (sd *SharedDomains) AggTx() interface{} { return sd.aggTx } +func (sd *SharedDomains) AggTx() any { return sd.aggTx } func (sd *SharedDomains) CanonicalReader() CanonicalsReader { return nil //return sd.aggTx.appendable[kv.ReceiptsAppendable].ap.cfg.iters @@ -422,12 +425,12 @@ func (sd *SharedDomains) replaceShortenedKeysInBranch(prefix []byte, branch comm storageItem := sto.lookupFileByItsRange(fStartTxNum, fEndTxNum) if storageItem == nil { sd.logger.Crit(fmt.Sprintf("storage file of steps %d-%d not found\n", fStartTxNum/sd.aggTx.a.aggregationStep, fEndTxNum/sd.aggTx.a.aggregationStep)) - return nil, fmt.Errorf("storage file not found") + return nil, errors.New("storage file not found") } accountItem := acc.lookupFileByItsRange(fStartTxNum, fEndTxNum) if accountItem == nil { sd.logger.Crit(fmt.Sprintf("storage file of steps %d-%d not found\n", fStartTxNum/sd.aggTx.a.aggregationStep, fEndTxNum/sd.aggTx.a.aggregationStep)) - return nil, fmt.Errorf("account file not found") + return nil, errors.New("account file not found") } storageGetter := NewArchiveGetter(storageItem.decompressor.MakeGetter(), sto.d.compression) accountGetter := NewArchiveGetter(accountItem.decompressor.MakeGetter(), acc.d.compression) @@ -600,7 +603,7 @@ func (sd *SharedDomains) IndexAdd(table kv.InvertedIdx, key []byte) (err error) func (sd *SharedDomains) SetTx(tx kv.Tx) { if tx == nil { - panic(fmt.Errorf("tx is nil")) + panic("tx is nil") } sd.roTx = tx @@ -611,7 +614,7 @@ func (sd *SharedDomains) SetTx(tx kv.Tx) { sd.aggTx = casted.AggTx().(*AggregatorRoTx) if sd.aggTx == nil { - panic(fmt.Errorf("aggtx is nil")) + panic(errors.New("aggtx is nil")) } } @@ -972,7 +975,7 @@ func (sd *SharedDomains) DomainDel(domain kv.Domain, k1, k2 []byte, prevVal []by func (sd *SharedDomains) DomainDelPrefix(domain kv.Domain, prefix []byte) error { if domain != kv.StorageDomain { - return fmt.Errorf("DomainDelPrefix: not supported") + return errors.New("DomainDelPrefix: not supported") } type tuple struct { @@ -1018,7 +1021,7 @@ type SharedDomainsCommitmentContext struct { mode commitment.Mode branches map[string]cachedBranch keccak cryptozerocopy.KeccakState - updates *commitment.UpdateTree + updates *commitment.Updates patriciaTrie commitment.Trie justRestored atomic.Bool } @@ -1032,7 +1035,7 @@ func NewSharedDomainsCommitmentContext(sd *SharedDomains, mode commitment.Mode, keccak: sha3.NewLegacyKeccak256().(cryptozerocopy.KeccakState), } - ctx.patriciaTrie, ctx.updates = commitment.InitializeTrieAndUpdateTree(trieVariant, mode, sd.aggTx.a.tmpdir) + ctx.patriciaTrie, ctx.updates = commitment.InitializeTrieAndUpdates(trieVariant, mode, sd.aggTx.a.tmpdir) ctx.patriciaTrie.ResetContext(ctx) return ctx } @@ -1085,51 +1088,67 @@ func (sdc *SharedDomainsCommitmentContext) PutBranch(prefix []byte, data []byte, return sdc.sharedDomains.updateCommitmentData(prefix, data, prevData, prevStep) } -func (sdc *SharedDomainsCommitmentContext) GetAccount(plainKey []byte, cell *commitment.Cell) error { +func (sdc 
*SharedDomainsCommitmentContext) GetAccount(plainKey []byte) (*commitment.Update, error) { encAccount, _, err := sdc.sharedDomains.DomainGet(kv.AccountsDomain, plainKey, nil) if err != nil { - return fmt.Errorf("GetAccount failed: %w", err) + return nil, fmt.Errorf("GetAccount failed: %w", err) } - cell.Nonce = 0 - cell.Balance.Clear() + u := new(commitment.Update) + u.Reset() + if len(encAccount) > 0 { nonce, balance, chash := types.DecodeAccountBytesV3(encAccount) - cell.Nonce = nonce - cell.Balance.Set(balance) + u.Flags |= commitment.NonceUpdate + u.Nonce = nonce + u.Flags |= commitment.BalanceUpdate + u.Balance.Set(balance) if len(chash) > 0 { - copy(cell.CodeHash[:], chash) + u.Flags |= commitment.CodeUpdate + copy(u.CodeHash[:], chash) } } - if bytes.Equal(cell.CodeHash[:], commitment.EmptyCodeHash) { - cell.Delete = len(encAccount) == 0 - return nil + if u.CodeHash == commitment.EmptyCodeHashArray { + if len(encAccount) == 0 { + u.Flags = commitment.DeleteUpdate + } + return u, nil } code, _, err := sdc.sharedDomains.DomainGet(kv.CodeDomain, plainKey, nil) if err != nil { - return fmt.Errorf("GetAccount: failed to read latest code: %w", err) + return nil, fmt.Errorf("GetAccount/Code: failed to read latest code: %w", err) } if len(code) > 0 { sdc.keccak.Reset() sdc.keccak.Write(code) - sdc.keccak.Read(cell.CodeHash[:]) + sdc.keccak.Read(u.CodeHash[:]) + u.Flags |= commitment.CodeUpdate + } else { - cell.CodeHash = commitment.EmptyCodeHashArray + copy(u.CodeHash[:], commitment.EmptyCodeHashArray[:]) } - cell.Delete = len(encAccount) == 0 && len(code) == 0 - return nil + + if len(encAccount) == 0 && len(code) == 0 { + u.Flags = commitment.DeleteUpdate + } + return u, nil } -func (sdc *SharedDomainsCommitmentContext) GetStorage(plainKey []byte, cell *commitment.Cell) error { +func (sdc *SharedDomainsCommitmentContext) GetStorage(plainKey []byte) (*commitment.Update, error) { // Look in the summary table first enc, _, err := sdc.sharedDomains.DomainGet(kv.StorageDomain, plainKey, nil) if err != nil { - return err + return nil, err } - cell.StorageLen = len(enc) - copy(cell.Storage[:], enc) - cell.Delete = cell.StorageLen == 0 - return nil + u := new(commitment.Update) + u.StorageLen = len(enc) + if len(enc) == 0 { + u.Flags = commitment.DeleteUpdate + } else { + u.Flags |= commitment.StorageUpdate + copy(u.Storage[:u.StorageLen], enc) + } + return u, nil } func (sdc *SharedDomainsCommitmentContext) Reset() { @@ -1337,7 +1356,7 @@ func (sdc *SharedDomainsCommitmentContext) restorePatriciaState(value []byte) (u fmt.Printf("[commitment] restored state: block=%d txn=%d rootHash=%x\n", cs.blockNum, cs.txNum, rootHash) } } else { - return 0, 0, fmt.Errorf("state storing is only supported hex patricia trie") + return 0, 0, errors.New("state storing is only supported hex patricia trie") } return cs.blockNum, cs.txNum, nil } diff --git a/erigon-lib/state/domain_test.go b/erigon-lib/state/domain_test.go index f3f7fce79d0..9fc398c2114 100644 --- a/erigon-lib/state/domain_test.go +++ b/erigon-lib/state/domain_test.go @@ -83,7 +83,7 @@ func testDbAndDomainOfStep(t *testing.T, aggStep uint64, logger log.Logger) (kv. 
iiCfg: iiCfg{salt: &salt, dirs: dirs, db: db}, withLocalityIndex: false, withExistenceIndex: false, compression: CompressNone, historyLargeValues: true, }} - d, err := NewDomain(cfg, aggStep, kv.AccountsDomain.String(), valsTable, historyKeysTable, historyValsTable, indexTable, nil, logger) + d, err := NewDomain(cfg, aggStep, kv.AccountsDomain, valsTable, historyKeysTable, historyValsTable, indexTable, nil, logger) require.NoError(t, err) d.DisableFsync() d.compressWorkers = 1 @@ -1479,7 +1479,7 @@ func TestDomain_CanPruneAfterAggregation(t *testing.T) { dc = d.BeginFilesRo() can, untilStep = dc.canPruneDomainTables(tx, aggStep*stepToPrune) - require.False(t, can, "lattter step is not yet pruned") + require.False(t, can, "latter step is not yet pruned") require.EqualValues(t, stepToPrune, untilStep) dc.Close() diff --git a/erigon-lib/state/files_item.go b/erigon-lib/state/files_item.go index 1cccaec1c1f..bf7d393e822 100644 --- a/erigon-lib/state/files_item.go +++ b/erigon-lib/state/files_item.go @@ -39,7 +39,7 @@ import ( // such files must be hiddend from user (reader), but may be useful for background merging process, etc... // list of filesItem must be represented as Tree - because they may overlap -// ctxItem - class is used for good/visible files +// visibleFile - class is used for good/visible files type filesItem struct { decompressor *seg.Decompressor index *recsplit.Index @@ -173,9 +173,9 @@ func deleteMergeFile(dirtyFiles *btree2.BTreeG[*filesItem], outs []*filesItem, f } } -// ctxItem is like filesItem but only for good/visible files (indexed, not overlaped, not marked for deletion, etc...) -// it's ok to store ctxItem in array -type ctxItem struct { +// visibleFile is like filesItem but only for good/visible files (indexed, not overlapped, not marked for deletion, etc...) 
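To make the renamed comment concrete: calcVisibleFiles, further down in this same file's diff, walks the dirty-file tree in ascending range order and evicts any already-selected file that a later, larger file fully covers, so the resulting visibleFile slice is overlap-free and safe to keep as an immutable array. Below is a toy, self-contained model of that selection loop; the names file, isSubsetOf, and visible are illustrative stand-ins, not code from this PR:

```go
package main

import "fmt"

// file models a filesItem as a half-open txNum range [start, end).
type file struct{ start, end uint64 }

// isSubsetOf reports whether f's range is fully covered by g's.
func (f file) isSubsetOf(g file) bool { return g.start <= f.start && f.end <= g.end }

// visible mimics the subset-dropping loop in calcVisibleFiles: it walks files
// in tree order and drops selected files that a later super-set file covers.
func visible(sorted []file) []file {
	out := make([]file, 0, len(sorted))
	for _, it := range sorted {
		for len(out) > 0 && out[len(out)-1].isSubsetOf(it) {
			out = out[:len(out)-1] // drop the sub-set file, keep the merged one
		}
		out = append(out, it)
	}
	return out
}

func main() {
	// two small step files plus the merged file that covers them both
	fmt.Println(visible([]file{{0, 16}, {16, 24}, {0, 32}})) // [{0 32}]
}
```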
+// it's ok to store visibleFile in array +type visibleFile struct { getter *seg.Getter reader *recsplit.IndexReader startTxNum uint64 @@ -185,12 +185,12 @@ type ctxItem struct { src *filesItem } -func (i *ctxItem) hasTS(ts uint64) bool { return i.startTxNum <= ts && i.endTxNum > ts } -func (i *ctxItem) isSubSetOf(j *ctxItem) bool { return i.src.isSubsetOf(j.src) } //nolint -func (i *ctxItem) isSubsetOf(j *ctxItem) bool { return i.src.isSubsetOf(j.src) } //nolint +func (i *visibleFile) hasTS(ts uint64) bool { return i.startTxNum <= ts && i.endTxNum > ts } +func (i *visibleFile) isSubSetOf(j *visibleFile) bool { return i.src.isSubsetOf(j.src) } //nolint +func (i *visibleFile) isSubsetOf(j *visibleFile) bool { return i.src.isSubsetOf(j.src) } //nolint -func calcVisibleFiles(files *btree2.BTreeG[*filesItem], l idxList, trace bool) (roItems []ctxItem) { - newVisibleFiles := make([]ctxItem, 0, files.Len()) +func calcVisibleFiles(files *btree2.BTreeG[*filesItem], l idxList, trace bool) (roItems []visibleFile) { + newVisibleFiles := make([]visibleFile, 0, files.Len()) if trace { log.Warn("[dbg] calcVisibleFiles", "amount", files.Len()) } @@ -206,27 +206,27 @@ func calcVisibleFiles(files *btree2.BTreeG[*filesItem], l idxList, trace bool) ( // TODO: need somehow handle this case, but indices do not open in tests TestFindMergeRangeCornerCases if item.decompressor == nil { if trace { - log.Warn("[dbg] calcVisibleFiles1", "from", item.startTxNum, "to", item.endTxNum) + log.Warn("[dbg] calcVisibleFiles: decompressor not opened", "from", item.startTxNum, "to", item.endTxNum) } continue } if (l&withBTree != 0) && item.bindex == nil { if trace { - log.Warn("[dbg] calcVisibleFiles2", "f", item.decompressor.FileName()) + log.Warn("[dbg] calcVisibleFiles: BTindex not opened", "f", item.decompressor.FileName()) } //panic(fmt.Errorf("btindex nil: %s", item.decompressor.FileName())) continue } if (l&withHashMap != 0) && item.index == nil { if trace { - log.Warn("[dbg] calcVisibleFiles3", "f", item.decompressor.FileName()) + log.Warn("[dbg] calcVisibleFiles: RecSplit not opened", "f", item.decompressor.FileName()) } //panic(fmt.Errorf("index nil: %s", item.decompressor.FileName())) continue } if (l&withExistence != 0) && item.existence == nil { if trace { - log.Warn("[dbg] calcVisibleFiles4", "f", item.decompressor.FileName()) + log.Warn("[dbg] calcVisibleFiles: Existence not opened", "f", item.decompressor.FileName()) } //panic(fmt.Errorf("existence nil: %s", item.decompressor.FileName())) continue @@ -236,12 +236,13 @@ func calcVisibleFiles(files *btree2.BTreeG[*filesItem], l idxList, trace bool) ( // see super-set file, just drop sub-set files from list for len(newVisibleFiles) > 0 && newVisibleFiles[len(newVisibleFiles)-1].src.isSubsetOf(item) { if trace { - log.Warn("[dbg] calcVisibleFiles5", "f", newVisibleFiles[len(newVisibleFiles)-1].src.decompressor.FileName()) + log.Warn("[dbg] calcVisibleFiles: marked as garbage (is subset)", "item", item.decompressor.FileName(), + "of", newVisibleFiles[len(newVisibleFiles)-1].src.decompressor.FileName()) } newVisibleFiles[len(newVisibleFiles)-1].src = nil newVisibleFiles = newVisibleFiles[:len(newVisibleFiles)-1] } - newVisibleFiles = append(newVisibleFiles, ctxItem{ + newVisibleFiles = append(newVisibleFiles, visibleFile{ startTxNum: item.startTxNum, endTxNum: item.endTxNum, i: len(newVisibleFiles), @@ -251,13 +252,13 @@ func calcVisibleFiles(files *btree2.BTreeG[*filesItem], l idxList, trace bool) ( return true }) if newVisibleFiles == nil { - newVisibleFiles = 
[]ctxItem{} + newVisibleFiles = []visibleFile{} } return newVisibleFiles } // visibleFiles have no garbage (overlaps, unindexed, etc...) -type visibleFiles []ctxItem +type visibleFiles []visibleFile // EndTxNum return txNum which not included in file - it will be first txNum in future file func (files visibleFiles) EndTxNum() uint64 { diff --git a/erigon-lib/state/history.go b/erigon-lib/state/history.go index 9b810e932c9..07ba67bfed5 100644 --- a/erigon-lib/state/history.go +++ b/erigon-lib/state/history.go @@ -63,7 +63,7 @@ type History struct { // _visibleFiles - underscore in name means: don't use this field directly, use BeginFilesRo() // underlying array is immutable - means it's ready for zero-copy use - _visibleFiles []ctxItem + _visibleFiles []visibleFile indexList idxList @@ -119,7 +119,7 @@ func NewHistory(cfg histCfg, aggregationStep uint64, filenameBase, indexKeysTabl snapshotsDisabled: cfg.snapshotsDisabled, keepRecentTxnInDB: cfg.keepTxInDB, } - h._visibleFiles = []ctxItem{} + h._visibleFiles = []visibleFile{} var err error h.InvertedIndex, err = NewInvertedIndex(cfg.iiCfg, aggregationStep, filenameBase, indexKeysTable, indexTable, func(fromStep, toStep uint64) bool { exists, err := dir.FileExist(h.vFilePath(fromStep, toStep)) @@ -1155,7 +1155,7 @@ func (ht *HistoryRoTx) Close() { ht.iit.Close() } -func (ht *HistoryRoTx) getFileDeprecated(from, to uint64) (it ctxItem, ok bool) { +func (ht *HistoryRoTx) getFileDeprecated(from, to uint64) (it visibleFile, ok bool) { for i := 0; i < len(ht.files); i++ { if ht.files[i].startTxNum == from && ht.files[i].endTxNum == to { return ht.files[i], true @@ -1163,7 +1163,7 @@ func (ht *HistoryRoTx) getFileDeprecated(from, to uint64) (it ctxItem, ok bool) } return it, false } -func (ht *HistoryRoTx) getFile(txNum uint64) (it ctxItem, ok bool) { +func (ht *HistoryRoTx) getFile(txNum uint64) (it visibleFile, ok bool) { for i := 0; i < len(ht.files); i++ { if ht.files[i].startTxNum <= txNum && ht.files[i].endTxNum > txNum { return ht.files[i], true @@ -2050,9 +2050,9 @@ func (hi *HistoryChangesIterDB) Next() ([]byte, []byte, uint64, error) { type HistoryStep struct { compressVals bool indexItem *filesItem - indexFile ctxItem + indexFile visibleFile historyItem *filesItem - historyFile ctxItem + historyFile visibleFile } // MakeSteps [0, toTxNum) @@ -2067,7 +2067,7 @@ func (h *History) MakeSteps(toTxNum uint64) []*HistoryStep { step := &HistoryStep{ compressVals: h.compression&CompressVals != 0, indexItem: item, - indexFile: ctxItem{ + indexFile: visibleFile{ startTxNum: item.startTxNum, endTxNum: item.endTxNum, getter: item.decompressor.MakeGetter(), @@ -2085,7 +2085,7 @@ func (h *History) MakeSteps(toTxNum uint64) []*HistoryStep { continue } steps[i].historyItem = item - steps[i].historyFile = ctxItem{ + steps[i].historyFile = visibleFile{ startTxNum: item.startTxNum, endTxNum: item.endTxNum, getter: item.decompressor.MakeGetter(), @@ -2102,14 +2102,14 @@ func (hs *HistoryStep) Clone() *HistoryStep { return &HistoryStep{ compressVals: hs.compressVals, indexItem: hs.indexItem, - indexFile: ctxItem{ + indexFile: visibleFile{ startTxNum: hs.indexFile.startTxNum, endTxNum: hs.indexFile.endTxNum, getter: hs.indexItem.decompressor.MakeGetter(), reader: recsplit.NewIndexReader(hs.indexItem.index), }, historyItem: hs.historyItem, - historyFile: ctxItem{ + historyFile: visibleFile{ startTxNum: hs.historyFile.startTxNum, endTxNum: hs.historyFile.endTxNum, getter: hs.historyItem.decompressor.MakeGetter(), diff --git a/erigon-lib/state/inverted_index.go 
b/erigon-lib/state/inverted_index.go index d7df581c27b..0bb8bf64eaf 100644 --- a/erigon-lib/state/inverted_index.go +++ b/erigon-lib/state/inverted_index.go @@ -72,7 +72,7 @@ type InvertedIndex struct { // _visibleFiles - underscore in name means: don't use this field directly, use BeginFilesRo() // underlying array is immutable - means it's ready for zero-copy use - _visibleFiles []ctxItem + _visibleFiles []visibleFile indexKeysTable string // txnNum_u64 -> key (k+auto_increment) indexTable string // k -> txnNum_u64 , Needs to be table with DupSort @@ -116,7 +116,7 @@ func NewInvertedIndex(cfg iiCfg, aggregationStep uint64, filenameBase, indexKeys } ii.indexList = withHashMap - ii._visibleFiles = []ctxItem{} + ii._visibleFiles = []visibleFile{} return &ii, nil } @@ -572,6 +572,10 @@ func (iit *InvertedIndexRoTx) statelessIdxReader(i int) *recsplit.IndexReader { } func (iit *InvertedIndexRoTx) seekInFiles(key []byte, txNum uint64) (found bool, equalOrHigherTxNum uint64) { + if len(iit.files) == 0 { + return false, 0 + } + hi, lo := iit.hashKey(key) for i := 0; i < len(iit.files); i++ { @@ -908,7 +912,7 @@ func (iit *InvertedIndexRoTx) DebugEFAllValuesAreInRange(ctx context.Context, fa logEvery := time.NewTicker(30 * time.Second) defer logEvery.Stop() fromTxNum := fromStep * iit.ii.aggregationStep - iterStep := func(item ctxItem) error { + iterStep := func(item visibleFile) error { g := item.src.decompressor.MakeGetter() g.Reset(0) defer item.src.decompressor.EnableReadAhead().DisableReadAhead() @@ -976,7 +980,7 @@ type FrozenInvertedIdxIter struct { efIt stream.Uno[uint64] indexTable string - stack []ctxItem + stack []visibleFile nextN uint64 hasNext bool diff --git a/erigon-lib/state/merge.go b/erigon-lib/state/merge.go index dc15f5c93ef..b789c3cd117 100644 --- a/erigon-lib/state/merge.go +++ b/erigon-lib/state/merge.go @@ -1189,7 +1189,7 @@ func (iit *InvertedIndexRoTx) garbage(merged *filesItem) (outs []*filesItem) { return garbage(iit.ii.dirtyFiles, iit.files, merged) } -func garbage(dirtyFiles *btree.BTreeG[*filesItem], visibleFiles []ctxItem, merged *filesItem) (outs []*filesItem) { +func garbage(dirtyFiles *btree.BTreeG[*filesItem], visibleFiles []visibleFile, merged *filesItem) (outs []*filesItem) { if merged == nil { return } @@ -1212,7 +1212,7 @@ func garbage(dirtyFiles *btree.BTreeG[*filesItem], visibleFiles []ctxItem, merge }) return outs } -func hasCoverVisibleFile(visibleFiles []ctxItem, item *filesItem) bool { +func hasCoverVisibleFile(visibleFiles []visibleFile, item *filesItem) bool { for _, f := range visibleFiles { if item.isSubsetOf(f.src) { return true diff --git a/erigon-lib/state/metrics.go b/erigon-lib/state/metrics.go index 248e523edf0..91e1869a106 100644 --- a/erigon-lib/state/metrics.go +++ b/erigon-lib/state/metrics.go @@ -16,7 +16,10 @@ package state -import "github.com/erigontech/erigon-lib/metrics" +import ( + "github.com/erigontech/erigon-lib/kv" + "github.com/erigontech/erigon-lib/metrics" +) var ( //LatestStateReadWarm = metrics.GetOrCreateSummary(`latest_state_read{type="warm",found="yes"}`) //nolint @@ -57,3 +60,40 @@ var ( mxCommitmentRunning = metrics.GetOrCreateGauge("domain_running_commitment") mxCommitmentTook = metrics.GetOrCreateSummary("domain_commitment_took") ) + +var ( + mxsKVGet = [kv.DomainLen][]metrics.Summary{ + kv.AccountsDomain: { + metrics.GetOrCreateSummary(`kv_get{level="L0",domain="account"}`), + metrics.GetOrCreateSummary(`kv_get{level="L1",domain="account"}`), + metrics.GetOrCreateSummary(`kv_get{level="L2",domain="account"}`), + 
metrics.GetOrCreateSummary(`kv_get{level="L3",domain="account"}`), + metrics.GetOrCreateSummary(`kv_get{level="L4",domain="account"}`), + metrics.GetOrCreateSummary(`kv_get{level="recent",domain="account"}`), + }, + kv.StorageDomain: { + metrics.GetOrCreateSummary(`kv_get{level="L0",domain="storage"}`), + metrics.GetOrCreateSummary(`kv_get{level="L1",domain="storage"}`), + metrics.GetOrCreateSummary(`kv_get{level="L2",domain="storage"}`), + metrics.GetOrCreateSummary(`kv_get{level="L3",domain="storage"}`), + metrics.GetOrCreateSummary(`kv_get{level="L4",domain="storage"}`), + metrics.GetOrCreateSummary(`kv_get{level="recent",domain="storage"}`), + }, + kv.CodeDomain: { + metrics.GetOrCreateSummary(`kv_get{level="L0",domain="code"}`), + metrics.GetOrCreateSummary(`kv_get{level="L1",domain="code"}`), + metrics.GetOrCreateSummary(`kv_get{level="L2",domain="code"}`), + metrics.GetOrCreateSummary(`kv_get{level="L3",domain="code"}`), + metrics.GetOrCreateSummary(`kv_get{level="L4",domain="code"}`), + metrics.GetOrCreateSummary(`kv_get{level="recent",domain="code"}`), + }, + kv.CommitmentDomain: { + metrics.GetOrCreateSummary(`kv_get{level="L0",domain="commitment"}`), + metrics.GetOrCreateSummary(`kv_get{level="L1",domain="commitment"}`), + metrics.GetOrCreateSummary(`kv_get{level="L2",domain="commitment"}`), + metrics.GetOrCreateSummary(`kv_get{level="L3",domain="commitment"}`), + metrics.GetOrCreateSummary(`kv_get{level="L4",domain="commitment"}`), + metrics.GetOrCreateSummary(`kv_get{level="recent",domain="commitment"}`), + }, + } +) diff --git a/erigon-lib/txpool/pool.go b/erigon-lib/txpool/pool.go index 7ac81600223..39b063885e5 100644 --- a/erigon-lib/txpool/pool.go +++ b/erigon-lib/txpool/pool.go @@ -468,12 +468,12 @@ func (p *TxPool) OnNewBlock(ctx context.Context, stateChanges *remote.StateChang if assert.Enable { for _, txn := range unwindTxs.Txs { if txn.SenderID == 0 { - panic(fmt.Errorf("onNewBlock.unwindTxs: senderID can't be zero")) + panic("onNewBlock.unwindTxs: senderID can't be zero") } } for _, txn := range minedTxs.Txs { if txn.SenderID == 0 { - panic(fmt.Errorf("onNewBlock.minedTxs: senderID can't be zero")) + panic("onNewBlock.minedTxs: senderID can't be zero") } } } @@ -515,7 +515,7 @@ func (p *TxPool) OnNewBlock(ctx context.Context, stateChanges *remote.StateChang func (p *TxPool) processRemoteTxs(ctx context.Context) error { if !p.Started() { - return fmt.Errorf("txpool not started yet") + return errors.New("txpool not started yet") } defer processBatchTxsTimer.ObserveDuration(time.Now()) @@ -1235,7 +1235,7 @@ func (p *TxPool) addTxs(blockNum uint64, cacheView kvcache.CacheView, senders *s if assert.Enable { for _, txn := range newTxs.Txs { if txn.SenderID == 0 { - panic(fmt.Errorf("senderID can't be zero")) + panic("senderID can't be zero") } } } @@ -1293,7 +1293,7 @@ func (p *TxPool) addTxsOnNewBlock(blockNum uint64, cacheView kvcache.CacheView, if assert.Enable { for _, txn := range newTxs.Txs { if txn.SenderID == 0 { - panic(fmt.Errorf("senderID can't be zero")) + panic("senderID can't be zero") } } } diff --git a/erigon-lib/txpool/txpool_grpc_server.go b/erigon-lib/txpool/txpool_grpc_server.go index 7d4bfebbe8b..85f90ef0d43 100644 --- a/erigon-lib/txpool/txpool_grpc_server.go +++ b/erigon-lib/txpool/txpool_grpc_server.go @@ -65,7 +65,7 @@ type txPool interface { var _ txpool_proto.TxpoolServer = (*GrpcServer)(nil) // compile-time interface check var _ txpool_proto.TxpoolServer = (*GrpcDisabled)(nil) // compile-time interface check -var ErrPoolDisabled = 
fmt.Errorf("TxPool Disabled") +var ErrPoolDisabled = errors.New("TxPool Disabled") type GrpcDisabled struct { txpool_proto.UnimplementedTxpoolServer diff --git a/erigon-lib/txpool/txpoolcfg/txpoolcfg.go b/erigon-lib/txpool/txpoolcfg/txpoolcfg.go index 5cb562fd390..3c746a006a1 100644 --- a/erigon-lib/txpool/txpoolcfg/txpoolcfg.go +++ b/erigon-lib/txpool/txpoolcfg/txpoolcfg.go @@ -57,6 +57,7 @@ type Config struct { MdbxPageSize datasize.ByteSize MdbxDBSizeLimit datasize.ByteSize MdbxGrowthStep datasize.ByteSize + MdbxWriteMap bool NoGossip bool // this mode doesn't broadcast any txs, and if receive remote-txn - skip it } @@ -78,7 +79,8 @@ var DefaultConfig = Config{ PriceBump: 10, // Price bump percentage to replace an already existing transaction BlobPriceBump: 100, - NoGossip: false, + NoGossip: false, + MdbxWriteMap: false, } type DiscardReason uint8 diff --git a/erigon-lib/txpool/txpoolutil/all_components.go b/erigon-lib/txpool/txpoolutil/all_components.go index b2884a6f98c..22bfddb43a8 100644 --- a/erigon-lib/txpool/txpoolutil/all_components.go +++ b/erigon-lib/txpool/txpoolutil/all_components.go @@ -18,7 +18,7 @@ package txpoolutil import ( "context" - "fmt" + "errors" "math/big" "time" @@ -52,7 +52,7 @@ func SaveChainConfigIfNeed(ctx context.Context, coreDB kv.RoDB, txPoolDB kv.RwDB } if cc != nil && !force { if cc.ChainID.Uint64() == 0 { - return nil, 0, fmt.Errorf("wrong chain config") + return nil, 0, errors.New("wrong chain config") } return cc, blockNum, nil } @@ -95,7 +95,7 @@ func SaveChainConfigIfNeed(ctx context.Context, coreDB kv.RoDB, txPoolDB kv.RwDB return nil, 0, err } if cc.ChainID.Uint64() == 0 { - return nil, 0, fmt.Errorf("wrong chain config") + return nil, 0, errors.New("wrong chain config") } return cc, blockNum, nil } @@ -108,7 +108,8 @@ func AllComponents(ctx context.Context, cfg txpoolcfg.Config, cache kvcache.Cach PageSize(uint64(16 * datasize.KB)). GrowthStep(16 * datasize.MB). DirtySpace(uint64(128 * datasize.MB)). - MapSize(1 * datasize.TB) + MapSize(1 * datasize.TB). 
+ WriteMap(cfg.MdbxWriteMap) if cfg.MdbxPageSize.Bytes() > 0 { opts = opts.PageSize(cfg.MdbxPageSize.Bytes()) diff --git a/eth/backend.go b/eth/backend.go index 3a2ea693aa3..eeca6893b66 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -24,8 +24,10 @@ import ( "context" "errors" "fmt" + "github.com/erigontech/erigon/cl/phase1/core/checkpoint_sync" "github.com/erigontech/erigon/consensus/parlia" parliafinality "github.com/erigontech/erigon/consensus/parlia/finality" + "io/fs" "math" "math/big" "net" @@ -38,7 +40,7 @@ import ( "sync/atomic" "time" - "github.com/karrick/godirwalk" + "github.com/erigontech/erigon-lib/common/dir" "github.com/erigontech/mdbx-go/mdbx" lru "github.com/hashicorp/golang-lru/arc/v2" @@ -84,7 +86,6 @@ import ( "github.com/erigontech/erigon-lib/wrap" "github.com/erigontech/erigon/cl/clparams" "github.com/erigontech/erigon/cl/persistence/format/snapshot_format/getters" - clcore "github.com/erigontech/erigon/cl/phase1/core" executionclient "github.com/erigontech/erigon/cl/phase1/execution_client" "github.com/erigontech/erigon/cl/utils/eth_clock" "github.com/erigontech/erigon/cmd/caplin/caplin1" @@ -137,7 +138,6 @@ import ( "github.com/erigontech/erigon/turbo/shards" "github.com/erigontech/erigon/turbo/silkworm" "github.com/erigontech/erigon/turbo/snapshotsync/freezeblocks" - "github.com/erigontech/erigon/turbo/snapshotsync/snap" stages2 "github.com/erigontech/erigon/turbo/stages" "github.com/erigontech/erigon/turbo/stages/headerdownload" ) @@ -221,6 +221,7 @@ type Ethereum struct { silkwormSentryService *silkworm.SentryService polygonSyncService polygonsync.Service + polygonBridge bridge.PolygonBridge stopNode func() error } @@ -239,8 +240,7 @@ const blockBufferSize = 128 // New creates a new Ethereum object (including the // initialisation of the common Ethereum object) func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethereum, error) { - config.Snapshot.Enabled = config.Sync.UseSnapshots - if config.Miner.GasPrice == nil || config.Miner.GasPrice.Cmp(libcommon.Big0) <= 0 { + if config.Miner.GasPrice == nil || config.Miner.GasPrice.Sign() <= 0 { logger.Warn("Sanitizing invalid miner gas price", "provided", config.Miner.GasPrice, "updated", ethconfig.Defaults.Miner.GasPrice) config.Miner.GasPrice = new(big.Int).Set(ethconfig.Defaults.Miner.GasPrice) } @@ -316,7 +316,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger genesisSpec = nil } var genesisErr error - chainConfig, genesis, genesisErr = core.WriteGenesisBlock(tx, genesisSpec, config.OverridePragueTime, tmpdir, logger) + chainConfig, genesis, genesisErr = core.WriteGenesisBlock(tx, genesisSpec, config.OverridePragueTime, dirs, logger) if _, ok := genesisErr.(*chain.ConfigCompatError); genesisErr != nil && !ok { return genesisErr } @@ -332,21 +332,6 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger setBorDefaultMinerGasPrice(chainConfig, config, logger) setBorDefaultTxPoolPriceLimit(chainConfig, config.TxPool, logger) - if err := chainKv.Update(context.Background(), func(tx kv.RwTx) error { - isCorrectSync, useSnapshots, err := snap.EnsureNotChanged(tx, config.Snapshot) - if err != nil { - return err - } - // if we are in the incorrect syncmode then we change it to the appropriate one - if !isCorrectSync { - config.Sync.UseSnapshots = useSnapshots - config.Snapshot.Enabled = ethconfig.UseSnapshotsByChainName(chainConfig.ChainName) && useSnapshots - } - return nil - }); err != nil { - return nil, err - } 
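One small but characteristic cleanup in the eth/backend.go hunk above replaces config.Miner.GasPrice.Cmp(libcommon.Big0) <= 0 with config.Miner.GasPrice.Sign() <= 0. For a comparison against zero the two are interchangeable, and Sign only inspects the big.Int's stored sign instead of running a general comparison. A quick stdlib-only check of that equivalence:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	zero := big.NewInt(0)
	for _, v := range []int64{-5, 0, 7} {
		x := big.NewInt(v)
		// Sign returns -1, 0, or +1, so "non-positive" agrees with Cmp against zero.
		fmt.Printf("v=%d sign=%v cmp=%v\n", v, x.Sign() <= 0, x.Cmp(zero) <= 0)
	}
}
```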
- logger.Info("Initialised chain configuration", "config", chainConfig, "genesis", genesis.Hash()) if dbg.OnlyCreateDB { logger.Info("done") @@ -563,6 +548,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger var heimdallClient heimdall.HeimdallClient var polygonBridge bridge.Service + var heimdallService heimdall.Service if chainConfig.Bor != nil { if !config.WithoutHeimdall { @@ -571,12 +557,15 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger if config.PolygonSync { polygonBridge = bridge.Assemble(config.Dirs.DataDir, logger, consensusConfig.(*borcfg.BorConfig), heimdallClient.FetchStateSyncEvents, bor.GenesisContractStateReceiverABI()) + heimdallService = heimdall.AssembleService(consensusConfig.(*borcfg.BorConfig), config.HeimdallURL, dirs.DataDir, tmpdir, logger) + + backend.polygonBridge = polygonBridge } flags.Milestone = config.WithHeimdallMilestones } - backend.engine = ethconsensusconfig.CreateConsensusEngine(ctx, stack.Config(), chainConfig, consensusConfig, config.Miner.Notify, config.Miner.Noverify, heimdallClient, config.WithoutHeimdall, blockReader, false /* readonly */, logger, polygonBridge) + backend.engine = ethconsensusconfig.CreateConsensusEngine(ctx, stack.Config(), chainConfig, consensusConfig, config.Miner.Notify, config.Miner.Noverify, heimdallClient, config.WithoutHeimdall, blockReader, false /* readonly */, logger, polygonBridge, heimdallService) inMemoryExecution := func(txc wrap.TxContainer, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody, notifications *shards.Notifications) error { @@ -693,13 +682,12 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger blobStore = parlia.BlobStore } - loopBreakCheck := stages2.NewLoopBreakCheck(config, nil) // proof-of-work mining mining := stagedsync.New( config.Sync, stagedsync.MiningStages(backend.sentryCtx, stagedsync.StageMiningCreateBlockCfg(backend.chainDB, miner, *backend.chainConfig, backend.engine, backend.txPoolDB, nil, tmpdir, backend.blockReader), - stagedsync.StageBorHeimdallCfg(backend.chainDB, snapDb, miner, *backend.chainConfig, heimdallClient, backend.blockReader, nil, nil, nil, recents, signatures, false, nil), stagedsync.StageExecuteBlocksCfg( + stagedsync.StageBorHeimdallCfg(backend.chainDB, snapDb, miner, *backend.chainConfig, heimdallClient, backend.blockReader, nil, nil, recents, signatures, false, nil), stagedsync.StageExecuteBlocksCfg( backend.chainDB, config.Prune, config.BatchSize, @@ -714,10 +702,9 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger backend.sentriesClient.Hd, config.Genesis, config.Sync, - agg, stages2.SilkwormForExecutionStage(backend.silkworm, config), ), - stagedsync.StageSendersCfg(backend.chainDB, chainConfig, config.Sync, false, dirs.Tmp, config.Prune, blockReader, backend.sentriesClient.Hd, loopBreakCheck), + stagedsync.StageSendersCfg(backend.chainDB, chainConfig, config.Sync, false, dirs.Tmp, config.Prune, blockReader, backend.sentriesClient.Hd), stagedsync.StageMiningExecCfg(backend.chainDB, miner, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, nil, 0, backend.txPool, backend.txPoolDB, blockReader), stagedsync.StageMiningFinishCfg(backend.chainDB, *backend.chainConfig, backend.engine, miner, backend.miningSealingQuit, backend.blockReader, latestBlockBuiltStore), ), stagedsync.MiningUnwindOrder, stagedsync.MiningPruneOrder, @@ 
-728,6 +715,11 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger ethashApi = casted.APIs(nil)[1].Service.(*ethash.API) } + // setup snapcfg + if err := loadSnapshotsEitherFromDiskIfNeeded(dirs, chainConfig.ChainName); err != nil { + return nil, err + } + // proof-of-stake mining assembleBlockPOS := func(param *core.BlockBuilderParameters, interrupt *int32) (*types.BlockWithReceipts, error) { miningStatePos := stagedsync.NewProposingState(&config.Miner) @@ -736,7 +728,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger config.Sync, stagedsync.MiningStages(backend.sentryCtx, stagedsync.StageMiningCreateBlockCfg(backend.chainDB, miningStatePos, *backend.chainConfig, backend.engine, backend.txPoolDB, param, tmpdir, backend.blockReader), - stagedsync.StageBorHeimdallCfg(backend.chainDB, snapDb, miningStatePos, *backend.chainConfig, heimdallClient, backend.blockReader, nil, nil, nil, recents, signatures, false, nil), + stagedsync.StageBorHeimdallCfg(backend.chainDB, snapDb, miningStatePos, *backend.chainConfig, heimdallClient, backend.blockReader, nil, nil, recents, signatures, false, nil), stagedsync.StageExecuteBlocksCfg( backend.chainDB, config.Prune, @@ -752,10 +744,9 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger backend.sentriesClient.Hd, config.Genesis, config.Sync, - agg, stages2.SilkwormForExecutionStage(backend.silkworm, config), ), - stagedsync.StageSendersCfg(backend.chainDB, chainConfig, config.Sync, false, dirs.Tmp, config.Prune, blockReader, backend.sentriesClient.Hd, loopBreakCheck), + stagedsync.StageSendersCfg(backend.chainDB, chainConfig, config.Sync, false, dirs.Tmp, config.Prune, blockReader, backend.sentriesClient.Hd), stagedsync.StageMiningExecCfg(backend.chainDB, miningStatePos, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, interrupt, param.PayloadId, backend.txPool, backend.txPoolDB, blockReader), stagedsync.StageMiningFinishCfg(backend.chainDB, *backend.chainConfig, backend.engine, miningStatePos, backend.miningSealingQuit, backend.blockReader, latestBlockBuiltStore)), stagedsync.MiningUnwindOrder, stagedsync.MiningPruneOrder, logger) // We start the mining step @@ -912,7 +903,8 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger hook := stages2.NewHook(backend.sentryCtx, backend.chainDB, backend.notifications, backend.stagedSync, backend.blockReader, backend.chainConfig, backend.logger, backend.sentriesClient.SetStatus) - if !config.Sync.UseSnapshots && backend.downloaderClient != nil { + useSnapshots := blockReader != nil && (blockReader.FreezingCfg().ProduceE2 || blockReader.FreezingCfg().ProduceE3) + if !useSnapshots && backend.downloaderClient != nil { for _, p := range blockReader.AllTypes() { backend.downloaderClient.ProhibitNewDownloads(ctx, &protodownloader.ProhibitNewDownloadsRequest{ Type: p.Name(), @@ -966,7 +958,9 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger if err != nil { return nil, err } - state, err := clcore.RetrieveBeaconState(ctx, beaconCfg, clparams.NetworkType(config.NetworkID)) + + config.CaplinConfig.NetworkId = clparams.NetworkType(config.NetworkID) + state, err := checkpoint_sync.ReadOrFetchLatestBeaconState(ctx, dirs, beaconCfg, config.CaplinConfig) if err != nil { return nil, err } @@ -995,8 +989,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger log.Info("Starting caplin") eth1Getter := 
getters.NewExecutionSnapshotReader(ctx, beaconCfg, blockReader, backend.chainDB) if err := caplin1.RunCaplinPhase1(ctx, executionEngine, config, networkCfg, beaconCfg, ethClock, - state, dirs, eth1Getter, backend.downloaderClient, config.CaplinConfig.Backfilling, - config.CaplinConfig.BlobBackfilling, config.CaplinConfig.Archive, indiciesDB, blobStorage, creds, + state, dirs, eth1Getter, backend.downloaderClient, indiciesDB, blobStorage, creds, blockSnapBuildSema, caplinOpt...); err != nil { logger.Error("could not start caplin", "err", err) } @@ -1008,15 +1001,13 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger backend.polygonSyncService = polygonsync.NewService( logger, chainConfig, - dirs.DataDir, - tmpdir, polygonSyncSentry(sentries), p2pConfig.MaxPeers, statusDataProvider, - config.HeimdallURL, executionRpc, config.LoopBlockLimit, polygonBridge, + heimdallService, ) } @@ -1040,7 +1031,7 @@ func (s *Ethereum) Init(stack *node.Node, config *ethconfig.Config, chainConfig badBlockHeader, hErr := rawdb.ReadHeaderByHash(tx, config.BadBlockHash) if badBlockHeader != nil { unwindPoint := badBlockHeader.Number.Uint64() - 1 - if err := s.stagedSync.UnwindTo(unwindPoint, stagedsync.BadBlock(config.BadBlockHash, fmt.Errorf("Init unwind")), tx); err != nil { + if err := s.stagedSync.UnwindTo(unwindPoint, stagedsync.BadBlock(config.BadBlockHash, errors.New("Init unwind")), tx); err != nil { return err } } @@ -1072,7 +1063,7 @@ func (s *Ethereum) Init(stack *node.Node, config *ethconfig.Config, chainConfig } } - s.apiList = jsonrpc.APIList(chainKv, ethRpcClient, txPoolRpcClient, miningRpcClient, ff, stateCache, blockReader, &httpRpcCfg, s.engine, s.logger) + s.apiList = jsonrpc.APIList(chainKv, ethRpcClient, txPoolRpcClient, miningRpcClient, ff, stateCache, blockReader, &httpRpcCfg, s.engine, s.logger, s.polygonBridge) if config.SilkwormRpcDaemon && httpRpcCfg.Enabled { interface_log_settings := silkworm.RpcInterfaceLogSettings{ @@ -1126,7 +1117,7 @@ func (s *Ethereum) Etherbase() (eb libcommon.Address, err error) { if etherbase != (libcommon.Address{}) { return etherbase, nil } - return libcommon.Address{}, fmt.Errorf("etherbase must be explicitly specified") + return libcommon.Address{}, errors.New("etherbase must be explicitly specified") } // isLocalBlock checks whether the specified block is mined @@ -1379,6 +1370,26 @@ func (s *Ethereum) StartMining(ctx context.Context, db kv.RwDB, stateDiffClient return nil } +// loadSnapshotsEitherFromDiskIfNeeded loads the snapshot list from preverified.toml on disk if that file exists; otherwise it writes the chain's embedded defaults to that file. 
+func loadSnapshotsEitherFromDiskIfNeeded(dirs datadir.Dirs, chainName string) error { + preverifiedToml := filepath.Join(dirs.Snap, "preverified.toml") + + exists, err := dir.FileExist(preverifiedToml) + if err != nil { + return err + } + if exists { + // Read the preverified.toml and load the snapshots + haveToml, err := os.ReadFile(preverifiedToml) + if err != nil { + return err + } + snapcfg.SetToml(chainName, haveToml) + return nil + } + return dir.WriteFileWithFsync(preverifiedToml, snapcfg.GetToml(chainName), 0644) +} + func (s *Ethereum) IsMining() bool { return s.config.Miner.Enabled } func (s *Ethereum) ChainKV() kv.RwDB { return s.chainDB } @@ -1716,31 +1727,26 @@ func (s *Ethereum) ExecutionModule() *eth1.EthereumExecutionModule { // RemoveContents is like os.RemoveAll, but preserve dir itself func RemoveContents(dirname string) error { - err := godirwalk.Walk(dirname, &godirwalk.Options{ - ErrorCallback: func(s string, err error) godirwalk.ErrorAction { - if os.IsNotExist(err) { - return godirwalk.SkipNode - } - return godirwalk.Halt - }, - FollowSymbolicLinks: true, - Unsorted: true, - Callback: func(osPathname string, d *godirwalk.Dirent) error { - if osPathname == dirname { - return nil - } - if d.IsSymlink() { - return nil - } - return os.RemoveAll(filepath.Join(dirname, d.Name())) - }, - PostChildrenCallback: nil, - ScratchBuffer: nil, - AllowNonDirectory: false, - }) + d, err := os.Open(dirname) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + // ignore due to windows + _ = os.MkdirAll(dirname, 0o755) + return nil + } + return err + } + defer d.Close() + files, err := dir.ReadDir(dirname) if err != nil { return err } + for _, file := range files { + err = os.RemoveAll(filepath.Join(dirname, file.Name())) + if err != nil { + return err + } + } return nil } diff --git a/eth/backend_test.go b/eth/backend_test.go new file mode 100644 index 00000000000..77ba895e6b1 --- /dev/null +++ b/eth/backend_test.go @@ -0,0 +1,52 @@ +package eth + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestRemoveContents(t *testing.T) { + tmpDirName := t.TempDir() + //t.Logf("creating %s/root...", rootName) + rootName := filepath.Join(tmpDirName, "root") + err := os.Mkdir(rootName, 0750) + require.NoError(t, err) + //fmt.Println("OK") + for i := 0; i < 3; i++ { + outerName := filepath.Join(rootName, fmt.Sprintf("outer_%d", i+1)) + //t.Logf("creating %s... ", outerName) + err = os.Mkdir(outerName, 0750) + require.NoError(t, err) + //t.Logf("OK") + for j := 0; j < 2; j++ { + innerName := filepath.Join(outerName, fmt.Sprintf("inner_%d", j+1)) + //t.Logf("creating %s... ", innerName) + err = os.Mkdir(innerName, 0750) + require.NoError(t, err) + //t.Log("OK") + for k := 0; k < 2; k++ { + innestName := filepath.Join(innerName, fmt.Sprintf("innest_%d", k+1)) + //t.Logf("creating %s... 
", innestName) + err = os.Mkdir(innestName, 0750) + require.NoError(t, err) + //t.Log("OK") + } + } + } + list, err := os.ReadDir(rootName) + require.NoError(t, err) + + require.Len(t, list, 3) + + err = RemoveContents(rootName) + require.NoError(t, err) + + list, err = os.ReadDir(rootName) + require.NoError(t, err) + + require.Len(t, list, 0) +} diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index cfb2a2b3d1a..7f13f0785ca 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -21,7 +21,6 @@ package ethconfig import ( - "github.com/erigontech/erigon-lib/chain/networkname" "math/big" "os" "os/user" @@ -75,7 +74,6 @@ var LightClientGPO = gaspricecfg.Config{ // Defaults contains default settings for use on the Ethereum main net. var Defaults = Config{ Sync: Sync{ - UseSnapshots: true, ExecWorkerCount: estimate.ReconstituteState.WorkersHalf(), //only half of CPU, other half will spend for snapshots build/merge/prune ReconWorkerCount: estimate.ReconstituteState.Workers(), BodyCacheLimit: 256 * 1024 * 1024, @@ -105,7 +103,6 @@ var Defaults = Config{ ImportMode: false, Snapshot: BlocksFreezing{ - Enabled: true, KeepBlocks: false, ProduceE2: true, ProduceE3: true, @@ -139,7 +136,6 @@ func init() { //go:generate gencodec -dir . -type Config -formats toml -out gen_config.go type BlocksFreezing struct { - Enabled bool KeepBlocks bool // produce new snapshots of blocks but don't remove blocks from DB ProduceE2 bool // produce new block files ProduceE3 bool // produce new state files @@ -150,9 +146,6 @@ type BlocksFreezing struct { func (s BlocksFreezing) String() string { var out []string - if s.Enabled { - out = append(out, "--snapshots=true") - } if s.KeepBlocks { out = append(out, "--"+FlagSnapKeepBlocks+"=true") } @@ -168,8 +161,8 @@ var ( FlagSnapStateStop = "snap.state.stop" ) -func NewSnapCfg(enabled, keepBlocks, produceE2, produceE3 bool) BlocksFreezing { - return BlocksFreezing{Enabled: enabled, KeepBlocks: keepBlocks, ProduceE2: produceE2, ProduceE3: produceE3} +func NewSnapCfg(keepBlocks, produceE2, produceE3 bool) BlocksFreezing { + return BlocksFreezing{KeepBlocks: keepBlocks, ProduceE2: produceE2, ProduceE3: produceE3} } // Config contains configuration options for ETH protocol. 
@@ -276,8 +269,6 @@ type Config struct { } type Sync struct { - UseSnapshots bool - // LoopThrottle sets a minimum time between staged loop iterations LoopThrottle time.Duration ExecWorkerCount int @@ -293,17 +284,3 @@ type Sync struct { UploadFrom rpc.BlockNumber FrozenBlockLimit uint64 } - -// Chains where snapshots are enabled by default -var ChainsWithSnapshots = map[string]struct{}{ - networkname.MainnetChainName: {}, - networkname.SepoliaChainName: {}, - networkname.MumbaiChainName: {}, - networkname.AmoyChainName: {}, - networkname.BSCChainName: {}, - networkname.BorMainnetChainName: {}, - networkname.GnosisChainName: {}, - networkname.ChiadoChainName: {}, -} - -func UseSnapshotsByChainName(chain string) bool { return true } diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go index 2c57d177fa0..ddbdba296de 100644 --- a/eth/ethconfig/gen_config.go +++ b/eth/ethconfig/gen_config.go @@ -3,15 +3,19 @@ package ethconfig import ( - "time" + "math/big" "github.com/c2h5oh/datasize" "github.com/erigontech/erigon-lib/chain" - libcommon "github.com/erigontech/erigon-lib/common" + "github.com/erigontech/erigon-lib/common" + "github.com/erigontech/erigon-lib/common/datadir" + "github.com/erigontech/erigon-lib/downloader/downloadercfg" + "github.com/erigontech/erigon-lib/txpool/txpoolcfg" + "github.com/erigontech/erigon/cl/beacon/beacon_router_configuration" + "github.com/erigontech/erigon/cl/clparams" "github.com/erigontech/erigon/consensus/ethash/ethashcfg" "github.com/erigontech/erigon/core/types" "github.com/erigontech/erigon/eth/gasprice/gaspricecfg" - "github.com/erigontech/erigon/ethdb/prune" "github.com/erigontech/erigon/params" ) @@ -25,23 +29,51 @@ func (c Config) MarshalTOML() (interface{}, error) { Prune prune.Mode BatchSize datasize.ByteSize ImportMode bool - BadBlockHash libcommon.Hash + BadBlockHash common.Hash Snapshot BlocksFreezing - BlockDownloaderWindow int + Downloader *downloadercfg.Cfg + BeaconRouter beacon_router_configuration.RouterConfiguration + CaplinConfig clparams.CaplinConfig + Dirs datadir.Dirs ExternalSnapshotDownloaderAddr string - Whitelist map[uint64]libcommon.Hash `toml:"-"` + Whitelist map[uint64]common.Hash `toml:"-"` Miner params.MiningConfig Ethash ethashcfg.Config Clique params.ConsensusSnapshotConfig Aura chain.AuRaConfig - Parlia chain.ParliaConfig - TxPool DeprecatedTxPoolConfig + DeprecatedTxPool DeprecatedTxPoolConfig + TxPool txpoolcfg.Config GPO gaspricecfg.Config RPCGasCap uint64 `toml:",omitempty"` RPCTxFeeCap float64 `toml:",omitempty"` StateStream bool - BodyDownloadTimeoutSeconds int - SyncLoopThrottle time.Duration + HeimdallURL string + WithoutHeimdall bool + WithHeimdallMilestones bool + WithHeimdallWaypointRecording bool + PolygonSync bool + PolygonSyncStage bool + Ethstats string + InternalCL bool + CaplinDiscoveryAddr string + CaplinDiscoveryPort uint64 + CaplinDiscoveryTCPPort uint64 + SentinelAddr string + SentinelPort uint64 + OverridePragueTime *big.Int `toml:",omitempty"` + SilkwormExecution bool + SilkwormRpcDaemon bool + SilkwormSentry bool + SilkwormVerbosity string + SilkwormNumContexts uint32 + SilkwormRpcLogEnabled bool + SilkwormRpcLogDirPath string + SilkwormRpcLogMaxFileSize uint16 + SilkwormRpcLogMaxFiles uint16 + SilkwormRpcLogDumpResponse bool + SilkwormRpcNumWorkers uint32 + SilkwormRpcJsonCompatibility bool + DisableTxPoolGossip bool } var enc Config enc.Genesis = c.Genesis @@ -52,18 +84,49 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.ImportMode = c.ImportMode enc.BadBlockHash = 
c.BadBlockHash enc.Snapshot = c.Snapshot + enc.Downloader = c.Downloader + enc.BeaconRouter = c.BeaconRouter + enc.CaplinConfig = c.CaplinConfig + enc.Dirs = c.Dirs enc.ExternalSnapshotDownloaderAddr = c.ExternalSnapshotDownloaderAddr enc.Whitelist = c.Whitelist enc.Miner = c.Miner enc.Ethash = c.Ethash enc.Clique = c.Clique enc.Aura = c.Aura - enc.Parlia = c.Parlia - enc.TxPool = c.DeprecatedTxPool + enc.DeprecatedTxPool = c.DeprecatedTxPool + enc.TxPool = c.TxPool enc.GPO = c.GPO enc.RPCGasCap = c.RPCGasCap enc.RPCTxFeeCap = c.RPCTxFeeCap enc.StateStream = c.StateStream + enc.HeimdallURL = c.HeimdallURL + enc.WithoutHeimdall = c.WithoutHeimdall + enc.WithHeimdallMilestones = c.WithHeimdallMilestones + enc.WithHeimdallWaypointRecording = c.WithHeimdallWaypointRecording + enc.PolygonSync = c.PolygonSync + enc.PolygonSyncStage = c.PolygonSyncStage + enc.Ethstats = c.Ethstats + enc.InternalCL = c.InternalCL + enc.CaplinDiscoveryAddr = c.CaplinDiscoveryAddr + enc.CaplinDiscoveryPort = c.CaplinDiscoveryPort + enc.CaplinDiscoveryTCPPort = c.CaplinDiscoveryTCPPort + enc.SentinelAddr = c.SentinelAddr + enc.SentinelPort = c.SentinelPort + enc.OverridePragueTime = c.OverridePragueTime + enc.SilkwormExecution = c.SilkwormExecution + enc.SilkwormRpcDaemon = c.SilkwormRpcDaemon + enc.SilkwormSentry = c.SilkwormSentry + enc.SilkwormVerbosity = c.SilkwormVerbosity + enc.SilkwormNumContexts = c.SilkwormNumContexts + enc.SilkwormRpcLogEnabled = c.SilkwormRpcLogEnabled + enc.SilkwormRpcLogDirPath = c.SilkwormRpcLogDirPath + enc.SilkwormRpcLogMaxFileSize = c.SilkwormRpcLogMaxFileSize + enc.SilkwormRpcLogMaxFiles = c.SilkwormRpcLogMaxFiles + enc.SilkwormRpcLogDumpResponse = c.SilkwormRpcLogDumpResponse + enc.SilkwormRpcNumWorkers = c.SilkwormRpcNumWorkers + enc.SilkwormRpcJsonCompatibility = c.SilkwormRpcJsonCompatibility + enc.DisableTxPoolGossip = c.DisableTxPoolGossip return &enc, nil } @@ -76,23 +139,51 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { Prune *prune.Mode BatchSize *datasize.ByteSize ImportMode *bool - BadBlockHash *libcommon.Hash + BadBlockHash *common.Hash Snapshot *BlocksFreezing - BlockDownloaderWindow *int + Downloader *downloadercfg.Cfg + BeaconRouter *beacon_router_configuration.RouterConfiguration + CaplinConfig *clparams.CaplinConfig + Dirs *datadir.Dirs ExternalSnapshotDownloaderAddr *string - Whitelist map[uint64]libcommon.Hash `toml:"-"` + Whitelist map[uint64]common.Hash `toml:"-"` Miner *params.MiningConfig Ethash *ethashcfg.Config Clique *params.ConsensusSnapshotConfig Aura *chain.AuRaConfig - Parlia *chain.ParliaConfig - TxPool *DeprecatedTxPoolConfig + DeprecatedTxPool *DeprecatedTxPoolConfig + TxPool *txpoolcfg.Config GPO *gaspricecfg.Config RPCGasCap *uint64 `toml:",omitempty"` RPCTxFeeCap *float64 `toml:",omitempty"` StateStream *bool - BodyDownloadTimeoutSeconds *int - SyncLoopThrottle *time.Duration + HeimdallURL *string + WithoutHeimdall *bool + WithHeimdallMilestones *bool + WithHeimdallWaypointRecording *bool + PolygonSync *bool + PolygonSyncStage *bool + Ethstats *string + InternalCL *bool + CaplinDiscoveryAddr *string + CaplinDiscoveryPort *uint64 + CaplinDiscoveryTCPPort *uint64 + SentinelAddr *string + SentinelPort *uint64 + OverridePragueTime *big.Int `toml:",omitempty"` + SilkwormExecution *bool + SilkwormRpcDaemon *bool + SilkwormSentry *bool + SilkwormVerbosity *string + SilkwormNumContexts *uint32 + SilkwormRpcLogEnabled *bool + SilkwormRpcLogDirPath *string + SilkwormRpcLogMaxFileSize *uint16 + SilkwormRpcLogMaxFiles *uint16 + 
SilkwormRpcLogDumpResponse *bool + SilkwormRpcNumWorkers *uint32 + SilkwormRpcJsonCompatibility *bool + DisableTxPoolGossip *bool } var dec Config if err := unmarshal(&dec); err != nil { @@ -122,6 +213,18 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.Snapshot != nil { c.Snapshot = *dec.Snapshot } + if dec.Downloader != nil { + c.Downloader = dec.Downloader + } + if dec.BeaconRouter != nil { + c.BeaconRouter = *dec.BeaconRouter + } + if dec.CaplinConfig != nil { + c.CaplinConfig = *dec.CaplinConfig + } + if dec.Dirs != nil { + c.Dirs = *dec.Dirs + } if dec.ExternalSnapshotDownloaderAddr != nil { c.ExternalSnapshotDownloaderAddr = *dec.ExternalSnapshotDownloaderAddr } @@ -140,11 +243,11 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.Aura != nil { c.Aura = *dec.Aura } - if dec.Parlia != nil { - c.Parlia = *dec.Parlia + if dec.DeprecatedTxPool != nil { + c.DeprecatedTxPool = *dec.DeprecatedTxPool } if dec.TxPool != nil { - c.DeprecatedTxPool = *dec.TxPool + c.TxPool = *dec.TxPool } if dec.GPO != nil { c.GPO = *dec.GPO @@ -158,5 +261,86 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.StateStream != nil { c.StateStream = *dec.StateStream } + if dec.HeimdallURL != nil { + c.HeimdallURL = *dec.HeimdallURL + } + if dec.WithoutHeimdall != nil { + c.WithoutHeimdall = *dec.WithoutHeimdall + } + if dec.WithHeimdallMilestones != nil { + c.WithHeimdallMilestones = *dec.WithHeimdallMilestones + } + if dec.WithHeimdallWaypointRecording != nil { + c.WithHeimdallWaypointRecording = *dec.WithHeimdallWaypointRecording + } + if dec.PolygonSync != nil { + c.PolygonSync = *dec.PolygonSync + } + if dec.PolygonSyncStage != nil { + c.PolygonSyncStage = *dec.PolygonSyncStage + } + if dec.Ethstats != nil { + c.Ethstats = *dec.Ethstats + } + if dec.InternalCL != nil { + c.InternalCL = *dec.InternalCL + } + if dec.CaplinDiscoveryAddr != nil { + c.CaplinDiscoveryAddr = *dec.CaplinDiscoveryAddr + } + if dec.CaplinDiscoveryPort != nil { + c.CaplinDiscoveryPort = *dec.CaplinDiscoveryPort + } + if dec.CaplinDiscoveryTCPPort != nil { + c.CaplinDiscoveryTCPPort = *dec.CaplinDiscoveryTCPPort + } + if dec.SentinelAddr != nil { + c.SentinelAddr = *dec.SentinelAddr + } + if dec.SentinelPort != nil { + c.SentinelPort = *dec.SentinelPort + } + if dec.OverridePragueTime != nil { + c.OverridePragueTime = dec.OverridePragueTime + } + if dec.SilkwormExecution != nil { + c.SilkwormExecution = *dec.SilkwormExecution + } + if dec.SilkwormRpcDaemon != nil { + c.SilkwormRpcDaemon = *dec.SilkwormRpcDaemon + } + if dec.SilkwormSentry != nil { + c.SilkwormSentry = *dec.SilkwormSentry + } + if dec.SilkwormVerbosity != nil { + c.SilkwormVerbosity = *dec.SilkwormVerbosity + } + if dec.SilkwormNumContexts != nil { + c.SilkwormNumContexts = *dec.SilkwormNumContexts + } + if dec.SilkwormRpcLogEnabled != nil { + c.SilkwormRpcLogEnabled = *dec.SilkwormRpcLogEnabled + } + if dec.SilkwormRpcLogDirPath != nil { + c.SilkwormRpcLogDirPath = *dec.SilkwormRpcLogDirPath + } + if dec.SilkwormRpcLogMaxFileSize != nil { + c.SilkwormRpcLogMaxFileSize = *dec.SilkwormRpcLogMaxFileSize + } + if dec.SilkwormRpcLogMaxFiles != nil { + c.SilkwormRpcLogMaxFiles = *dec.SilkwormRpcLogMaxFiles + } + if dec.SilkwormRpcLogDumpResponse != nil { + c.SilkwormRpcLogDumpResponse = *dec.SilkwormRpcLogDumpResponse + } + if dec.SilkwormRpcNumWorkers != nil { + c.SilkwormRpcNumWorkers = *dec.SilkwormRpcNumWorkers + } + if dec.SilkwormRpcJsonCompatibility != nil { + 
c.SilkwormRpcJsonCompatibility = *dec.SilkwormRpcJsonCompatibility + } + if dec.DisableTxPoolGossip != nil { + c.DisableTxPoolGossip = *dec.DisableTxPoolGossip + } return nil } diff --git a/eth/ethconsensusconfig/config.go b/eth/ethconsensusconfig/config.go index 5a3c06030ef..dee18520c92 100644 --- a/eth/ethconsensusconfig/config.go +++ b/eth/ethconsensusconfig/config.go @@ -49,7 +49,7 @@ import ( func CreateConsensusEngine(ctx context.Context, nodeConfig *nodecfg.Config, chainConfig *chain.Config, config interface{}, notify []string, noVerify bool, heimdallClient heimdall.HeimdallClient, withoutHeimdall bool, blockReader services.FullBlockReader, readonly bool, - logger log.Logger, polygonBridge bridge.Service, + logger log.Logger, polygonBridge bridge.Service, heimdallService heimdall.Service, ) consensus.Engine { var eng consensus.Engine @@ -159,7 +159,7 @@ func CreateConsensusEngine(ctx context.Context, nodeConfig *nodecfg.Config, chai panic(err) } - eng = bor.New(chainConfig, db, blockReader, spanner, heimdallClient, genesisContractsClient, logger, polygonBridge) + eng = bor.New(chainConfig, db, blockReader, spanner, heimdallClient, genesisContractsClient, logger, polygonBridge, heimdallService) } } @@ -192,5 +192,5 @@ func CreateConsensusEngineBareBones(ctx context.Context, chainConfig *chain.Conf } return CreateConsensusEngine(ctx, &nodecfg.Config{}, chainConfig, consensusConfig, nil /* notify */, true, /* noVerify */ - nil /* heimdallClient */, true /* withoutHeimdall */, nil /* blockReader */, false /* readonly */, logger, nil) + nil /* heimdallClient */, true /* withoutHeimdall */, nil /* blockReader */, false /* readonly */, logger, nil, nil) } diff --git a/eth/filters/api.go b/eth/filters/api.go index 3e2383f1ca4..97fadcbbda0 100644 --- a/eth/filters/api.go +++ b/eth/filters/api.go @@ -418,7 +418,7 @@ func (api *PublicFilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*ty api.filtersMu.Unlock() if !found || f.typ != LogsSubscription { - return nil, fmt.Errorf("filter not found") + return nil, errors.New("filter not found") } var filter *Filter @@ -477,7 +477,7 @@ func (api *PublicFilterAPI) GetFilterChanges(id rpc.ID) (interface{}, error) { } } - return []interface{}{}, fmt.Errorf("filter not found") + return []interface{}{}, errors.New("filter not found") } */ @@ -499,7 +499,7 @@ func (args *FilterCriteria) UnmarshalJSON(data []byte) error { if raw.BlockHash != nil { if raw.FromBlock != nil || raw.ToBlock != nil { // BlockHash is mutually exclusive with FromBlock/ToBlock criteria - return fmt.Errorf("cannot specify both BlockHash and FromBlock/ToBlock, choose one or the other") + return errors.New("cannot specify both BlockHash and FromBlock/ToBlock, choose one or the other") } args.BlockHash = raw.BlockHash } else { @@ -572,11 +572,11 @@ func (args *FilterCriteria) UnmarshalJSON(data []byte) error { } args.Topics[i] = append(args.Topics[i], parsed) } else { - return fmt.Errorf("invalid topic(s)") + return errors.New("invalid topic(s)") } } default: - return fmt.Errorf("invalid topic(s)") + return errors.New("invalid topic(s)") } } } diff --git a/eth/gasprice/feehistory_test.go b/eth/gasprice/feehistory_test.go index a462f095cb4..eab55261fa1 100644 --- a/eth/gasprice/feehistory_test.go +++ b/eth/gasprice/feehistory_test.go @@ -24,14 +24,17 @@ import ( "errors" "testing" + "github.com/erigontech/erigon-lib/kv/kvcache" "github.com/erigontech/erigon-lib/log/v3" "github.com/erigontech/erigon/eth/gasprice" "github.com/erigontech/erigon/eth/gasprice/gaspricecfg" 
"github.com/erigontech/erigon/rpc" + "github.com/erigontech/erigon/rpc/rpccfg" "github.com/erigontech/erigon/turbo/jsonrpc" ) func TestFeeHistory(t *testing.T) { + var cases = []struct { pending bool maxHeader, maxBlock int @@ -62,35 +65,44 @@ func TestFeeHistory(t *testing.T) { MaxHeaderHistory: c.maxHeader, MaxBlockHistory: c.maxBlock, } - backend := newTestBackend(t) //, big.NewInt(16), c.pending) - cache := jsonrpc.NewGasPriceCache() - oracle := gasprice.NewOracle(backend, config, cache, log.New()) - first, reward, baseFee, ratio, err := oracle.FeeHistory(context.Background(), c.count, c.last, c.percent) + func() { + m := newTestBackend(t) //, big.NewInt(16), c.pending) + defer m.Close() - expReward := c.expCount - if len(c.percent) == 0 { - expReward = 0 - } - expBaseFee := c.expCount - if expBaseFee != 0 { - expBaseFee++ - } + baseApi := jsonrpc.NewBaseApi(nil, kvcache.NewDummy(), m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs, nil) + tx, _ := m.DB.BeginRo(m.Ctx) + defer tx.Rollback() - if first.Uint64() != c.expFirst { - t.Fatalf("Test case %d: first block mismatch, want %d, got %d", i, c.expFirst, first) - } - if len(reward) != expReward { - t.Fatalf("Test case %d: reward array length mismatch, want %d, got %d", i, expReward, len(reward)) - } - if len(baseFee) != expBaseFee { - t.Fatalf("Test case %d: baseFee array length mismatch, want %d, got %d", i, expBaseFee, len(baseFee)) - } - if len(ratio) != c.expCount { - t.Fatalf("Test case %d: gasUsedRatio array length mismatch, want %d, got %d", i, c.expCount, len(ratio)) - } - if err != c.expErr && !errors.Is(err, c.expErr) { - t.Fatalf("Test case %d: error mismatch, want %v, got %v", i, c.expErr, err) - } + cache := jsonrpc.NewGasPriceCache() + oracle := gasprice.NewOracle(jsonrpc.NewGasPriceOracleBackend(tx, baseApi), config, cache, log.New()) + + first, reward, baseFee, ratio, err := oracle.FeeHistory(context.Background(), c.count, c.last, c.percent) + + expReward := c.expCount + if len(c.percent) == 0 { + expReward = 0 + } + expBaseFee := c.expCount + if expBaseFee != 0 { + expBaseFee++ + } + + if first.Uint64() != c.expFirst { + t.Fatalf("Test case %d: first block mismatch, want %d, got %d", i, c.expFirst, first) + } + if len(reward) != expReward { + t.Fatalf("Test case %d: reward array length mismatch, want %d, got %d", i, expReward, len(reward)) + } + if len(baseFee) != expBaseFee { + t.Fatalf("Test case %d: baseFee array length mismatch, want %d, got %d", i, expBaseFee, len(baseFee)) + } + if len(ratio) != c.expCount { + t.Fatalf("Test case %d: gasUsedRatio array length mismatch, want %d, got %d", i, c.expCount, len(ratio)) + } + if err != c.expErr && !errors.Is(err, c.expErr) { + t.Fatalf("Test case %d: error mismatch, want %v, got %v", i, c.expErr, err) + } + }() } } diff --git a/eth/gasprice/gasprice_test.go b/eth/gasprice/gasprice_test.go index bce78a2e25d..15e4614f48c 100644 --- a/eth/gasprice/gasprice_test.go +++ b/eth/gasprice/gasprice_test.go @@ -27,78 +27,24 @@ import ( "github.com/holiman/uint256" - "github.com/erigontech/erigon-lib/chain" + "github.com/erigontech/erigon-lib/kv/kvcache" + "github.com/erigontech/erigon/rpc/rpccfg" + libcommon "github.com/erigontech/erigon-lib/common" - "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon-lib/log/v3" "github.com/erigontech/erigon/eth/gasprice/gaspricecfg" "github.com/erigontech/erigon/turbo/jsonrpc" - "github.com/erigontech/erigon/turbo/services" "github.com/erigontech/erigon/turbo/stages/mock" "github.com/erigontech/erigon/core" - 
"github.com/erigontech/erigon/core/rawdb" "github.com/erigontech/erigon/core/types" "github.com/erigontech/erigon/crypto" "github.com/erigontech/erigon/eth/gasprice" "github.com/erigontech/erigon/params" - "github.com/erigontech/erigon/rpc" ) -type testBackend struct { - db kv.RwDB - cfg *chain.Config - blockReader services.FullBlockReader -} - -func (b *testBackend) GetReceipts(ctx context.Context, block *types.Block) (types.Receipts, error) { - tx, err := b.db.BeginRo(context.Background()) - if err != nil { - return nil, err - } - defer tx.Rollback() - - receipts := rawdb.ReadReceipts(tx, block, nil) - return receipts, nil -} - -func (b *testBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) { - return nil, nil - //if b.pending { - // block := b.chain.GetBlockByNumber(testHead + 1) - // return block, b.chain.GetReceiptsByHash(block.Hash()) - //} -} -func (b *testBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) { - tx, err := b.db.BeginRo(context.Background()) - if err != nil { - return nil, err - } - defer tx.Rollback() - if number == rpc.LatestBlockNumber { - return rawdb.ReadCurrentHeader(tx), nil - } - return b.blockReader.HeaderByNumber(ctx, tx, uint64(number)) -} - -func (b *testBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { - tx, err := b.db.BeginRo(context.Background()) - if err != nil { - return nil, err - } - defer tx.Rollback() +func newTestBackend(t *testing.T) *mock.MockSentry { - if number == rpc.LatestBlockNumber { - return b.blockReader.CurrentBlock(tx) - } - return b.blockReader.BlockByNumber(ctx, tx, uint64(number)) -} - -func (b *testBackend) ChainConfig() *chain.Config { - return b.cfg -} - -func newTestBackend(t *testing.T) *testBackend { var ( key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") addr = crypto.PubkeyToAddress(key.PublicKey) @@ -126,27 +72,7 @@ func newTestBackend(t *testing.T) *testBackend { if err = m.InsertChain(chain); err != nil { t.Error(err) } - return &testBackend{db: m.DB, cfg: params.TestChainConfig, blockReader: m.BlockReader} -} - -func (b *testBackend) CurrentHeader() *types.Header { - tx, err := b.db.BeginRo(context.Background()) - if err != nil { - panic(err) - } - defer tx.Rollback() - return rawdb.ReadCurrentHeader(tx) -} - -func (b *testBackend) GetBlockByNumber(number uint64) *types.Block { - tx, err := b.db.BeginRo(context.Background()) - if err != nil { - panic(err) - } - defer tx.Rollback() - - block, _ := b.blockReader.BlockByNumber(context.Background(), tx, number) - return block + return m } func TestSuggestPrice(t *testing.T) { @@ -155,9 +81,15 @@ func TestSuggestPrice(t *testing.T) { Percentile: 60, Default: big.NewInt(params.GWei), } - backend := newTestBackend(t) + + m := newTestBackend(t) //, big.NewInt(16), c.pending) + baseApi := jsonrpc.NewBaseApi(nil, kvcache.NewDummy(), m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs, nil) + + tx, _ := m.DB.BeginRo(m.Ctx) + defer tx.Rollback() + cache := jsonrpc.NewGasPriceCache() - oracle := gasprice.NewOracle(backend, config, cache, log.New()) + oracle := gasprice.NewOracle(jsonrpc.NewGasPriceOracleBackend(tx, baseApi), config, cache, log.New()) // The gas price sampled is: 32G, 31G, 30G, 29G, 28G, 27G got, err := oracle.SuggestTipCap(context.Background()) diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go index a096e4c33ea..63c1d4a5ed1 100644 --- a/eth/protocols/eth/handler_test.go +++ 
b/eth/protocols/eth/handler_test.go @@ -20,6 +20,7 @@ package eth_test import ( + "github.com/erigontech/erigon/turbo/jsonrpc/receipts" "math/big" "testing" @@ -87,7 +88,7 @@ func TestGetBlockReceipts(t *testing.T) { } // Assemble the test environment m := mockWithGenerator(t, 4, generator) - + receiptsGetter := receipts.NewGenerator(32, m.BlockReader, m.Engine) // Collect the hashes to request, and the response to expect var ( hashes []libcommon.Hash @@ -100,13 +101,16 @@ func TestGetBlockReceipts(t *testing.T) { hashes = append(hashes, block.Hash()) // If known, encode and queue for response packet - r := rawdb.ReadReceipts(tx, block, nil) + + r, err := receiptsGetter.GetReceipts(m.Ctx, m.ChainConfig, tx, block) + require.NoError(t, err) encoded, err := rlp.EncodeToBytes(r) require.NoError(t, err) receipts = append(receipts, encoded) } return nil }) + require.NoError(t, err) b, err := rlp.EncodeToBytes(eth.GetReceiptsPacket66{RequestId: 1, GetReceiptsPacket: hashes}) require.NoError(t, err) @@ -121,14 +125,10 @@ func TestGetBlockReceipts(t *testing.T) { expect, err := rlp.EncodeToBytes(eth.ReceiptsRLPPacket66{RequestId: 1, ReceiptsRLPPacket: receipts}) require.NoError(t, err) - if m.HistoryV3 { - // GetReceiptsMsg disabled for historyV3 - } else { - m.ReceiveWg.Wait() - sent := m.SentMessage(0) - require.Equal(t, eth.ToProto[m.SentryClient.Protocol()][eth.ReceiptsMsg], sent.Id) - require.Equal(t, expect, sent.Data) - } + m.ReceiveWg.Wait() + sent := m.SentMessage(0) + require.Equal(t, eth.ToProto[m.SentryClient.Protocol()][eth.ReceiptsMsg], sent.Id) + require.Equal(t, expect, sent.Data) } // newTestBackend creates a chain with a number of explicitly defined blocks and diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go index 01aa6d71a7f..f089053a70a 100644 --- a/eth/protocols/eth/handlers.go +++ b/eth/protocols/eth/handlers.go @@ -22,6 +22,7 @@ package eth import ( "context" "fmt" + "github.com/erigontech/erigon-lib/chain" libcommon "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/kv" @@ -160,7 +161,7 @@ func AnswerGetBlockBodiesQuery(db kv.Tx, query GetBlockBodiesPacket, blockReader } type ReceiptsGetter interface { - GetReceipts(ctx context.Context, cfg *chain.Config, tx kv.Tx, block *types.Block, senders []libcommon.Address) (types.Receipts, error) + GetReceipts(ctx context.Context, cfg *chain.Config, tx kv.Tx, block *types.Block) (types.Receipts, error) } func AnswerGetReceiptsQuery(ctx context.Context, cfg *chain.Config, receiptsGetter ReceiptsGetter, br services.FullBlockReader, db kv.Tx, query GetReceiptsPacket) ([]rlp.RawValue, error) { //nolint:unparam @@ -180,7 +181,7 @@ func AnswerGetReceiptsQuery(ctx context.Context, cfg *chain.Config, receiptsGett return nil, nil } // Retrieve the requested block's receipts - b, s, err := br.BlockWithSenders(context.Background(), db, hash, *number) + b, _, err := br.BlockWithSenders(context.Background(), db, hash, *number) if err != nil { return nil, err } @@ -188,7 +189,7 @@ func AnswerGetReceiptsQuery(ctx context.Context, cfg *chain.Config, receiptsGett return nil, nil } - results, err := receiptsGetter.GetReceipts(ctx, cfg, db, b, s) + results, err := receiptsGetter.GetReceipts(ctx, cfg, db, b) if err != nil { return nil, err } diff --git a/eth/stagedsync/README.md b/eth/stagedsync/README.md index 140e3e2923e..9918fba09f1 100644 --- a/eth/stagedsync/README.md +++ b/eth/stagedsync/README.md @@ -45,7 +45,7 @@ state.unwindOrder = []*Stage{ } ``` -## Preprocessing with 
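With the `senders` parameter dropped from `ReceiptsGetter`, satisfying the interface reduces to the generator used in the test above. A sketch under the same assumptions (the `32` mirrors the test literal and is presumably a cache size; `tx` and `block` are in scope as in the test):

```go
// The narrowed interface from handlers.go: senders are no longer threaded through.
var getter eth.ReceiptsGetter = receipts.NewGenerator(32, m.BlockReader, m.Engine)

r, err := getter.GetReceipts(m.Ctx, m.ChainConfig, tx, block)
if err != nil {
	return err
}
_ = r // receipts for the requested block
```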
[ETL](https://github.com/erigontech/erigon-lib/tree/main/etl) +## Preprocessing with [ETL](https://github.com/erigontech/erigon/tree/main/erigon-lib/etl) Some stages use our ETL framework to sort data by keys before inserting it into the database. diff --git a/eth/stagedsync/exec3.go b/eth/stagedsync/exec3.go index 9725e8182d6..ac0559e334c 100644 --- a/eth/stagedsync/exec3.go +++ b/eth/stagedsync/exec3.go @@ -180,7 +180,7 @@ func ExecV3(ctx context.Context, batchSize := cfg.batchSize chainDb := cfg.db blockReader := cfg.blockReader - agg, engine := cfg.agg, cfg.engine + engine := cfg.engine chainConfig, genesis := cfg.chainConfig, cfg.genesis applyTx := txc.Tx @@ -198,6 +198,7 @@ func ExecV3(ctx context.Context, } } + agg := cfg.db.(state2.HasAgg).Agg().(*state2.Aggregator) if initialCycle { agg.SetCollateAndBuildWorkers(min(2, estimate.StateV3Collate.Workers())) agg.SetCompressWorkers(estimate.CompressSnapshot.Workers()) @@ -246,7 +247,6 @@ func ExecV3(ctx context.Context, } return lastTxNum == inputTxNum, nil } - // Cases: // 1. Snapshots > ExecutionStage: snapshots can have half-block data `10.4`. Get right txNum from SharedDomains (after SeekCommitment) // 2. ExecutionStage > Snapshots: no half-block data possible. Rely on DB. @@ -284,8 +284,8 @@ func ExecV3(ctx context.Context, inputTxNum = _min outputTxNum.Store(inputTxNum) - //_max, _ := rawdbv3.TxNums.Max(applyTx, _blockNum) - //log.Info(fmt.Sprintf("[commitment] found domain.txn %d, inputTxn %d, offset %d. DB found block %d {%d, %d}\n", doms.TxNum(), inputTxNum, offsetFromBlockBeginning, _blockNum, _min, _max)) + //_max, _ := rawdbv3.TxNums.Max(applyTx, blockNum) + //fmt.Printf("[commitment] found domain.txn %d, inputTxn %d, offset %d. DB found block %d {%d, %d}\n", doms.TxNum(), inputTxNum, offsetFromBlockBeginning, blockNum, _min, _max) doms.SetBlockNum(_blockNum) doms.SetTxNum(inputTxNum) return nil @@ -450,6 +450,7 @@ func ExecV3(ctx context.Context, case <-logEvery.C: stepsInDB := rawdbhelpers.IdxStepsCountV3(tx) + progress.Log(rs, in, rws, rs.DoneCount(), 0, inputBlockNum.Load(), outputBlockNum.GetValueUint64(), outputTxNum.Load(), execRepeats.GetValueUint64(), stepsInDB, shouldGenerateChangesets) if agg.HasBackgroundFilesBuild() { logger.Info(fmt.Sprintf("[%s] Background files build", execStage.LogPrefix()), "progress", agg.BackgroundProgress()) @@ -760,6 +761,10 @@ Loop: txTask.SystemTxIndex = systemTxIndex } } + if cfg.genesis != nil { + txTask.Config = cfg.genesis.Config + } + if txTask.TxNum <= txNumInDB && txTask.TxNum > 0 { inputTxNum++ skipPostEvaluation = true @@ -907,7 +912,7 @@ Loop: //} // If we skip post evaluation, then we should compute root hash ASAP for fail-fast aggregatorRo := applyTx.(state2.HasAggTx).AggTx().(*state2.AggregatorRoTx) - if !skipPostEvaluation && (rs.SizeEstimate() < commitThreshold || inMemExec) && !aggregatorRo.CanPrune(applyTx, outputTxNum.Load()) { + if (!skipPostEvaluation && rs.SizeEstimate() < commitThreshold && !aggregatorRo.CanPrune(applyTx, outputTxNum.Load())) || inMemExec { break } var ( @@ -1116,7 +1121,7 @@ func flushAndCheckCommitmentV3(ctx context.Context, header *types.Header, applyT } logger.Error(fmt.Sprintf("[%s] Wrong trie root of block %d: %x, expected (from header): %x. 
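The README sentence above is the whole idea in one line; a condensed sketch of the collect-then-load pattern as it appears throughout the stages in this patch (the target table name is a placeholder, not a real bucket):

```go
collector := etl.NewCollector(logPrefix, tmpDir, etl.NewSortableBuffer(etl.BufferOptimalSize), logger)
defer collector.Close()

// Phase 1: collect pairs in arbitrary order; the buffer sorts them
// and spills to tmpDir when it fills up.
if err := collector.Collect(key, value); err != nil {
	return err
}

// Phase 2: stream the now-sorted pairs into the target table;
// sequential keys make the inserts cheap.
if err := collector.Load(tx, targetTable, // targetTable: placeholder bucket name
	func(k, v []byte, _ etl.CurrentTableReader, next etl.LoadNextFunc) error {
		return next(k, k, v) // identity transform
	},
	etl.TransformArgs{Quit: ctx.Done()},
); err != nil {
	return err
}
```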
Block hash: %x", e.LogPrefix(), header.Number.Uint64(), rh, header.Root.Bytes(), header.Hash())) if cfg.badBlockHalt { - return false, fmt.Errorf("wrong trie root") + return false, errors.New("wrong trie root") } if cfg.hd != nil { cfg.hd.ReportBadHeaderPoS(header.Hash(), header.ParentHash) @@ -1347,6 +1352,7 @@ func reconstituteStep(last bool, reconWorkers[i] = exec3.NewReconWorker(lock.RLocker(), reconstWorkersCtx, rs, localAs, blockReader, chainConfig, logger, genesis, engine, chainTxs[i]) reconWorkers[i].SetTx(roTxs[i]) reconWorkers[i].SetChainTx(chainTxs[i]) + reconWorkers[i].SetDirs(dirs) } rollbackCount := uint64(0) @@ -1547,11 +1553,11 @@ func reconstituteStep(last bool, return err } - plainStateCollector := etl.NewCollector(fmt.Sprintf("%s recon plainState", s.LogPrefix()), dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), logger) + plainStateCollector := etl.NewCollector(s.LogPrefix()+" recon plainState", dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), logger) defer plainStateCollector.Close() - codeCollector := etl.NewCollector(fmt.Sprintf("%s recon code", s.LogPrefix()), dirs.Tmp, etl.NewOldestEntryBuffer(etl.BufferOptimalSize), logger) + codeCollector := etl.NewCollector(s.LogPrefix()+" recon code", dirs.Tmp, etl.NewOldestEntryBuffer(etl.BufferOptimalSize), logger) defer codeCollector.Close() - plainContractCollector := etl.NewCollector(fmt.Sprintf("%s recon plainContract", s.LogPrefix()), dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), logger) + plainContractCollector := etl.NewCollector(s.LogPrefix()+" recon plainContract", dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), logger) defer plainContractCollector.Close() var transposedKey []byte @@ -1790,7 +1796,7 @@ func ReconstituteState(ctx context.Context, s *StageState, dirs datadir.Dirs, wo return fmt.Errorf("blockNum for mininmaxTxNum=%d not found. 
See lastBlockNum=%d,lastTxNum=%d", toTxNum, lastBn, lastTn) } if blockNum == 0 { - return fmt.Errorf("not enough transactions in the history data") + return errors.New("not enough transactions in the history data") } blockNum-- txNum, err = rawdbv3.TxNums.Max(tx, blockNum) @@ -1830,11 +1836,11 @@ func ReconstituteState(ctx context.Context, s *StageState, dirs datadir.Dirs, wo } } db.Close() - plainStateCollector := etl.NewCollector(fmt.Sprintf("%s recon plainState", s.LogPrefix()), dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), logger) + plainStateCollector := etl.NewCollector(s.LogPrefix()+" recon plainState", dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), logger) defer plainStateCollector.Close() - codeCollector := etl.NewCollector(fmt.Sprintf("%s recon code", s.LogPrefix()), dirs.Tmp, etl.NewOldestEntryBuffer(etl.BufferOptimalSize), logger) + codeCollector := etl.NewCollector(s.LogPrefix()+" recon code", dirs.Tmp, etl.NewOldestEntryBuffer(etl.BufferOptimalSize), logger) defer codeCollector.Close() - plainContractCollector := etl.NewCollector(fmt.Sprintf("%s recon plainContract", s.LogPrefix()), dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), logger) + plainContractCollector := etl.NewCollector(s.LogPrefix()+" recon plainContract", dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), logger) defer plainContractCollector.Close() fillWorker := exec3.NewFillWorker(txNum, aggSteps[len(aggSteps)-1]) diff --git a/eth/stagedsync/stage_bodies.go b/eth/stagedsync/stage_bodies.go index 81fb7601ced..8c05f7bdc78 100644 --- a/eth/stagedsync/stage_bodies.go +++ b/eth/stagedsync/stage_bodies.go @@ -54,7 +54,6 @@ type BodiesCfg struct { chanConfig chain.Config blockReader services.FullBlockReader blockWriter *blockio.BlockWriter - loopBreakCheck func(int) bool } func StageBodiesCfg(db kv.RwDB, blobStore services.BlobStorage, bd *bodydownload.BodyDownload, @@ -63,11 +62,11 @@ func StageBodiesCfg(db kv.RwDB, blobStore services.BlobStorage, bd *bodydownload chanConfig chain.Config, blockReader services.FullBlockReader, blockWriter *blockio.BlockWriter, - loopBreakCheck func(int) bool) BodiesCfg { +) BodiesCfg { return BodiesCfg{ db: db, bd: bd, blobStore: blobStore, bodyReqSend: bodyReqSend, penalise: penalise, blockPropagator: blockPropagator, timeout: timeout, chanConfig: chanConfig, blockReader: blockReader, - blockWriter: blockWriter, loopBreakCheck: loopBreakCheck} + blockWriter: blockWriter} } // BodiesForward progresses Bodies stage in the forward direction @@ -290,10 +289,6 @@ func BodiesForward(s *StageState, u Unwinder, ctx context.Context, tx kv.RwTx, c } cfg.bd.AdvanceLow() } - - if cfg.loopBreakCheck != nil && cfg.loopBreakCheck(int(i)) { - return true, nil - } } d5 += time.Since(start) diff --git a/eth/stagedsync/stage_bodies_test.go b/eth/stagedsync/stage_bodies_test.go index 710ceeab52f..36b88f8cf14 100644 --- a/eth/stagedsync/stage_bodies_test.go +++ b/eth/stagedsync/stage_bodies_test.go @@ -25,7 +25,6 @@ import ( libcommon "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/common/u256" - "github.com/erigontech/erigon-lib/config3" "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon-lib/kv/rawdbv3" "github.com/erigontech/erigon-lib/log/v3" @@ -91,11 +90,9 @@ func TestBodiesCanonical(t *testing.T) { var e1 rawdbv3.ErrTxNumsAppendWithGap require.True(errors.As(err, &e1)) - if config3.EnableHistoryV4InTest { - // this should see same error inside then retry from last block available, therefore return no error - err = 
bw.MakeBodiesCanonical(tx, 5) - require.NoError(err) - } + // this should see same error inside then retry from last block available, therefore return no error + err = bw.MakeBodiesCanonical(tx, 5) + require.NoError(err) } func TestBodiesUnwind(t *testing.T) { @@ -131,12 +128,10 @@ func TestBodiesUnwind(t *testing.T) { require.NoError(err) require.Equal(2+10*(3+2), int(n)) // genesis 2 system txs + from 1, 10 block with 3 txn in each - if m.HistoryV3 { - lastBlockNum, lastTxNum, err := rawdbv3.TxNums.Last(tx) - require.NoError(err) - require.Equal(10, int(lastBlockNum)) - require.Equal(1+10*(3+2), int(lastTxNum)) - } + lastBlockNum, lastTxNum, err := rawdbv3.TxNums.Last(tx) + require.NoError(err) + require.Equal(10, int(lastBlockNum)) + require.Equal(1+10*(3+2), int(lastTxNum)) err = bw.MakeBodiesNonCanonical(tx, 5+1) // block 5 already canonical, start from next one require.NoError(err) @@ -145,12 +140,10 @@ func TestBodiesUnwind(t *testing.T) { require.NoError(err) require.Equal(2+10*(3+2), int(n)) // genesis 2 system txs + from 1, 5 block with 3 txn in each - if m.HistoryV3 { - lastBlockNum, lastTxNum, err := rawdbv3.TxNums.Last(tx) - require.NoError(err) - require.Equal(5, int(lastBlockNum)) - require.Equal(1+5*(3+2), int(lastTxNum)) - } + lastBlockNum, lastTxNum, err = rawdbv3.TxNums.Last(tx) + require.NoError(err) + require.Equal(5, int(lastBlockNum)) + require.Equal(1+5*(3+2), int(lastTxNum)) } { _, err = rawdb.WriteRawBodyIfNotExists(tx, libcommon.Hash{11}, 11, b) @@ -168,12 +161,10 @@ func TestBodiesUnwind(t *testing.T) { require.NoError(err) require.Equal(2+11*(3+2), int(n)) - if m.HistoryV3 { - lastBlockNum, lastTxNum, err := rawdbv3.TxNums.Last(tx) - require.NoError(err) - require.Equal(11, int(lastBlockNum)) - require.Equal(1+11*(3+2), int(lastTxNum)) - } + lastBlockNum, lastTxNum, err := rawdbv3.TxNums.Last(tx) + require.NoError(err) + require.Equal(11, int(lastBlockNum)) + require.Equal(1+11*(3+2), int(lastTxNum)) } { @@ -185,12 +176,10 @@ func TestBodiesUnwind(t *testing.T) { require.NoError(err) require.Equal(2+11*(3+2), int(n)) // from 0, 5 block with 3 txn in each - if m.HistoryV3 { - lastBlockNum, lastTxNum, err := rawdbv3.TxNums.Last(tx) - require.NoError(err) - require.Equal(5, int(lastBlockNum)) - require.Equal(1+5*(3+2), int(lastTxNum)) - } + lastBlockNum, lastTxNum, err := rawdbv3.TxNums.Last(tx) + require.NoError(err) + require.Equal(5, int(lastBlockNum)) + require.Equal(1+5*(3+2), int(lastTxNum)) err = bw.MakeBodiesCanonical(tx, 5+1) // block 5 already canonical, start from next one require.NoError(err) @@ -198,11 +187,9 @@ func TestBodiesUnwind(t *testing.T) { require.NoError(err) require.Equal(2+11*(3+2), int(n)) - if m.HistoryV3 { - lastBlockNum, lastTxNum, err := rawdbv3.TxNums.Last(tx) - require.NoError(err) - require.Equal(11, int(lastBlockNum)) - require.Equal(1+11*(3+2), int(lastTxNum)) - } + lastBlockNum, lastTxNum, err = rawdbv3.TxNums.Last(tx) + require.NoError(err) + require.Equal(11, int(lastBlockNum)) + require.Equal(1+11*(3+2), int(lastTxNum)) } } diff --git a/eth/stagedsync/stage_bor_heimdall.go b/eth/stagedsync/stage_bor_heimdall.go index f5e814c1052..79e44d0734c 100644 --- a/eth/stagedsync/stage_bor_heimdall.go +++ b/eth/stagedsync/stage_bor_heimdall.go @@ -30,14 +30,13 @@ import ( lru "github.com/hashicorp/golang-lru/arc/v2" "golang.org/x/sync/errgroup" - "github.com/erigontech/erigon-lib/log/v3" - "github.com/erigontech/erigon-lib/chain" - "github.com/erigontech/erigon-lib/chain/networkname" libcommon "github.com/erigontech/erigon-lib/common" 
+ "github.com/erigontech/erigon-lib/common/dbg" "github.com/erigontech/erigon-lib/kv" + "github.com/erigontech/erigon-lib/log/v3" + "github.com/erigontech/erigon/accounts/abi" - "github.com/erigontech/erigon/common/math" "github.com/erigontech/erigon/consensus" "github.com/erigontech/erigon/core/types" "github.com/erigontech/erigon/dataflow" @@ -71,7 +70,6 @@ type BorHeimdallCfg struct { hd *headerdownload.HeaderDownload penalize func(context.Context, []headerdownload.PenaltyItem) stateReceiverABI abi.ABI - loopBreakCheck func(int) bool recents *lru.ARCCache[libcommon.Hash, *bor.Snapshot] signatures *lru.ARCCache[libcommon.Hash, libcommon.Address] recordWaypoints bool @@ -87,7 +85,6 @@ func StageBorHeimdallCfg( blockReader services.FullBlockReader, hd *headerdownload.HeaderDownload, penalize func(context.Context, []headerdownload.PenaltyItem), - loopBreakCheck func(int) bool, recents *lru.ARCCache[libcommon.Hash, *bor.Snapshot], signatures *lru.ARCCache[libcommon.Hash, libcommon.Address], recordWaypoints bool, @@ -109,7 +106,6 @@ func StageBorHeimdallCfg( hd: hd, penalize: penalize, stateReceiverABI: bor.GenesisContractStateReceiverABI(), - loopBreakCheck: loopBreakCheck, recents: recents, signatures: signatures, recordWaypoints: recordWaypoints, @@ -117,8 +113,6 @@ func StageBorHeimdallCfg( } } -var lastMumbaiEventRecord *heimdall.EventRecordWithTime - func BorHeimdallForward( s *StageState, u Unwinder, @@ -269,6 +263,7 @@ func BorHeimdallForward( return err } if header == nil { + _, _ = cfg.blockReader.HeaderByNumber(dbg.ContextWithDebug(ctx, true), tx, blockNum) return fmt.Errorf("header not found: %d", blockNum) } @@ -364,15 +359,6 @@ func BorHeimdallForward( var endStateSyncEventId uint64 - // mumbai event records have stopped being produced as of march 2024 - // as part of the goerli decom - so there is no point trying to - // fetch them - if cfg.chainConfig.ChainName == networkname.MumbaiChainName { - if nextEventRecord == nil { - nextEventRecord = lastMumbaiEventRecord - } - } - if nextEventRecord == nil || header.Time > uint64(nextEventRecord.Time.Unix()) { var records int @@ -408,16 +394,6 @@ func BorHeimdallForward( if !errors.Is(err, heimdall.ErrEventRecordNotFound) { return err } - - if cfg.chainConfig.ChainName == networkname.MumbaiChainName && lastStateSyncEventID == 276850 { - lastMumbaiEventRecord = &heimdall.EventRecordWithTime{ - EventRecord: heimdall.EventRecord{ - ID: 276851, - }, - Time: time.Unix(math.MaxInt64, 0), - } - } - endStateSyncEventId = lastStateSyncEventID } } @@ -427,10 +403,6 @@ func BorHeimdallForward( fetchTime += callTime syncEventTime = syncEventTime + time.Since(syncEventStart) - if cfg.loopBreakCheck != nil && cfg.loopBreakCheck(int(blockNum-lastBlockNum)) { - headNumber = blockNum - break - } } if err = s.Update(tx, headNumber); err != nil { @@ -702,7 +674,7 @@ func initValidatorSets( } if zeroSpanBytes == nil { - return nil, fmt.Errorf("zero span not found") + return nil, errors.New("zero span not found") } var zeroSpan heimdall.Span diff --git a/eth/stagedsync/stage_call_traces.go b/eth/stagedsync/stage_call_traces.go deleted file mode 100644 index f145696011b..00000000000 --- a/eth/stagedsync/stage_call_traces.go +++ /dev/null @@ -1,529 +0,0 @@ -// Copyright 2024 The Erigon Authors -// This file is part of Erigon. 
-// -// Erigon is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// Erigon is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with Erigon. If not, see . - -package stagedsync - -import ( - "bytes" - "context" - "encoding/binary" - "encoding/hex" - "fmt" - "runtime" - "time" - - "github.com/RoaringBitmap/roaring/roaring64" - "github.com/c2h5oh/datasize" - - libcommon "github.com/erigontech/erigon-lib/common" - "github.com/erigontech/erigon-lib/common/dbg" - "github.com/erigontech/erigon-lib/common/hexutility" - "github.com/erigontech/erigon-lib/common/length" - "github.com/erigontech/erigon-lib/etl" - "github.com/erigontech/erigon-lib/kv" - "github.com/erigontech/erigon-lib/kv/bitmapdb" - "github.com/erigontech/erigon-lib/log/v3" - - "github.com/erigontech/erigon/common/math" - "github.com/erigontech/erigon/ethdb/prune" - "github.com/erigontech/erigon/params" -) - -type CallTracesCfg struct { - db kv.RwDB - prune prune.Mode - ToBlock uint64 // not setting this params means no limit - tmpdir string -} - -func StageCallTracesCfg( - db kv.RwDB, - prune prune.Mode, - toBlock uint64, - tmpdir string, -) CallTracesCfg { - return CallTracesCfg{ - db: db, - prune: prune, - ToBlock: toBlock, - tmpdir: tmpdir, - } -} - -func SpawnCallTraces(s *StageState, tx kv.RwTx, cfg CallTracesCfg, ctx context.Context, logger log.Logger) error { - useExternalTx := tx != nil - if !useExternalTx { - var err error - tx, err = cfg.db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - } - - quit := ctx.Done() - endBlock, err := s.ExecutionAt(tx) - if cfg.ToBlock > 0 && cfg.ToBlock < endBlock { - endBlock = cfg.ToBlock - } - logPrefix := s.LogPrefix() - if err != nil { - return fmt.Errorf("getting last executed block: %w", err) - } - if endBlock == s.BlockNumber { - return nil - } - - if err := promoteCallTraces(logPrefix, tx, s.BlockNumber+1, endBlock, bitmapsBufLimit, bitmapsFlushEvery, quit, cfg.tmpdir, logger); err != nil { - return err - } - - if err := s.Update(tx, endBlock); err != nil { - return err - } - if !useExternalTx { - if err := tx.Commit(); err != nil { - return err - } - } - - return nil -} - -func promoteCallTraces(logPrefix string, tx kv.RwTx, startBlock, endBlock uint64, bufLimit datasize.ByteSize, flushEvery time.Duration, quit <-chan struct{}, tmpdir string, logger log.Logger) error { - logEvery := time.NewTicker(logInterval) - defer logEvery.Stop() - - froms := map[string]*roaring64.Bitmap{} - tos := map[string]*roaring64.Bitmap{} - collectorFrom := etl.NewCollector(logPrefix, tmpdir, etl.NewSortableBuffer(etl.BufferOptimalSize), logger) - defer collectorFrom.Close() - collectorTo := etl.NewCollector(logPrefix, tmpdir, etl.NewSortableBuffer(etl.BufferOptimalSize), logger) - defer collectorTo.Close() - checkFlushEvery := time.NewTicker(flushEvery) - defer checkFlushEvery.Stop() - - traceCursor, err := tx.RwCursorDupSort(kv.CallTraceSet) - if err != nil { - return fmt.Errorf("failed to create cursor: %w", err) - } - defer traceCursor.Close() - - var k, v []byte - prev := startBlock - for k, v, err = 
traceCursor.Seek(hexutility.EncodeTs(startBlock)); k != nil; k, v, err = traceCursor.Next() { - if err != nil { - return err - } - blockNum := binary.BigEndian.Uint64(k) - if blockNum > endBlock { - break - } - if len(v) != length.Addr+1 { - return fmt.Errorf(" wrong size of value in CallTraceSet: %x (size %d)", v, len(v)) - } - mapKey := string(v[:length.Addr]) - if v[length.Addr]&1 > 0 { - m, ok := froms[mapKey] - if !ok { - m = roaring64.New() - froms[mapKey] = m - } - m.Add(blockNum) - } - if v[length.Addr]&2 > 0 { - m, ok := tos[mapKey] - if !ok { - m = roaring64.New() - tos[mapKey] = m - } - m.Add(blockNum) - } - select { - default: - case <-logEvery.C: - var m runtime.MemStats - dbg.ReadMemStats(&m) - speed := float64(blockNum-prev) / float64(logInterval/time.Second) - prev = blockNum - - logger.Info(fmt.Sprintf("[%s] Progress", logPrefix), "number", blockNum, - "blk/second", speed, - "alloc", libcommon.ByteCount(m.Alloc), - "sys", libcommon.ByteCount(m.Sys)) - case <-checkFlushEvery.C: - if needFlush64(froms, bufLimit) { - if err := flushBitmaps64(collectorFrom, froms); err != nil { - return err - } - - froms = map[string]*roaring64.Bitmap{} - } - - if needFlush64(tos, bufLimit) { - if err := flushBitmaps64(collectorTo, tos); err != nil { - return err - } - - tos = map[string]*roaring64.Bitmap{} - } - } - } - if err = flushBitmaps64(collectorFrom, froms); err != nil { - return err - } - if err = flushBitmaps64(collectorTo, tos); err != nil { - return err - } - - // Clean up before loading call traces to reclaim space - var prunedMin uint64 = math.MaxUint64 - var prunedMax uint64 = 0 - for k, _, err = traceCursor.First(); k != nil; k, _, err = traceCursor.NextNoDup() { - if err != nil { - return err - } - blockNum := binary.BigEndian.Uint64(k) - if blockNum+params.FullImmutabilityThreshold >= endBlock { - break - } - select { - default: - case <-logEvery.C: - var m runtime.MemStats - dbg.ReadMemStats(&m) - logger.Info(fmt.Sprintf("[%s] Pruning call trace table", logPrefix), "number", blockNum, - "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys)) - } - if err = tx.Delete(kv.CallTraceSet, k); err != nil { - return fmt.Errorf("remove trace call set for block %d: %w", blockNum, err) - } - if blockNum < prunedMin { - prunedMin = blockNum - } - if blockNum > prunedMax { - prunedMax = blockNum - } - } - if prunedMax != 0 && prunedMax > prunedMin+16 { - logger.Info(fmt.Sprintf("[%s] Pruned call trace intermediate table", logPrefix), "from", prunedMin, "to", prunedMax) - } - - if err := finaliseCallTraces(collectorFrom, collectorTo, logPrefix, tx, quit); err != nil { - return err - } - - return nil -} - -func finaliseCallTraces(collectorFrom, collectorTo *etl.Collector, logPrefix string, tx kv.RwTx, quit <-chan struct{}) error { - var buf = bytes.NewBuffer(nil) - lastChunkKey := make([]byte, 128) - reader := bytes.NewReader(nil) - reader2 := bytes.NewReader(nil) - var loaderFunc = func(k []byte, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { - reader.Reset(v) - currentBitmap := roaring64.New() - if _, err := currentBitmap.ReadFrom(reader); err != nil { - return err - } - lastChunkKey = lastChunkKey[:len(k)+8] - copy(lastChunkKey, k) - binary.BigEndian.PutUint64(lastChunkKey[len(k):], ^uint64(0)) - lastChunkBytes, err := table.Get(lastChunkKey) - if err != nil { - return fmt.Errorf("find last chunk failed: %w", err) - } - - if len(lastChunkBytes) > 0 { - lastChunk := roaring64.New() - reader2.Reset(lastChunkBytes) - _, err = 
lastChunk.ReadFrom(reader2) - if err != nil { - return fmt.Errorf("couldn't read last log index chunk: %w, len(lastChunkBytes)=%d", err, len(lastChunkBytes)) - } - currentBitmap.Or(lastChunk) // merge last existing chunk from db - next loop will overwrite it - } - if err := bitmapdb.WalkChunkWithKeys64(k, currentBitmap, bitmapdb.ChunkLimit, func(chunkKey []byte, chunk *roaring64.Bitmap) error { - buf.Reset() - if _, err := chunk.WriteTo(buf); err != nil { - return err - } - return next(k, chunkKey, buf.Bytes()) - }); err != nil { - return err - } - return nil - } - if err := collectorFrom.Load(tx, kv.CallFromIndex, loaderFunc, etl.TransformArgs{Quit: quit}); err != nil { - return err - } - if err := collectorTo.Load(tx, kv.CallToIndex, loaderFunc, etl.TransformArgs{Quit: quit}); err != nil { - return err - } - return nil -} - -func UnwindCallTraces(u *UnwindState, s *StageState, tx kv.RwTx, cfg CallTracesCfg, ctx context.Context, logger log.Logger) (err error) { - if s.BlockNumber <= u.UnwindPoint { - return nil - } - useExternalTx := tx != nil - if !useExternalTx { - tx, err = cfg.db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - } - - logPrefix := u.LogPrefix() - if s.BlockNumber-u.UnwindPoint > 16 { - logger.Info(fmt.Sprintf("[%s] Unwind", logPrefix), "from", s.BlockNumber, "to", u.UnwindPoint) - } - if err := DoUnwindCallTraces(logPrefix, tx, s.BlockNumber, u.UnwindPoint, ctx, cfg.tmpdir, logger); err != nil { - return err - } - - if err := u.Done(tx); err != nil { - return err - } - - if !useExternalTx { - if err := tx.Commit(); err != nil { - return err - } - } - - return nil -} - -func DoUnwindCallTraces(logPrefix string, db kv.RwTx, from, to uint64, ctx context.Context, tmpdir string, logger log.Logger) error { - froms := etl.NewCollector(logPrefix, tmpdir, etl.NewOldestEntryBuffer(etl.BufferOptimalSize), logger) - defer froms.Close() - tos := etl.NewCollector(logPrefix, tmpdir, etl.NewOldestEntryBuffer(etl.BufferOptimalSize), logger) - defer tos.Close() - - logEvery := time.NewTicker(30 * time.Second) - defer logEvery.Stop() - - traceCursor, err := db.RwCursorDupSort(kv.CallTraceSet) - if err != nil { - return fmt.Errorf("create cursor for call traces: %w", err) - } - defer traceCursor.Close() - - var k, v []byte - prev := to + 1 - for k, v, err = traceCursor.Seek(hexutility.EncodeTs(to + 1)); k != nil; k, v, err = traceCursor.Next() { - if err != nil { - return err - } - blockNum := binary.BigEndian.Uint64(k) - if blockNum >= from { - break - } - if len(v) != length.Addr+1 { - return fmt.Errorf("wrong size of value in CallTraceSet: %x (size %d)", v, len(v)) - } - mapKey := v[:length.Addr] - if v[length.Addr]&1 > 0 { - if err = froms.Collect(mapKey, nil); err != nil { - return err - } - } - if v[length.Addr]&2 > 0 { - if err = tos.Collect(mapKey, nil); err != nil { - return err - } - } - select { - case <-logEvery.C: - var m runtime.MemStats - dbg.ReadMemStats(&m) - speed := float64(blockNum-prev) / float64(logInterval/time.Second) - prev = blockNum - - logger.Info(fmt.Sprintf("[%s] Progress", logPrefix), "number", blockNum, - "blk/second", speed, - "alloc", libcommon.ByteCount(m.Alloc), - "sys", libcommon.ByteCount(m.Sys)) - case <-ctx.Done(): - return libcommon.ErrStopped - default: - } - } - - if err = froms.Load(db, "", func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { - return bitmapdb.TruncateRange64(db, kv.CallFromIndex, k, to+1) - }, etl.TransformArgs{}); err != nil { - return fmt.Errorf("TruncateRange: bucket=%s, %w", 
kv.CallFromIndex, err) - } - - if err = tos.Load(db, "", func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { - return bitmapdb.TruncateRange64(db, kv.CallToIndex, k, to+1) - }, etl.TransformArgs{}); err != nil { - return fmt.Errorf("TruncateRange: bucket=%s, %w", kv.CallFromIndex, err) - } - return nil -} - -func PruneCallTraces(s *PruneState, tx kv.RwTx, cfg CallTracesCfg, ctx context.Context, logger log.Logger) (err error) { - logPrefix := s.LogPrefix() - - useExternalTx := tx != nil - if !useExternalTx { - tx, err = cfg.db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - } - - if cfg.prune.History.Enabled() { - if err = pruneCallTraces(tx, logPrefix, cfg.prune.History.PruneTo(s.ForwardProgress), ctx, cfg.tmpdir, logger); err != nil { - return err - } - } - if err := s.Done(tx); err != nil { - return err - } - - if !useExternalTx { - if err = tx.Commit(); err != nil { - return err - } - } - return nil -} - -func pruneCallTraces(tx kv.RwTx, logPrefix string, pruneTo uint64, ctx context.Context, tmpdir string, logger log.Logger) error { - logEvery := time.NewTicker(logInterval) - defer logEvery.Stop() - - froms := etl.NewCollector(logPrefix, tmpdir, etl.NewOldestEntryBuffer(etl.BufferOptimalSize), logger) - defer froms.Close() - tos := etl.NewCollector(logPrefix, tmpdir, etl.NewOldestEntryBuffer(etl.BufferOptimalSize), logger) - defer tos.Close() - - { - traceCursor, err := tx.CursorDupSort(kv.CallTraceSet) - if err != nil { - return fmt.Errorf("create cursor for call traces: %w", err) - } - defer traceCursor.Close() - - var k, v []byte - for k, v, err = traceCursor.First(); k != nil; k, v, err = traceCursor.Next() { - if err != nil { - return err - } - blockNum := binary.BigEndian.Uint64(k) - if blockNum >= pruneTo { - break - } - if len(v) != length.Addr+1 { - return fmt.Errorf("wrong size of value in CallTraceSet: %x (size %d)", v, len(v)) - } - mapKey := v[:length.Addr] - if v[length.Addr]&1 > 0 { - if err := froms.Collect(mapKey, nil); err != nil { - return err - } - } - if v[length.Addr]&2 > 0 { - if err := tos.Collect(mapKey, nil); err != nil { - return err - } - } - select { - case <-logEvery.C: - var m runtime.MemStats - dbg.ReadMemStats(&m) - logger.Info(fmt.Sprintf("[%s] Progress", logPrefix), "number", blockNum, "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys)) - case <-ctx.Done(): - return libcommon.ErrStopped - default: - } - } - } - - { - c, err := tx.RwCursor(kv.CallFromIndex) - if err != nil { - return err - } - defer c.Close() - - if err := froms.Load(tx, "", func(from, _ []byte, _ etl.CurrentTableReader, _ etl.LoadNextFunc) error { - for k, _, err := c.Seek(from); k != nil; k, _, err = c.Next() { - if err != nil { - return err - } - blockNum := binary.BigEndian.Uint64(k[length.Addr:]) - if !bytes.HasPrefix(k, from) || blockNum >= pruneTo { - break - } - if err = c.DeleteCurrent(); err != nil { - return fmt.Errorf("failed delete, block=%d: %w", blockNum, err) - } - } - select { - case <-logEvery.C: - logger.Info(fmt.Sprintf("[%s]", logPrefix), "table", kv.CallFromIndex, "key", hex.EncodeToString(from)) - case <-ctx.Done(): - return libcommon.ErrStopped - default: - } - return nil - }, etl.TransformArgs{}); err != nil { - return err - } - } - { - c, err := tx.RwCursor(kv.CallToIndex) - if err != nil { - return err - } - defer c.Close() - - if err := tos.Load(tx, "", func(to, _ []byte, _ etl.CurrentTableReader, _ etl.LoadNextFunc) error { - for k, _, err := c.Seek(to); k != nil; k, _, err = c.Next() { 
- if err != nil { - return err - } - blockNum := binary.BigEndian.Uint64(k[length.Addr:]) - if !bytes.HasPrefix(k, to) || blockNum >= pruneTo { - break - } - if err = c.DeleteCurrent(); err != nil { - return fmt.Errorf("failed delete, block=%d: %w", blockNum, err) - } - } - select { - case <-logEvery.C: - logger.Info(fmt.Sprintf("[%s]", logPrefix), "table", kv.CallToIndex, "key", hex.EncodeToString(to)) - case <-ctx.Done(): - return libcommon.ErrStopped - default: - } - return nil - }, etl.TransformArgs{}); err != nil { - return err - } - } - return nil -} diff --git a/eth/stagedsync/stage_call_traces_test.go b/eth/stagedsync/stage_call_traces_test.go deleted file mode 100644 index cbd8ca4cec0..00000000000 --- a/eth/stagedsync/stage_call_traces_test.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2024 The Erigon Authors -// This file is part of Erigon. -// -// Erigon is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// Erigon is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with Erigon. If not, see . - -package stagedsync - -import ( - "context" - "testing" - "time" - - "github.com/RoaringBitmap/roaring/roaring64" - "github.com/stretchr/testify/require" - - "github.com/erigontech/erigon-lib/common/datadir" - "github.com/erigontech/erigon-lib/common/hexutility" - "github.com/erigontech/erigon-lib/kv" - "github.com/erigontech/erigon-lib/kv/bitmapdb" - "github.com/erigontech/erigon-lib/kv/temporal/temporaltest" - "github.com/erigontech/erigon-lib/log/v3" - - "github.com/erigontech/erigon/eth/stagedsync/stages" -) - -func genTestCallTraceSet(t *testing.T, tx kv.RwTx, to uint64) { - v := [21]byte{} - for i := uint64(0); i < to; i++ { - v[19] = byte(i % 5) - if i%2 == 0 { - v[20] = 1 - } - if i%2 == 1 { - v[20] = 2 - } - err := tx.Put(kv.CallTraceSet, hexutility.EncodeTs(i), v[:]) - require.NoError(t, err) - } -} - -func TestCallTrace(t *testing.T) { - t.Skip("this stage is disabled in E3") - - logger := log.New() - ctx, require := context.Background(), require.New(t) - db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) - tx, err := db.BeginRw(context.Background()) - require.NoError(err) - defer tx.Rollback() - - genTestCallTraceSet(t, tx, 30) - addr := [20]byte{} - addr[19] = byte(1) - froms := func() *roaring64.Bitmap { - b, err := bitmapdb.Get64(tx, kv.CallFromIndex, addr[:], 0, 30) - require.NoError(err) - return b - } - tos := func() *roaring64.Bitmap { - b, err := bitmapdb.Get64(tx, kv.CallToIndex, addr[:], 0, 30) - require.NoError(err) - return b - } - - err = stages.SaveStageProgress(tx, stages.Execution, 30) - require.NoError(err) - - // forward 0->20 - err = promoteCallTraces("test", tx, 0, 20, 0, time.Nanosecond, ctx.Done(), "", logger) - require.NoError(err) - require.Equal([]uint64{6, 16}, froms().ToArray()) - require.Equal([]uint64{1, 11}, tos().ToArray()) - - // unwind 20->10 - err = DoUnwindCallTraces("test", tx, 20, 10, ctx, "", logger) - require.NoError(err) - require.Equal([]uint64{6}, froms().ToArray()) - require.Equal([]uint64{1}, tos().ToArray()) - - // forward 10->30 - err = 
promoteCallTraces("test", tx, 10, 30, 0, time.Nanosecond, ctx.Done(), "", logger) - require.NoError(err) - require.Equal([]uint64{6, 16, 26}, froms().ToArray()) - require.Equal([]uint64{1, 11, 21}, tos().ToArray()) - - // prune 0 -> 10 - err = pruneCallTraces(tx, "test", 10, ctx, "", logger) - require.NoError(err) -} diff --git a/eth/stagedsync/stage_execute.go b/eth/stagedsync/stage_execute.go index 1af71330fee..c5408f84d72 100644 --- a/eth/stagedsync/stage_execute.go +++ b/eth/stagedsync/stage_execute.go @@ -18,6 +18,7 @@ package stagedsync import ( "context" + "errors" "fmt" "time" @@ -80,7 +81,6 @@ type ExecuteBlockCfg struct { historyV3 bool syncCfg ethconfig.Sync genesis *types.Genesis - agg *libstate.Aggregator silkworm *silkworm.Silkworm blockProduction bool @@ -102,9 +102,12 @@ func StageExecuteBlocksCfg( hd headerDownloader, genesis *types.Genesis, syncCfg ethconfig.Sync, - agg *libstate.Aggregator, silkworm *silkworm.Silkworm, ) ExecuteBlockCfg { + if dirs.SnapDomain == "" { + panic("empty `dirs` variable") + } + return ExecuteBlockCfg{ db: db, prune: pm, @@ -121,7 +124,6 @@ func StageExecuteBlocksCfg( genesis: genesis, historyV3: true, syncCfg: syncCfg, - agg: agg, silkworm: silkworm, } } @@ -154,7 +156,7 @@ func ExecBlockV3(s *StageState, u Unwinder, txc wrap.TxContainer, toBlock uint64 return nil } -var ErrTooDeepUnwind = fmt.Errorf("too deep unwind") +var ErrTooDeepUnwind = errors.New("too deep unwind") func unwindExec3(u *UnwindState, s *StageState, txc wrap.TxContainer, ctx context.Context, accumulator *shards.Accumulator, logger log.Logger) (err error) { var domains *libstate.SharedDomains @@ -387,7 +389,7 @@ func PruneExecutionStage(s *PruneState, tx kv.RwTx, cfg ExecuteBlockCfg, ctx con logEvery := time.NewTicker(logInterval) defer logEvery.Stop() - pruneTimeout := 3 * time.Second + pruneTimeout := 1 * time.Second if s.CurrentSyncCycle.IsInitialCycle { pruneTimeout = 12 * time.Hour } diff --git a/eth/stagedsync/stage_finish.go b/eth/stagedsync/stage_finish.go index f768598385a..c0f203e4b53 100644 --- a/eth/stagedsync/stage_finish.go +++ b/eth/stagedsync/stage_finish.go @@ -199,7 +199,7 @@ func NotifyNewHeaders(ctx context.Context, finishStageBeforeSync uint64, finishS notifier.OnLogs(logs) } logTiming := time.Since(t) - logger.Info("RPC Daemon notified of new headers", "from", notifyFrom-1, "to", notifyTo, "amount", len(headersRlp), "hash", notifyToHash, "header sending", headerTiming, "log sending", logTiming) + logger.Debug("RPC Daemon notified of new headers", "from", notifyFrom-1, "to", notifyTo, "amount", len(headersRlp), "hash", notifyToHash, "header sending", headerTiming, "log sending", logTiming) } return nil } diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 2e1a0f1a1fd..62fbd641b4e 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -37,7 +37,6 @@ import ( "github.com/erigontech/erigon-lib/state" "github.com/erigontech/erigon/core/rawdb/blockio" "github.com/erigontech/erigon/eth/ethconfig" - "github.com/erigontech/erigon/eth/stagedsync/stages" "github.com/erigontech/erigon/common" "github.com/erigontech/erigon/core/rawdb" @@ -69,8 +68,7 @@ type HeadersCfg struct { blockWriter *blockio.BlockWriter notifications *shards.Notifications - syncConfig ethconfig.Sync - loopBreakCheck func(int) bool + syncConfig ethconfig.Sync } func StageHeadersCfg( @@ -88,7 +86,7 @@ func StageHeadersCfg( blockWriter *blockio.BlockWriter, tmpdir string, notifications *shards.Notifications, - loopBreakCheck 
func(int) bool) HeadersCfg { +) HeadersCfg { return HeadersCfg{ db: db, hd: headerDownload, @@ -104,7 +102,6 @@ func StageHeadersCfg( blockReader: blockReader, blockWriter: blockWriter, notifications: notifications, - loopBreakCheck: loopBreakCheck, } } @@ -118,7 +115,7 @@ func SpawnStageHeaders(s *StageState, u Unwinder, ctx context.Context, tx kv.RwT } defer tx.Rollback() } - if s.CurrentSyncCycle.IsInitialCycle && cfg.blockReader.FreezingCfg().Enabled { + if s.CurrentSyncCycle.IsInitialCycle { if err := cfg.hd.AddHeadersFromSnapshot(tx, cfg.blockReader); err != nil { return err } @@ -267,15 +264,8 @@ Loop: } } - if cfg.syncConfig.LoopBlockLimit > 0 { - if bodyProgress, err := stages.GetStageProgress(tx, stages.Bodies); err == nil { - if cfg.hd.Progress() > bodyProgress && cfg.hd.Progress()-bodyProgress > uint64(cfg.syncConfig.LoopBlockLimit*2) { - break - } - } - } - - if cfg.loopBreakCheck != nil && cfg.loopBreakCheck(int(cfg.hd.Progress()-startProgress)) { + loopBlockLimit := uint64(cfg.syncConfig.LoopBlockLimit) + if loopBlockLimit > 0 && cfg.hd.Progress() > startProgress+loopBlockLimit { break } diff --git a/eth/stagedsync/stage_indexes.go b/eth/stagedsync/stage_indexes.go deleted file mode 100644 index 7170a18918b..00000000000 --- a/eth/stagedsync/stage_indexes.go +++ /dev/null @@ -1,494 +0,0 @@ -// Copyright 2024 The Erigon Authors -// This file is part of Erigon. -// -// Erigon is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// Erigon is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with Erigon. If not, see . 
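A recurring mechanical cleanup in this patch: error and log strings that need no formatting drop the `fmt` machinery. Two shapes, both taken verbatim from hunks above:

```go
// Constant message: errors.New instead of fmt.Errorf.
return errors.New("wrong trie root") // was: fmt.Errorf("wrong trie root")

// Static prefix: plain concatenation instead of fmt.Sprintf.
name := s.LogPrefix() + " recon plainState" // was: fmt.Sprintf("%s recon plainState", s.LogPrefix())
```

Besides being marginally cheaper, `errors.New` makes it obvious at a glance that the message carries no dynamic data.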
- -package stagedsync - -import ( - "bytes" - "context" - "encoding/binary" - "encoding/hex" - "errors" - "fmt" - "runtime" - "slices" - "time" - - "github.com/erigontech/erigon-lib/kv/dbutils" - - "github.com/RoaringBitmap/roaring/roaring64" - "github.com/c2h5oh/datasize" - - libcommon "github.com/erigontech/erigon-lib/common" - "github.com/erigontech/erigon-lib/common/dbg" - "github.com/erigontech/erigon-lib/common/hexutility" - "github.com/erigontech/erigon-lib/common/length" - "github.com/erigontech/erigon-lib/etl" - "github.com/erigontech/erigon-lib/kv" - "github.com/erigontech/erigon-lib/kv/bitmapdb" - "github.com/erigontech/erigon-lib/kv/temporal/historyv2" - "github.com/erigontech/erigon-lib/log/v3" - - "github.com/erigontech/erigon/common/changeset" - "github.com/erigontech/erigon/ethdb" - "github.com/erigontech/erigon/ethdb/prune" -) - -type HistoryCfg struct { - db kv.RwDB - bufLimit datasize.ByteSize - prune prune.Mode - flushEvery time.Duration - tmpdir string -} - -func StageHistoryCfg(db kv.RwDB, prune prune.Mode, tmpDir string) HistoryCfg { - return HistoryCfg{ - db: db, - prune: prune, - bufLimit: bitmapsBufLimit, - flushEvery: bitmapsFlushEvery, - tmpdir: tmpDir, - } -} - -func SpawnAccountHistoryIndex(s *StageState, tx kv.RwTx, cfg HistoryCfg, ctx context.Context, logger log.Logger) error { - useExternalTx := tx != nil - if !useExternalTx { - var err error - tx, err = cfg.db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - } - quitCh := ctx.Done() - - endBlock, err := s.ExecutionAt(tx) - logPrefix := s.LogPrefix() - if err != nil { - return fmt.Errorf(" getting last executed block: %w", err) - } - if endBlock <= s.BlockNumber { - return nil - } - - var startBlock uint64 - if s.BlockNumber > 0 { - startBlock = s.BlockNumber + 1 - } - stopChangeSetsLookupAt := endBlock + 1 - - pruneTo := cfg.prune.History.PruneTo(endBlock) - if startBlock < pruneTo { - startBlock = pruneTo - } - - if err := promoteHistory(logPrefix, tx, kv.AccountChangeSet, startBlock, stopChangeSetsLookupAt, cfg, quitCh, logger); err != nil { - return err - } - - if err := s.Update(tx, endBlock); err != nil { - return err - } - - if !useExternalTx { - if err := tx.Commit(); err != nil { - return err - } - } - return nil -} - -func SpawnStorageHistoryIndex(s *StageState, tx kv.RwTx, cfg HistoryCfg, ctx context.Context, logger log.Logger) error { - useExternalTx := tx != nil - if !useExternalTx { - var err error - tx, err = cfg.db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - } - quitCh := ctx.Done() - - executionAt, err := s.ExecutionAt(tx) - logPrefix := s.LogPrefix() - if err != nil { - return fmt.Errorf("getting last executed block: %w", err) - } - if executionAt <= s.BlockNumber { - return nil - } - - var startChangeSetsLookupAt uint64 - if s.BlockNumber > 0 { - startChangeSetsLookupAt = s.BlockNumber + 1 - } - stopChangeSetsLookupAt := executionAt + 1 - - if err := promoteHistory(logPrefix, tx, kv.StorageChangeSet, startChangeSetsLookupAt, stopChangeSetsLookupAt, cfg, quitCh, logger); err != nil { - return err - } - - if err := s.Update(tx, executionAt); err != nil { - return err - } - if !useExternalTx { - if err := tx.Commit(); err != nil { - return err - } - } - return nil -} - -func promoteHistory(logPrefix string, tx kv.RwTx, changesetBucket string, start, stop uint64, cfg HistoryCfg, quit <-chan struct{}, logger log.Logger) error { - logEvery := time.NewTicker(30 * time.Second) - defer logEvery.Stop() - - updates := map[string]*roaring64.Bitmap{} 
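The loop that follows accumulates per-key block numbers into roaring bitmaps and flushes them into the ETL collector whenever a memory budget is exceeded. A condensed shape of that accumulate-and-flush cycle, assuming the helpers defined later in this (deleted) file (`needFlush64`, `flushBitmaps64`) and the `cfg.bufLimit` / `collectorUpdates` names from `promoteHistory`:

```go
add := func(key []byte, blockNum uint64) {
	m, ok := updates[string(key)]
	if !ok {
		m = roaring64.New()
		updates[string(key)] = m
	}
	m.Add(blockNum)
}

// On the flush ticker inside the scan loop:
if needFlush64(updates, cfg.bufLimit) { // heuristic size estimate of the in-memory bitmaps
	if err := flushBitmaps64(collectorUpdates, updates); err != nil {
		return err
	}
	updates = map[string]*roaring64.Bitmap{} // start a fresh batch; flushed data lives in the collector
}
```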
- checkFlushEvery := time.NewTicker(cfg.flushEvery) - defer checkFlushEvery.Stop() - - collectorUpdates := etl.NewCollector(logPrefix, cfg.tmpdir, etl.NewSortableBuffer(etl.BufferOptimalSize), logger) - defer collectorUpdates.Close() - - if err := changeset.ForRange(tx, changesetBucket, start, stop, func(blockN uint64, k, v []byte) error { - if err := libcommon.Stopped(quit); err != nil { - return err - } - - k = dbutils.CompositeKeyWithoutIncarnation(k) - - select { - default: - case <-logEvery.C: - var m runtime.MemStats - dbg.ReadMemStats(&m) - log.Info(fmt.Sprintf("[%s] Progress", logPrefix), "number", blockN, "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys)) - case <-checkFlushEvery.C: - if needFlush64(updates, cfg.bufLimit) { - if err := flushBitmaps64(collectorUpdates, updates); err != nil { - return err - } - updates = map[string]*roaring64.Bitmap{} - } - } - - m, ok := updates[string(k)] - if !ok { - m = roaring64.New() - updates[string(k)] = m - } - m.Add(blockN) - - return nil - }); err != nil { - return err - } - - if err := flushBitmaps64(collectorUpdates, updates); err != nil { - return err - } - - var currentBitmap = roaring64.New() - var buf = bytes.NewBuffer(nil) - - lastChunkKey := make([]byte, 128) - var loaderFunc = func(k []byte, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { - if _, err := currentBitmap.ReadFrom(bytes.NewReader(v)); err != nil { - return err - } - - lastChunkKey = lastChunkKey[:len(k)+8] - copy(lastChunkKey, k) - binary.BigEndian.PutUint64(lastChunkKey[len(k):], ^uint64(0)) - lastChunkBytes, err := table.Get(lastChunkKey) - if err != nil && !errors.Is(err, ethdb.ErrKeyNotFound) { - return fmt.Errorf("find last chunk failed: %w", err) - } - if len(lastChunkBytes) > 0 { - lastChunk := roaring64.New() - _, err = lastChunk.ReadFrom(bytes.NewReader(lastChunkBytes)) - if err != nil { - return fmt.Errorf("couldn't read last log index chunk: %w, len(lastChunkBytes)=%d", err, len(lastChunkBytes)) - } - - currentBitmap.Or(lastChunk) // merge last existing chunk from tx - next loop will overwrite it - } - if err = bitmapdb.WalkChunkWithKeys64(k, currentBitmap, bitmapdb.ChunkLimit, func(chunkKey []byte, chunk *roaring64.Bitmap) error { - buf.Reset() - if _, err = chunk.WriteTo(buf); err != nil { - return err - } - return next(k, chunkKey, buf.Bytes()) - }); err != nil { - return err - } - currentBitmap.Clear() - return nil - } - - if err := collectorUpdates.Load(tx, historyv2.Mapper[changesetBucket].IndexBucket, loaderFunc, etl.TransformArgs{Quit: quit}); err != nil { - return err - } - return nil -} - -func UnwindAccountHistoryIndex(u *UnwindState, s *StageState, tx kv.RwTx, cfg HistoryCfg, ctx context.Context) (err error) { - useExternalTx := tx != nil - if !useExternalTx { - tx, err = cfg.db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - } - - quitCh := ctx.Done() - logPrefix := s.LogPrefix() - if err := unwindHistory(logPrefix, tx, kv.AccountChangeSet, u.UnwindPoint, cfg, quitCh); err != nil { - return err - } - - if err := u.Done(tx); err != nil { - return err - } - - if !useExternalTx { - if err := tx.Commit(); err != nil { - return err - } - } - return nil -} - -func UnwindStorageHistoryIndex(u *UnwindState, s *StageState, tx kv.RwTx, cfg HistoryCfg, ctx context.Context) (err error) { - useExternalTx := tx != nil - if !useExternalTx { - var err error - tx, err = cfg.db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - } - quitCh := ctx.Done() - - logPrefix := 
s.LogPrefix() - if err := unwindHistory(logPrefix, tx, kv.StorageChangeSet, u.UnwindPoint, cfg, quitCh); err != nil { - return err - } - - if err := u.Done(tx); err != nil { - return err - } - - if !useExternalTx { - if err := tx.Commit(); err != nil { - return err - } - } - return nil -} - -func unwindHistory(logPrefix string, db kv.RwTx, csBucket string, to uint64, cfg HistoryCfg, quitCh <-chan struct{}) error { - logEvery := time.NewTicker(30 * time.Second) - defer logEvery.Stop() - - updates := map[string]struct{}{} - if err := historyv2.ForEach(db, csBucket, hexutility.EncodeTs(to), func(blockN uint64, k, v []byte) error { - select { - case <-logEvery.C: - var m runtime.MemStats - dbg.ReadMemStats(&m) - log.Info(fmt.Sprintf("[%s] Progress", logPrefix), "number", blockN, "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys)) - case <-quitCh: - return libcommon.ErrStopped - default: - } - k = dbutils.CompositeKeyWithoutIncarnation(k) - updates[string(k)] = struct{}{} - return nil - }); err != nil { - return err - } - - if err := truncateBitmaps64(db, historyv2.Mapper[csBucket].IndexBucket, updates, to); err != nil { - return err - } - return nil -} - -func needFlush64(bitmaps map[string]*roaring64.Bitmap, memLimit datasize.ByteSize) bool { - sz := uint64(0) - for _, m := range bitmaps { - sz += m.GetSizeInBytes() * 2 // for golang's overhead - } - const memoryNeedsForKey = 32 * 2 * 2 // len(key) * (string and bytes) overhead * go's map overhead - return uint64(len(bitmaps)*memoryNeedsForKey)+sz > uint64(memLimit) -} - -func flushBitmaps64(c *etl.Collector, inMem map[string]*roaring64.Bitmap) error { - for k, v := range inMem { - v.RunOptimize() - if v.GetCardinality() == 0 { - continue - } - newV := bytes.NewBuffer(make([]byte, 0, v.GetSerializedSizeInBytes())) - if _, err := v.WriteTo(newV); err != nil { - return err - } - if err := c.Collect([]byte(k), newV.Bytes()); err != nil { - return err - } - } - return nil -} - -func truncateBitmaps64(tx kv.RwTx, bucket string, inMem map[string]struct{}, to uint64) error { - keys := make([]string, 0, len(inMem)) - for k := range inMem { - keys = append(keys, k) - } - slices.Sort(keys) - for _, k := range keys { - if err := bitmapdb.TruncateRange64(tx, bucket, []byte(k), to+1); err != nil { - return fmt.Errorf("fail TruncateRange: bucket=%s, %w", bucket, err) - } - } - - return nil -} - -func PruneAccountHistoryIndex(s *PruneState, tx kv.RwTx, cfg HistoryCfg, ctx context.Context, logger log.Logger) (err error) { - if !cfg.prune.History.Enabled() { - return nil - } - logPrefix := s.LogPrefix() - - useExternalTx := tx != nil - if !useExternalTx { - tx, err = cfg.db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - } - - pruneTo := cfg.prune.History.PruneTo(s.ForwardProgress) - if err = pruneHistoryIndex(tx, kv.AccountChangeSet, logPrefix, cfg.tmpdir, pruneTo, ctx, logger); err != nil { - return err - } - if err = s.Done(tx); err != nil { - return err - } - - if !useExternalTx { - if err = tx.Commit(); err != nil { - return err - } - } - return nil -} - -func PruneStorageHistoryIndex(s *PruneState, tx kv.RwTx, cfg HistoryCfg, ctx context.Context, logger log.Logger) (err error) { - if !cfg.prune.History.Enabled() { - return nil - } - logPrefix := s.LogPrefix() - - useExternalTx := tx != nil - if !useExternalTx { - tx, err = cfg.db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - } - pruneTo := cfg.prune.History.PruneTo(s.ForwardProgress) - if err = pruneHistoryIndex(tx, 
kv.StorageChangeSet, logPrefix, cfg.tmpdir, pruneTo, ctx, logger); err != nil { - return err - } - if err = s.Done(tx); err != nil { - return err - } - - if !useExternalTx { - if err = tx.Commit(); err != nil { - return err - } - } - return nil -} - -func pruneHistoryIndex(tx kv.RwTx, csTable, logPrefix, tmpDir string, pruneTo uint64, ctx context.Context, logger log.Logger) error { - logEvery := time.NewTicker(logInterval) - defer logEvery.Stop() - - collector := etl.NewCollector(logPrefix, tmpDir, etl.NewOldestEntryBuffer(etl.BufferOptimalSize), logger) - defer collector.Close() - - if err := changeset.ForRange(tx, csTable, 0, pruneTo, func(blockNum uint64, k, _ []byte) error { - select { - case <-logEvery.C: - log.Info(fmt.Sprintf("[%s]", logPrefix), "table", csTable, "block_num", blockNum) - case <-ctx.Done(): - return libcommon.ErrStopped - default: - } - - return collector.Collect(k, nil) - }); err != nil { - return err - } - - c, err := tx.RwCursor(historyv2.Mapper[csTable].IndexBucket) - if err != nil { - return fmt.Errorf("failed to create cursor for pruning %w", err) - } - defer c.Close() - prefixLen := length.Addr - if csTable == kv.StorageChangeSet { - prefixLen = length.Hash - } - if err := collector.Load(tx, "", func(addr, _ []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { - select { - case <-logEvery.C: - log.Info(fmt.Sprintf("[%s]", logPrefix), "table", historyv2.Mapper[csTable].IndexBucket, "key", hex.EncodeToString(addr)) - case <-ctx.Done(): - return libcommon.ErrStopped - default: - } - for k, _, err := c.Seek(addr); k != nil; k, _, err = c.Next() { - if err != nil { - return err - } - blockNum := binary.BigEndian.Uint64(k[prefixLen:]) - if !bytes.HasPrefix(k, addr) || blockNum >= pruneTo { - break - } - if err = c.DeleteCurrent(); err != nil { - return fmt.Errorf("failed to remove for block %d: %w", blockNum, err) - } - } - return nil - }, etl.TransformArgs{}); err != nil { - return err - } - - return nil -} diff --git a/eth/stagedsync/stage_indexes_test.go b/eth/stagedsync/stage_indexes_test.go deleted file mode 100644 index cb00afa2ccf..00000000000 --- a/eth/stagedsync/stage_indexes_test.go +++ /dev/null @@ -1,324 +0,0 @@ -// Copyright 2024 The Erigon Authors -// This file is part of Erigon. -// -// Erigon is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// Erigon is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with Erigon. If not, see . 
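For reference alongside the test file below: the loader funcs deleted above persist each bitmap as fixed-size chunks, after first merging whatever last chunk is already in the table (its key is the index key suffixed with `^uint64(0)`). A condensed sketch with `k`, `currentBitmap`, `buf`, `table`, and `next` in scope as in those loaders, and the `ErrKeyNotFound` handling elided:

```go
lastChunkKey := make([]byte, len(k)+8)
copy(lastChunkKey, k)
binary.BigEndian.PutUint64(lastChunkKey[len(k):], ^uint64(0)) // all-ones suffix marks the final chunk

if prev, err := table.Get(lastChunkKey); err == nil && len(prev) > 0 {
	last := roaring64.New()
	if _, err := last.ReadFrom(bytes.NewReader(prev)); err != nil {
		return err
	}
	currentBitmap.Or(last) // merge the chunk already in the DB; the walk below overwrites it
}

return bitmapdb.WalkChunkWithKeys64(k, currentBitmap, bitmapdb.ChunkLimit,
	func(chunkKey []byte, chunk *roaring64.Bitmap) error {
		buf.Reset()
		if _, err := chunk.WriteTo(buf); err != nil {
			return err
		}
		return next(k, chunkKey, buf.Bytes()) // re-emit every chunk for this key
	})
```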
- -package stagedsync - -import ( - "context" - "encoding/binary" - "fmt" - "reflect" - "sort" - "strconv" - "testing" - "time" - - common2 "github.com/erigontech/erigon-lib/common" - "github.com/erigontech/erigon-lib/kv/dbutils" - - "github.com/RoaringBitmap/roaring/roaring64" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/erigontech/erigon-lib/common/length" - "github.com/erigontech/erigon-lib/kv" - "github.com/erigontech/erigon-lib/kv/bitmapdb" - kv2 "github.com/erigontech/erigon-lib/kv/memdb" - "github.com/erigontech/erigon-lib/kv/temporal/historyv2" - "github.com/erigontech/erigon-lib/log/v3" - - "github.com/erigontech/erigon/common" - "github.com/erigontech/erigon/common/math" - "github.com/erigontech/erigon/crypto" - "github.com/erigontech/erigon/ethdb/prune" -) - -func TestIndexGenerator_GenerateIndex_SimpleCase(t *testing.T) { - logger := log.New() - db := kv2.NewTestDB(t) - cfg := StageHistoryCfg(db, prune.DefaultMode, t.TempDir()) - test := func(blocksNum int, csBucket string) func(t *testing.T) { - return func(t *testing.T) { - tx, err := db.BeginRw(context.Background()) - require.NoError(t, err) - defer tx.Rollback() - - csInfo, ok := historyv2.Mapper[csBucket] - if !ok { - t.Fatal("incorrect cs bucket") - } - addrs, expecedIndexes := generateTestData(t, tx, csBucket, blocksNum) - cfgCopy := cfg - cfgCopy.bufLimit = 10 - cfgCopy.flushEvery = time.Microsecond - err = promoteHistory("logPrefix", tx, csBucket, 0, uint64(blocksNum/2), cfgCopy, nil, logger) - require.NoError(t, err) - err = promoteHistory("logPrefix", tx, csBucket, uint64(blocksNum/2), uint64(blocksNum), cfgCopy, nil, logger) - require.NoError(t, err) - - checkIndex(t, tx, csInfo.IndexBucket, addrs[0], expecedIndexes[string(addrs[0])]) - checkIndex(t, tx, csInfo.IndexBucket, addrs[1], expecedIndexes[string(addrs[1])]) - checkIndex(t, tx, csInfo.IndexBucket, addrs[2], expecedIndexes[string(addrs[2])]) - - } - } - - t.Run("account plain state", test(2100, kv.AccountChangeSet)) - t.Run("storage plain state", test(2100, kv.StorageChangeSet)) - -} - -func TestIndexGenerator_Truncate(t *testing.T) { - logger := log.New() - buckets := []string{kv.AccountChangeSet, kv.StorageChangeSet} - tmpDir, ctx := t.TempDir(), context.Background() - kv := kv2.NewTestDB(t) - cfg := StageHistoryCfg(kv, prune.DefaultMode, t.TempDir()) - for i := range buckets { - csbucket := buckets[i] - - tx, err := kv.BeginRw(context.Background()) - require.NoError(t, err) - defer tx.Rollback() - - hashes, expected := generateTestData(t, tx, csbucket, 2100) - mp := historyv2.Mapper[csbucket] - indexBucket := mp.IndexBucket - cfgCopy := cfg - cfgCopy.bufLimit = 10 - cfgCopy.flushEvery = time.Microsecond - err = promoteHistory("logPrefix", tx, csbucket, 0, uint64(2100), cfgCopy, nil, logger) - require.NoError(t, err) - - reduceSlice := func(arr []uint64, timestamtTo uint64) []uint64 { - pos := sort.Search(len(arr), func(i int) bool { - return arr[i] > timestamtTo - }) - return arr[:pos] - } - - //t.Run("truncate to 2050 "+csbucket, func(t *testing.T) { - expected[string(hashes[0])] = reduceSlice(expected[string(hashes[0])], 2050) - expected[string(hashes[1])] = reduceSlice(expected[string(hashes[1])], 2050) - expected[string(hashes[2])] = reduceSlice(expected[string(hashes[2])], 2050) - - err = unwindHistory("logPrefix", tx, csbucket, 2050, cfg, nil) - require.NoError(t, err) - - checkIndex(t, tx, indexBucket, hashes[0], expected[string(hashes[0])]) - checkIndex(t, tx, indexBucket, hashes[1], 
expected[string(hashes[1])]) - checkIndex(t, tx, indexBucket, hashes[2], expected[string(hashes[2])]) - //}) - - //t.Run("truncate to 2000 "+csbucket, func(t *testing.T) { - expected[string(hashes[0])] = reduceSlice(expected[string(hashes[0])], 2000) - expected[string(hashes[1])] = reduceSlice(expected[string(hashes[1])], 2000) - expected[string(hashes[2])] = reduceSlice(expected[string(hashes[2])], 2000) - - err = unwindHistory("logPrefix", tx, csbucket, 2000, cfg, nil) - require.NoError(t, err) - - checkIndex(t, tx, indexBucket, hashes[0], expected[string(hashes[0])]) - checkIndex(t, tx, indexBucket, hashes[1], expected[string(hashes[1])]) - checkIndex(t, tx, indexBucket, hashes[2], expected[string(hashes[2])]) - //}) - - //t.Run("truncate to 1999 "+csbucket, func(t *testing.T) { - err = unwindHistory("logPrefix", tx, csbucket, 1999, cfg, nil) - require.NoError(t, err) - expected[string(hashes[0])] = reduceSlice(expected[string(hashes[0])], 1999) - expected[string(hashes[1])] = reduceSlice(expected[string(hashes[1])], 1999) - expected[string(hashes[2])] = reduceSlice(expected[string(hashes[2])], 1999) - - checkIndex(t, tx, indexBucket, hashes[0], expected[string(hashes[0])]) - checkIndex(t, tx, indexBucket, hashes[1], expected[string(hashes[1])]) - checkIndex(t, tx, indexBucket, hashes[2], expected[string(hashes[2])]) - bm, err := bitmapdb.Get64(tx, indexBucket, hashes[0], 1999, math.MaxUint32) - require.NoError(t, err) - if bm.GetCardinality() > 0 && bm.Maximum() > 1999 { - t.Fatal(bm.Maximum()) - } - bm, err = bitmapdb.Get64(tx, indexBucket, hashes[1], 1999, math.MaxUint32) - require.NoError(t, err) - if bm.GetCardinality() > 0 && bm.Maximum() > 1999 { - t.Fatal() - } - //}) - - //t.Run("truncate to 999 "+csbucket, func(t *testing.T) { - expected[string(hashes[0])] = reduceSlice(expected[string(hashes[0])], 999) - expected[string(hashes[1])] = reduceSlice(expected[string(hashes[1])], 999) - expected[string(hashes[2])] = reduceSlice(expected[string(hashes[2])], 999) - - err = unwindHistory("logPrefix", tx, csbucket, 999, cfg, nil) - if err != nil { - t.Fatal(err) - } - bm, err = bitmapdb.Get64(tx, indexBucket, hashes[0], 999, math.MaxUint32) - require.NoError(t, err) - if bm.GetCardinality() > 0 && bm.Maximum() > 999 { - t.Fatal() - } - bm, err = bitmapdb.Get64(tx, indexBucket, hashes[1], 999, math.MaxUint32) - require.NoError(t, err) - if bm.GetCardinality() > 0 && bm.Maximum() > 999 { - t.Fatal() - } - - checkIndex(t, tx, indexBucket, hashes[0], expected[string(hashes[0])]) - checkIndex(t, tx, indexBucket, hashes[1], expected[string(hashes[1])]) - checkIndex(t, tx, indexBucket, hashes[2], expected[string(hashes[2])]) - - //}) - err = pruneHistoryIndex(tx, csbucket, "", tmpDir, 128, ctx, logger) - assert.NoError(t, err) - expectNoHistoryBefore(t, tx, csbucket, 128) - - // double prune is safe - err = pruneHistoryIndex(tx, csbucket, "", tmpDir, 128, ctx, logger) - assert.NoError(t, err) - expectNoHistoryBefore(t, tx, csbucket, 128) - tx.Rollback() - } -} - -func expectNoHistoryBefore(t *testing.T, tx kv.Tx, csbucket string, prunedTo uint64) { - prefixLen := length.Addr - if csbucket == kv.StorageChangeSet { - prefixLen = length.Hash - } - afterPrune := 0 - err := tx.ForEach(historyv2.Mapper[csbucket].IndexBucket, nil, func(k, _ []byte) error { - n := binary.BigEndian.Uint64(k[prefixLen:]) - require.True(t, n >= prunedTo) - afterPrune++ - return nil - }) - require.True(t, afterPrune > 0) - assert.NoError(t, err) -} - -func generateTestData(t *testing.T, tx kv.RwTx, csBucket string, 
numOfBlocks int) ([][]byte, map[string][]uint64) { //nolint - csInfo, ok := historyv2.Mapper[csBucket] - if !ok { - t.Fatal("incorrect cs bucket") - } - var isPlain bool - if kv.StorageChangeSet == csBucket || kv.AccountChangeSet == csBucket { - isPlain = true - } - addrs, err := generateAddrs(3, isPlain) - require.NoError(t, err) - if kv.StorageChangeSet == csBucket { - keys, innerErr := generateAddrs(3, false) - require.NoError(t, innerErr) - - defaultIncarnation := make([]byte, 8) - binary.BigEndian.PutUint64(defaultIncarnation, uint64(1)) - for i := range addrs { - addrs[i] = append(addrs[i], defaultIncarnation...) - addrs[i] = append(addrs[i], keys[i]...) - } - } - - res := make([]uint64, 0) - res2 := make([]uint64, 0) - res3 := make([]uint64, 0) - - for i := 0; i < numOfBlocks; i++ { - cs := csInfo.New() - err = cs.Add(addrs[0], []byte(strconv.Itoa(i))) - require.NoError(t, err) - - res = append(res, uint64(i)) - - if i%2 == 0 { - err = cs.Add(addrs[1], []byte(strconv.Itoa(i))) - require.NoError(t, err) - res2 = append(res2, uint64(i)) - } - if i%3 == 0 { - err = cs.Add(addrs[2], []byte(strconv.Itoa(i))) - require.NoError(t, err) - res3 = append(res3, uint64(i)) - } - err = csInfo.Encode(uint64(i), cs, func(k, v []byte) error { - return tx.Put(csBucket, k, v) - }) - require.NoError(t, err) - } - - return addrs, map[string][]uint64{ - string(addrs[0]): res, - string(addrs[1]): res2, - string(addrs[2]): res3, - } -} - -func checkIndex(t *testing.T, db kv.Tx, bucket string, k []byte, expected []uint64) { - t.Helper() - k = dbutils.CompositeKeyWithoutIncarnation(k) - m, err := bitmapdb.Get64(db, bucket, k, 0, math.MaxUint32) - if err != nil { - t.Fatal(err, common.Bytes2Hex(k)) - } - val := m.ToArray() - if !reflect.DeepEqual(val, expected) { - fmt.Printf("get : %v\n", val) - fmt.Printf("expected: %v\n", toU32(expected)) - t.Fatal() - } -} - -func toU32(in []uint64) []uint32 { - out := make([]uint32, len(in)) - for i := range in { - out[i] = uint32(in[i]) - } - return out -} - -func generateAddrs(numOfAddrs int, isPlain bool) ([][]byte, error) { - addrs := make([][]byte, numOfAddrs) - for i := 0; i < numOfAddrs; i++ { - key1, err := crypto.GenerateKey() - if err != nil { - return nil, err - } - addr := crypto.PubkeyToAddress(key1.PublicKey) - if isPlain { - addrs[i] = addr.Bytes() - continue - } - hash, err := common2.HashData(addr.Bytes()) - if err != nil { - return nil, err - } - addrs[i] = hash.Bytes() - } - return addrs, nil -} - -func TestRoaringCanParseRealDBValue(t *testing.T) { - v := 
common.FromHex("0100000000000000000000003a30000001000000c700c00310000000c02c092e162e55315a315b315c310932103214321d321f32223226322d323c325a325b326132623267326c327332753278327f32853287329932ab32af32bb32c332c732c832cf32d232d732d832db32e232e532e932f032f632fe320f331333173325332e3333333433363344334e33583369336e337d338a3391339933a733ab33b033b533c133cb33d133ff3301340c34143421342e3434343a345f346734733476347d348734893497349a34a134a534b834ce34d534dc34df34eb34ed34f334f634023505350c3513353e3546354a354f355a356235713576357a358d35913598359e35a535bf3501361c362036243625362c3634363c36453648365636643665366b367036713679367c3682369f36a336b336be36cc36d436db3609370f37433759375d3766378337983799379c37a837a937af37b137b837bb37c037c337c737d537db37e037f3371c38263828382d3832383c383e3846384d3852386f3875387b388138843889388b389038b438c038c338ce38d738de38e638f8380e39273933393d394d39793981398639a839b639c039c339e039f239043a0f3a1f3a243a273a2a3a2b3a453a563a663a693a793a7d3a833a8b3ac43add3af63a033b0e3b283b2f3b413b443b5c3b803b8e3ba03bae3bc73bd03be03b0c3c133c1d3c1e3c333c3c3c3f3c4f3c693c7c3c933cbe3cca3cd93cdd3ce13ce93cf53c063d0b3d173d1d3d233d283d3d3d423d453d4d3d513d563d5e3d6a3d6e3d943dc43dd63de03df43df73d273e7c3ea13ea63edd3ee33ef23e423f4f3fcf40d340d940e440ed40f24000410b4116411a414f41694172417f4188418a41a041aa41c541cc41d241d841e541f341fc410e4214421e4224422a423042454258427f4281429042b742bf42c642d742e642eb42f042f642024310431443184323432b433043354338433e4343434f4355435943614366436e43704373437a437d438143854387438f4392439e43a843b843c643ce43d343dd43f043fb430d44124419441e44214425442e442f4433443b44584460446844744478447a448c449244a444a844b744c344cf44d944e744ec4407451145264541454c4576458545a045ad45b345bd45e145ea45f34505460c46114614461946494654465d4668466f4675467a467e4687469446ae46ba46c146c646c846d446da46de46ec46fb4602470b4716471f4727472a473147384771479e47c147d347d947ff487449824987498949c049c449d949eb49ff49094a0b4a0d4a8a4b934b974bb54bbc4bc04bc14bc64bc74bca4bd14bd54bdf4be24be34be74bee4bf04bf54b014c064c0a4c174c1e4c224c294c354c394c3e4c404c454c504c554c5b4c5d4c5e4c754c7f4c814c844c894c904c9b4ca44cab4cae4cb24cb44cc64cc84ccb4ccd4cd14cd64ceb4ced4cf84cfe4c054d0c4d124d154d194d224d2b4d334d3c4d4d4d574d5d4d604d674d704d734d794d804d864d954d984d9e4da14dac4dbb4dc14dc44dcc4dcf4dd54ddd4deb4dec4df04df74d094e114e144e294e2f4e364e394e404e484e514e594e5d4e684e6c4e704e754e774e814e8d4e954ea34ea94eaa4ead4eb34eb54ebb4ec54ec94ed14ed74ee04ee44ee94eeb4ef44efe4e014f054f124f1e4f304f334f374f394f3a4f4a4f5a4f604f674f6b4f754f774f7f4f8f4f954f9a4f9c4fa34fbf4fc84fe64ff04ffe4f06501f502d50305036503f504750525056505c50645068506a506e5073507b5088508a5094509a50a050a250af50b250b650cf50d650eb50f650f750fd50035105511451175119511c5125512a512c5135513d514351455153515a515f517651775178517b518051835186518b519151a251ad51b851bc51ca51cc51df51e251e651e851ed51ee51ef51fc51075224522952335234523d5248524c524f52505255525c526c5273527c528552aa52bb52c952d552db52e152e852f1520d533f5343534a535a5363536d5374537a53835386538b5396539e53a453ad539e54fe5448566656a558f6583f597f599b59b559dd59055a1e5a9b5afa5a045b0e5b175b2c5b315b335b395bf65b145c465c525c5d5c615c6b5c875c8d5cb45cba5cec5cf25cf75c055d585ea15ee55ebc5fee5f016019603a60426059605b609760a860d36015613c617061bc61c661d461e561ed61f36152626762a262bc62c162c562d462de62076330633c635f638063826388638d63cf63f363fa6311642e64cb64ea641765216538653e655365ba65bd65c965cf65d465d865d965dc65de65e565ea65ed65f465f965fc6500660c660f661a662b66366645664b664d6655667066776682668b66906692669366ad66b366b766b966d266d566fb66ff6604670867186723673767436749674d67526757676467af67e56716681e6825682c6835683c683e6
84e6855685c685d6867686f6873687a68e068236b4c6b8a6bcc6bdc6bde6bed6c1d6f236f596f6e6fbc6f8c708f7093709b70a27021714b71eb731b75377548755c7581759d75a575ab75b175237648764c765b76697682769376c176d6762c7740775f776d778777c977d477ef77f277ff7707780e78207821782e7833783b783f784d784f7853785a78717872787f7883788d7894789d78a078a378c578c978d078d378d778e678e878ea78f278f77809790c79") - m := roaring64.New() - err := m.UnmarshalBinary(v) - require.NoError(t, err) - require.Equal(t, uint64(961), m.GetCardinality()) - encoded, err := m.MarshalBinary() - require.NoError(t, err) - require.Equal(t, encoded, v) -} diff --git a/eth/stagedsync/stage_log_index.go b/eth/stagedsync/stage_log_index.go deleted file mode 100644 index 3f3f64b03eb..00000000000 --- a/eth/stagedsync/stage_log_index.go +++ /dev/null @@ -1,544 +0,0 @@ -// Copyright 2024 The Erigon Authors -// This file is part of Erigon. -// -// Erigon is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// Erigon is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with Erigon. If not, see . - -package stagedsync - -import ( - "bytes" - "context" - "encoding/binary" - "fmt" - "runtime" - "slices" - "time" - - "github.com/RoaringBitmap/roaring" - "github.com/c2h5oh/datasize" - - "github.com/erigontech/erigon-lib/log/v3" - - libcommon "github.com/erigontech/erigon-lib/common" - "github.com/erigontech/erigon-lib/common/dbg" - "github.com/erigontech/erigon-lib/common/hexutility" - "github.com/erigontech/erigon-lib/etl" - "github.com/erigontech/erigon-lib/kv" - "github.com/erigontech/erigon-lib/kv/bitmapdb" - "github.com/erigontech/erigon-lib/kv/dbutils" - - "github.com/erigontech/erigon/core/types" - "github.com/erigontech/erigon/ethdb/prune" -) - -const ( - bitmapsBufLimit = 256 * datasize.MB // limit how much memory can use bitmaps before flushing to DB - bitmapsFlushEvery = 10 * time.Second -) - -type LogIndexCfg struct { - tmpdir string - db kv.RwDB - prune prune.Mode - bufLimit datasize.ByteSize - flushEvery time.Duration - - // For not pruning the logs of this contract since deposit contract logs are needed by CL to validate/produce blocks. 
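The core data structure of this deleted stage is one roaring bitmap of block numbers per topic and per address. A minimal sketch of that indexing idea, using the same github.com/RoaringBitmap/roaring package the file imports (the topic names are made up):

```go
package main

import (
	"fmt"

	"github.com/RoaringBitmap/roaring"
)

func main() {
	// topic -> bitmap of block numbers containing a log with that topic
	topicBlocks := map[string]*roaring.Bitmap{}

	add := func(topic string, blockNum uint32) {
		m, ok := topicBlocks[topic]
		if !ok {
			m = roaring.New()
			topicBlocks[topic] = m
		}
		m.Add(blockNum)
	}

	add("Transfer", 100)
	add("Transfer", 105)
	add("Approval", 105)

	// An eth_getLogs-style query for blocks carrying both topics becomes
	// a bitmap intersection instead of a scan over block bodies.
	both := roaring.And(topicBlocks["Transfer"], topicBlocks["Approval"])
	fmt.Println(both.ToArray()) // [105]
}
```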
- // All logs should be available to a validating node through eth_getLogs - depositContract *libcommon.Address -} - -func StageLogIndexCfg(db kv.RwDB, prune prune.Mode, tmpDir string, depositContract *libcommon.Address) LogIndexCfg { - return LogIndexCfg{ - db: db, - prune: prune, - bufLimit: bitmapsBufLimit, - flushEvery: bitmapsFlushEvery, - tmpdir: tmpDir, - depositContract: depositContract, - } -} - -func SpawnLogIndex(s *StageState, tx kv.RwTx, cfg LogIndexCfg, ctx context.Context, prematureEndBlock uint64, logger log.Logger) error { - useExternalTx := tx != nil - if !useExternalTx { - var err error - tx, err = cfg.db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - } - - endBlock, err := s.ExecutionAt(tx) - if err != nil { - return fmt.Errorf("getting last executed block: %w", err) - } - if s.BlockNumber > endBlock { // Erigon will self-heal (download missed blocks) eventually - return nil - } - logPrefix := s.LogPrefix() - // if prematureEndBlock is nonzero and less than the latest executed block, - // then we only run the log index stage until prematureEndBlock - if prematureEndBlock != 0 && prematureEndBlock < endBlock { - endBlock = prematureEndBlock - } - // It is possible that prematureEndBlock < s.BlockNumber, - // in which case it is important that we skip this stage, - // or else we could overwrite stage_at with prematureEndBlock - if endBlock <= s.BlockNumber { - return nil - } - - startBlock := s.BlockNumber - pruneTo := cfg.prune.History.PruneTo(endBlock) //endBlock - prune.r.older - // if startBlock < pruneTo { - // startBlock = pruneTo - // } - if startBlock > 0 { - startBlock++ - } - if err = promoteLogIndex(logPrefix, tx, startBlock, endBlock, pruneTo, cfg, ctx, logger); err != nil { - return err - } - if err = s.Update(tx, endBlock); err != nil { - return err - } - - if !useExternalTx { - if err = tx.Commit(); err != nil { - return err - } - } - - return nil -} - -// Add the topics and address index for logs, if not in prune range or addr is the deposit contract -func promoteLogIndex(logPrefix string, tx kv.RwTx, start uint64, endBlock uint64, pruneBlock uint64, cfg LogIndexCfg, ctx context.Context, logger log.Logger) error { - quit := ctx.Done() - logEvery := time.NewTicker(30 * time.Second) - defer logEvery.Stop() - - topics := map[string]*roaring.Bitmap{} - addresses := map[string]*roaring.Bitmap{} - logs, err := tx.Cursor(kv.Log) - if err != nil { - return err - } - defer logs.Close() - checkFlushEvery := time.NewTicker(cfg.flushEvery) - defer checkFlushEvery.Stop() - - collectorTopics := etl.NewCollector(logPrefix, cfg.tmpdir, etl.NewSortableBuffer(etl.BufferOptimalSize), logger) - defer collectorTopics.Close() - collectorAddrs := etl.NewCollector(logPrefix, cfg.tmpdir, etl.NewSortableBuffer(etl.BufferOptimalSize), logger) - defer collectorAddrs.Close() - - reader := bytes.NewReader(nil) - - if endBlock != 0 && endBlock-start > 100 { - logger.Info(fmt.Sprintf("[%s] processing", logPrefix), "from", start, "to", endBlock, "pruneTo", pruneBlock) - } - - for k, v, err := logs.Seek(dbutils.LogKey(start, 0)); k != nil; k, v, err = logs.Next() { - if err != nil { - return err - } - - if err := libcommon.Stopped(quit); err != nil { - return err - } - blockNum := binary.BigEndian.Uint64(k[:8]) - - // if endBlock is positive, we only run the stage up until endBlock - // if endBlock is zero, we run the stage for all available blocks - if endBlock != 0 && blockNum > endBlock { - logger.Info(fmt.Sprintf("[%s] Reached user-specified end block", 
logPrefix), "endBlock", endBlock) - break - } - - select { - default: - case <-logEvery.C: - var m runtime.MemStats - dbg.ReadMemStats(&m) - logger.Info(fmt.Sprintf("[%s] Progress", logPrefix), "number", blockNum, "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys)) - case <-checkFlushEvery.C: - if needFlush(topics, cfg.bufLimit) { - if err := flushBitmaps(collectorTopics, topics); err != nil { - return err - } - topics = map[string]*roaring.Bitmap{} - } - - if needFlush(addresses, cfg.bufLimit) { - if err := flushBitmaps(collectorAddrs, addresses); err != nil { - return err - } - addresses = map[string]*roaring.Bitmap{} - } - } - - var ll types.Logs - reader.Reset(v) - //if err := cbor.Unmarshal(&ll, reader); err != nil { - // return fmt.Errorf("receipt unmarshal failed: %w, blocl=%d", err, blockNum) - //} - - toStore := true - // if pruning is enabled, and depositContract isn't configured for the chain, don't index - if blockNum < pruneBlock { - toStore = false - if cfg.depositContract == nil { - continue - } - for _, l := range ll { - // if any of the log address is in noPrune, store and index all logs for this txId - if *cfg.depositContract == l.Address { - toStore = true - break - } - } - } - - if !toStore { - continue - } - for _, l := range ll { - for _, topic := range l.Topics { - topicStr := string(topic.Bytes()) - m, ok := topics[topicStr] - if !ok { - m = roaring.New() - topics[topicStr] = m - } - m.Add(uint32(blockNum)) - } - - accStr := string(l.Address.Bytes()) - m, ok := addresses[accStr] - if !ok { - m = roaring.New() - addresses[accStr] = m - } - m.Add(uint32(blockNum)) - } - } - - if err := flushBitmaps(collectorTopics, topics); err != nil { - return err - } - if err := flushBitmaps(collectorAddrs, addresses); err != nil { - return err - } - - var currentBitmap = roaring.New() - var buf = bytes.NewBuffer(nil) - - lastChunkKey := make([]byte, 128) - var loaderFunc = func(k []byte, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { - lastChunkKey = lastChunkKey[:len(k)+4] - copy(lastChunkKey, k) - binary.BigEndian.PutUint32(lastChunkKey[len(k):], ^uint32(0)) - lastChunkBytes, err := table.Get(lastChunkKey) - if err != nil { - return fmt.Errorf("find last chunk: %w", err) - } - - lastChunk := roaring.New() - if len(lastChunkBytes) > 0 { - _, err = lastChunk.FromBuffer(lastChunkBytes) - if err != nil { - return fmt.Errorf("couldn't read last log index chunk: %w, len(lastChunkBytes)=%d", err, len(lastChunkBytes)) - } - } - - if _, err := currentBitmap.FromBuffer(v); err != nil { - return err - } - currentBitmap.Or(lastChunk) // merge last existing chunk from db - next loop will overwrite it - return bitmapdb.WalkChunkWithKeys(k, currentBitmap, bitmapdb.ChunkLimit, func(chunkKey []byte, chunk *roaring.Bitmap) error { - buf.Reset() - if _, err := chunk.WriteTo(buf); err != nil { - return err - } - return next(k, chunkKey, buf.Bytes()) - }) - } - - if err := collectorTopics.Load(tx, kv.LogTopicIndex, loaderFunc, etl.TransformArgs{Quit: quit}); err != nil { - return err - } - - if err := collectorAddrs.Load(tx, kv.LogAddressIndex, loaderFunc, etl.TransformArgs{Quit: quit}); err != nil { - return err - } - - return nil -} - -func UnwindLogIndex(u *UnwindState, s *StageState, tx kv.RwTx, cfg LogIndexCfg, ctx context.Context) (err error) { - quitCh := ctx.Done() - useExternalTx := tx != nil - if !useExternalTx { - tx, err = cfg.db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - } - - logPrefix := s.LogPrefix() - if err 
:= unwindLogIndex(logPrefix, tx, u.UnwindPoint, cfg, quitCh); err != nil { - return err - } - - if err := u.Done(tx); err != nil { - return fmt.Errorf("%w", err) - } - if !useExternalTx { - if err := tx.Commit(); err != nil { - return err - } - } - return nil -} - -func unwindLogIndex(logPrefix string, db kv.RwTx, to uint64, cfg LogIndexCfg, quitCh <-chan struct{}) error { - topics := map[string]struct{}{} - addrs := map[string]struct{}{} - - reader := bytes.NewReader(nil) - c, err := db.Cursor(kv.Log) - if err != nil { - return err - } - defer c.Close() - for k, v, err := c.Seek(hexutility.EncodeTs(to + 1)); k != nil; k, v, err = c.Next() { - if err != nil { - return err - } - - if err := libcommon.Stopped(quitCh); err != nil { - return err - } - var logs types.Logs - reader.Reset(v) - //if err := cbor.Unmarshal(&logs, reader); err != nil { - // return fmt.Errorf("receipt unmarshal: %w, block=%d", err, binary.BigEndian.Uint64(k)) - //} - - for _, l := range logs { - for _, topic := range l.Topics { - topics[string(topic.Bytes())] = struct{}{} - } - addrs[string(l.Address.Bytes())] = struct{}{} - } - } - - if err := truncateBitmaps(db, kv.LogTopicIndex, topics, to); err != nil { - return err - } - if err := truncateBitmaps(db, kv.LogAddressIndex, addrs, to); err != nil { - return err - } - return nil -} - -func needFlush(bitmaps map[string]*roaring.Bitmap, memLimit datasize.ByteSize) bool { - sz := uint64(0) - for _, m := range bitmaps { - sz += m.GetSizeInBytes() * 2 // for golang's overhead - } - const memoryNeedsForKey = 32 * 2 * 2 // len(key) * (string and bytes) overhead * go's map overhead - return uint64(len(bitmaps)*memoryNeedsForKey)+sz > uint64(memLimit) -} - -func flushBitmaps(c *etl.Collector, inMem map[string]*roaring.Bitmap) error { - for k, v := range inMem { - v.RunOptimize() - if v.GetCardinality() == 0 { - continue - } - newV := bytes.NewBuffer(make([]byte, 0, v.GetSerializedSizeInBytes())) - if _, err := v.WriteTo(newV); err != nil { - return err - } - if err := c.Collect([]byte(k), newV.Bytes()); err != nil { - return err - } - } - return nil -} - -func truncateBitmaps(tx kv.RwTx, bucket string, inMem map[string]struct{}, to uint64) error { - keys := make([]string, 0, len(inMem)) - for k := range inMem { - keys = append(keys, k) - } - slices.Sort(keys) - for _, k := range keys { - if err := bitmapdb.TruncateRange(tx, bucket, []byte(k), uint32(to+1)); err != nil { - return fmt.Errorf("fail TruncateRange: bucket=%s, %w", bucket, err) - } - } - - return nil -} - -func pruneOldLogChunks(tx kv.RwTx, bucket string, inMem *etl.Collector, pruneTo uint64, ctx context.Context) error { - logEvery := time.NewTicker(logInterval) - defer logEvery.Stop() - - c, err := tx.RwCursor(bucket) - if err != nil { - return err - } - defer c.Close() - - if err := inMem.Load(tx, bucket, func(key, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { - for k, _, err := c.Seek(key); k != nil; k, _, err = c.Next() { - if err != nil { - return err - } - var blockNum uint64 - blockNum = uint64(binary.BigEndian.Uint32(k[len(key):])) - - if !bytes.HasPrefix(k, key) || blockNum >= pruneTo { - break - } - - if err = c.DeleteCurrent(); err != nil { - return fmt.Errorf("failed delete log/index, bucket=%v block=%d: %w", bucket, blockNum, err) - } - } - return nil - }, etl.TransformArgs{ - Quit: ctx.Done(), - }); err != nil { - return err - } - return nil -} - -// Call pruneLogIndex with the current sync progresses and commit the data to db -func PruneLogIndex(s *PruneState, tx kv.RwTx, cfg 
LogIndexCfg, ctx context.Context, logger log.Logger) (err error) { - if !cfg.prune.History.Enabled() { - return nil - } - logPrefix := s.LogPrefix() - - useExternalTx := tx != nil - if !useExternalTx { - tx, err = cfg.db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - } - - pruneTo := cfg.prune.History.PruneTo(s.ForwardProgress) - if err = pruneLogIndex(logPrefix, tx, cfg.tmpdir, s.PruneProgress, pruneTo, ctx, logger, cfg.depositContract); err != nil { - return err - } - if err = s.DoneAt(tx, pruneTo); err != nil { - return err - } - - if !useExternalTx { - if err = tx.Commit(); err != nil { - return err - } - } - return nil -} - -// Prune log indexes as well as logs within the prune range -func pruneLogIndex(logPrefix string, tx kv.RwTx, tmpDir string, pruneFrom, pruneTo uint64, ctx context.Context, logger log.Logger, depositContract *libcommon.Address) error { - logEvery := time.NewTicker(logInterval) - defer logEvery.Stop() - - bufferSize := etl.BufferOptimalSize - topics := etl.NewCollector(logPrefix, tmpDir, etl.NewOldestEntryBuffer(bufferSize), logger) - defer topics.Close() - addrs := etl.NewCollector(logPrefix, tmpDir, etl.NewOldestEntryBuffer(bufferSize), logger) - defer addrs.Close() - - reader := bytes.NewReader(nil) - { - c, err := tx.Cursor(kv.Log) - if err != nil { - return err - } - defer c.Close() - - for k, v, err := c.Seek(dbutils.LogKey(pruneFrom, 0)); k != nil; k, v, err = c.Next() { - if err != nil { - return err - } - blockNum := binary.BigEndian.Uint64(k) - if blockNum >= pruneTo { - break - } - select { - case <-logEvery.C: - logger.Info(fmt.Sprintf("[%s]", logPrefix), "table", kv.Log, "block", blockNum, "pruneFrom", pruneFrom, "pruneTo", pruneTo) - case <-ctx.Done(): - return libcommon.ErrStopped - default: - } - - var logs types.Logs - reader.Reset(v) - //if err := cbor.Unmarshal(&logs, reader); err != nil { - // return fmt.Errorf("receipt unmarshal failed: %w, block=%d", err, binary.BigEndian.Uint64(k)) - //} - - toPrune := true - for _, l := range logs { - // No logs (or sublogs) for this txId should be pruned - // if one of the logs belongs to the deposit contract - if depositContract != nil && *depositContract == l.Address { - toPrune = false - break - } - } - - if toPrune { - for _, l := range logs { - for _, topic := range l.Topics { - if err := topics.Collect(topic.Bytes(), nil); err != nil { - return err - } - } - if err := addrs.Collect(l.Address.Bytes(), nil); err != nil { - return err - } - } - if err := tx.Delete(kv.Log, k); err != nil { - return err - } - } - } - } - - if err := pruneOldLogChunks(tx, kv.LogTopicIndex, topics, pruneTo, ctx); err != nil { - return err - } - if err := pruneOldLogChunks(tx, kv.LogAddressIndex, addrs, pruneTo, ctx); err != nil { - return err - } - return nil -} diff --git a/eth/stagedsync/stage_mining_create_block.go b/eth/stagedsync/stage_mining_create_block.go index c312ddab0d9..d5e8d0bcb4c 100644 --- a/eth/stagedsync/stage_mining_create_block.go +++ b/eth/stagedsync/stage_mining_create_block.go @@ -134,7 +134,7 @@ func SpawnMiningCreateBlockStage(s *StageState, tx kv.RwTx, cfg MiningCreateBloc if cfg.miner.MiningConfig.Etherbase == (libcommon.Address{}) { if cfg.blockBuilderParameters == nil { - return fmt.Errorf("refusing to mine without etherbase") + return errors.New("refusing to mine without etherbase") } // If we do not have an etherbase, let's use the suggested one coinbase = cfg.blockBuilderParameters.SuggestedFeeRecipient diff --git a/eth/stagedsync/stage_polygon_sync.go 
b/eth/stagedsync/stage_polygon_sync.go index 46254817f98..3f736e7eed0 100644 --- a/eth/stagedsync/stage_polygon_sync.go +++ b/eth/stagedsync/stage_polygon_sync.go @@ -34,6 +34,7 @@ import ( "github.com/erigontech/erigon-lib/common/metrics" "github.com/erigontech/erigon-lib/direct" "github.com/erigontech/erigon-lib/kv" + "github.com/erigontech/erigon-lib/kv/dbutils" "github.com/erigontech/erigon-lib/log/v3" "github.com/erigontech/erigon/accounts/abi" "github.com/erigontech/erigon/core/rawdb" @@ -45,6 +46,7 @@ import ( "github.com/erigontech/erigon/polygon/p2p" polygonsync "github.com/erigontech/erigon/polygon/sync" "github.com/erigontech/erigon/turbo/services" + "github.com/erigontech/erigon/turbo/snapshotsync/freezeblocks" ) var updateForkChoiceSuccessErr = errors.New("update fork choice success") @@ -84,9 +86,12 @@ func NewPolygonSyncStageCfg( spanReader: blockReader, txActionStream: txActionStream, }, + spanBlockProducerSelections: &polygonSyncStageSbpsStore{ + txActionStream: txActionStream, + }, } - heimdallService := heimdall.NewService(heimdallClient, heimdallStore, logger) borConfig := chainConfig.Bor.(*borcfg.BorConfig) + heimdallService := heimdall.NewService(borConfig, heimdallClient, heimdallStore, logger) p2pService := p2p.NewService(maxPeers, logger, sentry, statusDataProvider.GetStatusData) checkpointVerifier := polygonsync.VerifyCheckpointHeaders milestoneVerifier := polygonsync.VerifyMilestoneHeaders @@ -104,7 +109,6 @@ func NewPolygonSyncStageCfg( syncStore, blockLimit, ) - spansCache := polygonsync.NewSpansCache() events := polygonsync.NewTipEvents(logger, p2pService, heimdallService) sync := polygonsync.NewSync( syncStore, @@ -113,9 +117,7 @@ func NewPolygonSyncStageCfg( blocksVerifier, p2pService, blockDownloader, - polygonsync.NewCanonicalChainBuilderFactory(chainConfig, borConfig, spansCache), - spansCache, - heimdallService.FetchLatestSpans, + polygonsync.NewCanonicalChainBuilderFactory(chainConfig, borConfig, heimdallService), events.Events(), logger, ) @@ -305,9 +307,14 @@ func (s *polygonSyncStageSyncStore) Run(context.Context) error { } type polygonSyncStageHeimdallStore struct { - checkpoints *polygonSyncStageCheckpointStore - milestones *polygonSyncStageMilestoneStore - spans *polygonSyncStageSpanStore + checkpoints *polygonSyncStageCheckpointStore + milestones *polygonSyncStageMilestoneStore + spans *polygonSyncStageSpanStore + spanBlockProducerSelections *polygonSyncStageSbpsStore +} + +func (s polygonSyncStageHeimdallStore) SpanBlockProducerSelections() heimdall.EntityStore[*heimdall.SpanBlockProducerSelection] { + return s.spanBlockProducerSelections } func (s polygonSyncStageHeimdallStore) Checkpoints() heimdall.EntityStore[*heimdall.Checkpoint] { @@ -335,7 +342,7 @@ type polygonSyncStageCheckpointStore struct { txActionStream chan<- polygonSyncStageTxAction } -func (s polygonSyncStageCheckpointStore) GetLastEntityId(ctx context.Context) (uint64, bool, error) { +func (s polygonSyncStageCheckpointStore) LastEntityId(ctx context.Context) (uint64, bool, error) { type response struct { id uint64 ok bool @@ -354,19 +361,19 @@ func (s polygonSyncStageCheckpointStore) GetLastEntityId(ctx context.Context) (u return r.id, r.ok, r.err } -func (s polygonSyncStageCheckpointStore) GetLastEntity(ctx context.Context) (cp *heimdall.Checkpoint, err error) { - id, ok, err := s.GetLastEntityId(ctx) +func (s polygonSyncStageCheckpointStore) LastEntity(ctx context.Context) (*heimdall.Checkpoint, bool, error) { + id, ok, err := s.LastEntityId(ctx) if err != nil { - return nil, 
err + return nil, false, err } if !ok { - return nil, errors.New("last checkpoint not found") + return nil, false, nil } - return s.GetEntity(ctx, id) + return s.Entity(ctx, id) } -func (s polygonSyncStageCheckpointStore) GetEntity(ctx context.Context, id uint64) (*heimdall.Checkpoint, error) { +func (s polygonSyncStageCheckpointStore) Entity(ctx context.Context, id uint64) (*heimdall.Checkpoint, bool, error) { type response struct { v []byte err error @@ -378,15 +385,19 @@ func (s polygonSyncStageCheckpointStore) GetEntity(ctx context.Context, id uint6 return nil }) if err != nil { - return nil, err + return nil, false, err } if r.err != nil { - return nil, r.err + if errors.Is(r.err, freezeblocks.ErrCheckpointNotFound) { + return nil, false, nil + } + + return nil, false, r.err } var c heimdall.Checkpoint err = json.Unmarshal(r.v, &c) - return &c, err + return &c, true, err } func (s polygonSyncStageCheckpointStore) PutEntity(ctx context.Context, id uint64, entity *heimdall.Checkpoint) error { @@ -447,7 +458,7 @@ type polygonSyncStageMilestoneStore struct { txActionStream chan<- polygonSyncStageTxAction } -func (s polygonSyncStageMilestoneStore) GetLastEntityId(ctx context.Context) (uint64, bool, error) { +func (s polygonSyncStageMilestoneStore) LastEntityId(ctx context.Context) (uint64, bool, error) { type response struct { id uint64 ok bool @@ -466,19 +477,19 @@ func (s polygonSyncStageMilestoneStore) GetLastEntityId(ctx context.Context) (ui return r.id, r.ok, r.err } -func (s polygonSyncStageMilestoneStore) GetLastEntity(ctx context.Context) (*heimdall.Milestone, error) { - id, ok, err := s.GetLastEntityId(ctx) +func (s polygonSyncStageMilestoneStore) LastEntity(ctx context.Context) (*heimdall.Milestone, bool, error) { + id, ok, err := s.LastEntityId(ctx) if err != nil { - return nil, err + return nil, false, err } if !ok { - return nil, errors.New("last milestone not found") + return nil, false, nil } - return s.GetEntity(ctx, id) + return s.Entity(ctx, id) } -func (s polygonSyncStageMilestoneStore) GetEntity(ctx context.Context, id uint64) (*heimdall.Milestone, error) { +func (s polygonSyncStageMilestoneStore) Entity(ctx context.Context, id uint64) (*heimdall.Milestone, bool, error) { type response struct { v []byte err error @@ -490,15 +501,19 @@ func (s polygonSyncStageMilestoneStore) GetEntity(ctx context.Context, id uint64 return nil }) if err != nil { - return nil, err + return nil, false, err } if r.err != nil { - return nil, r.err + if errors.Is(r.err, freezeblocks.ErrMilestoneNotFound) { + return nil, false, nil + } + + return nil, false, r.err } var m heimdall.Milestone err = json.Unmarshal(r.v, &m) - return &m, err + return &m, true, err } func (s polygonSyncStageMilestoneStore) PutEntity(ctx context.Context, id uint64, entity *heimdall.Milestone) error { @@ -557,7 +572,7 @@ type polygonSyncStageSpanStore struct { txActionStream chan<- polygonSyncStageTxAction } -func (s polygonSyncStageSpanStore) GetLastEntityId(ctx context.Context) (id uint64, ok bool, err error) { +func (s polygonSyncStageSpanStore) LastEntityId(ctx context.Context) (id uint64, ok bool, err error) { type response struct { id uint64 ok bool @@ -576,19 +591,19 @@ func (s polygonSyncStageSpanStore) GetLastEntityId(ctx context.Context) (id uint return r.id, r.ok, r.err } -func (s polygonSyncStageSpanStore) GetLastEntity(ctx context.Context) (*heimdall.Span, error) { - id, ok, err := s.GetLastEntityId(ctx) +func (s polygonSyncStageSpanStore) LastEntity(ctx context.Context) (*heimdall.Span, bool, error) { + 
id, ok, err := s.LastEntityId(ctx) if err != nil { - return nil, err + return nil, false, err } if !ok { - return nil, errors.New("last span not found") + return nil, false, nil } - return s.GetEntity(ctx, id) + return s.Entity(ctx, id) } -func (s polygonSyncStageSpanStore) GetEntity(ctx context.Context, id uint64) (*heimdall.Span, error) { +func (s polygonSyncStageSpanStore) Entity(ctx context.Context, id uint64) (*heimdall.Span, bool, error) { type response struct { v []byte err error @@ -600,15 +615,19 @@ func (s polygonSyncStageSpanStore) GetEntity(ctx context.Context, id uint64) (*h return nil }) if err != nil { - return nil, err + return nil, false, err } if r.err != nil { - return nil, r.err + if errors.Is(r.err, freezeblocks.ErrSpanNotFound) { + return nil, false, nil + } + + return nil, false, r.err } var span heimdall.Span err = json.Unmarshal(r.v, &span) - return &span, err + return &span, true, err } func (s polygonSyncStageSpanStore) PutEntity(ctx context.Context, id uint64, entity *heimdall.Span) error { @@ -662,6 +681,150 @@ func (s polygonSyncStageSpanStore) Close() { // no-op } +// polygonSyncStageSbpsStore is the store for heimdall.SpanBlockProducerSelection +type polygonSyncStageSbpsStore struct { + txActionStream chan<- polygonSyncStageTxAction +} + +func (s polygonSyncStageSbpsStore) LastEntityId(ctx context.Context) (uint64, bool, error) { + entity, ok, err := s.LastEntity(ctx) + if err != nil || !ok { + return 0, ok, err + } + + return entity.RawId(), true, nil +} + +func (s polygonSyncStageSbpsStore) LastEntity(ctx context.Context) (*heimdall.SpanBlockProducerSelection, bool, error) { + type response struct { + v []byte + ok bool + err error + } + + r, err := awaitTxAction(ctx, s.txActionStream, func(tx kv.RwTx, responseStream chan<- response) error { + cursor, err := tx.Cursor(kv.BorProducerSelections) + if err != nil { + responseStream <- response{err: err} + return nil + } + + defer cursor.Close() + k, v, err := cursor.Last() + if err != nil { + responseStream <- response{v: nil, ok: false, err: err} + return nil + } + if k == nil { + // not found + responseStream <- response{v: nil, ok: false, err: nil} + return nil + } + + responseStream <- response{v: v, ok: true, err: err} + return nil + }) + if err != nil { + return nil, false, err + } + if r.err != nil || !r.ok { + return nil, r.ok, r.err + } + + var selection heimdall.SpanBlockProducerSelection + err = json.Unmarshal(r.v, &selection) + return &selection, true, err +} + +func (s polygonSyncStageSbpsStore) Entity(ctx context.Context, id uint64) (*heimdall.SpanBlockProducerSelection, bool, error) { + type response struct { + v []byte + ok bool + err error + } + + r, err := awaitTxAction(ctx, s.txActionStream, func(tx kv.RwTx, responseStream chan<- response) error { + k := make([]byte, dbutils.NumberLength) + binary.BigEndian.PutUint64(k, id) + + v, err := tx.GetOne(kv.BorProducerSelections, k) + if err != nil { + responseStream <- response{v: nil, ok: false, err: err} + return nil + } + if v == nil { + // not found + responseStream <- response{v: nil, ok: false, err: nil} + return nil + } + + responseStream <- response{v: v, ok: true, err: err} + return nil + }) + if err != nil { + return nil, false, err + } + if r.err != nil || !r.ok { + return nil, r.ok, r.err + } + + var selection heimdall.SpanBlockProducerSelection + err = json.Unmarshal(r.v, &selection) + return &selection, true, err +} + +func (s polygonSyncStageSbpsStore) PutEntity(ctx context.Context, id uint64, entity 
*heimdall.SpanBlockProducerSelection) error { + type response struct { + err error + } + + r, err := awaitTxAction(ctx, s.txActionStream, func(tx kv.RwTx, responseStream chan<- response) error { + k := make([]byte, dbutils.NumberLength) + binary.BigEndian.PutUint64(k, id) + + v, err := json.Marshal(entity) + if err != nil { + responseStream <- response{err: err} + return nil + } + + responseStream <- response{err: tx.Put(kv.BorProducerSelections, k, v)} + return nil + }) + if err != nil { + return err + } + + return r.err +} + +func (s polygonSyncStageSbpsStore) RangeFromBlockNum(ctx context.Context, blockNum uint64) ([]*heimdall.SpanBlockProducerSelection, error) { + type response struct { + result []*heimdall.SpanBlockProducerSelection + err error + } + + r, err := awaitTxAction(ctx, s.txActionStream, func(tx kv.RwTx, responseStream chan<- response) error { + makeEntity := func() *heimdall.SpanBlockProducerSelection { return &heimdall.SpanBlockProducerSelection{} } + r, err := blockRangeEntitiesFromBlockNum(tx, kv.BorProducerSelections, makeEntity, blockNum) + responseStream <- response{result: r, err: err} + return nil + }) + if err != nil { + return nil, err + } + + return r.result, r.err +} + +func (s polygonSyncStageSbpsStore) Prepare(_ context.Context) error { + return nil +} + +func (s polygonSyncStageSbpsStore) Close() { + // no-op +} + type blockRangeComparator interface { CmpRange(blockNum uint64) int } @@ -675,7 +838,7 @@ func blockRangeEntitiesFromBlockNum[T blockRangeComparator](tx kv.Tx, table stri defer cur.Close() var k, v []byte var entities []T - for k, v, err = cur.Last(); err == nil && k != nil; _, v, err = cur.Prev() { + for k, v, err = cur.Last(); err == nil && k != nil; k, v, err = cur.Prev() { entity := makeEntity() err = json.Unmarshal(v, entity) if err != nil { @@ -716,7 +879,7 @@ func (e *polygonSyncStageExecutionEngine) InsertBlocks(ctx context.Context, bloc } r, err := awaitTxAction(ctx, e.txActionStream, func(tx kv.RwTx, responseStream chan<- response) error { - responseStream <- response{err: e.insertBlocks(ctx, blocks, tx)} + responseStream <- response{err: e.insertBlocks(blocks, tx)} return nil }) if err != nil { @@ -726,10 +889,7 @@ func (e *polygonSyncStageExecutionEngine) InsertBlocks(ctx context.Context, bloc return r.err } -func (e *polygonSyncStageExecutionEngine) insertBlocks(ctx context.Context, blocks []*types.Block, tx kv.RwTx) error { - stateSyncEventsLogTicker := time.NewTicker(logInterval) - defer stateSyncEventsLogTicker.Stop() - +func (e *polygonSyncStageExecutionEngine) insertBlocks(blocks []*types.Block, tx kv.RwTx) error { for _, block := range blocks { height := block.NumberU64() header := block.Header() @@ -767,10 +927,6 @@ func (e *polygonSyncStageExecutionEngine) insertBlocks(ctx context.Context, bloc if _, err := rawdb.WriteRawBodyIfNotExists(tx, header.Hash(), height, body.RawBody()); err != nil { return err } - - if err := e.downloadStateSyncEvents(ctx, tx, header, stateSyncEventsLogTicker); err != nil { - return err - } } return nil @@ -782,7 +938,7 @@ func (e *polygonSyncStageExecutionEngine) UpdateForkChoice(ctx context.Context, } r, err := awaitTxAction(ctx, e.txActionStream, func(tx kv.RwTx, responseStream chan<- response) error { - err := e.updateForkChoice(tx, tip) + err := e.updateForkChoice(ctx, tx, tip) responseStream <- response{err: err} if err == nil { return updateForkChoiceSuccessErr @@ -796,7 +952,7 @@ func (e *polygonSyncStageExecutionEngine) UpdateForkChoice(ctx context.Context, return r.err } -func (e 
*polygonSyncStageExecutionEngine) updateForkChoice(tx kv.RwTx, tip *types.Header) error { +func (e *polygonSyncStageExecutionEngine) updateForkChoice(ctx context.Context, tx kv.RwTx, tip *types.Header) error { tipBlockNum := tip.Number.Uint64() tipHash := tip.Hash() @@ -821,6 +977,35 @@ func (e *polygonSyncStageExecutionEngine) updateForkChoice(tx kv.RwTx, tip *type return nil } + // TODO remove below for loop once the bridge is integrated in the stage + borConfig := e.chainConfig.Bor.(*borcfg.BorConfig) + for i := len(newNodes) - 1; i >= 0; { + blockNum := newNodes[i].number + blockHash := newNodes[i].hash + if blockNum == 0 { + break + } + + sprintLen := borConfig.CalculateSprintLength(blockNum) + blockPosInSprint := blockNum % sprintLen + if blockPosInSprint > 0 { + i -= int(blockPosInSprint) + continue + } + + header, err := e.blockReader.Header(ctx, tx, blockHash, blockNum) + if err != nil { + return err + } + + err = e.downloadStateSyncEvents(ctx, tx, header, logTicker) + if err != nil { + return err + } + + i -= int(sprintLen) + } + if err := rawdb.AppendCanonicalTxNums(tx, newNodes[len(newNodes)-1].number); err != nil { return err } @@ -892,6 +1077,8 @@ func (e *polygonSyncStageExecutionEngine) downloadStateSyncEvents( header *types.Header, logTicker *time.Ticker, ) error { + e.logger.Trace(e.appendLogPrefix("download state sync event"), "block", header.Number.Uint64()) + var err error if !e.lastStateSyncEventIdInit { e.lastStateSyncEventId, _, err = e.blockReader.LastEventId(ctx, tx) diff --git a/eth/stagedsync/stage_senders.go b/eth/stagedsync/stage_senders.go index 2a4b381a5b4..c159095a728 100644 --- a/eth/stagedsync/stage_senders.go +++ b/eth/stagedsync/stage_senders.go @@ -60,11 +60,10 @@ type SendersCfg struct { chainConfig *chain.Config hd *headerdownload.HeaderDownload blockReader services.FullBlockReader - loopBreakCheck func(int) bool syncCfg ethconfig.Sync } -func StageSendersCfg(db kv.RwDB, chainCfg *chain.Config, syncCfg ethconfig.Sync, badBlockHalt bool, tmpdir string, prune prune.Mode, blockReader services.FullBlockReader, hd *headerdownload.HeaderDownload, loopBreakCheck func(int) bool) SendersCfg { +func StageSendersCfg(db kv.RwDB, chainCfg *chain.Config, syncCfg ethconfig.Sync, badBlockHalt bool, tmpdir string, prune prune.Mode, blockReader services.FullBlockReader, hd *headerdownload.HeaderDownload) SendersCfg { const sendersBatchSize = 10000 const sendersBlockSize = 4096 @@ -81,13 +80,12 @@ func StageSendersCfg(db kv.RwDB, chainCfg *chain.Config, syncCfg ethconfig.Sync, prune: prune, hd: hd, blockReader: blockReader, - loopBreakCheck: loopBreakCheck, syncCfg: syncCfg, } } func SpawnRecoverSendersStage(cfg SendersCfg, s *StageState, u Unwinder, tx kv.RwTx, toBlock uint64, ctx context.Context, logger log.Logger) error { - if cfg.blockReader.FreezingCfg().Enabled && s.BlockNumber < cfg.blockReader.FrozenBlocks() { + if s.BlockNumber < cfg.blockReader.FrozenBlocks() { s.BlockNumber = cfg.blockReader.FrozenBlocks() } @@ -217,10 +215,6 @@ Loop: break } - if cfg.loopBreakCheck != nil && cfg.loopBreakCheck(int(blockNumber-startFrom)) { - break - } - has, err := cfg.blockReader.HasSenders(ctx, tx, blockHash, blockNumber) if err != nil { return err @@ -413,14 +407,8 @@ func PruneSendersStage(s *PruneState, tx kv.RwTx, cfg SendersCfg, ctx context.Co } defer tx.Rollback() } - if cfg.blockReader.FreezingCfg().Enabled { - // noop. in this case senders will be deleted by BlockRetire.PruneAncientBlocks after data-freezing. 
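On the sprint-boundary walk added to updateForkChoice above: starting from the newest node it jumps back to each sprint-start block (blockNum % sprintLen == 0) and fetches state sync events only there. A toy version of the index arithmetic, assuming contiguous ascending block numbers and a fixed sprint length (Bor actually derives it per block via CalculateSprintLength):

```go
package main

import "fmt"

// sprintStarts returns the sprint-start blocks visited by the backwards
// walk, newest first. blockNums must be contiguous and ascending.
func sprintStarts(blockNums []uint64, sprintLen uint64) []uint64 {
	var starts []uint64
	for i := len(blockNums) - 1; i >= 0; {
		n := blockNums[i]
		if n == 0 {
			break
		}
		if pos := n % sprintLen; pos > 0 {
			i -= int(pos) // jump to this sprint's first block
			continue
		}
		starts = append(starts, n)
		i -= int(sprintLen) // jump to the previous sprint's first block
	}
	return starts
}

func main() {
	nums := make([]uint64, 40) // blocks 1..40
	for i := range nums {
		nums[i] = uint64(i + 1)
	}
	fmt.Println(sprintStarts(nums, 16)) // [32 16]
}
```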
- } else if cfg.prune.History.Enabled() { - to := cfg.prune.History.PruneTo(s.ForwardProgress) - if err = rawdb.PruneTable(tx, kv.Senders, to, ctx, 100); err != nil { - return err - } - } + + // noop. in this case senders will be deleted by BlockRetire.PruneAncientBlocks after data-freezing. if !useExternalTx { if err = tx.Commit(); err != nil { diff --git a/eth/stagedsync/stage_senders_test.go b/eth/stagedsync/stage_senders_test.go index 6dd05726abd..20fac8840fd 100644 --- a/eth/stagedsync/stage_senders_test.go +++ b/eth/stagedsync/stage_senders_test.go @@ -146,7 +146,7 @@ func TestSenders(t *testing.T) { require.NoError(stages.SaveStageProgress(tx, stages.Bodies, 3)) - cfg := stagedsync.StageSendersCfg(db, params.TestChainConfig, ethconfig.Defaults.Sync, false, "", prune.Mode{}, br, nil, nil) + cfg := stagedsync.StageSendersCfg(db, params.TestChainConfig, ethconfig.Defaults.Sync, false, "", prune.Mode{}, br, nil) err = stagedsync.SpawnRecoverSendersStage(cfg, &stagedsync.StageState{ID: stages.Senders}, nil, tx, 3, m.Ctx, log.New()) require.NoError(err) diff --git a/eth/stagedsync/stage_snapshots.go b/eth/stagedsync/stage_snapshots.go index f3340d9f7fe..b15a2e06a1f 100644 --- a/eth/stagedsync/stage_snapshots.go +++ b/eth/stagedsync/stage_snapshots.go @@ -138,7 +138,7 @@ func StageSnapshotsCfg(db kv.RwDB, freezingCfg := cfg.blockReader.FreezingCfg() - if freezingCfg.Enabled && freezingCfg.ProduceE2 { + if freezingCfg.ProduceE2 { u := cfg.snapshotUploader if maxSeedable := u.maxSeedableHeader(); u.cfg.syncConfig.FrozenBlockLimit > 0 && maxSeedable > u.cfg.syncConfig.FrozenBlockLimit { @@ -249,9 +249,6 @@ func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.R if !s.CurrentSyncCycle.IsFirstCycle { return nil } - if !cfg.blockReader.FreezingCfg().Enabled { - return nil - } diagnostics.Send(diagnostics.CurrentSyncStage{Stage: string(stages.Snapshots)}) @@ -268,7 +265,7 @@ func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.R diagnostics.Send(diagnostics.CurrentSyncSubStage{SubStage: "Download header-chain"}) // Download only the snapshots that are for the header chain. 
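The WaitForDownloader change in this region keeps the existing two-phase pattern: download just the header-chain segments first, then everything else. A reduced sketch of that sequencing, with waitForDownloader standing in for snapshotsync.WaitForDownloader (whose real signature also takes dirs, the prune mode, the aggregator, and more):

```go
package main

import (
	"context"
	"fmt"
)

// waitForDownloader is a stand-in reduced to the headerChain flag.
func waitForDownloader(ctx context.Context, headerChain bool) error {
	if headerChain {
		fmt.Println("phase 1: header + body segments only")
	} else {
		fmt.Println("phase 2: remaining segments")
	}
	return nil
}

func main() {
	ctx := context.Background()
	// Headers come first so header verification and header-dependent
	// work can start while the bulk of the snapshots still downloads.
	if err := waitForDownloader(ctx, true); err != nil {
		panic(err)
	}
	if err := waitForDownloader(ctx, false); err != nil {
		panic(err)
	}
}
```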
- if err := snapshotsync.WaitForDownloader(ctx, s.LogPrefix() /*headerChain=*/, true, cfg.blobs, cfg.prune, cstate, cfg.agg, tx, cfg.blockReader, &cfg.chainConfig, cfg.snapshotDownloader, s.state.StagesIdsList()); err != nil { + if err := snapshotsync.WaitForDownloader(ctx, s.LogPrefix() /*headerChain=*/, cfg.dirs, true, cfg.blobs, cfg.prune, cstate, cfg.agg, tx, cfg.blockReader, &cfg.chainConfig, cfg.snapshotDownloader, s.state.StagesIdsList()); err != nil { return err } if err := cfg.blockReader.Snapshots().ReopenSegments([]snaptype.Type{coresnaptype.Headers, coresnaptype.Bodies}, true); err != nil { @@ -276,7 +273,7 @@ func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.R } diagnostics.Send(diagnostics.CurrentSyncSubStage{SubStage: "Download snapshots"}) - if err := snapshotsync.WaitForDownloader(ctx, s.LogPrefix() /*headerChain=*/, false, cfg.blobs, cfg.prune, cstate, cfg.agg, tx, cfg.blockReader, &cfg.chainConfig, cfg.snapshotDownloader, s.state.StagesIdsList()); err != nil { + if err := snapshotsync.WaitForDownloader(ctx, s.LogPrefix() /*headerChain=*/, cfg.dirs, false, cfg.blobs, cfg.prune, cstate, cfg.agg, tx, cfg.blockReader, &cfg.chainConfig, cfg.snapshotDownloader, s.state.StagesIdsList()); err != nil { return err } @@ -506,11 +503,6 @@ func FillDBFromSnapshots(logPrefix string, ctx context.Context, tx kv.RwTx, dirs return nil } -func computeBlocksToPrune(cfg SnapshotsCfg) (blocksToPrune uint64, historyToPrune uint64) { - frozenBlocks := cfg.blockReader.Snapshots().SegmentsMax() - return frozenBlocks - cfg.prune.Blocks.PruneTo(frozenBlocks), frozenBlocks - cfg.prune.History.PruneTo(frozenBlocks) -} - /* ====== PRUNING ====== */ // snapshots pruning sections works more as a retiring of blocks // retiring blocks means moving block data from db into snapshots @@ -525,71 +517,69 @@ func SnapshotsPrune(s *PruneState, cfg SnapshotsCfg, ctx context.Context, tx kv. 
} freezingCfg := cfg.blockReader.FreezingCfg() - if freezingCfg.Enabled { - if cfg.blockRetire.HasNewFrozenFiles() || cfg.agg.HasNewFrozenFiles() { - ac := cfg.agg.BeginFilesRo() - defer ac.Close() - aggFiles := ac.Files() - ac.Close() + if cfg.blockRetire.HasNewFrozenFiles() || cfg.agg.HasNewFrozenFiles() { + ac := cfg.agg.BeginFilesRo() + defer ac.Close() + aggFiles := ac.Files() + ac.Close() - if err := rawdb.WriteSnapshots(tx, cfg.blockReader.FrozenFiles(), aggFiles); err != nil { - return err - } + if err := rawdb.WriteSnapshots(tx, cfg.blockReader.FrozenFiles(), aggFiles); err != nil { + return err } + } - if freezingCfg.ProduceE2 { - //TODO: initialSync maybe save files progress here - - var minBlockNumber uint64 - - if cfg.snapshotUploader != nil { - minBlockNumber = cfg.snapshotUploader.minBlockNumber() - } + if freezingCfg.ProduceE2 { + //TODO: initialSync maybe save files progress here - if s.CurrentSyncCycle.IsInitialCycle { - cfg.blockRetire.SetWorkers(estimate.CompressSnapshot.Workers()) - } else { - cfg.blockRetire.SetWorkers(1) - } + var minBlockNumber uint64 - cfg.blockRetire.RetireBlocksInBackground(ctx, minBlockNumber, s.ForwardProgress, log.LvlDebug, func(downloadRequest []services.DownloadRequest) error { - if cfg.snapshotDownloader != nil && !reflect.ValueOf(cfg.snapshotDownloader).IsNil() { - if err := snapshotsync.RequestSnapshotsDownload(ctx, downloadRequest, cfg.snapshotDownloader); err != nil { - return err - } - } + if cfg.snapshotUploader != nil { + minBlockNumber = cfg.snapshotUploader.minBlockNumber() + } - return nil - }, func(l []string) error { - //if cfg.snapshotUploader != nil { - // TODO - we need to also remove files from the uploader (100k->500K transition) - //} + if s.CurrentSyncCycle.IsInitialCycle { + cfg.blockRetire.SetWorkers(estimate.CompressSnapshot.Workers()) + } else { + cfg.blockRetire.SetWorkers(1) + } - if !(cfg.snapshotDownloader == nil || reflect.ValueOf(cfg.snapshotDownloader).IsNil()) { - _, err := cfg.snapshotDownloader.Delete(ctx, &protodownloader.DeleteRequest{Paths: l}) + cfg.blockRetire.RetireBlocksInBackground(ctx, minBlockNumber, s.ForwardProgress, log.LvlDebug, func(downloadRequest []services.DownloadRequest) error { + if cfg.snapshotDownloader != nil && !reflect.ValueOf(cfg.snapshotDownloader).IsNil() { + if err := snapshotsync.RequestSnapshotsDownload(ctx, downloadRequest, cfg.snapshotDownloader); err != nil { return err } + } - return nil - }, func() error { - filesDeleted, err := pruneBlockSnapshots(ctx, cfg, logger) - if filesDeleted && cfg.notifier != nil { - cfg.notifier.Events.OnNewSnapshot() - } + return nil + }, func(l []string) error { + //if cfg.snapshotUploader != nil { + // TODO - we need to also remove files from the uploader (100k->500K transition) + //} + + if !(cfg.snapshotDownloader == nil || reflect.ValueOf(cfg.snapshotDownloader).IsNil()) { + _, err := cfg.snapshotDownloader.Delete(ctx, &protodownloader.DeleteRequest{Paths: l}) return err - }) + } - // cfg.agg.BuildFilesInBackground() + return nil + }, func() error { + filesDeleted, err := pruneBlockSnapshots(ctx, cfg, logger) + if filesDeleted && cfg.notifier != nil { + cfg.notifier.Events.OnNewSnapshot() + } + return err + }) - } + // cfg.agg.BuildFilesInBackground() - pruneLimit := 100 - if s.CurrentSyncCycle.IsInitialCycle { - pruneLimit = 10_000 - } - if _, err := cfg.blockRetire.PruneAncientBlocks(tx, pruneLimit); err != nil { - return err - } + } + + pruneLimit := 100 + if s.CurrentSyncCycle.IsInitialCycle { + pruneLimit = 10_000 + } + if _, err := 
cfg.blockRetire.PruneAncientBlocks(tx, pruneLimit); err != nil { + return err } if cfg.snapshotUploader != nil { @@ -636,19 +626,15 @@ func pruneBlockSnapshots(ctx context.Context, cfg SnapshotsCfg, logger log.Logge return false, err } // If we are behind the execution stage, we should not prune snapshots - if headNumber > executionProgress { + if headNumber > executionProgress || !cfg.prune.Blocks.Enabled() { return false, nil } // Keep at least 2 block snapshots as we do not want FrozenBlocks to be 0 - pruneAmount, _ := computeBlocksToPrune(cfg) - if pruneAmount == 0 { - return false, nil - } + pruneTo := cfg.prune.Blocks.PruneTo(headNumber) - minBlockNumberToKeep := uint64(0) - if headNumber > pruneAmount { - minBlockNumberToKeep = headNumber - pruneAmount + if pruneTo > executionProgress { + return false, nil } snapshotFileNames := cfg.blockReader.FrozenFiles() @@ -664,7 +650,7 @@ func pruneBlockSnapshots(ctx context.Context, cfg SnapshotsCfg, logger log.Logge if !ok { continue } - if info.To >= minBlockNumberToKeep { + if info.To >= pruneTo { continue } if info.To-info.From != snaptype.Erigon2MergeLimit { @@ -713,7 +699,7 @@ func (u *snapshotUploader) init(ctx context.Context, logger log.Logger) { if u.files == nil { freezingCfg := u.cfg.blockReader.FreezingCfg() - if freezingCfg.Enabled && freezingCfg.ProduceE2 { + if freezingCfg.ProduceE2 { u.files = map[string]*uploadState{} u.start(ctx, logger) } diff --git a/eth/stagedsync/stage_trie3.go b/eth/stagedsync/stage_trie3.go index 50d24fbc4c1..6009e6773bc 100644 --- a/eth/stagedsync/stage_trie3.go +++ b/eth/stagedsync/stage_trie3.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "encoding/hex" + "errors" "fmt" "sync/atomic" @@ -119,7 +120,7 @@ func collectAndComputeCommitment(ctx context.Context, tx kv.RwTx, tmpDir string, } logger.Info("Committing batch", "processed", fmt.Sprintf("%dM/%dM (%.2f%%)", processed.Load()/1_000_000, totalKeys.Load()/1_000_000, float64(processed.Load())/float64(totalKeys.Load())*100), - "intermediate root", fmt.Sprintf("%x", rh)) + "intermediate root", hex.EncodeToString(rh)) } processed.Add(1) sdCtx.TouchKey(kv.AccountsDomain, string(k), nil) @@ -235,7 +236,7 @@ func StageHashStateCfg(db kv.RwDB, dirs datadir.Dirs) HashStateCfg { } } -var ErrInvalidStateRootHash = fmt.Errorf("invalid state root hash") +var ErrInvalidStateRootHash = errors.New("invalid state root hash") func RebuildPatriciaTrieBasedOnFiles(rwTx kv.RwTx, cfg TrieCfg, ctx context.Context, logger log.Logger) (libcommon.Hash, error) { useExternalTx := rwTx != nil @@ -299,7 +300,7 @@ func RebuildPatriciaTrieBasedOnFiles(rwTx kv.RwTx, cfg TrieCfg, ctx context.Cont logger.Error(fmt.Sprintf("[RebuildCommitment] Wrong trie root of block %d: %x, expected (from header): %x. Block hash: %x", blockNum, rh, expectedRootHash, headerHash)) rwTx.Rollback() - return trie.EmptyRoot, fmt.Errorf("wrong trie root") + return trie.EmptyRoot, errors.New("wrong trie root") } logger.Info(fmt.Sprintf("[RebuildCommitment] Trie root of block %d txNum %d: %x. 
Could not verify with block hash because txnum of state is in the middle of the block.", blockNum, toTxNum, rh)) diff --git a/eth/stagedsync/stage_txlookup.go b/eth/stagedsync/stage_txlookup.go index 3e3c39f274d..60f35399041 100644 --- a/eth/stagedsync/stage_txlookup.go +++ b/eth/stagedsync/stage_txlookup.go @@ -76,12 +76,12 @@ func SpawnTxLookup(s *StageState, tx kv.RwTx, toBlock uint64, cfg TxLookupCfg, c } logPrefix := s.LogPrefix() endBlock, err := s.ExecutionAt(tx) - if s.BlockNumber > endBlock { // Erigon will self-heal (download missed blocks) eventually - return nil - } if err != nil { return err } + if s.BlockNumber > endBlock { // Erigon will self-heal (download missed blocks) eventually + return nil + } if toBlock > 0 { endBlock = min(endBlock, toBlock) } @@ -97,13 +97,11 @@ func SpawnTxLookup(s *StageState, tx kv.RwTx, toBlock uint64, cfg TxLookupCfg, c } } - if cfg.blockReader.FreezingCfg().Enabled { - if cfg.blockReader.FrozenBlocks() > startBlock { - // Snapshot .idx files already have TxLookup index - then no reason iterate over them here - startBlock = cfg.blockReader.FrozenBlocks() - if err = s.UpdatePrune(tx, startBlock); err != nil { // prune func of this stage will use this value to prevent all ancient blocks traversal - return err - } + if cfg.blockReader.FrozenBlocks() > startBlock { + // Snapshot .idx files already have TxLookup index - then no reason iterate over them here + startBlock = cfg.blockReader.FrozenBlocks() + if err = s.UpdatePrune(tx, startBlock); err != nil { // prune func of this stage will use this value to prevent all ancient blocks traversal + return err } } @@ -209,10 +207,9 @@ func UnwindTxLookup(u *UnwindState, s *StageState, tx kv.RwTx, cfg TxLookupCfg, // end key needs to be s.BlockNumber + 1 and not s.BlockNumber, because // the keys in BlockBody table always have hash after the block number blockFrom, blockTo := u.UnwindPoint+1, s.BlockNumber+1 - if cfg.blockReader.FreezingCfg().Enabled { - smallestInDB := cfg.blockReader.FrozenBlocks() - blockFrom, blockTo = max(blockFrom, smallestInDB), max(blockTo, smallestInDB) - } + smallestInDB := cfg.blockReader.FrozenBlocks() + blockFrom, blockTo = max(blockFrom, smallestInDB), max(blockTo, smallestInDB) + // etl.Transform uses ExtractEndKey as exclusive bound, therefore blockTo + 1 if err := deleteTxLookupRange(tx, s.LogPrefix(), blockFrom, blockTo+1, ctx, cfg, logger); err != nil { return fmt.Errorf("unwind TxLookUp: %w", err) @@ -243,15 +240,15 @@ func PruneTxLookup(s *PruneState, tx kv.RwTx, cfg TxLookupCfg, ctx context.Conte } defer tx.Rollback() } - blockFrom, blockTo := s.PruneProgress, uint64(0) + blockFrom := s.PruneProgress + var blockTo uint64 var pruneBor bool - // Forward stage doesn't write anything before PruneTo point if cfg.prune.History.Enabled() { blockTo = cfg.prune.History.PruneTo(s.ForwardProgress) pruneBor = true - } else if cfg.blockReader.FreezingCfg().Enabled { + } else { blockTo = cfg.blockReader.CanPruneTo(s.ForwardProgress) } // can't prune much here: because tx_lookup index has crypto-hashed-keys, and 1 block producing hundreds of deletes diff --git a/eth/stagedsync/stagedsynctest/harness.go b/eth/stagedsync/stagedsynctest/harness.go index 05f565d27b7..34f0bfa40f9 100644 --- a/eth/stagedsync/stagedsynctest/harness.go +++ b/eth/stagedsync/stagedsynctest/harness.go @@ -74,9 +74,8 @@ func InitHarness(ctx context.Context, t *testing.T, cfg HarnessCfg) Harness { blockReader, nil, // headerDownloader nil, // penalize - nil, // loopBreakCheck nil, // recent bor snapshots cached 
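The stage_txlookup.go hunk above is mostly mechanical, but its first change is a real fix: the old code compared s.BlockNumber > endBlock before checking the error from ExecutionAt, so a failure could be masked by a comparison against a zero endBlock. The fixed ordering in miniature (executionAt is a hypothetical stand-in):

```go
package main

import (
	"errors"
	"fmt"
)

// executionAt is a hypothetical stand-in for s.ExecutionAt(tx).
func executionAt() (uint64, error) {
	return 0, errors.New("db unavailable")
}

func run(stageBlock uint64) error {
	endBlock, err := executionAt()
	if err != nil {
		return err // check the error before using endBlock
	}
	if stageBlock > endBlock { // Erigon will self-heal eventually
		return nil
	}
	fmt.Println("index up to", endBlock)
	return nil
}

func main() {
	// With the old ordering this call would have been a silent no-op
	// (100 > 0); with the fix the failure is reported.
	fmt.Println(run(100))
}
```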
- nil, // signatures lru cache + nil, // signatures false, nil, ) @@ -508,6 +507,7 @@ func (h *Harness) consensusEngine(t *testing.T, cfg HarnessCfg) consensus.Engine genesisContracts, h.logger, nil, + nil, ) borConsensusEng.Authorize(h.validatorAddress, func(_ libcommon.Address, _ string, msg []byte) ([]byte, error) { diff --git a/eth/stagedsync/stages/stages.go b/eth/stagedsync/stages/stages.go index 8cc732677e2..a6d70c5b8b2 100644 --- a/eth/stagedsync/stages/stages.go +++ b/eth/stagedsync/stages/stages.go @@ -29,26 +29,20 @@ import ( type SyncStage string var ( - Snapshots SyncStage = "Snapshots" // Snapshots - Headers SyncStage = "Headers" // Headers are downloaded, their Proof-Of-Work validity and chaining is verified - BorHeimdall SyncStage = "BorHeimdall" // Downloading data from heimdall corresponding to the downloaded headers (validator sets and sync events) - PolygonSync SyncStage = "PolygonSync" // Use polygon sync component to sync headers, bodies and heimdall data - CumulativeIndex SyncStage = "CumulativeIndex" // Calculate how much gas has been used up to each block. - BlockHashes SyncStage = "BlockHashes" // Headers Number are written, fills blockHash => number bucket - Bodies SyncStage = "Bodies" // Block bodies are downloaded, TxHash and UncleHash are getting verified - Senders SyncStage = "Senders" // "From" recovered from signatures, bodies re-written - Execution SyncStage = "Execution" // Executing each block w/o buildinf a trie - CustomTrace SyncStage = "CustomTrace" // Executing each block w/o buildinf a trie - Translation SyncStage = "Translation" // Translation each marked for translation contract (from EVM to TEVM) - VerkleTrie SyncStage = "VerkleTrie" - IntermediateHashes SyncStage = "IntermediateHashes" // Generate intermediate hashes, calculate the state root hash - HashState SyncStage = "HashState" // Apply Keccak256 to all the keys in the state - AccountHistoryIndex SyncStage = "AccountHistoryIndex" // Generating history index for accounts - StorageHistoryIndex SyncStage = "StorageHistoryIndex" // Generating history index for storage - LogIndex SyncStage = "LogIndex" // Generating logs index (from receipts) - CallTraces SyncStage = "CallTraces" // Generating call traces index - TxLookup SyncStage = "TxLookup" // Generating transactions lookup index - Finish SyncStage = "Finish" // Nominal stage after all other stages + Snapshots SyncStage = "OtterSync" // Snapshots + Headers SyncStage = "Headers" // Headers are downloaded, their Proof-Of-Work validity and chaining is verified + BorHeimdall SyncStage = "BorHeimdall" // Downloading data from heimdall corresponding to the downloaded headers (validator sets and sync events) + PolygonSync SyncStage = "PolygonSync" // Use polygon sync component to sync headers, bodies and heimdall data + CumulativeIndex SyncStage = "CumulativeIndex" // Calculate how much gas has been used up to each block. 
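
One consequence of the stage-list rewrite above is easy to miss: a `SyncStage`'s string value doubles as the key under which its progress is persisted, so renaming `Snapshots` from "Snapshots" to "OtterSync" changes the stored key, and the stages dropped from `AllStages` stop being tracked entirely. A hedged sketch, assuming progress is keyed by the stage's string value as elsewhere in this package:

```go
package main

import "fmt"

// Illustrative only: the SyncStage string is also its storage key.
type SyncStage string

var Snapshots SyncStage = "OtterSync" // renamed; the DB key changes with it

func progressKey(stage SyncStage) []byte { return []byte(stage) }

func main() {
	fmt.Printf("%s\n", progressKey(Snapshots)) // "OtterSync", no longer "Snapshots"
}
```
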
+ BlockHashes SyncStage = "BlockHashes" // Headers Number are written, fills blockHash => number bucket + Bodies SyncStage = "Bodies" // Block bodies are downloaded, TxHash and UncleHash are getting verified + Senders SyncStage = "Senders" // "From" recovered from signatures, bodies re-written + Execution SyncStage = "Execution" // Executing each block w/o building a trie + CustomTrace SyncStage = "CustomTrace" // Executing each block w/o building a trie + Translation SyncStage = "Translation" // Translation each marked for translation contract (from EVM to TEVM) + VerkleTrie SyncStage = "VerkleTrie" + TxLookup SyncStage = "TxLookup" // Generating transactions lookup index + Finish SyncStage = "Finish" // Nominal stage after all other stages MiningCreateBlock SyncStage = "MiningCreateBlock" MiningBorHeimdall SyncStage = "MiningBorHeimdall" @@ -73,12 +67,6 @@ var AllStages = []SyncStage{ Execution, CustomTrace, Translation, - HashState, - IntermediateHashes, - AccountHistoryIndex, - StorageHistoryIndex, - LogIndex, - CallTraces, TxLookup, Finish, } diff --git a/eth/stagedsync/sync_test.go b/eth/stagedsync/sync_test.go index 350a1d021ce..ed9c8db253a 100644 --- a/eth/stagedsync/sync_test.go +++ b/eth/stagedsync/sync_test.go @@ -206,23 +206,8 @@ func TestUnwindSomeStagesBehindUnwindPoint(t *testing.T) { return u.Done(txc.Tx) }, }, - { - ID: stages.IntermediateHashes, - Disabled: true, - Forward: func(badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { - flow = append(flow, stages.IntermediateHashes) - if s.BlockNumber == 0 { - return s.Update(txc.Tx, 2000) - } - return nil - }, - Unwind: func(u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { - flow = append(flow, unwindOf(stages.IntermediateHashes)) - return u.Done(txc.Tx) - }, - }, } - state := New(ethconfig.Defaults.Sync, s, []stages.SyncStage{s[3].ID, s[2].ID, s[1].ID, s[0].ID}, nil, log.New()) + state := New(ethconfig.Defaults.Sync, s, []stages.SyncStage{s[2].ID, s[1].ID, s[0].ID}, nil, log.New()) db, tx := memdb.NewTestTx(t) _, err := state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */, false) assert.NoError(t, err) @@ -299,23 +284,8 @@ func TestUnwind(t *testing.T) { return u.Done(txc.Tx) }, }, - { - ID: stages.IntermediateHashes, - Disabled: true, - Forward: func(badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { - flow = append(flow, stages.IntermediateHashes) - if s.BlockNumber == 0 { - return s.Update(txc.Tx, 2000) - } - return nil - }, - Unwind: func(u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error { - flow = append(flow, unwindOf(stages.IntermediateHashes)) - return u.Done(txc.Tx) - }, - }, } - state := New(ethconfig.Defaults.Sync, s, []stages.SyncStage{s[3].ID, s[2].ID, s[1].ID, s[0].ID}, nil, log.New()) + state := New(ethconfig.Defaults.Sync, s, []stages.SyncStage{s[2].ID, s[1].ID, s[0].ID}, nil, log.New()) db, tx := memdb.NewTestTx(t) _, err := state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */, false) assert.NoError(t, err) @@ -342,7 +312,7 @@ func TestUnwind(t *testing.T) { //check that at unwind disabled stage not appear flow = flow[:0] - state.unwindOrder = []*Stage{s[3], s[2], s[1], s[0]} + state.unwindOrder = []*Stage{s[2], s[1], s[0]} _ = state.UnwindTo(100, UnwindReason{}, tx) _, err = state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */, false) assert.NoError(t, err) diff --git a/eth/tracers/internal/tracetest/calltrace_test.go 
b/eth/tracers/internal/tracetest/calltrace_test.go index 9a588a9038c..f32a422d138 100644 --- a/eth/tracers/internal/tracetest/calltrace_test.go +++ b/eth/tracers/internal/tracetest/calltrace_test.go @@ -155,7 +155,7 @@ func testCallTracer(tracerName string, dirPath string, t *testing.T) { dbTx, err := m.DB.BeginRw(m.Ctx) require.NoError(t, err) defer dbTx.Rollback() - statedb, err := tests.MakePreState(rules, dbTx, test.Genesis.Alloc, uint64(test.Context.Number), m.HistoryV3) + statedb, err := tests.MakePreState(rules, dbTx, test.Genesis.Alloc, uint64(test.Context.Number)) require.NoError(t, err) tracer, err := tracers.New(tracerName, new(tracers.Context), test.TracerConfig) if err != nil { @@ -263,7 +263,7 @@ func benchTracer(b *testing.B, tracerName string, test *callTracerTest) { dbTx, err := m.DB.BeginRw(m.Ctx) require.NoError(b, err) defer dbTx.Rollback() - statedb, _ := tests.MakePreState(rules, dbTx, test.Genesis.Alloc, uint64(test.Context.Number), m.HistoryV3) + statedb, _ := tests.MakePreState(rules, dbTx, test.Genesis.Alloc, uint64(test.Context.Number)) b.ReportAllocs() b.ResetTimer() @@ -340,7 +340,7 @@ func TestZeroValueToNotExitCall(t *testing.T) { require.NoError(t, err) defer dbTx.Rollback() - statedb, _ := tests.MakePreState(rules, dbTx, alloc, context.BlockNumber, m.HistoryV3) + statedb, _ := tests.MakePreState(rules, dbTx, alloc, context.BlockNumber) // Create the tracer, the EVM environment and run it tracer, err := tracers.New("callTracer", nil, nil) if err != nil { diff --git a/eth/tracers/internal/tracetest/prestate_test.go b/eth/tracers/internal/tracetest/prestate_test.go index 2a4a5852ee6..09cae7c8768 100644 --- a/eth/tracers/internal/tracetest/prestate_test.go +++ b/eth/tracers/internal/tracetest/prestate_test.go @@ -119,7 +119,7 @@ func testPrestateDiffTracer(tracerName string, dirPath string, t *testing.T) { dbTx, err := m.DB.BeginRw(m.Ctx) require.NoError(t, err) defer dbTx.Rollback() - statedb, err := tests.MakePreState(rules, dbTx, test.Genesis.Alloc, context.BlockNumber, m.HistoryV3) + statedb, err := tests.MakePreState(rules, dbTx, test.Genesis.Alloc, context.BlockNumber) require.NoError(t, err) tracer, err := tracers.New(tracerName, new(tracers.Context), test.TracerConfig) if err != nil { diff --git a/eth/tracers/js/goja.go b/eth/tracers/js/goja.go index c7b77500d82..7c532f43424 100644 --- a/eth/tracers/js/goja.go +++ b/eth/tracers/js/goja.go @@ -92,7 +92,7 @@ func fromBuf(vm *goja.Runtime, bufType goja.Value, buf goja.Value, allowString b b := obj.Get("buffer").Export().(goja.ArrayBuffer).Bytes() return b, nil } - return nil, fmt.Errorf("invalid buffer type") + return nil, errors.New("invalid buffer type") } // jsTracer is an implementation of the Tracer interface which evaluates diff --git a/eth/tracers/logger/logger.go b/eth/tracers/logger/logger.go index 8eb7ddb457f..e3633cfdad0 100644 --- a/eth/tracers/logger/logger.go +++ b/eth/tracers/logger/logger.go @@ -283,14 +283,14 @@ func FormatLogs(logs []StructLog) []StructLogRes { if trace.Stack != nil { stack := make([]string, len(trace.Stack)) for i, stackValue := range trace.Stack { - stack[i] = fmt.Sprintf("%x", math.PaddedBigBytes(stackValue, 32)) + stack[i] = hex.EncodeToString(math.PaddedBigBytes(stackValue, 32)) } formatted[index].Stack = &stack } if trace.Memory != nil { memory := make([]string, 0, (len(trace.Memory)+31)/32) for i := 0; i+32 <= len(trace.Memory); i += 32 { - memory = append(memory, fmt.Sprintf("%x", trace.Memory[i:i+32])) + memory = append(memory, 
hex.EncodeToString(trace.Memory[i:i+32])) } formatted[index].Memory = &memory } diff --git a/eth/tracers/native/call.go b/eth/tracers/native/call.go index b3ac179a093..81699575179 100644 --- a/eth/tracers/native/call.go +++ b/eth/tracers/native/call.go @@ -185,6 +185,10 @@ func (t *callTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, sco if t.config.OnlyTopCall && depth > 0 { return } + // on-error `stackData[stackSize-2]` will contain error data instead of logs. + if err != nil { + return + } // Skip if tracing was interrupted if atomic.LoadUint32(&t.interrupt) > 0 { return diff --git a/eth/tracers/tracers_test.go b/eth/tracers/tracers_test.go index b38e7b22787..316941b1dc0 100644 --- a/eth/tracers/tracers_test.go +++ b/eth/tracers/tracers_test.go @@ -106,7 +106,7 @@ func TestPrestateTracerCreate2(t *testing.T) { require.NoError(t, err) defer tx.Rollback() rules := params.AllProtocolChanges.Rules(context.BlockNumber, context.Time) - statedb, _ := tests.MakePreState(rules, tx, alloc, context.BlockNumber, m.HistoryV3) + statedb, _ := tests.MakePreState(rules, tx, alloc, context.BlockNumber) // Create the tracer, the EVM environment and run it tracer, err := tracers.New("prestateTracer", new(tracers.Context), json.RawMessage("{}")) diff --git a/ethdb/privateapi/ethbackend.go b/ethdb/privateapi/ethbackend.go index b9d40d91831..52d363f4718 100644 --- a/ethdb/privateapi/ethbackend.go +++ b/ethdb/privateapi/ethbackend.go @@ -19,7 +19,6 @@ package privateapi import ( "context" "errors" - "fmt" "google.golang.org/protobuf/types/known/emptypb" @@ -272,7 +271,7 @@ func (s *EthBackendServer) SubscribeLogs(server remote.ETHBACKEND_SubscribeLogsS if s.logsFilter != nil { return s.logsFilter.subscribeLogs(server) } - return fmt.Errorf("no logs filter available") + return errors.New("no logs filter available") } func (s *EthBackendServer) BorEvent(ctx context.Context, req *remote.BorEventRequest) (*remote.BorEventReply, error) { diff --git a/ethstats/ethstats.go b/ethstats/ethstats.go index a042435b40a..55da813f6e6 100644 --- a/ethstats/ethstats.go +++ b/ethstats/ethstats.go @@ -175,7 +175,7 @@ func (s *Service) Stop() error { // until termination. 
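
The `fmt.Errorf` → `errors.New` and `fmt.Sprintf("%x")` → `hex.EncodeToString` substitutions recur throughout these hunks; they are the kind of rewrite the `perfsprint` linter enforces, since `fmt` only earns its keep when there is actual formatting to do. A self-contained illustration:

```go
package main

import (
	"encoding/hex"
	"errors"
	"fmt"
)

// Constant message: errors.New skips the formatting machinery entirely.
var errNoFilter = errors.New("no logs filter available")

func main() {
	b := []byte{0xde, 0xad, 0xbe, 0xef}
	fmt.Println(hex.EncodeToString(b)) // "deadbeef", cheaper than fmt.Sprintf("%x", b)

	// fmt.Errorf remains the right tool when wrapping or interpolating:
	err := fmt.Errorf("subscribe failed: %w", errNoFilter)
	fmt.Println(errors.Is(err, errNoFilter)) // true
}
```
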
func (s *Service) loop() { // Resolve the URL, defaulting to TLS, but falling back to none too - path := fmt.Sprintf("%s/api", s.host) + path := s.host + "/api" urls := []string{path} // url.Parse and url.IsAbs is unsuitable (https://github.com/golang/go/issues/19779) @@ -399,7 +399,7 @@ func (s *Service) login(conn *connWrapper) error { Name: s.node, Node: nodeName, Port: 0, - Network: fmt.Sprintf("%d", s.networkid), + Network: strconv.FormatUint(s.networkid, 10), Protocol: strings.Join(protocols, ", "), API: "No", Os: runtime.GOOS, diff --git a/event/feed_test.go b/event/feed_test.go index 7eba94a0a4d..c5b1147d482 100644 --- a/event/feed_test.go +++ b/event/feed_test.go @@ -20,6 +20,7 @@ package event import ( + "errors" "fmt" "reflect" "sync" @@ -71,7 +72,7 @@ func checkPanic(want error, fn func()) (err error) { defer func() { panic := recover() if panic == nil { - err = fmt.Errorf("didn't panic") + err = errors.New("didn't panic") } else if !reflect.DeepEqual(panic, want) { err = fmt.Errorf("panicked with wrong error: got %q, want %q", panic, want) } diff --git a/go.mod b/go.mod index 9e691cb19ad..ac3acf70750 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,6 @@ module github.com/erigontech/erigon -go 1.22 - -toolchain go1.22.2 +go 1.21.5 require ( github.com/erigontech/erigonwatch v0.0.0-20240718131902-b6576bde1116 @@ -35,8 +33,7 @@ require ( github.com/crate-crypto/go-kzg-4844 v0.7.0 github.com/davecgh/go-spew v1.1.1 github.com/deckarep/golang-set v1.8.0 - github.com/deckarep/golang-set/v2 v2.6.0 - github.com/docker/docker v26.1.0+incompatible + github.com/deckarep/golang-set/v2 v2.5.0 github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 github.com/edsrzf/mmap-go v1.1.0 github.com/emicklei/dot v1.6.2 @@ -44,18 +41,16 @@ require ( github.com/ethereum/go-ethereum v1.13.5 github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e github.com/gballet/go-verkle v0.0.0-20230607174250-df487255f46b - github.com/gfx-labs/sse v0.0.0-20231226060816-f747e26a9baa github.com/go-chi/chi/v5 v5.1.0 github.com/go-chi/cors v1.2.1 github.com/go-echarts/go-echarts/v2 v2.3.3 github.com/goccy/go-json v0.10.2 - github.com/gofrs/flock v0.11.0 + github.com/gofrs/flock v0.12.0 github.com/golang-jwt/jwt/v4 v4.5.0 github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb github.com/google/btree v1.1.2 github.com/google/cel-go v0.18.2 github.com/google/gofuzz v1.2.0 - github.com/google/uuid v1.6.0 github.com/gorilla/websocket v1.5.3 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 github.com/hashicorp/golang-lru/arc/v2 v2.0.7 @@ -67,9 +62,8 @@ require ( github.com/jedib0t/go-pretty/v6 v6.5.9 github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 - github.com/karrick/godirwalk v1.17.0 github.com/klauspost/compress v1.17.8 - github.com/libp2p/go-libp2p v0.35.0 + github.com/libp2p/go-libp2p v0.34.0 github.com/libp2p/go-libp2p-mplex v0.9.0 github.com/libp2p/go-libp2p-pubsub v0.11.0 github.com/maticnetwork/crand v1.0.2 @@ -123,12 +117,14 @@ require ( require ( github.com/cosmos/gogoproto v1.4.1 // indirect github.com/d4l3k/messagediff v1.2.1 // indirect + github.com/erigontech/erigon-snapshot v1.3.1-0.20240801141542-7a7b08ebd406 // indirect github.com/erigontech/speedtest v0.0.2 // indirect github.com/go-kit/kit v0.12.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/golang/protobuf v1.5.4 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/gtank/merlin v0.1.1 // 
indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.7 // indirect @@ -189,7 +185,6 @@ require ( github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/elastic/gosigar v0.14.2 // indirect - github.com/erigontech/erigon-snapshot v1.3.1-0.20240720122906-e073fcdeca33 // indirect github.com/flynn/noise v1.1.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect @@ -303,9 +298,7 @@ require ( golang.org/x/mod v0.19.0 // indirect golang.org/x/text v0.16.0 // indirect golang.org/x/tools v0.23.0 // indirect - gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect - gotest.tools/v3 v3.5.1 // indirect lukechampine.com/blake3 v1.2.1 // indirect modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 // indirect modernc.org/libc v1.50.9 // indirect @@ -318,9 +311,9 @@ require ( ) replace ( - github.com/anacrolix/torrent => github.com/erigontech/torrent v1.54.2-alpha-32 + github.com/anacrolix/torrent => github.com/erigontech/torrent v1.54.2-alpha-33 github.com/cometbft/cometbft => github.com/bnb-chain/greenfield-cometbft v1.3.1 - github.com/erigontech/erigon-snapshot => github.com/node-real/bsc-erigon-snapshot v1.0.1-0.20240805061542-ddf011a69761 + github.com/erigontech/erigon-snapshot => github.com/node-real/bsc-erigon-snapshot v1.0.1-0.20240807061100-d5a04db04e7a github.com/gballet/go-verkle => github.com/gballet/go-verkle v0.0.0-20221121182333-31427a1f2d35 github.com/holiman/bloomfilter/v2 => github.com/AskAlexSharov/bloomfilter/v2 v2.0.8 github.com/tendermint/tendermint => github.com/bnb-chain/tendermint v0.31.16 diff --git a/go.sum b/go.sum index 62ec310a964..e2b25d7522d 100644 --- a/go.sum +++ b/go.sum @@ -259,8 +259,8 @@ github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4= github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo= -github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM= -github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= +github.com/deckarep/golang-set/v2 v2.5.0 h1:hn6cEZtQ0h3J8kFrHR/NrzyOoTnjgW1+FmNJzQ7y/sA= +github.com/deckarep/golang-set/v2 v2.5.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= @@ -277,8 +277,6 @@ github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8 github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo= github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= -github.com/docker/docker v26.1.0+incompatible h1:W1G9MPNbskA6VZWL7b3ZljTh0pXI68FpINx0GKaOdaM= -github.com/docker/docker v26.1.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-units v0.4.0/go.mod 
h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= @@ -318,8 +316,8 @@ github.com/erigontech/silkworm-go v0.18.0 h1:j56p61xZHBFhZGH1OixlGU8KcfjHzcw9pjA github.com/erigontech/silkworm-go v0.18.0/go.mod h1:O50ux0apICEVEGyRWiE488K8qz8lc3PA/SXbQQAc8SU= github.com/erigontech/speedtest v0.0.2 h1:W9Cvky/8AMUtUONwkLA/dZjeQ2XfkBdYfJzvhMZUO+U= github.com/erigontech/speedtest v0.0.2/go.mod h1:vulsRNiM51BmSTbVtch4FWxKxx53pS2D35lZTtao0bw= -github.com/erigontech/torrent v1.54.2-alpha-32 h1:Ly8W2JvD7r1o5TklXxKEV9D9Tr664tSrgj5OPpOrlWg= -github.com/erigontech/torrent v1.54.2-alpha-32/go.mod h1:QtK2WLdEz1Iy1Dh/325UltdHU0nA1xujh2rN6aov6y0= +github.com/erigontech/torrent v1.54.2-alpha-33 h1:CzTALQ+M4iYDirlP6rUFunxwz1MJXVoLOStmPyDP1ok= +github.com/erigontech/torrent v1.54.2-alpha-33/go.mod h1:QtK2WLdEz1Iy1Dh/325UltdHU0nA1xujh2rN6aov6y0= github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY= github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= github.com/ethereum/go-ethereum v1.13.5 h1:U6TCRciCqZRe4FPXmy1sMGxTfuk8P7u2UoinF3VbaFk= @@ -345,8 +343,6 @@ github.com/garslo/gogen v0.0.0-20170307003452-d6ebae628c7c h1:uYNKzPntb8c6DKvP9E github.com/garslo/gogen v0.0.0-20170307003452-d6ebae628c7c/go.mod h1:Q0X6pkwTILDlzrGEckF6HKjXe48EgsY/l7K7vhY4MW8= github.com/gballet/go-verkle v0.0.0-20221121182333-31427a1f2d35 h1:I8QswD9gf3VEpr7bpepKKOm7ChxFITIG+oc1I5/S0no= github.com/gballet/go-verkle v0.0.0-20221121182333-31427a1f2d35/go.mod h1:DMDd04jjQgdynaAwbEgiRERIGpC8fDjx0+y06an7Psg= -github.com/gfx-labs/sse v0.0.0-20231226060816-f747e26a9baa h1:b6fBm4SLM8jywQHNmc3ZCl6zQEhEyZl6bp7is4en72M= -github.com/gfx-labs/sse v0.0.0-20231226060816-f747e26a9baa/go.mod h1:K0FMPjMrIaS1+/SeZeOVkGVjDVERZJW53inQL00FjLE= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= @@ -402,8 +398,8 @@ github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/flock v0.11.0 h1:AGFQxrpWd8ezw60AvLWIPbxMydNfF8564pwH3FCty0g= -github.com/gofrs/flock v0.11.0/go.mod h1:FirDy1Ing0mI2+kB6wk+vyyAH+e6xiE+EYA0jnzV9jc= +github.com/gofrs/flock v0.12.0 h1:xHW8t8GPAiGtqz7KxiSqfOEXwpOaqhpYZrTE2MQBgXY= +github.com/gofrs/flock v0.12.0/go.mod h1:FirDy1Ing0mI2+kB6wk+vyyAH+e6xiE+EYA0jnzV9jc= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= @@ -586,8 +582,6 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod 
h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/karrick/godirwalk v1.17.0 h1:b4kY7nqDdioR/6qnbHQyDvmA17u5G1cZ6J+CZXwSWoI= -github.com/karrick/godirwalk v1.17.0/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -621,8 +615,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.35.0 h1:1xS1Bkr9X7GtdvV6ntLnDV9xB1kNjHK1lZ0eaO6gnhc= -github.com/libp2p/go-libp2p v0.35.0/go.mod h1:snyJQix4ET6Tj+LeI0VPjjxTtdWpeOhYt5lEY0KirkQ= +github.com/libp2p/go-libp2p v0.34.0 h1:J+SL3DMz+zPz06OHSRt42GKA5n5hmwgY1l7ckLUz3+c= +github.com/libp2p/go-libp2p v0.34.0/go.mod h1:snyJQix4ET6Tj+LeI0VPjjxTtdWpeOhYt5lEY0KirkQ= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= github.com/libp2p/go-libp2p-mplex v0.9.0 h1:R58pDRAmuBXkYugbSSXR9wrTX3+1pFM1xP2bLuodIq8= @@ -734,8 +728,8 @@ github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdh github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= -github.com/node-real/bsc-erigon-snapshot v1.0.1-0.20240805061542-ddf011a69761 h1:Oz/Pa4dMUhGI3iAt47nrz55p6srdCOLwiWe7mGJVhbY= -github.com/node-real/bsc-erigon-snapshot v1.0.1-0.20240805061542-ddf011a69761/go.mod h1:ooHlCl+eEYzebiPu+FP6Q6SpPUeMADn8Jxabv3IKb9M= +github.com/node-real/bsc-erigon-snapshot v1.0.1-0.20240807061100-d5a04db04e7a h1:UtunjNDwMvR6ySEC7uGYMPLVm5YkQt/9w/qaNlhz3y4= +github.com/node-real/bsc-erigon-snapshot v1.0.1-0.20240807061100-d5a04db04e7a/go.mod h1:ooHlCl+eEYzebiPu+FP6Q6SpPUeMADn8Jxabv3IKb9M= github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -1078,7 +1072,6 @@ golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4 golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= @@ -1123,7 +1116,6 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1175,7 +1167,6 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= @@ -1279,7 +1270,6 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= @@ -1294,7 +1284,6 @@ golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= @@ -1498,8 +1487,6 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/cenkalti/backoff.v1 v1.1.0 h1:Arh75ttbsvlpVA7WtVpH4u9h6Zl46xuptxqLxPiSo4Y= -gopkg.in/cenkalti/backoff.v1 v1.1.0/go.mod h1:J6Vskwqd+OMVJl8C33mmtxTBs2gyzfv7UDAkHu8BrjI= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1524,8 +1511,6 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C 
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= -gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/internal/reexec/reexec.go b/internal/reexec/reexec.go new file mode 100644 index 00000000000..af8d347986b --- /dev/null +++ b/internal/reexec/reexec.go @@ -0,0 +1,35 @@ +// This file originates from Docker/Moby, +// https://github.com/moby/moby/blob/master/pkg/reexec/reexec.go +// Licensed under Apache License 2.0: https://github.com/moby/moby/blob/master/LICENSE +// Copyright 2013-2018 Docker, Inc. +// +// Package reexec facilitates the busybox style reexec of the docker binary that +// we require because of the forking limitations of using Go. Handlers can be +// registered with a name and the argv 0 of the exec of the binary will be used +// to find and execute custom init paths. +package reexec + +import ( + "fmt" + "os" +) + +var registeredInitializers = make(map[string]func()) + +// Register adds an initialization func under the specified name +func Register(name string, initializer func()) { + if _, exists := registeredInitializers[name]; exists { + panic(fmt.Sprintf("reexec func already registered under name %q", name)) + } + registeredInitializers[name] = initializer +} + +// Init is called as the first part of the exec process and returns true if an +// initialization function was called. +func Init() bool { + if initializer, ok := registeredInitializers[os.Args[0]]; ok { + initializer() + return true + } + return false +} diff --git a/internal/reexec/self_linux.go b/internal/reexec/self_linux.go new file mode 100644 index 00000000000..956d09326a2 --- /dev/null +++ b/internal/reexec/self_linux.go @@ -0,0 +1,14 @@ +// This file originates from Docker/Moby, +// https://github.com/moby/moby/blob/master/pkg/reexec/ +// Licensed under Apache License 2.0: https://github.com/moby/moby/blob/master/LICENSE +// Copyright 2013-2018 Docker, Inc. + +//go:build linux + +package reexec + +// Self returns the path to the current process's binary. +// Returns "/proc/self/exe". +func Self() string { + return "/proc/self/exe" +} diff --git a/internal/reexec/self_others.go b/internal/reexec/self_others.go new file mode 100644 index 00000000000..a9f502ca87e --- /dev/null +++ b/internal/reexec/self_others.go @@ -0,0 +1,32 @@ +// This file originates from Docker/Moby, +// https://github.com/moby/moby/blob/master/pkg/reexec/ +// Licensed under Apache License 2.0: https://github.com/moby/moby/blob/master/LICENSE +// Copyright 2013-2018 Docker, Inc. + +//go:build !linux + +package reexec + +import ( + "os" + "os/exec" + "path/filepath" +) + +// Self returns the path to the current process's binary. +// Uses os.Args[0]. 
+func Self() string { + name := os.Args[0] + if filepath.Base(name) == name { + if lp, err := exec.LookPath(name); err == nil { + return lp + } + } + // handle conversion of relative paths to absolute + if absName, err := filepath.Abs(name); err == nil { + return absName + } + // if we couldn't get absolute name, return original + // (NOTE: Go only errors on Abs() if os.Getwd fails) + return name +} diff --git a/migrations/migrations.go b/migrations/migrations.go index 71b2309eb66..b8ad93ac738 100644 --- a/migrations/migrations.go +++ b/migrations/migrations.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "path/filepath" @@ -65,9 +66,9 @@ type Migration struct { } var ( - ErrMigrationNonUniqueName = fmt.Errorf("please provide unique migration name") - ErrMigrationCommitNotCalled = fmt.Errorf("migration before-commit function was not called") - ErrMigrationETLFilesDeleted = fmt.Errorf( + ErrMigrationNonUniqueName = errors.New("please provide unique migration name") + ErrMigrationCommitNotCalled = errors.New("migration before-commit function was not called") + ErrMigrationETLFilesDeleted = errors.New( "db migration progress was interrupted after extraction step and ETL files was deleted, please contact development team for help or re-sync from scratch", ) ) diff --git a/node/node.go b/node/node.go index 186ca35cfbd..728c9e9269d 100644 --- a/node/node.go +++ b/node/node.go @@ -305,7 +305,7 @@ func OpenDatabase(ctx context.Context, config *nodecfg.Config, label kv.Label, n name = "txpool" case kv.ConsensusDB: if len(name) == 0 { - return nil, fmt.Errorf("expected a consensus name") + return nil, errors.New("expected a consensus name") } case kv.BlobDb: name = "blob" diff --git a/node/rpcstack.go b/node/rpcstack.go index 2e0b9898118..e45c5fcf19c 100644 --- a/node/rpcstack.go +++ b/node/rpcstack.go @@ -22,6 +22,7 @@ package node import ( "compress/gzip" "context" + "errors" "fmt" "io" "net" @@ -265,7 +266,7 @@ func (h *httpServer) enableRPC(apis []rpc.API, config httpConfig, allowList rpc. defer h.mu.Unlock() if h.rpcAllowed() { - return fmt.Errorf("JSON-RPC over HTTP is already enabled") + return errors.New("JSON-RPC over HTTP is already enabled") } // Create RPC server and handler. @@ -298,7 +299,7 @@ func (h *httpServer) enableWS(apis []rpc.API, config wsConfig, allowList rpc.All defer h.mu.Unlock() if h.wsAllowed() { - return fmt.Errorf("JSON-RPC over WebSocket is already enabled") + return errors.New("JSON-RPC over WebSocket is already enabled") } // Create RPC server and handler. 
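
The three new `internal/reexec` files above port Moby's re-exec trick: a process re-executes its own binary with `argv[0]` set to a registered handler name, and `Init()` in the child dispatches to that handler before anything else runs. A usage sketch — the handler name and test wiring are hypothetical, not part of this diff:

```go
package mypkg_test

import (
	"fmt"
	"os"
	"os/exec"
	"testing"

	"github.com/erigontech/erigon/internal/reexec"
)

func init() {
	// Registered under the name the child will present as argv[0].
	reexec.Register("demo-child", func() {
		fmt.Println("hello from the re-exec'd child")
		os.Exit(0)
	})
}

func TestMain(m *testing.M) {
	if reexec.Init() { // true when argv[0] matched a registered handler
		return
	}
	os.Exit(m.Run())
}

func TestSpawn(t *testing.T) {
	cmd := exec.Command(reexec.Self()) // path to the current binary
	cmd.Args = []string{"demo-child"}  // argv[0] selects the handler
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatal(err)
	}
	t.Log(string(out))
}
```
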
diff --git a/p2p/discover/lookup_util_test.go b/p2p/discover/lookup_util_test.go index a6f19b726b8..cad8782f8f2 100644 --- a/p2p/discover/lookup_util_test.go +++ b/p2p/discover/lookup_util_test.go @@ -25,6 +25,7 @@ import ( "crypto/ecdsa" "fmt" "net" + "slices" "sort" "testing" @@ -149,7 +150,7 @@ func (tn *preminedTestnet) neighborsAtDistances(base *enode.Node, distances []ui for i := range lookupTestnet.dists[d] { n := lookupTestnet.node(d, i) d := enode.LogDist(base.ID(), n.ID()) - if containsUint(uint(d), distances) { + if slices.Contains(distances, uint(d)) { result = append(result, n) if len(result) >= elems { return result diff --git a/p2p/discover/v4_udp.go b/p2p/discover/v4_udp.go index 1bcba52c113..544ec0ef936 100644 --- a/p2p/discover/v4_udp.go +++ b/p2p/discover/v4_udp.go @@ -448,7 +448,7 @@ func (t *UDPv4) RequestENR(n *enode.Node) (*enode.Node, error) { return nil, err } if respN.ID() != n.ID() { - return nil, fmt.Errorf("invalid ID in response record") + return nil, errors.New("invalid ID in response record") } if respN.Seq() < n.Seq() { return n, nil // response record is older diff --git a/p2p/discover/v5_udp.go b/p2p/discover/v5_udp.go index d28120b5303..0ed36b0a3e7 100644 --- a/p2p/discover/v5_udp.go +++ b/p2p/discover/v5_udp.go @@ -29,6 +29,7 @@ import ( "io" "math" "net" + "slices" "sync" "time" @@ -435,26 +436,17 @@ func (t *UDPv5) verifyResponseNode(c *callV5, r *enr.Record, distances []uint, s } if distances != nil { nd := enode.LogDist(c.node.ID(), node.ID()) - if !containsUint(uint(nd), distances) { + if !slices.Contains(distances, uint(nd)) { return nil, errors.New("does not match any requested distance") } } if _, ok := seen[node.ID()]; ok { - return nil, fmt.Errorf("duplicate record") + return nil, errors.New("duplicate record") } seen[node.ID()] = struct{}{} return node, nil } -func containsUint(x uint, xs []uint) bool { - for _, v := range xs { - if x == v { - return true - } - } - return false -} - // call sends the given call and sets up a handler for response packets (of message type // responseType). Responses are dispatched to the call's response channel. func (t *UDPv5) call(node *enode.Node, responseType byte, packet v5wire.Packet) *callV5 { @@ -701,11 +693,11 @@ func (t *UDPv5) handleCallResponse(fromID enode.ID, fromAddr *net.UDPAddr, p v5w return false } if !fromAddr.IP.Equal(ac.node.IP()) || fromAddr.Port != ac.node.UDP() { - t.log.Trace(fmt.Sprintf("%s from wrong endpoint", p.Name()), "id", fromID, "addr", fromAddr) + t.log.Trace(p.Name()+" from wrong endpoint", "id", fromID, "addr", fromAddr) return false } if p.Kind() != ac.responseType { - t.log.Trace(fmt.Sprintf("Wrong discv5 response type %s", p.Name()), "id", fromID, "addr", fromAddr) + t.log.Trace("Wrong discv5 response type "+p.Name(), "id", fromID, "addr", fromAddr) return false } t.startResponseTimeout(ac) diff --git a/p2p/discover/v5wire/encoding.go b/p2p/discover/v5wire/encoding.go index 4b4ed628a97..d813ca2a31e 100644 --- a/p2p/discover/v5wire/encoding.go +++ b/p2p/discover/v5wire/encoding.go @@ -355,11 +355,11 @@ func (c *Codec) makeHandshakeAuth(toID enode.ID, addr string, challenge *Whoarey // key is part of the ID nonce signature. 
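
The discv4/discv5 hunks above retire the hand-rolled `containsUint` helper in favor of `slices.Contains`, which landed in the standard library in Go 1.21 (matching the `go 1.21.5` directive this PR pins). Equivalent behavior, generic, and one less function to maintain:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	distances := []uint{254, 255, 256}
	nd := 256
	// Replaces: for _, v := range distances { if uint(nd) == v { ... } }
	fmt.Println(slices.Contains(distances, uint(nd))) // true
}
```
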
var remotePubkey = new(ecdsa.PublicKey) if err := challenge.Node.Load((*enode.Secp256k1)(remotePubkey)); err != nil { - return nil, nil, fmt.Errorf("can't find secp256k1 key for recipient") + return nil, nil, errors.New("can't find secp256k1 key for recipient") } ephkey, err := c.sc.ephemeralKeyGen() if err != nil { - return nil, nil, fmt.Errorf("can't generate ephemeral key") + return nil, nil, errors.New("can't generate ephemeral key") } ephpubkey := EncodePubkey(&ephkey.PublicKey) auth.pubkey = ephpubkey @@ -383,7 +383,7 @@ func (c *Codec) makeHandshakeAuth(toID enode.ID, addr string, challenge *Whoarey // Create session keys. sec := deriveKeys(sha256.New, ephkey, remotePubkey, c.localnode.ID(), challenge.Node.ID(), cdata) if sec == nil { - return nil, nil, fmt.Errorf("key derivation failed") + return nil, nil, errors.New("key derivation failed") } return auth, sec, err } diff --git a/p2p/dnsdisc/client.go b/p2p/dnsdisc/client.go index 850f5004532..aa1452f9e6a 100644 --- a/p2p/dnsdisc/client.go +++ b/p2p/dnsdisc/client.go @@ -200,7 +200,7 @@ func (c *Client) resolveEntry(ctx context.Context, domain, hash string) (entry, func (c *Client) doResolveEntry(ctx context.Context, domain, hash string) (entry, error) { wantHash, err := b32format.DecodeString(hash) if err != nil { - return nil, fmt.Errorf("invalid base32 hash") + return nil, errors.New("invalid base32 hash") } name := hash + "." + domain txts, err := c.cfg.Resolver.LookupTXT(ctx, hash+"."+domain) diff --git a/p2p/dnsdisc/tree.go b/p2p/dnsdisc/tree.go index 64105a172e8..c948d25da5b 100644 --- a/p2p/dnsdisc/tree.go +++ b/p2p/dnsdisc/tree.go @@ -24,6 +24,7 @@ import ( "crypto/ecdsa" "encoding/base32" "encoding/base64" + "errors" "fmt" "io" "slices" @@ -311,7 +312,7 @@ func parseLinkEntry(e string) (entry, error) { func parseLink(e string) (*linkEntry, error) { if !strings.HasPrefix(e, linkPrefix) { - return nil, fmt.Errorf("wrong/missing scheme 'enrtree' in URL") + return nil, errors.New("wrong/missing scheme 'enrtree' in URL") } e = e[len(linkPrefix):] keystring, domain, ok := strings.Cut(e, "@") diff --git a/p2p/enode/idscheme.go b/p2p/enode/idscheme.go index f0a3b8f6365..8f69df871d0 100644 --- a/p2p/enode/idscheme.go +++ b/p2p/enode/idscheme.go @@ -21,7 +21,7 @@ package enode import ( "crypto/ecdsa" - "fmt" + "errors" "io" "github.com/erigontech/erigon/crypto" @@ -69,7 +69,7 @@ func (V4ID) Verify(r *enr.Record, sig []byte) error { if err := r.Load(&entry); err != nil { return err } else if len(entry) != 33 { - return fmt.Errorf("invalid public key") + return errors.New("invalid public key") } h := sha3.NewLegacyKeccak256() diff --git a/p2p/nat/natpmp.go b/p2p/nat/natpmp.go index 4d29a5ddc93..b6239268fc4 100644 --- a/p2p/nat/natpmp.go +++ b/p2p/nat/natpmp.go @@ -20,6 +20,7 @@ package nat import ( + "errors" "fmt" "net" "strings" @@ -51,7 +52,7 @@ func (n *pmp) ExternalIP() (net.IP, error) { func (n *pmp) AddMapping(protocol string, extport, intport int, name string, lifetime time.Duration) error { if lifetime <= 0 { - return fmt.Errorf("lifetime must not be <= 0") + return errors.New("lifetime must not be <= 0") } // Note order of port arguments is switched between our // AddMapping and the client's AddPortMapping. 
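
The handshake and NAT hunks above keep their `errors.New(...)` calls inline, which is fine while callers only log the failure; if a caller ever needs to branch on one, the usual next step is promoting it to a package-level sentinel checked with `errors.Is`. A sketch with a hypothetical sentinel name, not part of this diff:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// Hypothetical sentinel; the diff keeps the message inline instead.
var ErrNonPositiveLifetime = errors.New("lifetime must not be <= 0")

func addMapping(lifetime time.Duration) error {
	if lifetime <= 0 {
		return ErrNonPositiveLifetime
	}
	return nil // real code would talk to the NAT-PMP client here
}

func main() {
	err := addMapping(0)
	fmt.Println(errors.Is(err, ErrNonPositiveLifetime)) // true
}
```
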
diff --git a/p2p/node_key_config.go b/p2p/node_key_config.go index acbd401c7fa..9d6190973de 100644 --- a/p2p/node_key_config.go +++ b/p2p/node_key_config.go @@ -18,6 +18,7 @@ package p2p import ( "crypto/ecdsa" + "errors" "fmt" "os" "path" @@ -87,7 +88,7 @@ func (config NodeKeyConfig) LoadOrGenerateAndSave(keyfile string) (*ecdsa.Privat func (config NodeKeyConfig) LoadOrParseOrGenerateAndSave(file, hex, datadir string) (*ecdsa.PrivateKey, error) { switch { case file != "" && hex != "": - return nil, fmt.Errorf("P2P node key is set as both file and hex string - these options are mutually exclusive") + return nil, errors.New("P2P node key is set as both file and hex string - these options are mutually exclusive") case file != "": return config.load(file) case hex != "": diff --git a/p2p/sentry/sentry_grpc_server_test.go b/p2p/sentry/sentry_grpc_server_test.go index 079301d1a92..03fdd4ddaf5 100644 --- a/p2p/sentry/sentry_grpc_server_test.go +++ b/p2p/sentry/sentry_grpc_server_test.go @@ -105,8 +105,8 @@ func testForkIDSplit(t *testing.T, protocol uint) { gspecNoFork = &types.Genesis{Config: configNoFork} gspecProFork = &types.Genesis{Config: configProFork} - genesisNoFork = core.MustCommitGenesis(gspecNoFork, dbNoFork, "", log.Root()) - genesisProFork = core.MustCommitGenesis(gspecProFork, dbProFork, "", log.Root()) + genesisNoFork = core.MustCommitGenesis(gspecNoFork, dbNoFork, datadir.New(t.TempDir()), log.Root()) + genesisProFork = core.MustCommitGenesis(gspecProFork, dbProFork, datadir.New(t.TempDir()), log.Root()) ) var s1, s2 *GrpcServer @@ -194,7 +194,7 @@ func TestSentryServerImpl_SetStatusInitPanic(t *testing.T) { configNoFork := &chain.Config{HomesteadBlock: big.NewInt(1), ChainID: big.NewInt(1)} dbNoFork, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) gspecNoFork := &types.Genesis{Config: configNoFork} - genesisNoFork := core.MustCommitGenesis(gspecNoFork, dbNoFork, "", log.Root()) + genesisNoFork := core.MustCommitGenesis(gspecNoFork, dbNoFork, datadir.New(t.TempDir()), log.Root()) ss := &GrpcServer{p2p: &p2p.Config{}} _, err := ss.SetStatus(context.Background(), &proto_sentry.StatusData{ diff --git a/p2p/sentry/sentry_multi_client/sentry_multi_client.go b/p2p/sentry/sentry_multi_client/sentry_multi_client.go index 701092f8012..94d5f4566eb 100644 --- a/p2p/sentry/sentry_multi_client/sentry_multi_client.go +++ b/p2p/sentry/sentry_multi_client/sentry_multi_client.go @@ -29,7 +29,6 @@ import ( "time" "github.com/c2h5oh/datasize" - lru "github.com/hashicorp/golang-lru/v2" "golang.org/x/sync/semaphore" "google.golang.org/grpc" "google.golang.org/grpc/backoff" @@ -38,7 +37,6 @@ import ( "google.golang.org/protobuf/types/known/emptypb" "github.com/erigontech/erigon-lib/chain" - "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/common/dbg" "github.com/erigontech/erigon-lib/direct" "github.com/erigontech/erigon-lib/gointerfaces/grpcutil" @@ -355,14 +353,6 @@ func NewMultiClient( blockReader.WithSidecars(parlia.BlobStore) } - receiptsCacheLimit := 32 - receiptsCache, err := lru.New[common.Hash, []*types.Receipt](receiptsCacheLimit) - if err != nil { - return nil, err - } - - receiptsGenerator := receipts.NewGenerator(receiptsCache, blockReader, engine) - cs := &MultiClient{ Hd: hd, Bd: bd, @@ -378,7 +368,7 @@ func NewMultiClient( disableBlockDownload: disableBlockDownload, logger: logger, getReceiptsActiveGoroutineNumber: semaphore.NewWeighted(1), - ethApiWrapper: receiptsGenerator, + ethApiWrapper: receipts.NewGenerator(32, blockReader, engine), } return 
cs, nil diff --git a/p2p/sentry/simulator/sentry_simulator.go b/p2p/sentry/simulator/sentry_simulator.go index ca6947050e0..19e381b157e 100644 --- a/p2p/sentry/simulator/sentry_simulator.go +++ b/p2p/sentry/simulator/sentry_simulator.go @@ -19,6 +19,7 @@ package simulator import ( "bytes" "context" + "errors" "fmt" "path/filepath" @@ -85,7 +86,6 @@ func NewSentry(ctx context.Context, chain string, snapshotLocation string, peerC torrentDir := filepath.Join(snapshotLocation, "torrents", chain) knownSnapshots := freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{ - Enabled: true, ProduceE2: false, NoDownloader: true, }, "", 0, logger) @@ -100,7 +100,6 @@ func NewSentry(ctx context.Context, chain string, snapshotLocation string, peerC //s.knownSnapshots.ReopenList([]string{ent2.Name()}, false) activeSnapshots := freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{ - Enabled: true, ProduceE2: false, NoDownloader: true, }, torrentDir, 0, logger) @@ -142,7 +141,7 @@ func (s *server) Close() { } func (s *server) NodeInfo(context.Context, *emptypb.Empty) (*types.NodeInfoReply, error) { - return nil, fmt.Errorf("TODO") + return nil, errors.New("TODO") } func (s *server) PeerById(ctx context.Context, in *isentry.PeerByIdRequest) (*isentry.PeerByIdReply, error) { @@ -151,7 +150,7 @@ func (s *server) PeerById(ctx context.Context, in *isentry.PeerByIdRequest) (*is peer, ok := s.peers[peerId] if !ok { - return nil, fmt.Errorf("unknown peer") + return nil, errors.New("unknown peer") } info := peer.Info() @@ -177,11 +176,11 @@ func (s *server) PeerCount(context.Context, *isentry.PeerCountRequest) (*isentry } func (s *server) PeerEvents(*isentry.PeerEventsRequest, isentry.Sentry_PeerEventsServer) error { - return fmt.Errorf("TODO") + return errors.New("TODO") } func (s *server) PeerMinBlock(context.Context, *isentry.PeerMinBlockRequest) (*emptypb.Empty, error) { - return nil, fmt.Errorf("TODO") + return nil, errors.New("TODO") } func (s *server) Peers(context.Context, *emptypb.Empty) (*isentry.PeersReply, error) { @@ -224,7 +223,7 @@ func (s *server) sendMessageById(ctx context.Context, peerId [64]byte, messageDa peer, ok := s.peers[peerId] if !ok { - return fmt.Errorf("unknown peer") + return errors.New("unknown peer") } switch messageData.Id { diff --git a/p2p/sentry/simulator/simulator_test.go b/p2p/sentry/simulator/simulator_test.go index 53b85407b14..5517c54398e 100644 --- a/p2p/sentry/simulator/simulator_test.go +++ b/p2p/sentry/simulator/simulator_test.go @@ -43,7 +43,7 @@ func TestSimulatorStart(t *testing.T) { // logger.SetHandler(log.StdoutHandler) dataDir := t.TempDir() - sim, err := simulator.NewSentry(ctx, "mumbai", dataDir, 1, logger) + sim, err := simulator.NewSentry(ctx, "amoy", dataDir, 1, logger) if err != nil { t.Fatal(err) } diff --git a/p2p/sentry/status_data_provider.go b/p2p/sentry/status_data_provider.go index f8db9e9cab7..0dfb5f03bbc 100644 --- a/p2p/sentry/status_data_provider.go +++ b/p2p/sentry/status_data_provider.go @@ -84,7 +84,7 @@ func uint256FromBigInt(num *big.Int) (*uint256.Int, error) { num256 := new(uint256.Int) overflow := num256.SetFromBig(num) if overflow { - return nil, fmt.Errorf("uint256FromBigInt: big.Int greater than 2^256-1") + return nil, errors.New("uint256FromBigInt: big.Int greater than 2^256-1") } return num256, nil } diff --git a/p2p/server.go b/p2p/server.go index 328e8613f1c..893743053ee 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -26,7 +26,6 @@ import ( "crypto/ecdsa" "encoding/hex" "errors" - "fmt" "net" "sort" "strconv" @@ -653,7 +652,7 @@ 
func (srv *Server) setupDiscovery(ctx context.Context) error { Unhandled: unhandled, Log: srv.logger, } - ntab, err := discover.ListenV4(ctx, fmt.Sprint(srv.Config.Protocols[0].Version), conn, srv.localnode, cfg) + ntab, err := discover.ListenV4(ctx, strconv.FormatUint(uint64(srv.Config.Protocols[0].Version), 10), conn, srv.localnode, cfg) if err != nil { return err } @@ -669,11 +668,12 @@ func (srv *Server) setupDiscovery(ctx context.Context) error { Bootnodes: srv.BootstrapNodesV5, Log: srv.logger, } + version := uint64(srv.Config.Protocols[0].Version) var err error if sconn != nil { - srv.DiscV5, err = discover.ListenV5(ctx, fmt.Sprint(srv.Config.Protocols[0].Version), sconn, srv.localnode, cfg) + srv.DiscV5, err = discover.ListenV5(ctx, strconv.FormatUint(version, 10), sconn, srv.localnode, cfg) } else { - srv.DiscV5, err = discover.ListenV5(ctx, fmt.Sprint(srv.Config.Protocols[0].Version), conn, srv.localnode, cfg) + srv.DiscV5, err = discover.ListenV5(ctx, strconv.FormatUint(version, 10), conn, srv.localnode, cfg) } if err != nil { return err @@ -987,13 +987,13 @@ func (srv *Server) checkInboundConn(fd net.Conn, remoteIP net.IP) error { } // Reject connections that do not match NetRestrict. if srv.NetRestrict != nil && !srv.NetRestrict.Contains(remoteIP) { - return fmt.Errorf("not whitelisted in NetRestrict") + return errors.New("not whitelisted in NetRestrict") } // Reject Internet peers that try too often. now := srv.clock.Now() srv.inboundHistory.expire(now, nil) if !netutil.IsLAN(remoteIP) && srv.inboundHistory.contains(remoteIP.String()) { - return fmt.Errorf("too many attempts") + return errors.New("too many attempts") } srv.inboundHistory.add(remoteIP.String(), now.Add(inboundThrottleTime)) return nil diff --git a/p2p/simulations/http.go b/p2p/simulations/http.go index 4c990b85023..07861bcaf3c 100644 --- a/p2p/simulations/http.go +++ b/p2p/simulations/http.go @@ -189,7 +189,7 @@ func (c *Client) CreateNode(config *adapters.NodeConfig) (*p2p.NodeInfo, error) // GetNode returns details of a node func (c *Client) GetNode(nodeID string) (*p2p.NodeInfo, error) { node := &p2p.NodeInfo{} - return node, c.Get(fmt.Sprintf("/nodes/%s", nodeID), node) + return node, c.Get("/nodes/"+nodeID, node) } // StartNode starts a node diff --git a/p2p/transport.go b/p2p/transport.go index 2135a5b51de..51dab6dceb9 100644 --- a/p2p/transport.go +++ b/p2p/transport.go @@ -22,6 +22,7 @@ package p2p import ( "bytes" "crypto/ecdsa" + "errors" "fmt" "io" "net" @@ -168,7 +169,7 @@ func readProtocolHandshake(rw MsgReader) (*protoHandshake, error) { return nil, err } if msg.Size > baseProtocolMaxMsgSize { - return nil, fmt.Errorf("message too big") + return nil, errors.New("message too big") } if msg.Code == discMsg { // Disconnect before protocol handshake is valid according to the diff --git a/params/bootnodes.go b/params/bootnodes.go index 55bd4075b24..3b12a7b79ec 100644 --- a/params/bootnodes.go +++ b/params/bootnodes.go @@ -180,8 +180,6 @@ func BootnodeURLsOfChain(chain string) []string { return HoleskyBootnodes case networkname.SepoliaChainName: return SepoliaBootnodes - case networkname.MumbaiChainName: - return MumbaiBootnodes case networkname.AmoyChainName: return AmoyBootnodes case networkname.BorMainnetChainName: diff --git a/params/bootnodes_mumbai.go b/params/bootnodes_mumbai.go deleted file mode 100644 index 61842a34874..00000000000 --- a/params/bootnodes_mumbai.go +++ /dev/null @@ -1,354 +0,0 @@ -// Copyright 2024 The Erigon Authors -// This file is part of Erigon. 
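
The `p2p/server.go` hunks above swap `fmt.Sprint(srv.Config.Protocols[0].Version)` for `strconv.FormatUint`, producing the same string without reflection; hoisting `version` into a local also stops the expression being re-evaluated for each listener. A quick equivalence check:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	var version uint = 68 // e.g. an eth protocol version
	a := fmt.Sprint(version)                     // reflection-based formatting
	b := strconv.FormatUint(uint64(version), 10) // direct integer formatting
	fmt.Println(a == b, b)                       // true 68
}
```
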
-// -// Erigon is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// Erigon is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with Erigon. If not, see . - -package params - -var MumbaiBootnodes = []string{ - // official - "enode://bdcd4786a616a853b8a041f53496d853c68d99d54ff305615cd91c03cd56895e0a7f6e9f35dbf89131044e2114a9a782b792b5661e3aff07faf125a98606a071@34.93.67.27:30303", - "enode://209aaf7ed549cf4a5700fd833da25413f80a1248bd3aa7fe2a87203e3f7b236dd729579e5c8df61c97bf508281bae4969d6de76a7393bcbd04a0af70270333b3@54.216.248.9:30303", - - // third-party - "enode://00cd3b0c0927629e46f560b50da8352ba6c0f6775bb53787f987760dae5f3fb1497101c14cf16b9ee21d0c9f1ac1757ae460ab84e12e8de3c776d30763273a00@44.200.54.250:30303", - "enode://029e793eab7979dffb09c68c375ba6ab28feac837d683c2fe8e6d7e6b86ff392f2e37dd79730b181ad3306b79848279b95241ae7b0a5187c9acdad38e716a42f@195.14.6.165:30303", - "enode://029e793eab7979dffb09c68c375ba6ab28feac837d683c2fe8e6d7e6b86ff392f2e37dd79730b181ad3306b79848279b95241ae7b0a5187c9acdad38e716a42f@195.14.6.165:37188", - "enode://04a3620abe5a813f802ef69b0d3dd81d09f9a88c7bae0c925a6fcb215e0a4dda15fa3d4257187532879867c2b082fb8efd8594150b47a482240b94f3b8b812c0@3.16.130.85:30303", - "enode://056bb8c3f58ad090760fee637d6d3ca02c278c804bc0145b2beaec3897e4f68aef522cdcb57506bfcef986a52d317a31c9b1aa8b85b15ce164130b5fbdca747a@51.81.244.143:30024", - "enode://056bb8c3f58ad090760fee637d6d3ca02c278c804bc0145b2beaec3897e4f68aef522cdcb57506bfcef986a52d317a31c9b1aa8b85b15ce164130b5fbdca747a@51.81.244.143:57482", - "enode://05e01911ad8ec9948c2dd74b72a7146734a4ef4c8a60a8bea23dd04983dbac616d0b92e4efa8419716568c3330ae90ab9eed5d8bc6081f9c0d742ed7a2147176@136.243.32.238:30248", - "enode://05e01911ad8ec9948c2dd74b72a7146734a4ef4c8a60a8bea23dd04983dbac616d0b92e4efa8419716568c3330ae90ab9eed5d8bc6081f9c0d742ed7a2147176@136.243.32.238:30248", - "enode://0914147720e01f212f6654bea019c57f80216af60c78794728b4803c8fe5d5fe74d4b85b6f7b1d4836b5d58b19ac5a56abfa6cc7c2bbeefa92847abe4794f9a9@44.202.78.164:40118", - "enode://0914147720e01f212f6654bea019c57f80216af60c78794728b4803c8fe5d5fe74d4b85b6f7b1d4836b5d58b19ac5a56abfa6cc7c2bbeefa92847abe4794f9a9@54.234.197.198:30303", - "enode://0914147720e01f212f6654bea019c57f80216af60c78794728b4803c8fe5d5fe74d4b85b6f7b1d4836b5d58b19ac5a56abfa6cc7c2bbeefa92847abe4794f9a9@54.234.197.198:45124", - "enode://0957e18c7be7bd6062c54895164b89026d224c22b4b3f3a7167b11e7703987bc0d473d5c855b1a63d913cb430db360db6307710ea1a7eb1ea4c274c163f80136@54.90.105.204:30304", - "enode://0b311bc1643dd4e4a9e55a6e40e2f005ac7e85223429d705e9c2cd3209846e029f2f854d3a33e0a718d5cbad83ecdf6fb6937894568bf675ee85af9cf73a89c5@52.214.83.78:30303", - "enode://0e9bb56fdeab88e071b4c8b4450aa058e681905561577c41e8b692592da95c4616abd22a1c8e13fd81c76aa5c4a1a561fdf029117ee5312a77d72c538932b934@129.213.160.149:24632", - "enode://115a260dd3080f73df37644adc035abc1ffe5174e1131495ed8267567344501c6d58a55768cbdcc2dd7d49237f35f33aa4ca5087963b64a814f875711ed1bd4e@89.168.107.150:48316", - 
"enode://12b2021173ec6cc072d1f429eed17e51c06959f6683207ca43db97da0590e1e0772ade1137fcb61ea1d9db3d32f809c4220b00ffea88303c8ef44ff9804514a9@139.99.69.4:30303", - "enode://13a774e107ef9875312d6dd27f60a50b718a618b5a26051bb55e74537728244a3423b3225b1d4362744fe37498a811810f6d5d346a8ac605a979a9eb1a06c92c@54.164.83.148:30303", - "enode://13b420bcac1d8c9e919487cf3e9c0ffa5004ce61c14f62b9e5d3c1761b7c798538d28bcf2e346bc65fa703a1833488a80e9abac0b2aac669d184f05e4a244520@66.206.1.250:33904", - "enode://15db06b34d792bd7ea4f1ebb737847f08712a6b5fa79520c7c78784ce7e4f4501a8713367689b4e7abc54a23843c10cbe748022cb94ecb8147014cf81bfffd51@65.108.238.175:30303", - "enode://15fae7bd85e689f637c04ea32b36768899cb73032c893ee0f4b09ed181b1e2c526cf33831a5812f3a46cc3c1061233832b83e52655f1599483a31df6f8db10c6@66.129.102.21:30303", - "enode://15fae7bd85e689f637c04ea32b36768899cb73032c893ee0f4b09ed181b1e2c526cf33831a5812f3a46cc3c1061233832b83e52655f1599483a31df6f8db10c6@66.129.102.21:30303", - "enode://163d474bdf80d67634917c72fb5c23b4f3c65716ab65f9fc26f96c9d140c5a7f2addc398935e2d876ff6f34cccf6eae912b088065358c94a6f1b74bae0afcda3@65.109.106.6:30304", - "enode://165123708a4b8c09c340c5793136ae34ef83a9a5c7ffc260d9c3eb3a34218232fc487e1369cf72d5cfcc2b38cdbe67f47cd2180e4aa5f4fbc8dd94ce3d4a07a6@52.31.209.103:45704", - "enode://1697bf70969ed0c98a6f66b14354dd0d635c2c54ebeec6dfbc0b9652d2f81aa0c0050cc74001516f027f9ac00f6df2da6d9cf961222d91829bb8ebcb0d21432d@135.125.160.122:57050", - "enode://16e521b9daedd3d0ec10739d9b6d895e82229bcb73241c9bac9b213951fd48ae3d8428695b60e0fa5fafb9c8766a287d3330e9e42ac339f9935f423eada8d538@63.251.15.90:18220", - "enode://16e521b9daedd3d0ec10739d9b6d895e82229bcb73241c9bac9b213951fd48ae3d8428695b60e0fa5fafb9c8766a287d3330e9e42ac339f9935f423eada8d538@63.251.15.90:18220", - "enode://16e521b9daedd3d0ec10739d9b6d895e82229bcb73241c9bac9b213951fd48ae3d8428695b60e0fa5fafb9c8766a287d3330e9e42ac339f9935f423eada8d538@63.251.15.90:18222", - "enode://1a56c0bd8e84c8eafe67537845e8971e7c8a7f728d98dfba5331f8fb4f084e9eb47b810a66bf179e0aa1c5cdebebd3382fd184197103643052402a83c4b3a3d7@54.227.161.221:30303", - "enode://1a56c0bd8e84c8eafe67537845e8971e7c8a7f728d98dfba5331f8fb4f084e9eb47b810a66bf179e0aa1c5cdebebd3382fd184197103643052402a83c4b3a3d7@54.227.161.221:30304", - "enode://1aa9e9bdb922062930c99d5736ef89b26226d3e8afc54483917742884ab6bc3bb0e64b1761f31bfbd7eee9db1c2ed4753791228b0b1659869551b91541ddde2e@34.143.131.89:30303", - "enode://1c6b9cd7fa0ff28e9b3893c631f2f44ae70018780c6c0b20ea0bc945b7d2ded91c551251124b9b8c192032533dd8476df444d26132ad332d4bc1e8a4ac3aa3ac@129.80.185.224:30303", - "enode://1c6b9cd7fa0ff28e9b3893c631f2f44ae70018780c6c0b20ea0bc945b7d2ded91c551251124b9b8c192032533dd8476df444d26132ad332d4bc1e8a4ac3aa3ac@129.80.185.224:46474", - "enode://1d078fb83dae63a64867e375d1054ec0b38ea93200d1243e5f18271e310bd4a7dbe9a16a1d857ce5bdee1de69ef8d778de611268e5acce51103cec5727561478@162.55.244.33:31330", - "enode://1d8574f9dd65856131250e72c013c64481f65e1e19b42c7aad51cd76dc65a8db2e4b4ee1bf1346d065fcae908b9fc211e8d175bb4394eedfe66726116b1a0e45@3.82.69.223:44894", - "enode://1d8574f9dd65856131250e72c013c64481f65e1e19b42c7aad51cd76dc65a8db2e4b4ee1bf1346d065fcae908b9fc211e8d175bb4394eedfe66726116b1a0e45@3.82.69.223:47084", - "enode://1d881cf105faca3b95c9d5e76abe3a09dc294e6486eb9c2b36f2661cb82e4277a04e3f12147ffdb68a834a604c2c0255e830eaa46aca5d04f3feb52caf0834ac@144.24.175.119:30303", - 
"enode://1e58f0462d16edec8df71baed0517a5ad08ee1b9517557b432b516d259b5a9c2ad7584c6ce5a297ad1eec0a622a8482da4485a21b025a57dc647ed487a882240@52.79.90.66:30303", - "enode://1e58f0462d16edec8df71baed0517a5ad08ee1b9517557b432b516d259b5a9c2ad7584c6ce5a297ad1eec0a622a8482da4485a21b025a57dc647ed487a882240@52.79.90.66:49558", - "enode://1e9d440ab7a68708b1ed9fd2cd45fbb65b2db3e8d2a60f78b165258a62f4a67fad91992afc15ebc241e0145bd52d8acc3cec96c382ce94bf2e4e4c3b88dac692@216.155.53.69:18210", - "enode://1e9d440ab7a68708b1ed9fd2cd45fbb65b2db3e8d2a60f78b165258a62f4a67fad91992afc15ebc241e0145bd52d8acc3cec96c382ce94bf2e4e4c3b88dac692@216.155.53.69:18212", - "enode://1e9d440ab7a68708b1ed9fd2cd45fbb65b2db3e8d2a60f78b165258a62f4a67fad91992afc15ebc241e0145bd52d8acc3cec96c382ce94bf2e4e4c3b88dac692@216.155.53.69:37960", - "enode://1fe6595cce80bb6d0dd2b864ff6bc0731bb43866a16422c913cac3e1d726906eabef43ec6c24f5e72cfc7b872f0f2500fc6f5157f92442a844ffb0a2883da314@54.165.52.139:30304", - "enode://1fe6595cce80bb6d0dd2b864ff6bc0731bb43866a16422c913cac3e1d726906eabef43ec6c24f5e72cfc7b872f0f2500fc6f5157f92442a844ffb0a2883da314@54.165.52.139:40486", - "enode://288bf817720677bb16bf6e97969c798058d5fce1b0056764f5eb275e577ebe8d0f8eff8ada5cbdee51ac43899dbfa34d9595826bcf9e89cffed47fb01c53119f@100.24.48.53:30303", - "enode://28e7fb1a6432241e3448008998d6f5fff013c98b1ebf6444fab0c221befa7439230445c0a3fe96c71d5aed63911ee045a38fb9743174a54f36ffa38e2079d546@34.224.174.220:30304", - "enode://28e7fb1a6432241e3448008998d6f5fff013c98b1ebf6444fab0c221befa7439230445c0a3fe96c71d5aed63911ee045a38fb9743174a54f36ffa38e2079d546@34.224.174.220:30304", - "enode://2931f29e653e04063a4300e75dc2732c6a70b7a2bf5d5ecd6f0ce87c5463393edfaefe1a9875e0a0f74322199d5531502ccdeb8c6d95bd90fa51c7c7c0c0f210@216.155.53.69:33120", - "enode://29b5257f68d4e274288805576b1902d6a2eb2ae3497861031c8d222fdd660f20717d1087bcd4f857a117c4ebf7977a2e1948bafedaa08cfd9008ac0abdb45bf3@206.125.34.196:44632", - "enode://29da96e1df872a042f6118e5c3ce3e5ec482f5021a9bb77d563cc36116381ff14d1c82cac838b32c0875f5c7129bb4e55745188420666804da9ad65fc1e03e69@152.69.217.151:30303", - "enode://2abeb69ed6a82cf4d31fcbefc74d190b6f798d6998092eb0677d51cfd28ead42c047e48f0d49cb7ed3f7967981a8d83ee5f7f17d731130b1a0400a488ff94e7a@3.89.32.220:30304", - "enode://2c091513bdd07089570bddb936bb58180e2cab1378f47c3e2311dfe7617ba143ef43f15143dfc527871bdc41886433f8f47c28d353100405abd6a2947ed91268@157.175.98.235:43136", - "enode://2c44cfa716b745c82c50a7fb3b17c20791bd1801ada5a56797798a536b167f6d6d336e9ef595a76e618baadff0f2c44322a50b190ea29da4484df184f3b45f22@185.209.82.99:1057", - "enode://2cfeb05adba879c8e17acfe26a29afff83be198dd89e8bbd6f198b1c5cff3a4f52adb719e54a844a6d9c7c3340f2326d11f02e7b76234969de3e556987710659@80.64.208.112:13046", - "enode://2d0b223a080e191a0ff059cda8327b5750e9651b5f4caa67e8a27a8ccf92c4982f7bb460da93a225fc11b1f15551547bd03760e0a02b3b960f8209a41ca97a7d@18.141.115.121:3431", - "enode://30a3c35238dd19ea73907190b4d0d897a83bbd732c957ab37c6174c7012da4ab8704f422f7021a33da0b3f4f257eea2023fccc240ccf32ec2d0fbf1b08e774a9@51.15.44.112:30303", - "enode://31c04843944c31011bc7b221b706e78a732d0855f2a9956699d1fc3c223cc8ef62e09ddd4faa63d5a64e77b3215498d426489c6d23f3cb27f89a816c42cd7ddf@44.212.35.211:30303", - "enode://3271284ea36198180cb93838a7514e926f575b092f608c488bca740d50fdfbc86d714977c118b5d23e7fa56f1967b7500bc99b36c8b70f6d9f0cf04411fdc028@44.195.30.243:59236", - 
"enode://330d0ddf625c0c5bf643bc4810365d0564f3f0f94a6bda937a4a4e08ccdb7b33ebb44254d948d0f5e957d8cf5e803b650b3a66821f39d604129f912cadc140e0@150.136.219.31:30303", - "enode://35fd15d922a29f443f192ac9414bf497241e3146319f5421185778884ea00e583dec0803d0e59d10b3e9c245cb0d6496dac464080c08ca3d0d22551d99f67764@3.215.134.157:57832", - "enode://36585e4d8f2b90bd510137b649d96e597a9882b7973d1c2ab32b966995dbd3987a74bb647d37fe1e3ff183a869b7bf9843780fbfdce7e92bd84ba233c0df137d@80.64.208.163:56524", - "enode://3661c5e9e015c94fe4ee472a84f1638d22e6326d941be1047453fdabce45733085c374842601637abef1f162733a553adaf275691efd2d6f9c54290a3acbc7b5@159.138.123.91:40366", - "enode://3701285197c038a9c2b0cdaf5167ca65424c40ecd639bc9cf3d1e70ed136c6843a55830108afbdd2eb3bb4b11b4a65425f85952a28ef3efc8004da61ae01832e@51.81.184.92:30018", - "enode://3701285197c038a9c2b0cdaf5167ca65424c40ecd639bc9cf3d1e70ed136c6843a55830108afbdd2eb3bb4b11b4a65425f85952a28ef3efc8004da61ae01832e@51.81.184.92:30018", - "enode://376614d048ac1ae178b9ce4e86ddfae659f84be8b4f238c029334d3b19ab46650c33668a91ebc60da7352c7a0113a3241374b51f44af571b837f7568a3451431@3.233.194.125:59928", - "enode://3803eabc341ad5043acfae627b8af63822c60b54f31f06e69cdca973326fa5d06d206c89c718f57328dc1882863b6d67183257265cef0fcbc4eb747e3dba79c9@54.91.94.230:30303", - "enode://3803eabc341ad5043acfae627b8af63822c60b54f31f06e69cdca973326fa5d06d206c89c718f57328dc1882863b6d67183257265cef0fcbc4eb747e3dba79c9@54.91.94.230:30303", - "enode://398af25d3ed696592d165f277959373e25751fd3a0e836e9eb5c89c598173e0c2c5f6e0f5a0c5f2f9507876c6651820bd82b56f825f9f916ed9ec01e563dc72e@34.239.186.76:30303", - "enode://398af25d3ed696592d165f277959373e25751fd3a0e836e9eb5c89c598173e0c2c5f6e0f5a0c5f2f9507876c6651820bd82b56f825f9f916ed9ec01e563dc72e@34.239.186.76:30303", - "enode://398af25d3ed696592d165f277959373e25751fd3a0e836e9eb5c89c598173e0c2c5f6e0f5a0c5f2f9507876c6651820bd82b56f825f9f916ed9ec01e563dc72e@34.239.186.76:30304", - "enode://3a0f449749b6b5ee8a50d4bc0f024cd41a33e8203825b2eb29dfc8663fa8077c9c7fbe498c0bdb9e1f47682084c402345e68e6afbceac3df87ed3efefd8758c8@54.226.147.167:30303", - "enode://3a0f449749b6b5ee8a50d4bc0f024cd41a33e8203825b2eb29dfc8663fa8077c9c7fbe498c0bdb9e1f47682084c402345e68e6afbceac3df87ed3efefd8758c8@54.226.147.167:30304", - "enode://3b12280f6e9d18445c6deb217160507ab10f0ec19c06e7e5db2fb2e7c333d5dd5e1c269fa3144e20702d181bebe019e83704b66eec6ee71064b701b3942943d0@18.233.148.51:30303", - "enode://3c11286eeda537fc800e0a83ecd3b30bf6cdc28170fcbde0c9bb4ba29e2b365c1a102c4e2af5e70f568342bb8ef83cf57e24cea9ea0112f1f877c212a92b8850@134.65.193.93:40303", - "enode://3c5c1536ffe73786238839741d2505c3088fb22a555764066eae95b06bd46a67cd52e02951ffa1c0c9bd3f951122dd5a46a6fbae8493c6dca007266ecd806c5a@15.204.216.82:30303", - "enode://3e7056fcb155cf847ca5ba2fea3724ec5798cd01ceefef1506fe07f83ef1d7f78588c2454e13ae6dfdda834e8ae80fcbc92a0b898adce100262eec947149722c@134.65.192.170:40303", - "enode://3eebff3ecb6ead0cc050978f6a685ac57696bf620ff8a1c2c62ffa3cd09709fee224f98adf57741636b5154fbe7ab25b7d8eed4cf2a5e02246c4a3744b6d8d49@174.129.134.198:30303", - "enode://3eebff3ecb6ead0cc050978f6a685ac57696bf620ff8a1c2c62ffa3cd09709fee224f98adf57741636b5154fbe7ab25b7d8eed4cf2a5e02246c4a3744b6d8d49@174.129.134.198:30304", - "enode://3ef0085f3ca704e5633e468aa702cd61f2e85c348376bf722a09efdc756ff11eacb7cc6cfd1fa27bc6d30e374e21a7e812baa5167431c9db74a2e23811961680@34.147.201.200:30303", - 
"enode://40be81ed455404d0faaa018787ef747bff46cdae166fd43d8acb73acbd8529e3892174e7448abde5de76610597eca7933285c97604dca474007d8f2a8e11bff8@38.95.13.166:30303", - "enode://41a63b973225722ab1406d95a8950246bfed647140e88ed2040040d1bdbd507af77448eea53f9335501daac81401b1c1e9089434d61b92865d0d7404d996881c@46.4.95.49:30305", - "enode://428d3f0ad83589de41ddf8f255396446fe26f82caf56dd41f1157840e5459513fc1c041248c47da8b2893af4e05f37ccfd20a4e17eac8a6304c8ea947fd2a3fa@46.4.96.158:30305", - "enode://43ea9f9ad3c3a27ff19d8a0c04657c51d55e35ef3333d3e8fa7209d195af4cf4423956c3460c0c772cca6d67b0487f9ad628bb6e3a6484e76430ab029e06f1e1@148.251.179.30:30303", - "enode://4436c8fe220fc0c8bde39317e4c85adb44349ceeecd80b33123f542756b56887801a32960e4a03230877661ad3ece9f0ec3db2f991bf753cea39989630767bea@107.23.178.9:30304", - "enode://4582de18bd33c8c425adf100258fe0d3d516e942827467255286b45369c2afc2098d741fce2b4a3a46f041e52ebc4b1f2843c9128e481f1faa483acf1f5b179c@138.2.174.187:30303", - "enode://459a3623ae6e1923e1d9da99a11f2a568324593a38a4e1acc396bf56da9def2bb33c91391d3158b14f4e61558b1802632ae2ad2bf299f2f80338e7529917cec2@3.91.232.245:30304", - "enode://49b0256a90f9a5e1e24ee768ec976b9d141a9efb6ed28cb9993ae82749392b055783cd819a02b23b9d1959857ea6063f2b57f74eac3c267735653e38ecff9773@35.230.162.74:45118", - "enode://4a9eee040e712fe08b8a9e298ec653c38d823e3575f85b174e3eaebf79f358532d57885dd453a4cbd6150b166f3143cc63eb5389f71b1dec38aa69ea25499137@35.210.46.95:30303?discport=45597", - "enode://4c23447a5270edc96074ecab4a39ae549a4317fb9a9ade1558cfa1cb0988ebb6c70b2ce1435ef0a6d071dc269e364fd26dff457098a3548dec8a234b0f388ad6@62.12.164.254:35853", - "enode://4c23447a5270edc96074ecab4a39ae549a4317fb9a9ade1558cfa1cb0988ebb6c70b2ce1435ef0a6d071dc269e364fd26dff457098a3548dec8a234b0f388ad6@62.12.164.254:46965", - "enode://4c4b69fff98582172a5fac795d92e82c31df716b6ccef981c56dc0022026feddbcb96c08dd98a8ef589814c147aa7bb0ac2785719d08a6237d6f0dfe398bafee@3.85.225.185:30303", - "enode://4c75e39d4dba08fe5891bdf6ae91730ff00aa629b071dfd86b506f92bf810019f6fc0476dea69decdfdc040119ccaf5bab7e87ad9487504f10dd79a78eb26319@165.227.27.33:30303", - "enode://4d0c9140f5338b5b5d598d25ceafe0c106948c8a37e44ac78800878e20aab6d11abfa43e76464473bc1fc3aa735da6afe4d255f2b7a9e8a4aba397d7a58e96bb@86.152.85.68:30304", - "enode://4e168f7f08eb33b93236c64677be8fa4721e3803811bb23bb295de44928e445970ce12d51590e3f81fe47370065085f2708a4696c89b8cb6339568e366e4ed45@34.206.3.152:49122", - "enode://508b462bb0aa3045488f4f14b48378035779ffcf50be6c26e357f00be912669e62a6dc8a27e88c6ae4059f12fc0ce7a689edd26b6e2193bf778370ed70077d12@3.90.14.189:30303", - "enode://508b462bb0aa3045488f4f14b48378035779ffcf50be6c26e357f00be912669e62a6dc8a27e88c6ae4059f12fc0ce7a689edd26b6e2193bf778370ed70077d12@3.90.14.189:30303", - "enode://508b462bb0aa3045488f4f14b48378035779ffcf50be6c26e357f00be912669e62a6dc8a27e88c6ae4059f12fc0ce7a689edd26b6e2193bf778370ed70077d12@3.90.14.189:30304", - "enode://51f2fa022fea828f242f50a1a5949f327b7f91b899519cc46f14da054b0dbd16b468523bc92a380abde9a2662636cb83fd39aa9b180ae04f77770f9f1563bd68@178.165.64.123:43500", - "enode://55d869987b39edd38f4fff4e8b131b5126a432e04c91296ed929e818e7428973f0751fffd4ecbc15352658af6036540861eb559b0801e8ea870e38d916625977@54.81.171.127:30303", - "enode://55f34211a0c05e4987959a35570879e18cbec13ab3ee96900f60791000c3253e24f52df2de6705bcc93d6c920b8a263665570e8d8cea2908a4f36977813e06aa@44.214.42.42:30303", - 
"enode://570e5768eeb205badafcd0c96a36cdcad08055ba2d0b2b1ffd009e92216bae00a9e6b4600aa56704569881339c0589b2ac10b043b4f9541e8ecf3cf508ecfa3d@34.213.145.105:42695", - "enode://57588f3b85d6d49f1e3253a5c0440f4c1c77eb9aa104e53e54bcdf1b8d3f6bd68bffe1c8d120531e7a8ee91b07e582c56efad0dc87e4248545370f116d1ae85c@51.81.184.91:30024", - "enode://57588f3b85d6d49f1e3253a5c0440f4c1c77eb9aa104e53e54bcdf1b8d3f6bd68bffe1c8d120531e7a8ee91b07e582c56efad0dc87e4248545370f116d1ae85c@51.81.184.91:30024", - "enode://57e20c23634c7c910f3b35434a3db5548449fdadb982cfb599fc0fe4252dbf29774ef07b9379f82bcee79fc5916a3bc69dd21d936296e0866b6e8287195081de@46.4.95.49:30305", - "enode://5ae3dfc80d5db600113d9fb8e2eb45fa851997003d79f52245bd9fc04a27e0eb6a1ab87d032324c59906fa48672700af271e26797e25ce9ae98912d90a338cfb@52.23.208.178:30303", - "enode://5ae3dfc80d5db600113d9fb8e2eb45fa851997003d79f52245bd9fc04a27e0eb6a1ab87d032324c59906fa48672700af271e26797e25ce9ae98912d90a338cfb@52.23.208.178:30303", - "enode://5ae3dfc80d5db600113d9fb8e2eb45fa851997003d79f52245bd9fc04a27e0eb6a1ab87d032324c59906fa48672700af271e26797e25ce9ae98912d90a338cfb@52.23.208.178:30304", - "enode://5b29d4e905ca088e723121c24f9b8656e0c2e10cebcfafc74d84a9e9646def6cfba4631138c5b657d3ede3e532fe5721fc5db80c450a9f40ad298aa14d3fae3a@15.204.140.147:30026", - "enode://5b29d4e905ca088e723121c24f9b8656e0c2e10cebcfafc74d84a9e9646def6cfba4631138c5b657d3ede3e532fe5721fc5db80c450a9f40ad298aa14d3fae3a@15.204.140.147:30026", - "enode://5b514b3a49afc7539fb6a85c81d5a7f373409fb4416f2235cd569c84e8e919f3bb313d104893243fee6f91297aeb9baeadb7784730b9aa837926642b7615a8bd@132.226.206.147:30303", - "enode://5cbb21da70c216a0a47c6dc505cfbbb6be3abcf14fdf47220e2ac3e0c8e4d0d1f6993dfb77c057539ffad7a01289062108d0d6f5ddb01e605c14cc1c46e27d6d@69.4.239.33:18210", - "enode://5cbb21da70c216a0a47c6dc505cfbbb6be3abcf14fdf47220e2ac3e0c8e4d0d1f6993dfb77c057539ffad7a01289062108d0d6f5ddb01e605c14cc1c46e27d6d@69.4.239.33:18210", - "enode://5cbb21da70c216a0a47c6dc505cfbbb6be3abcf14fdf47220e2ac3e0c8e4d0d1f6993dfb77c057539ffad7a01289062108d0d6f5ddb01e605c14cc1c46e27d6d@69.4.239.33:43590", - "enode://5d325739378353d10e30d6b6dce20a58e0474d2f3a5e26a87d5e1700f00769326f8013f98e8451b675ef49e221aa3bc1286e51a172afdcd2909c8ec2bf33e9c1@216.66.68.49:30391", - "enode://5f4651a0b1949b7e4b2760da4b89899dd4ca66df21254b6d0081fefbbd52682dc9adb35cc7f08d499b919b438d1f055cd17c69a7005a109ba204425d4c6ca21d@130.61.140.145:30303", - "enode://5f4651a0b1949b7e4b2760da4b89899dd4ca66df21254b6d0081fefbbd52682dc9adb35cc7f08d499b919b438d1f055cd17c69a7005a109ba204425d4c6ca21d@130.61.140.145:30303", - "enode://631f234a6dd6730a563f5fd78b39ad6da0d18185829f1c0e6dfb1858f179a5e6440ca7dafa1af950312e70fa019cd588f1c89f9f1651850cd68898432725c91c@52.90.182.97:30304", - "enode://653216c0c416d94232319475e208cb91b98d4e642313717fe8e57bb0b31b03c863dfa1d4df610d779670d2b0ac94df2266e5f82c3ba36e859650323f6de7191b@86.109.12.19:30303", - "enode://66e658a1b1858aa590075dc1158888993b04f41612a9644203ab92ce63d92bd04677f118773e065bd22579e44f8f4477cdd776343e76fd077b440cff9917d870@192.3.168.3:31324", - "enode://67a79a3a38d1a46db7bcf4bc8c49794cc83217fcd166d3d38e518925c138dd73a589e0f625f1d96bab422665d9caa715cbc8193fc6172c414bd49b84dffa1afc@216.155.53.69:18222", - "enode://67a79a3a38d1a46db7bcf4bc8c49794cc83217fcd166d3d38e518925c138dd73a589e0f625f1d96bab422665d9caa715cbc8193fc6172c414bd49b84dffa1afc@216.155.53.69:55390", - "enode://68dfd4a780b2f900e4f5d9361722b6b7637e271b1137b252bfeaf6617087a6bf3de815def35426669d337a854ca8f35c8ed4f937057ce2704f485a5c187873f3@129.159.136.96:30303", 
- "enode://69af9688c2ec9367ef14440d7d3e6ca712dca929124765abf256273a0eef69e3f174c541f7cf5eadb3bad4cfc74846a347869c686586c07c9b622193b483f2dc@185.209.82.99:28840", - "enode://6a5774a77325ff1e9bbcf847e2e9a8199654913643cf93086f6189d5d0c0f0e274448cdd38c546855eab7e8e9177408db750067992a417ad377fe22c88902229@130.61.38.149:30303", - "enode://6a5774a77325ff1e9bbcf847e2e9a8199654913643cf93086f6189d5d0c0f0e274448cdd38c546855eab7e8e9177408db750067992a417ad377fe22c88902229@130.61.38.149:30303", - "enode://6c2a449f92845df89078d2db31e0032f7030eef6a9914d51937204a8ac8224e749424a07a01a1938ceedd55aff23b6f79b6d420cdb943c43283d2b2f0216e0c6@18.178.165.51:30303", - "enode://6cccb844149571087aec6a11ec387641282552ecbfbfaf6b5adaac9c3cba62b6533050a2d7997c20ce57a6467a2a23aae598ce453a663a26a5e744eb31f3d729@207.188.6.55:30030", - "enode://6cccb844149571087aec6a11ec387641282552ecbfbfaf6b5adaac9c3cba62b6533050a2d7997c20ce57a6467a2a23aae598ce453a663a26a5e744eb31f3d729@207.188.6.55:30030", - "enode://6d7d778e9e1c99cdc77b241bd4e70ba9b2ad5d870b324deb7c8c63f4863f5818209b2bcec1c0f3079cebf681c88fc33635ea6194d4eb10956020c5278a081d24@18.232.166.200:30303", - "enode://6e22aa1ee45cefb61895b36db39181e9e44f4d85e9515098e935ac702b77aae695586476b55b972a72dd221c734e470678b1142edc7b16938abc9d03c30d12ce@57.129.23.105:30303", - "enode://6ecdba41cca94a371826294161e2d1fb9ecdf6f83f307645b2d6e29291b3d8722be12bb60eabfadb4d90b65048dee0abdff7e63c6bb1aa8391903699a44286fa@50.19.243.132:63939", - "enode://6f22ccde85c9ebe1f3511faf74be2c95ba44c52fe555fa60fa2606ccecb6ffa7cbe24ba572c85111732037a8a345278da6c05f56542418a4290c2b202c17b170@35.242.235.245:35391", - "enode://6f22ccde85c9ebe1f3511faf74be2c95ba44c52fe555fa60fa2606ccecb6ffa7cbe24ba572c85111732037a8a345278da6c05f56542418a4290c2b202c17b170@35.242.235.245:35598", - "enode://70cda0222b54afba4f5b7943443ec19f2c2ec01887911cbb85c737c640fa4801c282baefe49b64bd473523d7fb235cc25959395d0252dffa98b1a7f50fad8a30@185.79.245.76:30312", - "enode://720c818d3bc0dd2f9651ae5a5fc1762e64b31386971325ca4d6f5cbbac627330a8f5cf10153a14b4c2820a7f5c93083d512ddd9490d3e595f34397ce70403e36@3.136.220.39:49241", - "enode://72a3287c4fc40c96c0d300d9ff842b472b893d3049df57a5ec0fbd9ba09d896c20dcdea35b25a54bad5c7ada29b79cd3857d3306f10a1bb871168b0fad9a942e@15.235.43.227:57672", - "enode://72f3a90ab2dcb906770dbfaf058ce890f03a5ffa7389ccd5c6454d86839a06547b536951d93cd4a49433d6afd8c25cbd67a85114427e0559b1236611e0b17b86@54.144.87.117:13598", - "enode://75569d599f2d99cb1c221f5527fdffba104baeae6230d9495a1a483a5ebdd7ea960ca6697a99598b773c1b21f1f4dcaeb42a5dee4446fa469f89ce5e1ad0ad00@35.197.198.100:30308", - "enode://756a9f53fec0c7694122ba9c86f7f906c7cb52addef278497d30e71827a572c6a0162bcf50fe65af2df7f85e96ded56a62f24dc4d1d924815509139ce63f31bb@114.119.173.223:50610", - "enode://75b3899a2e2944c63cd0c053d896e1218c0305b7d77d16966e435b8c187f73a2172a04f342525cb0642188cbfe6bb88388b98d2af71b76c2c9541d58578d0985@79.136.48.218:34550", - "enode://76a190eefd1d0ee161718719e99bd16ed46e86d7fdfd468a2bd8c6a3aeec4cb3b354f9f0c4c2a28b21301eed2bd92c7447b91ac92a8fce6b00115a549f143143@68.234.2.82:30303", - "enode://78aa4379268f21a253fddac12b8d8957c586103ad7732a24f5cf8766e33e0414302e3e2d302e5cb9ef566c2f7cab007ff9c2f1d2fb138fbfbbaf2dbf06854f0b@66.129.102.22:50444", - "enode://78ba1eab9fac35aff952b10b4a2fb13fecd50c1a70cd23dcdf3d074db7dcfe66ad0e6fe5a52fa0f150e54f109e14177b9addbde2261b6b4ecb1f844baf787d4f@216.66.68.49:48030", - 
"enode://7970f2a8f70637af935cd8c8eb7644f28ed72678d51982e9ca2f2ea2492021cfe9e5473c46a3da582e887af9e702a22170e13c387e5ddf1e7c30d4c5e0bf5cb9@54.87.43.217:30303", - "enode://7970f2a8f70637af935cd8c8eb7644f28ed72678d51982e9ca2f2ea2492021cfe9e5473c46a3da582e887af9e702a22170e13c387e5ddf1e7c30d4c5e0bf5cb9@54.87.43.217:30303", - "enode://7970f2a8f70637af935cd8c8eb7644f28ed72678d51982e9ca2f2ea2492021cfe9e5473c46a3da582e887af9e702a22170e13c387e5ddf1e7c30d4c5e0bf5cb9@54.87.43.217:30304", - "enode://7a367a38374d4a11bde63ade026afec7cd7d65d506521a8b3a9d2825f8b7bfbe7069bf1c6b7061d5fdb01afacf30f371da1215af48d0e77debf6c92faad8ff11@176.103.222.177:41308", - "enode://7a4e51c497b1c2b23400086a82357fb53531363e47d6f2633f22f7a3b93484335138e4c12b2f0e970583ef13b899b43352772365ceb154c8e58985b5c6a34bb3@54.145.113.228:30303", - "enode://7aba4a92c00be8260e1a04dc36392fce02a7dc6bb96a028a5260fda90035cf0e5fa18d843e2a16b4c4ad843a95ae6b1c77e5955bbd97a45fd47d2ae5ab900f24@164.152.162.92:40303?discport=25694", - "enode://7b336316176813e6eb8b8af54ac68a0f6b4d17c42f16e5925475ef58439f454542701b731a8cf17a194974715aeb381015d5c9a03f341c16a9073748e81733f3@34.233.71.76:30303", - "enode://7b5d545481b6d799e5c68d181e2cb73a5f334e7c582996e59097e8fd7cf705e91611f293770d76e9dcedd08b092e8a36d9537a6f4304c32ffb6da7cd64ab4c8f@150.136.144.184:30303", - "enode://7c0a5e2a92dd56139d9ee29bd8d7ee2f8a8f1c608b4a6f9f02553ec1ee75748b8afccb18d24725c258d0297e235b970468174c9d71a8ea940bba2e089be8f6d7@38.170.199.194:31330", - "enode://7c0a5e2a92dd56139d9ee29bd8d7ee2f8a8f1c608b4a6f9f02553ec1ee75748b8afccb18d24725c258d0297e235b970468174c9d71a8ea940bba2e089be8f6d7@38.170.199.194:31330", - "enode://7d9e467cfc50dcb8f608aaceebf62b15b2820d3f71ced04f5d56e67dbbd2eb979da1725ab050c8a517e7660bee5c0ea735013819ef7fcdd051e5cb8f9367b862@13.112.17.216:47254", - "enode://7e1b419ae6474d8537dc8f2fd53d5b8004c6b8cd4f50819021576e7aac65163f2c7b88c9e9695d8ba0fc2eba8e1867a804f4159bd1328b48ef92b5ed9b303546@185.8.106.242:31304", - "enode://7e1b419ae6474d8537dc8f2fd53d5b8004c6b8cd4f50819021576e7aac65163f2c7b88c9e9695d8ba0fc2eba8e1867a804f4159bd1328b48ef92b5ed9b303546@185.8.106.242:31304", - "enode://814a68fa3d22f4c1bb7ae67fb2f86979f70e95ad69974b88c67e0eea5c7305201ecedd00899d6316ea278eb3eda1f4c8561bc7307c371a8e11419a18e8113276@18.235.1.246:56694", - "enode://81aa60dfc336e46f117c763e77d65077356df7cce844035f8d67e001989d9d1537756363c40d4daf530dde424824bf789cac684dd6a2abf52df554777d4dd4c5@216.66.68.49:38870", - "enode://81aa60dfc336e46f117c763e77d65077356df7cce844035f8d67e001989d9d1537756363c40d4daf530dde424824bf789cac684dd6a2abf52df554777d4dd4c5@216.66.68.49:52874", - "enode://824f19709f333f37fe51a56d3cead141b8b495ea28e61b870834f4bf787e637ffcfb7119504a5af591a09f96017c6122901f444e929882945897dfaeab38c1fb@146.59.70.240:30026", - "enode://824f19709f333f37fe51a56d3cead141b8b495ea28e61b870834f4bf787e637ffcfb7119504a5af591a09f96017c6122901f444e929882945897dfaeab38c1fb@146.59.70.240:30026", - "enode://83453a62720cdb81d1cced1a5c8b467a44918f4ff73446221dbee9e921f7fc54ceb895f4dd45932aef0f7910a5437d13593e8a2103b579177bbd836358c0d193@35.188.231.96:52050", - "enode://8481083462f0c702baefaf82979a31c2f340a3c1c3fa86d0b81e5d65bc7055ef6d816fa22f2c9e1f2ac7ad92bec861b4307745e650b9194d2555a54f15b66355@3.213.8.33:53881", - "enode://85103907aec8bc1cc0ff6988417cb752958d3f3ee8b87860229af99de30d5f99b987223eba2b99387cbcef70d1d448f7c392a2109eb2049a5581dd7c8a86cee9@52.3.201.219:30303", - 
"enode://85103907aec8bc1cc0ff6988417cb752958d3f3ee8b87860229af99de30d5f99b987223eba2b99387cbcef70d1d448f7c392a2109eb2049a5581dd7c8a86cee9@52.3.201.219:30304", - "enode://85103907aec8bc1cc0ff6988417cb752958d3f3ee8b87860229af99de30d5f99b987223eba2b99387cbcef70d1d448f7c392a2109eb2049a5581dd7c8a86cee9@52.3.201.219:30304", - "enode://856844132b945799f29f1ebd3082716f7afff2c17e675bbc825da89a1643fe70de3fef6e41792c50733dc79e03ccdec4b0ffbdf219fa575df6e699f2dc450a47@3.228.39.183:34395", - "enode://85d8a5a7f7373a6901f139f5c3d7477a4c17f0d8fb2815ac4777ad7c620da8297101e3e4394a643e34fad6257cf2a497a291aa79b97838b59d3db9edc369ceb5@3.39.35.120:37602", - "enode://868f8cc762fba72a547c888fe058e70adf4336f02f6b5a318f25fe72833a4df801e487bf5ad64e67ac186121ca376b7528f8c74d9c73abfcab4707e00c067a16@119.8.164.87:56394", - "enode://86939103277ce9c61a4d49d98285a625d5c81fc655a2d00abc8dabcc6da41b1f21789aff51b18a5b83a4517537f2a096364f02d9b4f3b66c2795b9b3c13d8ee4@157.90.131.181:22050", - "enode://86966721fd8cad329c7a283d6d283ed012a0c03cf172c379fd208c3ea6086de6af484f5d470a560b143be5a35a3e9c8e7f38ad7a8307a2c3438451bd8aca56bd@13.41.37.94:30303", - "enode://88259fbb0dab73efc9b42481e8f32468ba466bfddb9a72f5e98542fd866a4ef6a4cdc44d4b1d20c099b8770465aa0f41f7263ff7a0f7c3b05c8ae62f1d3e5a2d@3.83.81.217:30303", - "enode://88259fbb0dab73efc9b42481e8f32468ba466bfddb9a72f5e98542fd866a4ef6a4cdc44d4b1d20c099b8770465aa0f41f7263ff7a0f7c3b05c8ae62f1d3e5a2d@3.83.81.217:30303", - "enode://88e0fdad56f02ca95a13b7b69f94f957539634dbb4f29849c3190a85711346db7c4125a4744c83325d0bcb461b7bc31d1efbd73ec1b3a9d3b58a1cf781cc0f1d@34.234.211.249:30303", - "enode://88e874f91f450fbfaa75843d0b1fc0591853457d44424e8cdbba748d9d4e98db335b59d816d60c0c38c40e8d3bd8e0009ebc648cfb08923d891cd2721f446374@54.83.170.219:10781", - "enode://88e874f91f450fbfaa75843d0b1fc0591853457d44424e8cdbba748d9d4e98db335b59d816d60c0c38c40e8d3bd8e0009ebc648cfb08923d891cd2721f446374@54.83.170.219:14316", - "enode://89226ca5251b86be8743793617f06f2d88a306e3e8efc25e2423c280a65ba42f4cdd428a6f89fd917dc9298d9a741f0e61939d11246d18cd45c5f0eca453cf37@54.83.170.219:43301", - "enode://89a788469792b1974c045b335b5f41404173d9c7ad8bca6c222adbbccf5f152dd7dbd0c2560c3d552937e0133cff2f3a2c58e0e994184f26d7fa584fe83e6b78@63.251.232.124:18210", - "enode://89a788469792b1974c045b335b5f41404173d9c7ad8bca6c222adbbccf5f152dd7dbd0c2560c3d552937e0133cff2f3a2c58e0e994184f26d7fa584fe83e6b78@63.251.232.124:18211", - "enode://89a788469792b1974c045b335b5f41404173d9c7ad8bca6c222adbbccf5f152dd7dbd0c2560c3d552937e0133cff2f3a2c58e0e994184f26d7fa584fe83e6b78@63.251.232.124:18212", - "enode://8ad33730778def989a30e2da388232626380b432ef237daea9716b74b45b739a518a7047c8611fd1c1831494ad23d728c300a288f259fff167d97f294d69c37d@95.213.33.11:43890", - "enode://8b63e044dac9476c207c2684e431f80fd621d6380f7f19bb2a93a3f6ab99af0bd624c7aedb27978c12189d18371d1d6f5f67cdbe552899bf01449c445cb6ba0b@34.255.201.83:30303", - "enode://8d3e6c8e8b434b3a10b0ddc30555c6736fd6bee148dcd2684bcb2edb7d7a48443fd46ffbae3217dc3719f6c947f2e93202e2a08c21f26d7ad1fe0c041f5571ff@93.103.12.140:60896", - "enode://8f12b965a84ce49a7e909196374b09eed79b51d92af8334046d50aea78ebc04d94d149ffbb1d00109ae02f0c006c4c5d4de37c44aa5c8454970182eab0f66d5c@141.98.219.55:30303?discport=54371", - "enode://90b823609d23be99c01e0c1c3e69147151ad60a2e91f03c2fb5b4b90cbb6c7bed1165a3c8877e02d00191fc9a81171a9f595b35b948b9018c024018a3769fbf6@15.235.14.210:31305", - 
"enode://90b823609d23be99c01e0c1c3e69147151ad60a2e91f03c2fb5b4b90cbb6c7bed1165a3c8877e02d00191fc9a81171a9f595b35b948b9018c024018a3769fbf6@15.235.14.210:31305", - "enode://93203b1bc8c32ecbf10bf8ad1a201eee1a3bd136962deec5aaac633a5541babfd4cb3c2a9ffdf86f70ef17fb0414989032d21fad0d206f50cf63c3fb8e841986@150.136.249.112:30303", - "enode://932245a127e3b4211258aa81cc8a3cffaa2e470ad281c46bf00dc6697173190aa166b0e04aa8d7777edc3865c403c3384fffe69cc75f5f7f482987c665c6a892@35.234.142.158:30307", - "enode://9439df2edb40a03b98d534d6ae35ca94c095b595d31c3a3e4dfe04868b91d23a05c962c65e8fc5109e4c6281ca9f2739f19e16eb89e32350f69fb074bb58fe4a@64.25.109.179:30303", - "enode://9602b654ff54c6bc4164027a63051d0f4db53fe182872fa0cf569e43dc2ba983eade5c57a6ff5762b3ce1a5a2c87c4d2b4a4ecb5e05e4016943188df509a3cfc@107.6.91.43:18211", - "enode://9602b654ff54c6bc4164027a63051d0f4db53fe182872fa0cf569e43dc2ba983eade5c57a6ff5762b3ce1a5a2c87c4d2b4a4ecb5e05e4016943188df509a3cfc@107.6.91.43:59640", - "enode://97abd5038215953e47be40a7454305e4fe46a600d6f07d083067faa1049f546da6a170ea7f83c90d0a6e3cca1777fd6bc070b3d32756db3b9855c8cc0a2542ee@141.95.45.80:30024", - "enode://97abd5038215953e47be40a7454305e4fe46a600d6f07d083067faa1049f546da6a170ea7f83c90d0a6e3cca1777fd6bc070b3d32756db3b9855c8cc0a2542ee@141.95.45.80:37512", - "enode://97b785f6833eb56871d3e7d3b1e6a86ad5db779c4fbcc5879efa887761fe8de264cb829b3b08e97bd20e23eeaf3527ea7834fbaba7a37a68a84d00a73ce762d9@15.204.143.194:30313", - "enode://97b785f6833eb56871d3e7d3b1e6a86ad5db779c4fbcc5879efa887761fe8de264cb829b3b08e97bd20e23eeaf3527ea7834fbaba7a37a68a84d00a73ce762d9@15.204.143.194:36104", - "enode://983843aeac14f5319d19ff406c8af8a13c8879cd39ee7137e8855551167e4c92fa8faa9c56c608fa8118f0d1b08575139f3caf004da55fc833d59afb592f200c@109.202.198.246:30503?discport=38194", - "enode://9aa8ddb10c6d2c847a2e42b884f24fc0c137ba67e097f7ec4007a3764bf0b8471b0811c38e4f82e9f9228343a17bc53f6ba25b8a4d05c96c4363abdc49ea54a0@65.75.210.202:30527", - "enode://9cb276a966ef1522c290b906ac915ba4b4f59f60acd36e3269969f630fbb58aa738bcfbf65301e45b65874ab0a854297318d94cdb61c0d016f40ab0ed397582b@138.2.176.116:30303", - "enode://9cb276a966ef1522c290b906ac915ba4b4f59f60acd36e3269969f630fbb58aa738bcfbf65301e45b65874ab0a854297318d94cdb61c0d016f40ab0ed397582b@138.2.176.116:30303", - "enode://9f3908c360e9fb385de76b54b99076157f5fe81efb87eb01648f52ce7795ed415431dbdb801787be06e201e4e594876b5779caf5530b8a838479e16c9b472c4a@54.81.209.83:30303", - "enode://9f3908c360e9fb385de76b54b99076157f5fe81efb87eb01648f52ce7795ed415431dbdb801787be06e201e4e594876b5779caf5530b8a838479e16c9b472c4a@54.81.209.83:30303", - "enode://9fc3b7b90743608cebb6c1650be5a7abe410c0fae3419225bae499ea4a32bb09aee88dee51b8bc516c9693e88ba609bbbfcdcefa3150d47e9d9f1b991c6fb0da@70.224.208.230:56094", - "enode://a00b673ee99d1aa0e2028ddb86de1ae8edd45139f08050d1256ae4cc8aa8dde934f37439246eaa22d6c6651111e4d713a7ac758be601426caf149648cffc8f47@164.152.110.159:30303", - "enode://a00b673ee99d1aa0e2028ddb86de1ae8edd45139f08050d1256ae4cc8aa8dde934f37439246eaa22d6c6651111e4d713a7ac758be601426caf149648cffc8f47@164.152.110.159:30303", - "enode://a18fefbb947edd472ad05627a8c0d457134aedadbf53214a194294fa050686ba9f8560f220a3f7d226b890fe0e23dd1b8377c75f26f464d9615705becada813f@212.8.243.57:30303", - "enode://a1ed5c2dd1050af7969726fe70464e84699dfe870eba82c6b9977f6441ecccd2f3f4b08e1cdca10a5e6124ec3ff196cd6777824b410bbc0bb7d744ed5c3af8a5@141.94.134.13:30303", - 
"enode://a1ed5c2dd1050af7969726fe70464e84699dfe870eba82c6b9977f6441ecccd2f3f4b08e1cdca10a5e6124ec3ff196cd6777824b410bbc0bb7d744ed5c3af8a5@141.94.134.13:30303", - "enode://a223d5685d32ecdb4d920e3d3094f07362b44632c6dc6c519eb372eb6849a7cd03768ae71603597cd052f9f5a69172896fae46da351bfc6c6e68efb959a1c654@18.234.36.2:30304", - "enode://a223d5685d32ecdb4d920e3d3094f07362b44632c6dc6c519eb372eb6849a7cd03768ae71603597cd052f9f5a69172896fae46da351bfc6c6e68efb959a1c654@18.234.36.2:49652", - "enode://a2da6500cb9bba423367c41495d3961cb484f7df008349f47c5a150526d70787511b3c38ffe92602fdec89b71aedd2473af44d9f35861113c5804e4ad8632374@51.210.114.83:42344", - "enode://a4676272fb0923a1be9db1dc0d36f8f854acbdcba34b7886071fcac56bb8520ed7322d15547caa7e110ec58fdf591363c62ccbee751a7e37a28c2909eb175416@82.220.39.130:30303", - "enode://a4dea92f0dba45a6e2701d2dc9d92d65783065988550c481cb5ed6ef9ae97fee027e298b696259dd2c90a2b2402241d809581cd0a582b64a1d9246c69fe5e000@34.159.242.76:52442", - "enode://a4dea92f0dba45a6e2701d2dc9d92d65783065988550c481cb5ed6ef9ae97fee027e298b696259dd2c90a2b2402241d809581cd0a582b64a1d9246c69fe5e000@34.159.242.76:53054", - "enode://a5401a6562d20855b80e0c470b9e7a3aac232243fa4630e5eb0c88d56b90dae47e5df4588aa8ec38f2967c7a5fbb3770807f9993de12e40ca372d12c8e17e5fc@3.209.128.173:60607", - "enode://a552f25b8f8e88e434ea6b1d40d48da370db790655660d31616b9146de401bdc73596f3fec5cb77fab063cfd116c3d569ebff8fe4b1710b2e7040f4466da1977@158.101.106.41:30303", - "enode://a597d7de13b34880e1cdf1d9b4147029fb8aa6bf85097285d21265e0966221cfb9586b6cb9791265777d80a90b768d716b3a28a68fc97bf3a809116a01ccc3c1@103.180.28.201:52930", - "enode://a64f78d87dcc2e85851c66353185b89bee3a72b6beb9e207a7b002039a6f64d7c92d172a4bf864964fc7d5ccf77b8b86873dd39ff2f60c5c3dee746baca42793@54.162.233.19:30303", - "enode://a973e6041cad0a60af776872edf5e98e5cadc86a8ac6555a23a6a76abecf337185b3d48adc4bf423ba4234ee86f40a4bd216d114b51dff0c004b4f7240ff88bc@204.236.199.150:30303", - "enode://a973e6041cad0a60af776872edf5e98e5cadc86a8ac6555a23a6a76abecf337185b3d48adc4bf423ba4234ee86f40a4bd216d114b51dff0c004b4f7240ff88bc@204.236.199.150:30304", - "enode://a973e6041cad0a60af776872edf5e98e5cadc86a8ac6555a23a6a76abecf337185b3d48adc4bf423ba4234ee86f40a4bd216d114b51dff0c004b4f7240ff88bc@204.236.199.150:30304", - "enode://a990a42e888cc7bd22ed4f351c23e524ef3b2ef593f3f8c75b37c887bf307d299dd32fc83ffd7ba3f6a70b2c3ed2c7c7ff2a4382c997bd4d07e0d9b3a5a14508@3.213.8.33:7770", - "enode://ab01ff5e669123e263f9a56c7759ede374e10ba305646303150eacde728de2645fdd88e11be3594f7960cf68463b64472108821597260696bc1d26d910aab49c@129.213.87.141:30303", - "enode://ac42caeddf76f96fdbf41043c6cce52a477eaecb22a53dd76e2c6873f58aed0785043f58a8860c2425c54d4bfdb4923314a75537df21e647617063973647ca76@209.58.191.9:30303", - "enode://accfc075ba8be40f8875b4aba6a6af81fc36a6ef02bce09b91800f9ca232ff55423c39acac05463cb54ee28e452765b0124f5d11612454fce56a75a83890b574@130.162.253.39:30303", - "enode://aeaf883dbb2fa7c7740218617e58ab26b99e7106ebf329caed872c407cc543ac5a3b238c21a05fec7df3eafe241e2b80d65c350e4ef12d79ab02ddffa3a36858@44.203.136.195:30303", - "enode://aeaf883dbb2fa7c7740218617e58ab26b99e7106ebf329caed872c407cc543ac5a3b238c21a05fec7df3eafe241e2b80d65c350e4ef12d79ab02ddffa3a36858@44.203.136.195:30304", - "enode://aeaf883dbb2fa7c7740218617e58ab26b99e7106ebf329caed872c407cc543ac5a3b238c21a05fec7df3eafe241e2b80d65c350e4ef12d79ab02ddffa3a36858@44.203.136.195:52882", - 
"enode://b08bdc6adb902f31e2525753a40c32a37f58f56687ee5e3927bf6293c116961b78ca8d7adc3e108ebbdd1afd18726ef1b220e8e85eed4d304b492d5bae5b0733@63.251.15.90:18210", - "enode://b08bdc6adb902f31e2525753a40c32a37f58f56687ee5e3927bf6293c116961b78ca8d7adc3e108ebbdd1afd18726ef1b220e8e85eed4d304b492d5bae5b0733@63.251.15.90:18211", - "enode://b08bdc6adb902f31e2525753a40c32a37f58f56687ee5e3927bf6293c116961b78ca8d7adc3e108ebbdd1afd18726ef1b220e8e85eed4d304b492d5bae5b0733@63.251.15.90:18212", - "enode://b3dfc39c2e1e1ad490918f70c543e03342e198fc065c730af91b8a5f9b42be062834db9510187f09e3bd7f610f7ee0a6f7c88450e8a7cb11a126fc4bff6d0cab@132.145.247.198:30303", - "enode://b4fefeb40f792b7801abcee9bdd923c6e3e95942c7bc5d84f100dccdba286f19d3e348fc1c43b5e06047a4309ea80d25760b774c276dfb957059e0c0e53ceef0@185.209.82.99:43490", - "enode://b5d425bb096f11a7534971a909c678eec5c73db99cc5efbb780604bf25815e53310033a3064cebaf5a1947c89c7ae6e96c2e8400adee451fd881c19b3b373480@18.222.63.154:30303?discport=65201", - "enode://b5dea509e0a6576bc0367d3aec4f5e7b0ed29499220c41e4d3ff675e587ed2b705ccbd2ceae7936a21546cbbfbcdcc17ee91564bbf24907e031ce1a8abfd1572@34.198.224.121:35417", - "enode://b611b092d7781accb1d5b0e4ce7e0495ef51141e856fc52d173b0b801e6275cffd75293781d1db242272391de2add28f704081a4763d81d5a24f52f88a2d4be0@144.91.122.225:30303", - "enode://b6775f9de5761c5cf80089b02548ee4ffd7fb2d57bc081e98d1612002fcc50177fbe9227b7792ac5c8ff1279453aa26ea12fec995085713ae2a3759f4d0f65dc@157.90.106.242:53840", - "enode://b91df1df535c146bba2ca9247e59e1cbac75853cb3e049c2f6bc1c56a8790dc3047b282ceb7dbed91135699fdae3c5bf0717d3175bb1d4867d7856368a76a9b4@129.80.203.98:30303", - "enode://b91df1df535c146bba2ca9247e59e1cbac75853cb3e049c2f6bc1c56a8790dc3047b282ceb7dbed91135699fdae3c5bf0717d3175bb1d4867d7856368a76a9b4@129.80.203.98:30303", - "enode://ba1251300edd654c13bcc09c3610e9b8adf50f08b9753760c3a1398696972040b9f6bbfab08f3e3267e1db7b8d48f377004509dee022ca33057d826918bd9029@103.106.59.6:23167", - "enode://ba14b89f96fbdf046597396493ce687df032417459c1ce4e71abac72fe9473962d96ec74318dc0bfa2336381185a47da5d36f9996d680b3df669f595778de605@15.235.10.151:30313", - "enode://ba14b89f96fbdf046597396493ce687df032417459c1ce4e71abac72fe9473962d96ec74318dc0bfa2336381185a47da5d36f9996d680b3df669f595778de605@15.235.10.151:38088", - "enode://ba753754ac64310e94bf2e39f208b57370401aa7073af96bf3478299f5ca3f905107985b97029f717f66ca52aa661fab576f42740192c13598eff1020159318f@54.175.27.209:43651", - "enode://bb16d4b94113833539b902acf4a45ffdfada715528f34aaa1cad31965117997ea81ccc121fb2d9512ea92365448926241ee5e327149f878ccf5347c392b683bd@44.193.10.210:55176", - "enode://bb217923f38ebc93b15989b4313485dc9b5205335a60ad69b99d8dd058c196e0928091c0345ab63e4af345d8a6fddaab704292eacbff3aa60e5d32186bece239@54.159.27.246:30304", - "enode://bc7ba85976d8dc3734da82d0c448069264e1d384c3f5ebcf5c73cb4a7b0fdf563207c3a939cf6828d33e535a35ea49d529c3b8582bada6fcd14e2949752e5c6d@144.91.64.35:30303", - "enode://bdccc3d51ebd46b6e8b73fb19d7f987acb79f34b3ae867778fa3b98e71e04c86ba2ffe3427be5c03ad07cc0054b75160dcc87efd19672f2115e31e5c09e8379c@66.18.13.163:30303", - "enode://be7b2b5f15e6bc71ee2765118055154f0ae6c568e85b2c49145d1d9df00944da029181a3c8490352d1dc5b107ddce78914955501ede82cd2c800fd700eeb72e4@147.135.31.203:30303", - "enode://bfd0f932fb352a1cccbef0e4304996f9b39f024396300d96c8d84e876058c07b88dae2e1f9e04b9170e89726ec54dd407152536b60dc43e1bd5a16eb1080788e@54.228.163.150:30303", - 
"enode://c017860de48a2a23ad86fa86e2bfcf73fa920bfc5c5379883a752beb0e8d0a206ae1749e053bb3131ff895991e8125c6af6863deb6e298e84a74f32ed9eb5277@34.84.31.214:36088", - "enode://c017860de48a2a23ad86fa86e2bfcf73fa920bfc5c5379883a752beb0e8d0a206ae1749e053bb3131ff895991e8125c6af6863deb6e298e84a74f32ed9eb5277@35.221.116.119:52204", - "enode://c03dbbf8f2e14098e452ae5f3526e2762e9189ef5a7adbc5d1ec91accf7d2dd5272a8a86f72521d4cea96b5a31b150f91746d9ce58ebbd4c37ba8c7c0bc5c05b@150.136.112.65:30303", - "enode://c0b59c590fd04467d43e7dd755de4588dd5afb344451eb01ae03d30a4923175d6312144a15794a404f78c604c7fc708c3413076e41157ae1ca0330f579903e32@89.168.91.64:30303", - "enode://c0b59c590fd04467d43e7dd755de4588dd5afb344451eb01ae03d30a4923175d6312144a15794a404f78c604c7fc708c3413076e41157ae1ca0330f579903e32@89.168.91.64:30303", - "enode://c0c3afdcada45e79d5413d7cf9a8a465dccb30f599f516c5e3b70daf4cecad8ad1dcea193dd36b96b9c5e95d818586c841b373e505dad1ccc84e240915a6ea9b@62.12.164.254:15325", - "enode://c0c3afdcada45e79d5413d7cf9a8a465dccb30f599f516c5e3b70daf4cecad8ad1dcea193dd36b96b9c5e95d818586c841b373e505dad1ccc84e240915a6ea9b@62.12.164.254:32618", - "enode://c14b05bab1d7ae9c804cadceb22f332a18909fffabc12ede88c10c41103edd909eb7f8675e5b57cbbfacb9a419392e21fd00d796fe3f629d9f95a34765543536@54.81.98.230:30304", - "enode://c14b05bab1d7ae9c804cadceb22f332a18909fffabc12ede88c10c41103edd909eb7f8675e5b57cbbfacb9a419392e21fd00d796fe3f629d9f95a34765543536@54.81.98.230:41322", - "enode://c14b05bab1d7ae9c804cadceb22f332a18909fffabc12ede88c10c41103edd909eb7f8675e5b57cbbfacb9a419392e21fd00d796fe3f629d9f95a34765543536@54.81.98.230:60914", - "enode://c3c06cc8ca743c32dc78584d6043372239923c4af1749ae8f65ac43ced289282da002f4d9c37833d14828004426b91bb5edee65be256917c80944807d1041597@40.115.211.154:30303", - "enode://c3f1051d3e226221b7ec8b45b5195b4c2c6c96ed216c64c83f0a2255a5003f330e34e96ac5928f608127eb67896226a3fd1e0e30e69519cf87a73212f2d9620b@44.240.230.67:58276", - "enode://c3f1051d3e226221b7ec8b45b5195b4c2c6c96ed216c64c83f0a2255a5003f330e34e96ac5928f608127eb67896226a3fd1e0e30e69519cf87a73212f2d9620b@44.240.230.67:7175", - "enode://c4ad0fc7abb288c0156ae7f98f7d73eb0431b1863d270ebacba2edbd620da85d6681f59e5a99d0ea12ffdd37c844d3c873e5f343780769d53bb39fa2c4960a7a@169.155.170.17:46600", - "enode://c6ca003916e2675beeeee70221642cd43117f62c353a2de7054f8c52296b783e6a884f02aae50f40357c2bbdac715aa0e355caa3c13d44107c8f9041ea0a243b@141.144.195.33:58904", - "enode://c857b7da987063d7e6bb5d2f3cdb1a0ea0f496863cebf55206abe759c60f759f6746484a249d0afb05bcfc886e40c7a163d165c9cf1618fb4df2d3deae12f8f2@136.144.63.129:30303", - "enode://c957d2bc31f6ccdc4f53285f52363cc8edc73872f2cac480a80ffad9451ffc9413d24227f5c0f15569d0180503a3f5ac6b10d51f5627912d0078490ccfdb38ba@35.187.60.60:30303", - "enode://c9f85b0ba65d250115b2f50e9ab8646e00fba7421dcaaca0205feed3ca7bf00d8f70bc6493de0869d747a4093324851a5a8170453b5dd26bb31d2a22f9342118@51.81.184.9:30018", - "enode://c9f85b0ba65d250115b2f50e9ab8646e00fba7421dcaaca0205feed3ca7bf00d8f70bc6493de0869d747a4093324851a5a8170453b5dd26bb31d2a22f9342118@51.81.184.9:46418", - "enode://ca78548ddc1905b2f3979bd56d94f123e48c3e2556c0a44dd757c895e7e5fe597eff871e9c7a95a4c71164c7e5634848e99e5ca4776b5c9111b21ba751c820b9@107.6.112.166:18220", - "enode://cb630ba1544f2c001db5a7513b7eb49ddd15185935e8087670ece0f85d2ccae043c32c4c5eaffb328caa9d913dbe5a98f0fa864944ee60e62929935ac6a19aad@34.245.102.222:1372", - 
"enode://cda43440a3b12124a28e0a9aeb18377ee764510457ec9143435fd21b7faa99c79b7925469266feac77f28cc719793f503a033ae8cc0585ee15e1acf6f7df8ac9@74.118.139.205:44640", - "enode://cf8eb1893e07cecfbfb7d13714a0c80831824da1367216faabcee69bd273c246a73537bc8cf4d507bcbf2411c410ec8c052a0f1bbc8880c612a980be4970e3d5@3.209.128.173:17265", - "enode://d06fe32a28c8ac962aa2a6afd1b2fc2943c6174ca1db8d6e43b17d14cfae1f5e09f350a4a55a0221c28a91fdb8d876e14241350dbb5a74ff2faa9f07c8bdad32@18.141.115.121:29188", - "enode://d210e95cca01a51da93a7a386b08e16f2fdc1f685b92ce4c36cf263d89764d8a4240e573e4276862b83550cb375781b157b16b2b35f8faba49814465e2722923@206.72.199.150:31330", - "enode://d26253af13ef20aeba3daa6b433cc90f85f73662a0ab588cb35e60ed2a42efcf67f727dc6dbe38eeb02cbdf66aff2200ffd0711ad765ee86025a4f77ad377263@65.109.95.210:20438", - "enode://d26253af13ef20aeba3daa6b433cc90f85f73662a0ab588cb35e60ed2a42efcf67f727dc6dbe38eeb02cbdf66aff2200ffd0711ad765ee86025a4f77ad377263@65.109.95.210:50437", - "enode://d49eb9f5ec890f9199f1aede07ac856cfc4e4e85915497e6d1a39311d3251ff860ab55298fe6990f66c2d8d6b8c3bf6e35cb11e4f560a2e72d94fb680154bc50@18.206.171.222:30303?discport=55618", - "enode://d54c0b6d456068469d157264664583167b77ddb17f607971ed2bea0928046eafab95d4f7d19e74c14c17299ae85c386cd245b1be93a862ac6ab87ae24e851750@51.81.244.9:30304", - "enode://d5726d305200281ee7997c140b9f342351b82875e3b1814195b4661a9d55b753258e1520bd19cd14968a5a1b585bb3afe2840fd4af15dd96db6dfc4c97165434@129.80.98.224:30303", - "enode://d5726d305200281ee7997c140b9f342351b82875e3b1814195b4661a9d55b753258e1520bd19cd14968a5a1b585bb3afe2840fd4af15dd96db6dfc4c97165434@129.80.98.224:30303", - "enode://d5c710b9d4cb70987c9fdfee062a04f11e712f16d0f29b1cec1f08cf13221290a147f0e9aef4a650cbc6220a6c307c1ae35d22859fc6eb016c24fad38dff2345@69.4.239.33:18220", - "enode://d5c710b9d4cb70987c9fdfee062a04f11e712f16d0f29b1cec1f08cf13221290a147f0e9aef4a650cbc6220a6c307c1ae35d22859fc6eb016c24fad38dff2345@69.4.239.33:18222", - "enode://d70e68870971938d59021cb960b93f8b6b0a785e5412dde0fa1a04dade74ab1086853c48857abad5a6018f0897c8ef0c149c13e15c227dffc9471d97a0b33543@185.127.231.90:44472", - "enode://d79ad3cdc2fc834964c749f02029d99c904896d298ccd9ed2185305ea1dc76642dcb1ce09636b229160da4150617542cc17fa070cefc2a6e3c1913c241351015@3.93.64.78:30303", - "enode://d81e825e24312a2561740889cc320cfa2d217310b4c358b93a2e6595923d41c897219d22d3f26391c5a19982a743f84801765bb615bf33f9f52fb98c438f01ca@54.172.75.221:30304", - "enode://d81e825e24312a2561740889cc320cfa2d217310b4c358b93a2e6595923d41c897219d22d3f26391c5a19982a743f84801765bb615bf33f9f52fb98c438f01ca@54.172.75.221:30304", - "enode://d81e825e24312a2561740889cc320cfa2d217310b4c358b93a2e6595923d41c897219d22d3f26391c5a19982a743f84801765bb615bf33f9f52fb98c438f01ca@54.172.75.221:51928", - "enode://d9138ffcd8ad3d279321d5e0f1257104696603724350ed222cfdec6254510e03e53598374eaa4ab26156708e4a7a18e136e987a0d6edad2fdf674484264ba1d8@107.6.113.60:18210", - "enode://d9138ffcd8ad3d279321d5e0f1257104696603724350ed222cfdec6254510e03e53598374eaa4ab26156708e4a7a18e136e987a0d6edad2fdf674484264ba1d8@107.6.113.60:18212", - "enode://d9138ffcd8ad3d279321d5e0f1257104696603724350ed222cfdec6254510e03e53598374eaa4ab26156708e4a7a18e136e987a0d6edad2fdf674484264ba1d8@107.6.113.60:44016", - "enode://dc3c2b96c782a6b699f70b0d7924cd347721fc9aaebbd94ae7f90493c4165b0b85ae33cfdd09b0bec8c2a50b56e10c8a59c9a2d54f650a1a2dcb1c8c4cfe039d@3.82.42.100:30303", - 
"enode://de235005f7482ed51b932f0bbe151b22aa20e123a39919cfa92e9471556e41db77ab6630a547853e42214fbcc20c95ac41d171fc53b73a138ae57ece282e08c5@176.9.148.15:30303", - "enode://de235005f7482ed51b932f0bbe151b22aa20e123a39919cfa92e9471556e41db77ab6630a547853e42214fbcc20c95ac41d171fc53b73a138ae57ece282e08c5@176.9.148.15:30303", - "enode://e119b183e7bfb8de52a44ce686df13045faed1198e4fb03b81b22e323c8ee54706627276218d34829181fd97a2175974a1b48995af66c2fc8dd66f7655a1c9b7@52.221.179.246:30303", - "enode://e119b183e7bfb8de52a44ce686df13045faed1198e4fb03b81b22e323c8ee54706627276218d34829181fd97a2175974a1b48995af66c2fc8dd66f7655a1c9b7@52.221.179.246:54404", - "enode://e24615a26d9690aaf395c2861031fc550c260da4bb147ea4c8d80406343be22a79053bf1dae4b6f20a47b0bd90b30566f9717f5fc6926e225d7ca025ed085b80@130.61.44.105:30303", - "enode://e31a84d94250071b4bff8d0375eb9423c041b8b9e3d75a8b1b3d13f20f489c44293e2ff7d5f2a82f86f7d24f7e4f479edc2bc29909a938665dcee8cde2040e01@18.140.239.111:30303", - "enode://e36d0c159ab988de250def88cacf56b644e9c35b1d650d5eeb6a3e92a1195f105f9f15e22dfccf1319f2c3d4446c9a3ca5c2b49813f0fb9d34023c92c89c2f7e@44.200.232.251:30303", - "enode://e4d7ee9431c64db127a318e35e94e6f1df2db1c2cacac19553392db9a364f6f8ac23e6276a97140c33b433fa23e5fb3c98c7e74ac410f89f19861a2f151558d8@129.153.225.118:15664", - "enode://e4d7ee9431c64db127a318e35e94e6f1df2db1c2cacac19553392db9a364f6f8ac23e6276a97140c33b433fa23e5fb3c98c7e74ac410f89f19861a2f151558d8@129.153.225.118:30303", - "enode://e5288ffc744c30fa13a7cffedfd004c0c01feb0cee4a9883ca8e3fec9515e6bd4ea4086661c01e6f16c00a585166be98fd3d0c10600fb8e23cfa80526965a371@94.16.108.194:37777", - "enode://e5288ffc744c30fa13a7cffedfd004c0c01feb0cee4a9883ca8e3fec9515e6bd4ea4086661c01e6f16c00a585166be98fd3d0c10600fb8e23cfa80526965a371@94.16.108.194:37777", - "enode://e53eb22d4e28d1ebc0e18e871c4f3aec3ce719c7dd7086670620508f0c7424ac104fe2bcf35fb23e3226f2b04365e59e469cc71488138382d9a307b682e698dc@18.234.158.102:30303", - "enode://e54f56b9caedb9034aa3e37d00b4161d0438b0bb9abef0b6b1f00991e690f0ac2b64543698245f93d47d1d85d647e37f617e04adb9cb78d0bafa0e348ee19c79@13.209.198.123:30303", - "enode://e5e46812e7442d1540f9bc9aafa5e910ea84c425659fb32a0ce9f4c9914b8958a4a93a06e22b59ca9fbb3a4a00b7a3a3607e07a17e9e333ed6325ea121c8b88c@95.217.112.102:58856", - "enode://e60acc5c53660f25b7ea664390b704e8b7b797eb8d4fab4e70b09c3311c03544739a8822518654c9617ba48aef15ca1177844fd99e54b7d35dac85648e941061@220.85.113.44:30303", - "enode://e60acc5c53660f25b7ea664390b704e8b7b797eb8d4fab4e70b09c3311c03544739a8822518654c9617ba48aef15ca1177844fd99e54b7d35dac85648e941061@220.85.113.44:50834", - "enode://e6565c81eb11219251292690a19c235035a2f021356d9c2b269e312723901af9eaeb2a5ebc6fcd8479f2283929bbe44b2740f36e6819c366b63bdcc6fb672ec4@150.230.173.49:30303", - "enode://e6565c81eb11219251292690a19c235035a2f021356d9c2b269e312723901af9eaeb2a5ebc6fcd8479f2283929bbe44b2740f36e6819c366b63bdcc6fb672ec4@150.230.173.49:40138", - "enode://e99d10cf22a92cedb7fefd4db917e90a5f15d2a14890cfaacfbe74d868977c2299a29a7ae22c69a2e03ee0969862b284bf59a0ffa896ab7e46b494c805932c1b@193.122.62.242:16092", - "enode://e99d10cf22a92cedb7fefd4db917e90a5f15d2a14890cfaacfbe74d868977c2299a29a7ae22c69a2e03ee0969862b284bf59a0ffa896ab7e46b494c805932c1b@193.122.62.242:30303", - "enode://e9f28353b13a503890cd2bdeb66414c8c71c517f147ec5b1c05338b1611f7aa7989b30f5f78197aeaf1e6304e3d11db017907e56bfc479ac531413e651c23120@3.81.44.106:33628", - 
"enode://e9f28353b13a503890cd2bdeb66414c8c71c517f147ec5b1c05338b1611f7aa7989b30f5f78197aeaf1e6304e3d11db017907e56bfc479ac531413e651c23120@54.227.15.85:41816", - "enode://f02a4b13b48aa3a9e2b5e502c6f1734b0b37d67f94cf63179b29ffcd3320ec28f6f518a30e290e2880943676f1c0d3bc487caa0081ae6dd6cf398fe2fbf8123e@77.140.42.76:34318", - "enode://f1851b4e183ca59493aa9426953b2f498f96b8f6e74e2aad8f97454ef05b9fabdd2524714099a2f3694556a1b9bd6c10818d0a8d2044b61f3b36a961868936fd@18.176.246.44:30303", - "enode://f3b8549e7f372bdbe0541c6afab80689e26d85d2ae0e2a5831e8c5b211bf077129b148bbd7d74ad5fb29a3dde4756485ba3a2acf1e4d4c4587a68535fa5cf6f6@150.136.254.118:30303", - "enode://f4aa9d0ff1e54c3984600bcde3d51827dda94d3de55f1cff2fb1b097a74453a10c06d732ce7f29b93c31c9cd6aae79e29c610cb5ef81b499295675faa06ce544@103.141.171.130:30303", - "enode://f63c87e4e05bde84e744987dfee560adb3246d3aa4de13f7ecaa73f0fa030495cf0055bfd7e4e22a2a2f3fca68508833ba3327e46b01933d3302aadf852de367@216.66.68.49:30300", - "enode://f63c87e4e05bde84e744987dfee560adb3246d3aa4de13f7ecaa73f0fa030495cf0055bfd7e4e22a2a2f3fca68508833ba3327e46b01933d3302aadf852de367@216.66.68.49:30300", - "enode://f785120a717ec30d97943bc3e817b87c6a3f28c4d789f431c56fc4601542af358bb495c626393d3205425e7352dbd117b0bbdb17c1bed990787c9d8a4f6eeeb0@54.205.14.16:30303", - "enode://f785120a717ec30d97943bc3e817b87c6a3f28c4d789f431c56fc4601542af358bb495c626393d3205425e7352dbd117b0bbdb17c1bed990787c9d8a4f6eeeb0@54.205.14.16:30304", - "enode://f83529868670e4f685d79bb040aa9a60e64c341534956f3c26dc338bd84c86a1bf026bbbb08d3d6103167b75175ddacf00a648498d9911a29c9591b071a435c0@94.152.214.4:30305", - "enode://f8405f65f0eb1eeb21454bfffd7b7244840e648d4f3ea7d4463a134c41953fdc32ad83285bcd4af194626cea540789266cd535dfed2eceaabbe748fda075e203@3.213.8.33:57583", - "enode://f8a6386df0fb0592ff0036e226d007804e1bb1f66e62f7703b3815b8c1a7e1d0073ae215a07e73f246d368d48e1f34316ced9b571ea9549e0b2c56d97a9e1834@63.251.232.124:18220", - "enode://f8a6386df0fb0592ff0036e226d007804e1bb1f66e62f7703b3815b8c1a7e1d0073ae215a07e73f246d368d48e1f34316ced9b571ea9549e0b2c56d97a9e1834@63.251.232.124:18222", - "enode://f8a6386df0fb0592ff0036e226d007804e1bb1f66e62f7703b3815b8c1a7e1d0073ae215a07e73f246d368d48e1f34316ced9b571ea9549e0b2c56d97a9e1834@63.251.232.124:18222", - "enode://f99636ee96b4939e18f79e1018b5cdfee7165f98a766388482cc25f27c487b9aa136a8f76b57fe9ba7591cba89c3c00702f320a2c27061284facb91cc1582759@54.195.45.74:60604", - "enode://fa4e25259a90b1975497d64cebc4805016830f90bf212a63e259b770bfd8f74d0334a8ca5099b9c80533f8e694d6e18d4ec68f33b45260fe3e575990d66f50e0@129.213.94.45:30303", - "enode://fb22bfffeb8dddcd1322827d98939ab7da8de2258d4d18a73def100ee47faa4649a6c2604e2bdcef56a4a8c2ae360bc2ae58b2d75a614e06cb131b682e194821@195.191.219.107:30303", - "enode://fb56c9e0b0beceaf05ac459269ea0958c621316c29f233dfb1532adc4cf0e94d3391aba2e38d46e7a8971235dd0d07a10f48996f42d86ba577779a2daa26cec2@155.248.194.46:59694", - "enode://fd83a8d55121389b5676dde6173c9575eb71d60d07af01afc213f5c52656e7c3d590c7571ea76c2e917f55070db387b39d81c3d99f02c94cd4bcfbb55fb800ca@135.181.164.15:30303", -} diff --git a/params/chainspecs/mumbai.json b/params/chainspecs/mumbai.json deleted file mode 100644 index 6da0a68c4c5..00000000000 --- a/params/chainspecs/mumbai.json +++ /dev/null @@ -1,64 +0,0 @@ -{ - "chainName": "mumbai", - "chainId": 80001, - "consensus": "bor", - "homesteadBlock": 0, - "eip150Block": 0, - "eip155Block": 0, - "byzantiumBlock": 0, - "constantinopleBlock": 0, - "petersburgBlock": 0, - "istanbulBlock": 2722000, - "muirGlacierBlock": 2722000, - 
"berlinBlock": 13996000, - "londonBlock": 22640000, - "burntContract": { - "22640000": "0x70bcA57F4579f58670aB2d18Ef16e02C17553C38", - "41874000": "0x617b94CCCC2511808A3C9478ebb96f455CF167aA" - }, - "bor": { - "period": { - "0": 2, - "25275000": 5, - "29638656": 2 - }, - "producerDelay": { - "0": 6, - "29638656": 4 - }, - "sprint": { - "0": 64, - "29638656": 16 - }, - "backupMultiplier": { - "0": 2, - "25275000": 5, - "29638656": 2 - }, - "stateSyncConfirmationDelay": { - "37075456": 128 - }, - "validatorContract": "0x0000000000000000000000000000000000001000", - "stateReceiverContract": "0x0000000000000000000000000000000000001001", - "overrideStateSyncRecords": null, - "blockAlloc": { - "22244000": { - "0000000000000000000000000000000000001010": { - "balance": "0x0", - "code": "0x60806040526004361061019c5760003560e01c806377d32e94116100ec578063acd06cb31161008a578063e306f77911610064578063e306f77914610a7b578063e614d0d614610aa6578063f2fde38b14610ad1578063fc0c546a14610b225761019c565b8063acd06cb31461097a578063b789543c146109cd578063cc79f97b14610a505761019c565b80639025e64c116100c65780639025e64c146107c957806395d89b4114610859578063a9059cbb146108e9578063abceeba21461094f5761019c565b806377d32e94146106315780638da5cb5b146107435780638f32d59b1461079a5761019c565b806347e7ef24116101595780637019d41a116101335780637019d41a1461053357806370a082311461058a578063715018a6146105ef578063771282f6146106065761019c565b806347e7ef2414610410578063485cc9551461046b57806360f96a8f146104dc5761019c565b806306fdde03146101a15780631499c5921461023157806318160ddd1461028257806319d27d9c146102ad5780632e1a7d4d146103b1578063313ce567146103df575b600080fd5b3480156101ad57600080fd5b506101b6610b79565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101f65780820151818401526020810190506101db565b50505050905090810190601f1680156102235780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561023d57600080fd5b506102806004803603602081101561025457600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610bb6565b005b34801561028e57600080fd5b50610297610c24565b6040518082815260200191505060405180910390f35b3480156102b957600080fd5b5061036f600480360360a08110156102d057600080fd5b81019080803590602001906401000000008111156102ed57600080fd5b8201836020820111156102ff57600080fd5b8035906020019184600183028401116401000000008311171561032157600080fd5b9091929391929390803590602001909291908035906020019092919080359060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610c3a565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6103dd600480360360208110156103c757600080fd5b8101908080359060200190929190505050610caa565b005b3480156103eb57600080fd5b506103f4610dfc565b604051808260ff1660ff16815260200191505060405180910390f35b34801561041c57600080fd5b506104696004803603604081101561043357600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190505050610e05565b005b34801561047757600080fd5b506104da6004803603604081101561048e57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610fc1565b005b3480156104e857600080fd5b506104f1611090565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561053f57600080fd5b506105486110b6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ff
ffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561059657600080fd5b506105d9600480360360208110156105ad57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506110dc565b6040518082815260200191505060405180910390f35b3480156105fb57600080fd5b506106046110fd565b005b34801561061257600080fd5b5061061b6111cd565b6040518082815260200191505060405180910390f35b34801561063d57600080fd5b506107016004803603604081101561065457600080fd5b81019080803590602001909291908035906020019064010000000081111561067b57600080fd5b82018360208201111561068d57600080fd5b803590602001918460018302840111640100000000831117156106af57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f8201169050808301925050505050505091929192905050506111d3565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561074f57600080fd5b50610758611358565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b3480156107a657600080fd5b506107af611381565b604051808215151515815260200191505060405180910390f35b3480156107d557600080fd5b506107de6113d8565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561081e578082015181840152602081019050610803565b50505050905090810190601f16801561084b5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561086557600080fd5b5061086e611411565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156108ae578082015181840152602081019050610893565b50505050905090810190601f1680156108db5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b610935600480360360408110156108ff57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291908035906020019092919050505061144e565b604051808215151515815260200191505060405180910390f35b34801561095b57600080fd5b50610964611474565b6040518082815260200191505060405180910390f35b34801561098657600080fd5b506109b36004803603602081101561099d57600080fd5b8101908080359060200190929190505050611501565b604051808215151515815260200191505060405180910390f35b3480156109d957600080fd5b50610a3a600480360360808110156109f057600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803590602001909291908035906020019092919080359060200190929190505050611521565b6040518082815260200191505060405180910390f35b348015610a5c57600080fd5b50610a65611541565b6040518082815260200191505060405180910390f35b348015610a8757600080fd5b50610a90611548565b6040518082815260200191505060405180910390f35b348015610ab257600080fd5b50610abb61154e565b6040518082815260200191505060405180910390f35b348015610add57600080fd5b50610b2060048036036020811015610af457600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506115db565b005b348015610b2e57600080fd5b50610b376115f8565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b60606040518060400160405280600b81526020017f4d6174696320546f6b656e000000000000000000000000000000000000000000815250905090565b6040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260108152602001807f44697361626c656420666561747572650000000000000000000000000000000081525060200191505060405180910390fd5b6000601260ff16600a0a6402540be40002905090565b60006040517f08c379a00000000000000000000000000000000000000
000000000000000000081526004018080602001828103825260108152602001807f44697361626c656420666561747572650000000000000000000000000000000081525060200191505060405180910390fd5b60003390506000610cba826110dc565b9050610cd18360065461161e90919063ffffffff16565b600681905550600083118015610ce657508234145b610d58576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260138152602001807f496e73756666696369656e7420616d6f756e740000000000000000000000000081525060200191505060405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167febff2602b3f468259e1e99f613fed6691f3a6526effe6ef3e768ba7ae7a36c4f8584610dd4876110dc565b60405180848152602001838152602001828152602001935050505060405180910390a3505050565b60006012905090565b610e0d611381565b610e1657600080fd5b600081118015610e535750600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614155b610ea8576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180611da96023913960400191505060405180910390fd5b6000610eb3836110dc565b905060008390508073ffffffffffffffffffffffffffffffffffffffff166108fc849081150290604051600060405180830381858888f19350505050158015610f00573d6000803e3d6000fd5b50610f168360065461163e90919063ffffffff16565b6006819055508373ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f4e2ca0515ed1aef1395f66b5303bb5d6f1bf9d61a353fa53f73f8ac9973fa9f68585610f98896110dc565b60405180848152602001838152602001828152602001935050505060405180910390a350505050565b600760009054906101000a900460ff1615611027576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180611d866023913960400191505060405180910390fd5b6001600760006101000a81548160ff02191690831515021790555080600260006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555061108c8261165d565b5050565b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600460009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60008173ffffffffffffffffffffffffffffffffffffffff16319050919050565b611105611381565b61110e57600080fd5b600073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a360008060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550565b60065481565b60008060008060418551146111ee5760009350505050611352565b602085015192506040850151915060ff6041860151169050601b8160ff16101561121957601b810190505b601b8160ff16141580156112315750601c8160ff1614155b156112425760009350505050611352565b60018682858560405160008152602001604052604051808581526020018460ff1660ff1681526020018381526020018281526020019450505050506020604051602081039080840390855afa15801561129f573d6000803e3d6000fd5b505050602060405103519350600073ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff16141561134e576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f4572726f7220696e2065637265636f7665720000000000000000000000000000815250602001915050604051
80910390fd5b5050505b92915050565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614905090565b6040518060400160405280600381526020017f013881000000000000000000000000000000000000000000000000000000000081525081565b60606040518060400160405280600581526020017f4d41544943000000000000000000000000000000000000000000000000000000815250905090565b6000813414611460576000905061146e565b61146b338484611755565b90505b92915050565b6040518060800160405280605b8152602001611e1e605b91396040516020018082805190602001908083835b602083106114c357805182526020820191506020810190506020830392506114a0565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b60056020528060005260406000206000915054906101000a900460ff1681565b600061153761153286868686611b12565b611be8565b9050949350505050565b6201388181565b60015481565b604051806080016040528060528152602001611dcc605291396040516020018082805190602001908083835b6020831061159d578051825260208201915060208101905060208303925061157a565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b6115e3611381565b6115ec57600080fd5b6115f58161165d565b50565b600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60008282111561162d57600080fd5b600082840390508091505092915050565b60008082840190508381101561165357600080fd5b8091505092915050565b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16141561169757600080fd5b8073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a3806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b6000803073ffffffffffffffffffffffffffffffffffffffff166370a08231866040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b1580156117d557600080fd5b505afa1580156117e9573d6000803e3d6000fd5b505050506040513d60208110156117ff57600080fd5b8101908080519060200190929190505050905060003073ffffffffffffffffffffffffffffffffffffffff166370a08231866040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b15801561189157600080fd5b505afa1580156118a5573d6000803e3d6000fd5b505050506040513d60208110156118bb57600080fd5b810190808051906020019092919050505090506118d9868686611c32565b8473ffffffffffffffffffffffffffffffffffffffff168673ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167fe6497e3ee548a3372136af2fcb0696db31fc6cf20260707645068bd3fe97f3c48786863073ffffffffffffffffffffffffffffffffffffffff166370a082318e6040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b1580156119e157600080fd5b505afa1580156119f5573d6000803e3d6000fd5b505050506040513d6020811015611a0b57600080fd5b81019080805190602001909291905050503073ffffffffffffffffffffffffffffffffffffffff166370a082318e6
040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b158015611a9957600080fd5b505afa158015611aad573d6000803e3d6000fd5b505050506040513d6020811015611ac357600080fd5b8101908080519060200190929190505050604051808681526020018581526020018481526020018381526020018281526020019550505050505060405180910390a46001925050509392505050565b6000806040518060800160405280605b8152602001611e1e605b91396040516020018082805190602001908083835b60208310611b645780518252602082019150602081019050602083039250611b41565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405160208183030381529060405280519060200120905060405181815273ffffffffffffffffffffffffffffffffffffffff8716602082015285604082015284606082015283608082015260a0812092505081915050949350505050565b60008060015490506040517f190100000000000000000000000000000000000000000000000000000000000081528160028201528360228201526042812092505081915050919050565b3073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff161415611cd4576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260138152602001807f63616e27742073656e6420746f204d524332300000000000000000000000000081525060200191505060405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050158015611d1a573d6000803e3d6000fd5b508173ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040518082815260200191505060405180910390a350505056fe54686520636f6e747261637420697320616c726561647920696e697469616c697a6564496e73756666696369656e7420616d6f756e74206f7220696e76616c69642075736572454950373132446f6d61696e28737472696e67206e616d652c737472696e672076657273696f6e2c75696e7432353620636861696e49642c6164647265737320766572696679696e67436f6e747261637429546f6b656e5472616e736665724f726465722861646472657373207370656e6465722c75696e7432353620746f6b656e49644f72416d6f756e742c6279746573333220646174612c75696e743235362065787069726174696f6e29a265627a7a72315820ccd6c2a9c259832bbb367986ee06cd87af23022681b0cb22311a864b701d939564736f6c63430005100032" - } - }, - "41874000": { - "0x0000000000000000000000000000000000001001": { - "balance": "0x0", - "code": 
"0x608060405234801561001057600080fd5b506004361061005e576000357c01000000000000000000000000000000000000000000000000000000009004806319494a17146100635780633434735f146100fe5780635407ca6714610148575b600080fd5b6100e46004803603604081101561007957600080fd5b8101908080359060200190929190803590602001906401000000008111156100a057600080fd5b8201836020820111156100b257600080fd5b803590602001918460018302840111640100000000831117156100d457600080fd5b9091929391929390505050610166565b604051808215151515815260200191505060405180910390f35b6101066104d3565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6101506104eb565b6040518082815260200191505060405180910390f35b600073fffffffffffffffffffffffffffffffffffffffe73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161461021d576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f4e6f742053797374656d2041646465737321000000000000000000000000000081525060200191505060405180910390fd5b606061027461026f85858080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050506104f1565b61051f565b905060006102958260008151811061028857fe5b60200260200101516105fc565b90508060016000540114610311576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601b8152602001807f537461746549647320617265206e6f742073657175656e7469616c000000000081525060200191505060405180910390fd5b600080815480929190600101919050555060006103418360018151811061033457fe5b602002602001015161066d565b905060606103628460028151811061035557fe5b6020026020010151610690565b905061036d8261071c565b156104c8576000624c4b409050606084836040516024018083815260200180602001828103825283818151815260200191508051906020019080838360005b838110156103c75780820151818401526020810190506103ac565b50505050905090810190601f1680156103f45780820380516001836020036101000a031916815260200191505b5093505050506040516020818303038152906040527f26c53bea000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff8381831617835250505050905060008082516020840160008887f19650847f5a22725590b0a51c923940223f7458512164b1113359a735e86e7f27f44791ee88604051808215151515815260200191505060405180910390a250505b505050509392505050565b73fffffffffffffffffffffffffffffffffffffffe81565b60005481565b6104f961099c565b600060208301905060405180604001604052808451815260200182815250915050919050565b606061052a82610735565b61053357600080fd5b600061053e83610783565b905060608160405190808252806020026020018201604052801561057c57816020015b6105696109b6565b8152602001906001900390816105615790505b509050600061058e85602001516107f4565b8560200151019050600080600090505b848110156105ef576105af8361087d565b91506040518060400160405280838152602001848152508482815181106105d257fe5b60200260200101819052508183019250808060010191505061059e565b5082945050505050919050565b600080826000015111801561061657506021826000015111155b61061f57600080fd5b600061062e83602001516107f4565b9050600081846000015103905060008083866020015101905080519150602083101561066157826020036101000a820491505b81945050505050919050565b6000601582600001511461068057600080fd5b610689826105fc565b9050919050565b606060008260000151116106a357600080fd5b60006106b283602001516107f4565b905060008184600001510390506060816040519080825280601f01601f1916602001820160405280156106f45781602001600182028038833980820191
505090505b5090506000816020019050610710848760200151018285610935565b81945050505050919050565b600080823b905060008163ffffffff1611915050919050565b6000808260000151141561074c576000905061077e565b60008083602001519050805160001a915060c060ff168260ff1610156107775760009250505061077e565b6001925050505b919050565b6000808260000151141561079a57600090506107ef565b600080905060006107ae84602001516107f4565b84602001510190506000846000015185602001510190505b808210156107e8576107d78261087d565b8201915082806001019350506107c6565b8293505050505b919050565b600080825160001a9050608060ff16811015610814576000915050610878565b60b860ff16811080610839575060c060ff168110158015610838575060f860ff1681105b5b15610848576001915050610878565b60c060ff168110156108685760018060b80360ff16820301915050610878565b60018060f80360ff168203019150505b919050565b6000806000835160001a9050608060ff1681101561089e576001915061092b565b60b860ff168110156108bb576001608060ff16820301915061092a565b60c060ff168110156108eb5760b78103600185019450806020036101000a85510460018201810193505050610929565b60f860ff1681101561090857600160c060ff168203019150610928565b60f78103600185019450806020036101000a855104600182018101935050505b5b5b5b8192505050919050565b600081141561094357610997565b5b602060ff1681106109735782518252602060ff1683019250602060ff1682019150602060ff1681039050610944565b6000600182602060ff16036101000a03905080198451168184511681811785525050505b505050565b604051806040016040528060008152602001600081525090565b60405180604001604052806000815260200160008152509056fea265627a7a723158208f1ea6fcf63d6911ac5dbfe340be1029614581802c6a750e7d6354b32ce6647c64736f6c63430005110032" - } - } - }, - "jaipurBlock": 22770000, - "delhiBlock": 29638656, - "indoreBlock": 37075456, - "agraBlock": 41874000, - "napoliBlock": 45648608 - } -} diff --git a/params/config.go b/params/config.go index efea80307e0..4898be8b458 100644 --- a/params/config.go +++ b/params/config.go @@ -68,7 +68,6 @@ var ( MainnetGenesisHash = libcommon.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3") HoleskyGenesisHash = libcommon.HexToHash("0xb5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4") SepoliaGenesisHash = libcommon.HexToHash("0x25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9") - MumbaiGenesisHash = libcommon.HexToHash("0x7b66506a9ebdbf30d32b43c5f15a3b1216269a1ec3a75aa3182b86176a2b1ca7") AmoyGenesisHash = libcommon.HexToHash("0x7202b2b53c5a0836e773e319d18922cc756dd67432f9a1f65352b61f4406c697") BorMainnetGenesisHash = libcommon.HexToHash("0xa9c28ce2141b56c474f1dc504bee9b01eb1bd7d1a507580d5519d4437a97de1b") BorDevnetGenesisHash = libcommon.HexToHash("0x5a06b25b0c6530708ea0b98a3409290e39dce6be7f558493aeb6e4b99a172a87") @@ -144,8 +143,6 @@ var ( Clique: &chain.CliqueConfig{Period: 0, Epoch: 30000}, } - MumbaiChainConfig = readChainSpec("chainspecs/mumbai.json") - AmoyChainConfig = readChainSpec("chainspecs/amoy.json") BorMainnetChainConfig = readChainSpec("chainspecs/bor-mainnet.json") @@ -232,8 +229,6 @@ func ChainConfigByChainName(chain string) *chain.Config { return ChapelChainConfig case networkname.RialtoChainName: return RialtoChainConfig - case networkname.MumbaiChainName: - return MumbaiChainConfig case networkname.AmoyChainName: return AmoyChainConfig case networkname.BorMainnetChainName: @@ -265,8 +260,6 @@ func GenesisHashByChainName(chain string) *libcommon.Hash { return &ChapelGenesisHash case networkname.RialtoChainName: return &RialtoGenesisHash - case networkname.MumbaiChainName: - return &MumbaiGenesisHash case networkname.AmoyChainName: return &AmoyGenesisHash case 
networkname.BorMainnetChainName: @@ -298,8 +291,6 @@ func ChainConfigByGenesisHash(genesisHash libcommon.Hash) *chain.Config { return ChapelChainConfig case genesisHash == RialtoGenesisHash: return RialtoChainConfig - case genesisHash == MumbaiGenesisHash: - return MumbaiChainConfig case genesisHash == AmoyGenesisHash: return AmoyChainConfig case genesisHash == BorMainnetGenesisHash: diff --git a/params/config_test.go b/params/config_test.go index 95ac0f9a181..913a052ce49 100644 --- a/params/config_test.go +++ b/params/config_test.go @@ -119,22 +119,22 @@ func TestGetBurntContract(t *testing.T) { require.NotNil(t, addr) assert.Equal(t, common.HexToAddress("0x6BBe78ee9e474842Dbd4AB4987b3CeFE88426A92"), *addr) - // Mumbai - addr = MumbaiChainConfig.GetBurntContract(22640000) + // Bor Mainnet + addr = BorMainnetChainConfig.GetBurntContract(23850000) require.NotNil(t, addr) assert.Equal(t, common.HexToAddress("0x70bcA57F4579f58670aB2d18Ef16e02C17553C38"), *addr) - addr = MumbaiChainConfig.GetBurntContract(22640000 + 1) + addr = BorMainnetChainConfig.GetBurntContract(23850000 + 1) require.NotNil(t, addr) assert.Equal(t, common.HexToAddress("0x70bcA57F4579f58670aB2d18Ef16e02C17553C38"), *addr) - addr = MumbaiChainConfig.GetBurntContract(41874000 - 1) + addr = BorMainnetChainConfig.GetBurntContract(50523000 - 1) require.NotNil(t, addr) assert.Equal(t, common.HexToAddress("0x70bcA57F4579f58670aB2d18Ef16e02C17553C38"), *addr) - addr = MumbaiChainConfig.GetBurntContract(41874000) + addr = BorMainnetChainConfig.GetBurntContract(50523000) require.NotNil(t, addr) - assert.Equal(t, common.HexToAddress("0x617b94CCCC2511808A3C9478ebb96f455CF167aA"), *addr) - addr = MumbaiChainConfig.GetBurntContract(41874000 + 1) + assert.Equal(t, common.HexToAddress("0x7A8ed27F4C30512326878652d20fC85727401854"), *addr) + addr = BorMainnetChainConfig.GetBurntContract(50523000 + 1) require.NotNil(t, addr) - assert.Equal(t, common.HexToAddress("0x617b94CCCC2511808A3C9478ebb96f455CF167aA"), *addr) + assert.Equal(t, common.HexToAddress("0x7A8ed27F4C30512326878652d20fC85727401854"), *addr) // Amoy addr = AmoyChainConfig.GetBurntContract(0) diff --git a/params/version.go b/params/version.go index 56133ecd0b9..e68d0939c15 100644 --- a/params/version.go +++ b/params/version.go @@ -37,7 +37,7 @@ const ( VersionMajor = 1 // Major version component of the current release VersionMinor = 3 // Minor version component of the current release VersionMicro = 0 // Patch version component of the current release - VersionModifier = "alpha1" // Modifier component of the current release + VersionModifier = "alpha2" // Modifier component of the current release VersionKeyCreated = "ErigonVersionCreated" VersionKeyFinished = "ErigonVersionFinished" ) diff --git a/polygon/bor/bor.go b/polygon/bor/bor.go index e87d77d9a72..3abcff4c09e 100644 --- a/polygon/bor/bor.go +++ b/polygon/bor/bor.go @@ -38,6 +38,7 @@ import ( "golang.org/x/crypto/sha3" "golang.org/x/sync/errgroup" + "github.com/erigontech/erigon-lib/common/dbg" "github.com/erigontech/erigon-lib/log/v3" "github.com/erigontech/erigon-lib/chain" @@ -63,7 +64,6 @@ import ( "github.com/erigontech/erigon/polygon/bor/finality/whitelist" "github.com/erigontech/erigon/polygon/bor/statefull" "github.com/erigontech/erigon/polygon/bor/valset" - "github.com/erigontech/erigon/polygon/bridge" "github.com/erigontech/erigon/polygon/heimdall" "github.com/erigontech/erigon/rlp" "github.com/erigontech/erigon/rpc" @@ -254,6 +254,15 @@ type ValidateHeaderTimeSignerSuccessionNumber interface { 
GetSignerSuccessionNumber(signer libcommon.Address, number uint64) (int, error) } +type spanReader interface { + Span(ctx context.Context, id uint64) (*heimdall.Span, bool, error) +} + +type bridgeReader interface { + Events(ctx context.Context, blockNum uint64) ([]*types.Message, error) + EventTxnLookup(ctx context.Context, borTxHash libcommon.Hash) (uint64, bool, error) +} + func ValidateHeaderTime( header *types.Header, now time.Time, @@ -318,6 +327,8 @@ type Bor struct { spanner Spanner GenesisContractsClient GenesisContracts HeimdallClient heimdall.HeimdallClient + spanReader spanReader + bridgeReader bridgeReader // scope event.SubscriptionScope // The fields below are for testing only @@ -329,7 +340,6 @@ type Bor struct { frozenSnapshotsInit sync.Once rootHashCache *lru.ARCCache[string, string] headerProgress HeaderProgress - polygonBridge bridge.PolygonBridge } type signer struct { @@ -346,7 +356,8 @@ func New( heimdallClient heimdall.HeimdallClient, genesisContracts GenesisContracts, logger log.Logger, - polygonBridge bridge.Service, + bridgeReader bridgeReader, + spanReader spanReader, ) *Bor { // get bor config borConfig := chainConfig.Bor.(*borcfg.BorConfig) @@ -373,7 +384,8 @@ func New( execCtx: context.Background(), logger: logger, closeCh: make(chan struct{}), - polygonBridge: polygonBridge, + bridgeReader: bridgeReader, + spanReader: spanReader, } c.authorizedSigner.Store(&signer{ @@ -399,19 +411,19 @@ type rwWrapper struct { } func (w rwWrapper) Update(ctx context.Context, f func(tx kv.RwTx) error) error { - return fmt.Errorf("Update not implemented") + return errors.New("Update not implemented") } func (w rwWrapper) UpdateNosync(ctx context.Context, f func(tx kv.RwTx) error) error { - return fmt.Errorf("UpdateNosync not implemented") + return errors.New("UpdateNosync not implemented") } func (w rwWrapper) BeginRw(ctx context.Context) (kv.RwTx, error) { - return nil, fmt.Errorf("BeginRw not implemented") + return nil, errors.New("BeginRw not implemented") } func (w rwWrapper) BeginRwNosync(ctx context.Context) (kv.RwTx, error) { - return nil, fmt.Errorf("BeginRwNosync not implemented") + return nil, errors.New("BeginRwNosync not implemented") } // This is used by the rpcdaemon and tests which need read only access to the provided data services @@ -1384,6 +1396,16 @@ func (c *Bor) fetchAndCommitSpan( } heimdallSpan = *s + } else if c.spanReader != nil { + span, ok, err := c.spanReader.Span(context.Background(), newSpanID) + if err != nil { + return err + } + if !ok { + return fmt.Errorf("error fetching span %v", newSpanID) + } + + heimdallSpan = *span } else { spanJson := chain.Chain.BorSpan(newSpanID) if err := json.Unmarshal(spanJson, &heimdallSpan); err != nil { @@ -1472,6 +1494,7 @@ func (c *Bor) getHeaderByNumber(ctx context.Context, tx kv.Tx, number uint64) (* return nil, err } if header == nil { + _, _ = c.blockReader.HeaderByNumber(dbg.ContextWithDebug(ctx, true), tx, number) return nil, fmt.Errorf("[bor] header not found: %d", number) } return header, nil @@ -1486,8 +1509,8 @@ func (c *Bor) CommitStates( ) error { blockNum := header.Number.Uint64() - if c.polygonBridge != nil { - events, err := c.polygonBridge.GetEvents(c.execCtx, blockNum) + if c.bridgeReader != nil { + events, err := c.bridgeReader.Events(c.execCtx, blockNum) if err != nil { return err } @@ -1609,45 +1632,6 @@ func (c *Bor) getNextHeimdallSpanForTest( return &heimdallSpan, nil } -func validatorContains(a []*valset.Validator, x *valset.Validator) (*valset.Validator, bool) { - for _, n
:= range a { - if bytes.Equal(n.Address.Bytes(), x.Address.Bytes()) { - return n, true - } - } - - return nil, false -} - -func getUpdatedValidatorSet(oldValidatorSet *valset.ValidatorSet, newVals []*valset.Validator, logger log.Logger) *valset.ValidatorSet { - v := oldValidatorSet - oldVals := v.Validators - - changes := make([]*valset.Validator, 0, len(oldVals)) - - for _, ov := range oldVals { - if f, ok := validatorContains(newVals, ov); ok { - ov.VotingPower = f.VotingPower - } else { - ov.VotingPower = 0 - } - - changes = append(changes, ov) - } - - for _, nv := range newVals { - if _, ok := validatorContains(changes, nv); !ok { - changes = append(changes, nv) - } - } - - if err := v.UpdateWithChangeSet(changes); err != nil { - logger.Error("[bor] Error while updating change set", "error", err) - } - - return v -} - func isSprintStart(number, sprint uint64) bool { return number%sprint == 0 } diff --git a/polygon/bor/bor_test.go b/polygon/bor/bor_test.go index cd3956f2027..735e6a438bb 100644 --- a/polygon/bor/bor_test.go +++ b/polygon/bor/bor_test.go @@ -19,6 +19,7 @@ package bor_test import ( "context" "encoding/json" + "errors" "fmt" "math/big" "testing" @@ -93,7 +94,7 @@ func (h *test_heimdall) FetchSpan(ctx context.Context, spanID uint64) (*heimdall nextSpan.StartBlock = 1 //256 } else { if spanID != uint64(h.currentSpan.Id+1) { - return nil, fmt.Errorf("Can't initialize span: non consecutive span") + return nil, errors.New("Can't initialize span: non consecutive span") } nextSpan.StartBlock = h.currentSpan.EndBlock + 1 @@ -125,42 +126,42 @@ func (h test_heimdall) currentSprintLength() int { } func (h test_heimdall) FetchCheckpoint(ctx context.Context, number int64) (*heimdall.Checkpoint, error) { - return nil, fmt.Errorf("TODO") + return nil, errors.New("TODO") } func (h test_heimdall) FetchCheckpointCount(ctx context.Context) (int64, error) { - return 0, fmt.Errorf("TODO") + return 0, errors.New("TODO") } func (h *test_heimdall) FetchCheckpoints(ctx context.Context, page uint64, limit uint64) ([]*heimdall.Checkpoint, error) { - return nil, fmt.Errorf("TODO") + return nil, errors.New("TODO") } func (h test_heimdall) FetchMilestone(ctx context.Context, number int64) (*heimdall.Milestone, error) { - return nil, fmt.Errorf("TODO") + return nil, errors.New("TODO") } func (h test_heimdall) FetchMilestoneCount(ctx context.Context) (int64, error) { - return 0, fmt.Errorf("TODO") + return 0, errors.New("TODO") } func (h test_heimdall) FetchFirstMilestoneNum(ctx context.Context) (int64, error) { - return 0, fmt.Errorf("TODO") + return 0, errors.New("TODO") } func (h test_heimdall) FetchNoAckMilestone(ctx context.Context, milestoneID string) error { - return fmt.Errorf("TODO") + return errors.New("TODO") } func (h test_heimdall) FetchLastNoAckMilestone(ctx context.Context) (string, error) { - return "", fmt.Errorf("TODO") + return "", errors.New("TODO") } func (h test_heimdall) FetchMilestoneID(ctx context.Context, milestoneID string) error { - return fmt.Errorf("TODO") + return errors.New("TODO") } func (h test_heimdall) FetchLatestSpan(ctx context.Context) (*heimdall.Span, error) { - return nil, fmt.Errorf("TODO") + return nil, errors.New("TODO") } func (h test_heimdall) Close() {} @@ -324,6 +325,7 @@ func newValidator(t *testing.T, heimdall *test_heimdall, blocks map[uint64]*type test_genesisContract{}, logger, nil, + nil, ) /*fmt.Printf("Private: 0x%s\nPublic: 0x%s\nAddress: %s\n", diff --git a/polygon/bor/finality/rawdb/milestone.go b/polygon/bor/finality/rawdb/milestone.go index 
10e6a4703fc..4c839ff0883 100644 --- a/polygon/bor/finality/rawdb/milestone.go +++ b/polygon/bor/finality/rawdb/milestone.go @@ -195,7 +195,7 @@ func ReadLockField(db kv.RwDB) (bool, uint64, libcommon.Hash, map[string]struct{ } if err = json.Unmarshal(data, &lockField); err != nil { - log.Error(fmt.Sprintf("Unable to unmarshal the lock field in database"), "err", err) + log.Error("Unable to unmarshal the lock field in database", "err", err) return false, 0, libcommon.Hash{}, nil, fmt.Errorf("%w(%v) for lock field , data %v(%q)", ErrIncorrectLockField, err, data, string(data)) @@ -254,7 +254,7 @@ func ReadFutureMilestoneList(db kv.RwDB) ([]uint64, map[uint64]libcommon.Hash, e } if err = json.Unmarshal(data, &futureMilestoneField); err != nil { - log.Error(fmt.Sprintf("Unable to unmarshal the future milestone field in database"), "err", err) + log.Error("Unable to unmarshal the future milestone field in database", "err", err) return nil, nil, fmt.Errorf("%w(%v) for future milestone field, data %v(%q)", ErrIncorrectFutureMilestoneField, err, data, string(data)) diff --git a/polygon/bor/finality/whitelist.go b/polygon/bor/finality/whitelist.go index 7dffe9d363d..cbe72189524 100644 --- a/polygon/bor/finality/whitelist.go +++ b/polygon/bor/finality/whitelist.go @@ -162,9 +162,9 @@ func retryHeimdallHandler(fn heimdallHandler, config *config, tickerDuration tim if err != nil { if errors.Is(err, errMissingBlocks) { - config.logger.Debug(fmt.Sprintf("[bor] unable to handle %s", fnName), "err", err) + config.logger.Debug("[bor] unable to handle "+fnName, "err", err) } else { - config.logger.Warn(fmt.Sprintf("[bor] unable to handle %s", fnName), "err", err) + config.logger.Warn("[bor] unable to handle "+fnName, "err", err) } } case <-config.closeCh: diff --git a/polygon/bor/snapshot.go b/polygon/bor/snapshot.go index 6fa1b5212fb..27278101871 100644 --- a/polygon/bor/snapshot.go +++ b/polygon/bor/snapshot.go @@ -199,7 +199,7 @@ func (s *Snapshot) Apply(parent *types.Header, headers []*types.Header, logger l // get validators from headers and use that for new validator set newVals, _ := valset.ParseValidators(validatorBytes) - v := getUpdatedValidatorSet(snap.ValidatorSet.Copy(), newVals, logger) + v := valset.GetUpdatedValidatorSet(snap.ValidatorSet.Copy(), newVals, logger) v.IncrementProposerPriority(1) snap.ValidatorSet = v } diff --git a/polygon/bor/snaptype/types.go b/polygon/bor/snaptype/types.go index e886b239e1f..cd4316ff2db 100644 --- a/polygon/bor/snaptype/types.go +++ b/polygon/bor/snaptype/types.go @@ -54,7 +54,6 @@ func initTypes() { borTypes := append(coresnaptype.BlockSnapshotTypes, BorSnapshotTypes()...) borTypes = append(borTypes, coresnaptype.E3StateTypes...) - snapcfg.RegisterKnownTypes(networkname.MumbaiChainName, borTypes) snapcfg.RegisterKnownTypes(networkname.AmoyChainName, borTypes) snapcfg.RegisterKnownTypes(networkname.BorMainnetChainName, borTypes) } diff --git a/polygon/bor/valset/validator_set.go b/polygon/bor/valset/validator_set.go index d51139aa7a4..82990b715c9 100644 --- a/polygon/bor/valset/validator_set.go +++ b/polygon/bor/valset/validator_set.go @@ -20,6 +20,7 @@ package valset import ( "bytes" + "errors" "fmt" "math" "math/big" @@ -27,6 +28,7 @@ import ( "strings" libcommon "github.com/erigontech/erigon-lib/common" + "github.com/erigontech/erigon-lib/log/v3" ) // MaxTotalVotingPower - the maximum allowed total voting power. 
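The snapshot.go hunk above switches Snapshot.Apply to the exported valset.GetUpdatedValidatorSet, whose definition moves from bor.go into the valset package in the hunks below. A minimal standalone sketch of the merge semantics it implements, using a simplified stand-in type (validator here is hypothetical, not the real valset.Validator):

package main

import "fmt"

// validator is a simplified stand-in for valset.Validator (illustration only).
type validator struct {
	Address     string
	VotingPower int64
}

// updatedSet mirrors the merge semantics of valset.GetUpdatedValidatorSet:
// validators absent from incoming drop to zero power (marking them for
// deletion), survivors take their new power, and new ones are appended.
func updatedSet(old, incoming []validator) []validator {
	find := func(vals []validator, addr string) (validator, bool) {
		for _, v := range vals {
			if v.Address == addr {
				return v, true
			}
		}
		return validator{}, false
	}

	changes := make([]validator, 0, len(old)+len(incoming))
	for _, ov := range old {
		if nv, ok := find(incoming, ov.Address); ok {
			ov.VotingPower = nv.VotingPower // take the updated power
		} else {
			ov.VotingPower = 0 // zero power marks the validator for removal
		}
		changes = append(changes, ov)
	}
	for _, nv := range incoming {
		if _, ok := find(changes, nv.Address); !ok {
			changes = append(changes, nv) // brand-new validator
		}
	}
	// The real code applies changes via ValidatorSet.UpdateWithChangeSet,
	// which deletes zero-power entries and rebalances proposer priorities.
	return changes
}

func main() {
	old := []validator{{"a", 10}, {"b", 20}}
	incoming := []validator{{"b", 25}, {"c", 5}}
	fmt.Println(updatedSet(old, incoming)) // [{a 0} {b 25} {c 5}]
}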
@@ -617,7 +619,7 @@ func (vals *ValidatorSet) updateWithChangeSet(changes []*Validator, allowDeletes // Check that the resulting set will not be empty. if numNewValidators == 0 && len(vals.Validators) == len(deletes) { - return fmt.Errorf("applying the validator changes would result in empty set") + return errors.New("applying the validator changes would result in empty set") } // Compute the priorities for updates. @@ -817,3 +819,42 @@ func safeSubClip(a, b int64) int64 { return c } + +func GetUpdatedValidatorSet(oldValidatorSet *ValidatorSet, newVals []*Validator, logger log.Logger) *ValidatorSet { + v := oldValidatorSet + oldVals := v.Validators + + changes := make([]*Validator, 0, len(oldVals)) + + for _, ov := range oldVals { + if f, ok := validatorContains(newVals, ov); ok { + ov.VotingPower = f.VotingPower + } else { + ov.VotingPower = 0 + } + + changes = append(changes, ov) + } + + for _, nv := range newVals { + if _, ok := validatorContains(changes, nv); !ok { + changes = append(changes, nv) + } + } + + if err := v.UpdateWithChangeSet(changes); err != nil { + logger.Error("error while updating change set", "err", err) + } + + return v +} + +func validatorContains(a []*Validator, x *Validator) (*Validator, bool) { + for _, n := range a { + if bytes.Equal(n.Address.Bytes(), x.Address.Bytes()) { + return n, true + } + } + + return nil, false +} diff --git a/polygon/bridge/bridge.go b/polygon/bridge/bridge.go index a70e1131291..315aa6d11ae 100644 --- a/polygon/bridge/bridge.go +++ b/polygon/bridge/bridge.go @@ -18,13 +18,17 @@ package bridge import ( "context" + "errors" "fmt" + "sync/atomic" "time" + "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon-lib/log/v3" "github.com/erigontech/erigon/common/u256" "github.com/erigontech/erigon/core" "github.com/erigontech/erigon/core/state" + bortypes "github.com/erigontech/erigon/polygon/bor/types" "github.com/erigontech/erigon/polygon/polygoncommon" libcommon "github.com/erigontech/erigon-lib/common" @@ -34,13 +38,15 @@ import ( "github.com/erigontech/erigon/polygon/heimdall" ) +var ErrMapNotAvailable = errors.New("map not available") + type fetchSyncEventsType func(ctx context.Context, fromId uint64, to time.Time, limit int) ([]*heimdall.EventRecordWithTime, error) type Bridge struct { store Store - ready bool - lastProcessedBlockNumber uint64 - lastProcessedEventID uint64 + ready atomic.Bool + lastProcessedBlockNumber atomic.Uint64 + lastProcessedEventID atomic.Uint64 log log.Logger borConfig *borcfg.BorConfig @@ -50,21 +56,19 @@ type Bridge struct { } func Assemble(dataDir string, logger log.Logger, borConfig *borcfg.BorConfig, fetchSyncEvents fetchSyncEventsType, stateReceiverABI abi.ABI) *Bridge { - bridgeDB := polygoncommon.NewDatabase(dataDir, logger) + bridgeDB := polygoncommon.NewDatabase(dataDir, kv.PolygonBridgeDB, databaseTablesCfg, logger) bridgeStore := NewStore(bridgeDB) return NewBridge(bridgeStore, logger, borConfig, fetchSyncEvents, stateReceiverABI) } func NewBridge(store Store, logger log.Logger, borConfig *borcfg.BorConfig, fetchSyncEvents fetchSyncEventsType, stateReceiverABI abi.ABI) *Bridge { return &Bridge{ - store: store, - log: logger, - borConfig: borConfig, - fetchSyncEvents: fetchSyncEvents, - lastProcessedBlockNumber: 0, - lastProcessedEventID: 0, - stateReceiverABI: stateReceiverABI, - stateClientAddress: libcommon.HexToAddress(borConfig.StateReceiverContract), + store: store, + log: logger, + borConfig: borConfig, + fetchSyncEvents: fetchSyncEvents, + stateReceiverABI: stateReceiverABI, + 
stateClientAddress: libcommon.HexToAddress(borConfig.StateReceiverContract), } } @@ -76,11 +80,18 @@ func (b *Bridge) Run(ctx context.Context) error { defer b.Close() // get last known sync ID - lastEventID, err := b.store.GetLatestEventID(ctx) + lastEventID, err := b.store.LatestEventID(ctx) + if err != nil { + return err + } + + lastProcessedEventID, err := b.store.LastProcessedEventID(ctx) if err != nil { return err } + b.lastProcessedEventID.Store(lastProcessedEventID) + // start syncing b.log.Debug(bridgeLogPrefix("Bridge is running"), "lastEventID", lastEventID) @@ -99,20 +110,20 @@ func (b *Bridge) Run(ctx context.Context) error { } if len(events) != 0 { - b.ready = false - if err := b.store.AddEvents(ctx, events, b.stateReceiverABI); err != nil { + b.ready.Store(false) + if err := b.store.PutEvents(ctx, events, b.stateReceiverABI); err != nil { return err } lastEventID = events[len(events)-1].ID } else { - b.ready = true + b.ready.Store(true) if err := libcommon.Sleep(ctx, 30*time.Second); err != nil { return err } } - b.log.Debug(bridgeLogPrefix(fmt.Sprintf("got %v new events, last event ID: %v, ready: %v", len(events), lastEventID, b.ready))) + b.log.Debug(bridgeLogPrefix(fmt.Sprintf("got %v new events, last event ID: %v, ready: %v", len(events), lastEventID, b.ready.Load()))) } } @@ -125,29 +136,50 @@ func (b *Bridge) Close() { // ProcessNewBlocks iterates through all blocks and constructs a map from block number to sync events func (b *Bridge) ProcessNewBlocks(ctx context.Context, blocks []*types.Block) error { eventMap := make(map[uint64]uint64) + txMap := make(map[libcommon.Hash]uint64) + var prevSprintTime time.Time + for _, block := range blocks { // check if block is start of span - if !b.isSprintStart(block.NumberU64()) { + blockNum := block.NumberU64() + if !b.isSprintStart(blockNum) { continue } - blockTimestamp := time.Unix(int64(block.Time()), 0) - lastDBID, err := b.store.GetSprintLastEventID(ctx, b.lastProcessedEventID, blockTimestamp, b.stateReceiverABI) + var timeLimit time.Time + if b.borConfig.IsIndore(blockNum) { + stateSyncDelay := b.borConfig.CalculateStateSyncDelay(blockNum) + timeLimit = time.Unix(int64(block.Time()-stateSyncDelay), 0) + } else { + timeLimit = prevSprintTime + } + + prevSprintTime = time.Unix(int64(block.Time()), 0) + + lastDBID, err := b.store.SprintLastEventID(ctx, b.lastProcessedEventID.Load(), timeLimit, b.stateReceiverABI) if err != nil { return err } - if lastDBID != 0 && lastDBID > b.lastProcessedEventID { - b.log.Debug(bridgeLogPrefix(fmt.Sprintf("Creating map for block %d, start ID %d, end ID %d", block.NumberU64(), b.lastProcessedEventID, lastDBID))) - eventMap[block.NumberU64()] = b.lastProcessedEventID + if lastDBID > b.lastProcessedEventID.Load() { + b.log.Debug(bridgeLogPrefix(fmt.Sprintf("Creating map for block %d, start ID %d, end ID %d", blockNum, b.lastProcessedEventID.Load(), lastDBID))) - b.lastProcessedEventID = lastDBID + k := bortypes.ComputeBorTxHash(blockNum, block.Hash()) + eventMap[blockNum] = b.lastProcessedEventID.Load() + txMap[k] = blockNum + + b.lastProcessedEventID.Store(lastDBID) } - b.lastProcessedBlockNumber = block.NumberU64() + b.lastProcessedBlockNumber.Store(blockNum) + } + + err := b.store.PutEventIDs(ctx, eventMap) + if err != nil { + return err } - err := b.store.StoreEventID(ctx, eventMap) + err = b.store.PutEventTxnToBlockNum(ctx, txMap) if err != nil { return err } @@ -163,7 +195,7 @@ func (b *Bridge) Synchronize(ctx context.Context, tip *types.Header) error { return ctx.Err() default: } - if 
b.ready || b.lastProcessedBlockNumber >= tip.Number.Uint64() { + if b.ready.Load() || b.lastProcessedBlockNumber.Load() >= tip.Number.Uint64() { return nil } } @@ -174,23 +206,25 @@ func (b *Bridge) Unwind(ctx context.Context, tip *types.Header) error { return b.store.PruneEventIDs(ctx, tip.Number.Uint64()) } -// GetEvents returns all sync events at blockNum -func (b *Bridge) GetEvents(ctx context.Context, blockNum uint64) ([]*types.Message, error) { - start, end, err := b.store.GetEventIDRange(ctx, blockNum) +// Events returns all sync events at blockNum +func (b *Bridge) Events(ctx context.Context, blockNum uint64) ([]*types.Message, error) { + start, end, err := b.store.EventIDRange(ctx, blockNum) if err != nil { + if errors.Is(err, ErrMapNotAvailable) { + return nil, nil + } + return nil, err } if end == 0 { // exception for tip processing - end = b.lastProcessedEventID + end = b.lastProcessedEventID.Load() } - b.log.Debug("got map", "blockNum", blockNum, "start", start, "end", end) - eventsRaw := make([]*types.Message, 0, end-start+1) // get events from DB - events, err := b.store.GetEvents(ctx, start+1, end+1) + events, err := b.store.Events(ctx, start+1, end+1) if err != nil { return nil, err } @@ -217,6 +251,10 @@ func (b *Bridge) GetEvents(ctx context.Context, blockNum uint64) ([]*types.Messa return eventsRaw, nil } +func (b *Bridge) EventTxnLookup(ctx context.Context, borTxHash libcommon.Hash) (uint64, bool, error) { + return b.store.EventTxnToBlockNum(ctx, borTxHash) +} + // Helper functions func (b *Bridge) isSprintStart(headerNum uint64) bool { if headerNum%b.borConfig.CalculateSprintLength(headerNum) != 0 || headerNum == 0 { diff --git a/polygon/bridge/bridge_test.go b/polygon/bridge/bridge_test.go index 2b3af979619..861f6ff2ff7 100644 --- a/polygon/bridge/bridge_test.go +++ b/polygon/bridge/bridge_test.go @@ -19,7 +19,6 @@ import ( "github.com/erigontech/erigon/polygon/bor/borcfg" "github.com/erigontech/erigon/polygon/bridge" "github.com/erigontech/erigon/polygon/heimdall" - "github.com/erigontech/erigon/polygon/polygoncommon" "github.com/erigontech/erigon/rlp" "github.com/erigontech/erigon/turbo/testlog" ) @@ -33,9 +32,7 @@ func setup(t *testing.T, abi abi.ABI) (*heimdall.MockHeimdallClient, *bridge.Bri } heimdallClient := heimdall.NewMockHeimdallClient(ctrl) - polygonBridgeDB := polygoncommon.NewDatabase(t.TempDir(), logger) - store := bridge.NewStore(polygonBridgeDB) - b := bridge.NewBridge(store, logger, &borConfig, heimdallClient.FetchStateSyncEvents, abi) + b := bridge.Assemble(t.TempDir(), logger, &borConfig, heimdallClient.FetchStateSyncEvents, abi) return heimdallClient, b } @@ -76,7 +73,7 @@ func TestBridge(t *testing.T) { event1 := &heimdall.EventRecordWithTime{ EventRecord: heimdall.EventRecord{ ID: 1, - ChainID: "80001", + ChainID: "80002", Data: hexutil.MustDecode("0x01"), }, Time: time.Unix(50, 0), // block 2 @@ -84,7 +81,7 @@ func TestBridge(t *testing.T) { event2 := &heimdall.EventRecordWithTime{ EventRecord: heimdall.EventRecord{ ID: 2, - ChainID: "80001", + ChainID: "80002", Data: hexutil.MustDecode("0x02"), }, Time: time.Unix(100, 0), // block 2 @@ -92,7 +89,7 @@ func TestBridge(t *testing.T) { event3 := &heimdall.EventRecordWithTime{ EventRecord: heimdall.EventRecord{ ID: 3, - ChainID: "80001", + ChainID: "80002", Data: hexutil.MustDecode("0x03"), }, Time: time.Unix(200, 0), // block 4 @@ -127,7 +124,7 @@ func TestBridge(t *testing.T) { err = b.ProcessNewBlocks(ctx, blocks) require.NoError(t, err) - res, err := b.GetEvents(ctx, 2) + res, err := 
b.Events(ctx, 4) require.NoError(t, err) event1Data, err := event1.Pack(stateReceiverABI) @@ -140,25 +137,19 @@ func TestBridge(t *testing.T) { require.Equal(t, event1Data, rlp.RawValue(res[0].Data())) // check data fields require.Equal(t, event2Data, rlp.RawValue(res[1].Data())) - res, err = b.GetEvents(ctx, 4) + // get non-sprint block + res, err = b.Events(ctx, 1) + require.Equal(t, len(res), 0) require.NoError(t, err) - event3Data, err := event3.Pack(stateReceiverABI) + res, err = b.Events(ctx, 3) + require.Equal(t, len(res), 0) require.NoError(t, err) - require.Equal(t, 1, len(res)) - require.Equal(t, event3Data, rlp.RawValue(res[0].Data())) - - // get non-sprint block - _, err = b.GetEvents(ctx, 1) - require.Error(t, err) - - _, err = b.GetEvents(ctx, 3) - require.Error(t, err) - // check block 0 - _, err = b.GetEvents(ctx, 0) - require.Error(t, err) + res, err = b.Events(ctx, 0) + require.Equal(t, len(res), 0) + require.NoError(t, err) cancel() wg.Wait() @@ -174,7 +165,7 @@ func TestBridge_Unwind(t *testing.T) { event1 := &heimdall.EventRecordWithTime{ EventRecord: heimdall.EventRecord{ ID: 1, - ChainID: "80001", + ChainID: "80002", Data: hexutil.MustDecode("0x01"), }, Time: time.Unix(50, 0), // block 2 @@ -182,7 +173,7 @@ func TestBridge_Unwind(t *testing.T) { event2 := &heimdall.EventRecordWithTime{ EventRecord: heimdall.EventRecord{ ID: 2, - ChainID: "80001", + ChainID: "80002", Data: hexutil.MustDecode("0x02"), }, Time: time.Unix(100, 0), // block 2 @@ -190,7 +181,7 @@ func TestBridge_Unwind(t *testing.T) { event3 := &heimdall.EventRecordWithTime{ EventRecord: heimdall.EventRecord{ ID: 3, - ChainID: "80001", + ChainID: "80002", Data: hexutil.MustDecode("0x03"), }, Time: time.Unix(200, 0), // block 4 @@ -198,7 +189,7 @@ func TestBridge_Unwind(t *testing.T) { event4 := &heimdall.EventRecordWithTime{ EventRecord: heimdall.EventRecord{ ID: 4, - ChainID: "80001", + ChainID: "80002", Data: hexutil.MustDecode("0x03"), }, Time: time.Unix(300, 0), // block 6 @@ -233,18 +224,19 @@ func TestBridge_Unwind(t *testing.T) { err = b.ProcessNewBlocks(ctx, blocks) require.NoError(t, err) - event3Data, err := event3.Pack(stateReceiverABI) + event1Data, err := event1.Pack(stateReceiverABI) require.NoError(t, err) - res, err := b.GetEvents(ctx, 4) - require.Equal(t, event3Data, rlp.RawValue(res[0].Data())) + res, err := b.Events(ctx, 4) + require.Equal(t, event1Data, rlp.RawValue(res[0].Data())) require.NoError(t, err) err = b.Unwind(ctx, &types.Header{Number: big.NewInt(3)}) require.NoError(t, err) - _, err = b.GetEvents(ctx, 4) - require.Error(t, err) + res, err = b.Events(ctx, 4) + require.Equal(t, len(res), 0) + require.NoError(t, err) cancel() wg.Wait() diff --git a/polygon/bridge/db.go b/polygon/bridge/db.go index f911480e43e..9ca8056a6e4 100644 --- a/polygon/bridge/db.go +++ b/polygon/bridge/db.go @@ -20,10 +20,9 @@ import ( "bytes" "context" "encoding/binary" - "errors" - "fmt" "time" + libcommon "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon/accounts/abi" "github.com/erigontech/erigon/polygon/heimdall" @@ -46,18 +45,22 @@ import ( var databaseTablesCfg = kv.TableCfg{ kv.BorEvents: {}, kv.BorEventNums: {}, + kv.BorTxLookup: {}, } type Store interface { Prepare(ctx context.Context) error Close() - GetLatestEventID(ctx context.Context) (uint64, error) - GetSprintLastEventID(ctx context.Context, lastID uint64, timeLimit time.Time, stateContract abi.ABI) (uint64, error) - AddEvents(ctx context.Context, events 
[]*heimdall.EventRecordWithTime, stateContract abi.ABI) error - GetEvents(ctx context.Context, start, end uint64) ([][]byte, error) - StoreEventID(ctx context.Context, eventMap map[uint64]uint64) error - GetEventIDRange(ctx context.Context, blockNum uint64) (uint64, uint64, error) + LatestEventID(ctx context.Context) (uint64, error) + LastProcessedEventID(ctx context.Context) (uint64, error) + PutEventTxnToBlockNum(ctx context.Context, txMap map[libcommon.Hash]uint64) error + EventTxnToBlockNum(ctx context.Context, borTxHash libcommon.Hash) (uint64, bool, error) + SprintLastEventID(ctx context.Context, lastID uint64, timeLimit time.Time, stateContract abi.ABI) (uint64, error) + PutEvents(ctx context.Context, events []*heimdall.EventRecordWithTime, stateContract abi.ABI) error + Events(ctx context.Context, start, end uint64) ([][]byte, error) + PutEventIDs(ctx context.Context, eventMap map[uint64]uint64) error + EventIDRange(ctx context.Context, blockNum uint64) (uint64, uint64, error) PruneEventIDs(ctx context.Context, blockNum uint64) error } @@ -70,7 +73,7 @@ func NewStore(db *polygoncommon.Database) *MdbxStore { } func (s *MdbxStore) Prepare(ctx context.Context) error { - err := s.db.OpenOnce(ctx, kv.PolygonBridgeDB, databaseTablesCfg) + err := s.db.OpenOnce(ctx) if err != nil { return err } @@ -84,7 +87,7 @@ func (s *MdbxStore) Close() { // GetLatestEventID the latest state sync event ID in given DB, 0 if DB is empty // NOTE: Polygon sync events start at index 1 -func (s *MdbxStore) GetLatestEventID(ctx context.Context) (uint64, error) { +func (s *MdbxStore) LatestEventID(ctx context.Context) (uint64, error) { tx, err := s.db.BeginRo(ctx) if err != nil { return 0, err @@ -109,45 +112,103 @@ func (s *MdbxStore) GetLatestEventID(ctx context.Context) (uint64, error) { return binary.BigEndian.Uint64(k), err } -// GetSprintLastEventID gets the last event id where event.ID >= lastID and event.Time <= time -func (s *MdbxStore) GetSprintLastEventID(ctx context.Context, lastID uint64, timeLimit time.Time, stateContract abi.ABI) (uint64, error) { - var eventID uint64 +// LastProcessedEventID gets the last seen event ID in the BorEventNums table +func (s *MdbxStore) LastProcessedEventID(ctx context.Context) (uint64, error) { + tx, err := s.db.BeginRo(ctx) + if err != nil { + return 0, err + } + defer tx.Rollback() + + cursor, err := tx.Cursor(kv.BorEventNums) + if err != nil { + return 0, err + } + defer cursor.Close() + + _, v, err := cursor.Last() + if err != nil { + return 0, err + } + + if len(v) == 0 { + return 0, nil + } + + return binary.BigEndian.Uint64(v), err } + +func (s *MdbxStore) PutEventTxnToBlockNum(ctx context.Context, txMap map[libcommon.Hash]uint64) error { + tx, err := s.db.BeginRw(ctx) + if err != nil { + return err + } + defer tx.Rollback() + + vByte := make([]byte, 8) + + for k, v := range txMap { + binary.BigEndian.PutUint64(vByte, v) + + err = tx.Put(kv.BorTxLookup, k.Bytes(), vByte) + if err != nil { + return err + } + } + + return tx.Commit() +} + +func (s *MdbxStore) EventTxnToBlockNum(ctx context.Context, borTxHash libcommon.Hash) (uint64, bool, error) { + var blockNum uint64 tx, err := s.db.BeginRo(ctx) if err != nil { - return eventID, err + return blockNum, false, err } defer tx.Rollback() - count, err := tx.Count(kv.BorEvents) + v, err := tx.GetOne(kv.BorTxLookup, borTxHash.Bytes()) if err != nil { - return eventID, err + return blockNum, false, err } - if count == 0 { - return eventID, nil + if v == nil { // we don't have a map + return blockNum, false, nil }
- cursor, err := tx.Cursor(kv.BorEvents) + blockNum = binary.BigEndian.Uint64(v) + return blockNum, true, nil +} + +// SprintLastEventID gets the last event id where event.ID >= lastID and event.Time <= time +func (s *MdbxStore) SprintLastEventID(ctx context.Context, lastID uint64, timeLimit time.Time, stateContract abi.ABI) (uint64, error) { + var eventID uint64 + + tx, err := s.db.BeginRo(ctx) if err != nil { return eventID, err } - defer cursor.Close() + defer tx.Rollback() - kDBLast, _, err := cursor.Last() + count, err := tx.Count(kv.BorEvents) if err != nil { return eventID, err } + if count == 0 { + return eventID, nil + } kLastID := make([]byte, 8) binary.BigEndian.PutUint64(kLastID, lastID) - _, _, err = cursor.Seek(kLastID) + it, err := tx.RangeAscend(kv.BorEvents, kLastID, nil, -1) if err != nil { return eventID, err } + defer it.Close() - for { - k, v, err := cursor.Next() + for it.HasNext() { + _, v, err := it.Next() if err != nil { return eventID, err } @@ -165,14 +226,12 @@ func (s *MdbxStore) GetSprintLastEventID(ctx context.Context, lastID uint64, tim } eventID = event.ID - - if bytes.Equal(k, kDBLast) { - return eventID, nil - } } + + return eventID, nil } -func (s *MdbxStore) AddEvents(ctx context.Context, events []*heimdall.EventRecordWithTime, stateContract abi.ABI) error { +func (s *MdbxStore) PutEvents(ctx context.Context, events []*heimdall.EventRecordWithTime, stateContract abi.ABI) error { tx, err := s.db.BeginRw(ctx) if err != nil { return err @@ -197,7 +256,7 @@ func (s *MdbxStore) AddEvents(ctx context.Context, events []*heimdall.EventRecor } // GetEvents gets raw events, start inclusive, end exclusive -func (s *MdbxStore) GetEvents(ctx context.Context, start, end uint64) ([][]byte, error) { +func (s *MdbxStore) Events(ctx context.Context, start, end uint64) ([][]byte, error) { var events [][]byte kStart := make([]byte, 8) @@ -223,13 +282,13 @@ func (s *MdbxStore) GetEvents(ctx context.Context, start, end uint64) ([][]byte, return nil, err } - events = append(events, v) + events = append(events, bytes.Clone(v)) } return events, err } -func (s *MdbxStore) StoreEventID(ctx context.Context, eventMap map[uint64]uint64) error { +func (s *MdbxStore) PutEventIDs(ctx context.Context, eventMap map[uint64]uint64) error { tx, err := s.db.BeginRw(ctx) if err != nil { return err @@ -255,7 +314,7 @@ func (s *MdbxStore) StoreEventID(ctx context.Context, eventMap map[uint64]uint64 // GetEventIDRange returns the state sync event ID range for the given block number. // An error is thrown if the block number is not found in the database. If the given block // number is the last in the database, then the second uint64 (representing end ID) is 0.
-func (s *MdbxStore) GetEventIDRange(ctx context.Context, blockNum uint64) (uint64, uint64, error) { +func (s *MdbxStore) EventIDRange(ctx context.Context, blockNum uint64) (uint64, uint64, error) { var start, end uint64 tx, err := s.db.BeginRo(ctx) @@ -277,13 +336,10 @@ func (s *MdbxStore) GetEventIDRange(ctx context.Context, blockNum uint64) (uint6 return start, end, err } if v == nil { // we don't have a map - return start, end, errors.New(fmt.Sprintf("map not available for block %d", blockNum)) + return start, end, ErrMapNotAvailable } - err = binary.Read(bytes.NewReader(v), binary.BigEndian, &start) - if err != nil { - return start, end, err - } + start = binary.BigEndian.Uint64(v) _, v, err = cursor.Next() if err != nil { @@ -291,10 +347,7 @@ func (s *MdbxStore) GetEventIDRange(ctx context.Context, blockNum uint64) (uint6 } if v != nil { // may be empty if blockNum is the last entry - err = binary.Read(bytes.NewReader(v), binary.BigEndian, &end) - if err != nil { - return start, end, err - } + end = binary.BigEndian.Uint64(v) } return start, end, nil diff --git a/polygon/bridge/log_prefix.go b/polygon/bridge/log_prefix.go index 90e003d0926..e40d4746d13 100644 --- a/polygon/bridge/log_prefix.go +++ b/polygon/bridge/log_prefix.go @@ -16,8 +16,6 @@ package bridge -import "fmt" - func bridgeLogPrefix(message string) string { - return fmt.Sprintf("[bridge] %s", message) + return "[bridge] " + message } diff --git a/polygon/bridge/service.go b/polygon/bridge/service.go index d9511e82180..1e966647e5b 100644 --- a/polygon/bridge/service.go +++ b/polygon/bridge/service.go @@ -19,6 +19,7 @@ package bridge import ( "context" + libcommon "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon/core/types" ) @@ -26,7 +27,8 @@ type PolygonBridge interface { ProcessNewBlocks(ctx context.Context, blocks []*types.Block) error Synchronize(ctx context.Context, tip *types.Header) error Unwind(ctx context.Context, tip *types.Header) error - GetEvents(ctx context.Context, blockNum uint64) ([]*types.Message, error) + Events(ctx context.Context, blockNum uint64) ([]*types.Message, error) + EventTxnLookup(ctx context.Context, borTxHash libcommon.Hash) (uint64, bool, error) } type Service interface { diff --git a/polygon/heimdall/checkpoint.go b/polygon/heimdall/checkpoint.go index 9be8e0b4fc7..2bffb8645d7 100644 --- a/polygon/heimdall/checkpoint.go +++ b/polygon/heimdall/checkpoint.go @@ -19,6 +19,7 @@ package heimdall import ( "encoding/binary" "encoding/json" + "errors" "fmt" "math/big" @@ -159,7 +160,7 @@ type CheckpointListResponse struct { Result Checkpoints `json:"result"` } -var ErrCheckpointNotFound = fmt.Errorf("checkpoint not found") +var ErrCheckpointNotFound = errors.New("checkpoint not found") func CheckpointIdAt(tx kv.Tx, block uint64) (CheckpointId, error) { var id uint64 diff --git a/polygon/heimdall/client.go b/polygon/heimdall/client.go index 41f6e2fa9f9..7f84698a415 100644 --- a/polygon/heimdall/client.go +++ b/polygon/heimdall/client.go @@ -26,6 +26,7 @@ import ( "net/url" "path" "sort" + "strconv" "strings" "time" @@ -530,15 +531,15 @@ func stateSyncListURL(urlString string, fromID uint64, to int64) (*url.URL, erro } func stateSyncURL(urlString string, id uint64) (*url.URL, error) { - return makeURL(urlString, fmt.Sprintf(fetchStateSyncEvent, fmt.Sprint(id)), "") + return makeURL(urlString, fmt.Sprintf(fetchStateSyncEvent, strconv.FormatUint(id, 10)), "") } func checkpointURL(urlString string, number int64) (*url.URL, error) { - url := "" + var url string if number == -1 { 
url = fmt.Sprintf(fetchCheckpoint, "latest") } else { - url = fmt.Sprintf(fetchCheckpoint, fmt.Sprint(number)) + url = fmt.Sprintf(fetchCheckpoint, strconv.FormatInt(number, 10)) } return makeURL(urlString, url, "") diff --git a/polygon/heimdall/cmp_block_range.go b/polygon/heimdall/cmp_block_range.go new file mode 100644 index 00000000000..fc536709381 --- /dev/null +++ b/polygon/heimdall/cmp_block_range.go @@ -0,0 +1,32 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see <http://www.gnu.org/licenses/>. + +package heimdall + +// cmpBlockRange returns 0 if blockNum is inside [startBlockNum, endBlockNum] range. +// returns -1 if before range. +// returns 1 if after range. +func cmpBlockRange(startBlockNum, endBlockNum, blockNum uint64) int { + if blockNum < startBlockNum { + return -1 + } + + if blockNum > endBlockNum { + return 1 + } + + return 0 +} diff --git a/polygon/heimdall/entity_fetcher.go b/polygon/heimdall/entity_fetcher.go index e8c0061ed5d..b5977e8afaf 100644 --- a/polygon/heimdall/entity_fetcher.go +++ b/polygon/heimdall/entity_fetcher.go @@ -19,7 +19,6 @@ package heimdall import ( "cmp" "context" - "fmt" "slices" "time" @@ -69,21 +68,21 @@ func newEntityFetcher[TEntity Entity]( } func (f *entityFetcherImpl[TEntity]) FetchEntityIdRange(ctx context.Context) (ClosedRange, error) { - var idRange ClosedRange - - if f.fetchFirstEntityId == nil { - idRange.Start = 1 - } else { - first, err := f.fetchFirstEntityId(ctx) - if err != nil { - return idRange, err - } - idRange.Start = uint64(first) + first, err := f.fetchFirstEntityId(ctx) + if err != nil { + return ClosedRange{}, err } last, err := f.fetchLastEntityId(ctx) - idRange.End = uint64(last) - return idRange, err + if err != nil { + return ClosedRange{}, err + } + + res := ClosedRange{ + Start: uint64(first), + End: uint64(last), + } + return res, nil } const entityFetcherBatchFetchThreshold = 100 @@ -138,7 +137,7 @@ func (f *entityFetcherImpl[TEntity]) FetchAllEntities(ctx context.Context) ([]TE select { case <-progressLogTicker.C: f.logger.Debug( - heimdallLogPrefix(fmt.Sprintf("%s progress", f.name)), + heimdallLogPrefix(f.name+" progress"), "page", page, "len", len(entities), ) @@ -158,7 +157,7 @@ func (f *entityFetcherImpl[TEntity]) FetchAllEntities(ctx context.Context) ([]TE } f.logger.Debug( - heimdallLogPrefix(fmt.Sprintf("%s done", f.name)), + heimdallLogPrefix(f.name+" done"), "len", len(entities), "duration", time.Since(fetchStartTime), ) diff --git a/polygon/heimdall/entity_store.go b/polygon/heimdall/entity_store.go index 57537e74e18..71d9985e471 100644 --- a/polygon/heimdall/entity_store.go +++ b/polygon/heimdall/entity_store.go @@ -29,17 +29,18 @@ import ( ) var databaseTablesCfg = kv.TableCfg{ - kv.BorCheckpoints: {}, - kv.BorMilestones: {}, - kv.BorSpans: {}, + kv.BorCheckpoints: {}, + kv.BorMilestones: {}, + kv.BorSpans: {}, + kv.BorProducerSelections: {}, } type EntityStore[TEntity Entity] interface
{ Prepare(ctx context.Context) error Close() - GetLastEntityId(ctx context.Context) (uint64, bool, error) - GetLastEntity(ctx context.Context) (TEntity, error) - GetEntity(ctx context.Context, id uint64) (TEntity, error) + LastEntityId(ctx context.Context) (uint64, bool, error) + LastEntity(ctx context.Context) (TEntity, bool, error) + Entity(ctx context.Context, id uint64) (TEntity, bool, error) PutEntity(ctx context.Context, id uint64, entity TEntity) error RangeFromBlockNum(ctx context.Context, startBlockNum uint64) ([]TEntity, error) } @@ -47,12 +48,9 @@ type EntityStore[TEntity Entity] interface { type RangeIndexFactory func(ctx context.Context) (*RangeIndex, error) type mdbxEntityStore[TEntity Entity] struct { - db *polygoncommon.Database - label kv.Label - table string - - makeEntity func() TEntity - + db *polygoncommon.Database + table string + makeEntity func() TEntity blockNumToIdIndexFactory RangeIndexFactory blockNumToIdIndex *RangeIndex prepareOnce sync.Once @@ -60,18 +58,14 @@ type mdbxEntityStore[TEntity Entity] struct { func newMdbxEntityStore[TEntity Entity]( db *polygoncommon.Database, - label kv.Label, table string, makeEntity func() TEntity, blockNumToIdIndexFactory RangeIndexFactory, ) *mdbxEntityStore[TEntity] { return &mdbxEntityStore[TEntity]{ - db: db, - label: label, - table: table, - - makeEntity: makeEntity, - + db: db, + table: table, + makeEntity: makeEntity, blockNumToIdIndexFactory: blockNumToIdIndexFactory, } } @@ -79,7 +73,7 @@ func newMdbxEntityStore[TEntity Entity]( func (s *mdbxEntityStore[TEntity]) Prepare(ctx context.Context) error { var err error s.prepareOnce.Do(func() { - err = s.db.OpenOnce(ctx, s.label, databaseTablesCfg) + err = s.db.OpenOnce(ctx) if err != nil { return } @@ -97,7 +91,7 @@ func (s *mdbxEntityStore[TEntity]) Close() { s.blockNumToIdIndex.Close() } -func (s *mdbxEntityStore[TEntity]) GetLastEntityId(ctx context.Context) (uint64, bool, error) { +func (s *mdbxEntityStore[TEntity]) LastEntityId(ctx context.Context) (uint64, bool, error) { tx, err := s.db.BeginRo(ctx) if err != nil { return 0, false, err @@ -122,16 +116,16 @@ func (s *mdbxEntityStore[TEntity]) GetLastEntityId(ctx context.Context) (uint64, return entityStoreKeyParse(lastKey), true, nil } -func (s *mdbxEntityStore[TEntity]) GetLastEntity(ctx context.Context) (TEntity, error) { - id, ok, err := s.GetLastEntityId(ctx) +func (s *mdbxEntityStore[TEntity]) LastEntity(ctx context.Context) (TEntity, bool, error) { + id, ok, err := s.LastEntityId(ctx) if err != nil { - return generics.Zero[TEntity](), err + return generics.Zero[TEntity](), false, err } // not found if !ok { - return generics.Zero[TEntity](), nil + return generics.Zero[TEntity](), false, nil } - return s.GetEntity(ctx, id) + return s.Entity(ctx, id) } func entityStoreKey(id uint64) [8]byte { @@ -152,24 +146,25 @@ func (s *mdbxEntityStore[TEntity]) entityUnmarshalJSON(jsonBytes []byte) (TEntit return entity, nil } -func (s *mdbxEntityStore[TEntity]) GetEntity(ctx context.Context, id uint64) (TEntity, error) { +func (s *mdbxEntityStore[TEntity]) Entity(ctx context.Context, id uint64) (TEntity, bool, error) { tx, err := s.db.BeginRo(ctx) if err != nil { - return generics.Zero[TEntity](), err + return generics.Zero[TEntity](), false, err } defer tx.Rollback() key := entityStoreKey(id) jsonBytes, err := tx.GetOne(s.table, key[:]) if err != nil { - return generics.Zero[TEntity](), err + return generics.Zero[TEntity](), false, err } // not found if jsonBytes == nil { - return generics.Zero[TEntity](), nil + return 
generics.Zero[TEntity](), false, nil } - return s.entityUnmarshalJSON(jsonBytes) + val, err := s.entityUnmarshalJSON(jsonBytes) + return val, true, err } func (s *mdbxEntityStore[TEntity]) PutEntity(ctx context.Context, id uint64, entity TEntity) error { diff --git a/polygon/heimdall/event_record.go b/polygon/heimdall/event_record.go index 44084fe85e1..7e4a480b48a 100644 --- a/polygon/heimdall/event_record.go +++ b/polygon/heimdall/event_record.go @@ -44,7 +44,7 @@ type EventRecordWithTime struct { Time time.Time `json:"record_time" yaml:"record_time"` } -var ErrEventRecordNotFound = fmt.Errorf("event record not found") +var ErrEventRecordNotFound = errors.New("event record not found") // String returns the string representation of a state record func (e *EventRecordWithTime) String() string { diff --git a/polygon/heimdall/log_prefix.go b/polygon/heimdall/log_prefix.go index 01b98f1a73d..260d2776af3 100644 --- a/polygon/heimdall/log_prefix.go +++ b/polygon/heimdall/log_prefix.go @@ -16,8 +16,6 @@ package heimdall -import "fmt" - func heimdallLogPrefix(message string) string { - return fmt.Sprintf("[bor.heimdall] %s", message) + return "[bor.heimdall] " + message } diff --git a/polygon/heimdall/milestone.go b/polygon/heimdall/milestone.go index 32c12ca41fb..b0381ff9dfd 100644 --- a/polygon/heimdall/milestone.go +++ b/polygon/heimdall/milestone.go @@ -19,6 +19,7 @@ package heimdall import ( "encoding/binary" "encoding/json" + "errors" "fmt" "math/big" @@ -169,7 +170,7 @@ type MilestoneIDResponse struct { Result MilestoneID `json:"result"` } -var ErrMilestoneNotFound = fmt.Errorf("milestone not found") +var ErrMilestoneNotFound = errors.New("milestone not found") func MilestoneIdAt(tx kv.Tx, block uint64) (MilestoneId, error) { var id uint64 diff --git a/polygon/heimdall/scraper.go b/polygon/heimdall/scraper.go index cfebd325894..2ec2b2fa72e 100644 --- a/polygon/heimdall/scraper.go +++ b/polygon/heimdall/scraper.go @@ -64,7 +64,7 @@ func (s *scraper[TEntity]) Run(ctx context.Context) error { } for ctx.Err() == nil { - lastKnownId, hasLastKnownId, err := s.store.GetLastEntityId(ctx) + lastKnownId, hasLastKnownId, err := s.store.LastEntityId(ctx) if err != nil { return err } @@ -96,7 +96,7 @@ func (s *scraper[TEntity]) Run(ctx context.Context) error { } } - go s.observers.Notify(entities) + s.observers.NotifySync(entities) // NotifySync preserves order of events } } return ctx.Err() diff --git a/polygon/heimdall/service.go b/polygon/heimdall/service.go index 3696ff12271..560ace05783 100644 --- a/polygon/heimdall/service.go +++ b/polygon/heimdall/service.go @@ -19,39 +19,43 @@ package heimdall import ( "context" "errors" - "slices" + "fmt" "time" "golang.org/x/sync/errgroup" libcommon "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/log/v3" + "github.com/erigontech/erigon/polygon/bor/borcfg" + "github.com/erigontech/erigon/polygon/bor/valset" "github.com/erigontech/erigon/polygon/polygoncommon" ) type Service interface { - FetchLatestSpans(ctx context.Context, count uint) ([]*Span, error) - FetchCheckpointsFromBlock(ctx context.Context, startBlock uint64) (Waypoints, error) - FetchMilestonesFromBlock(ctx context.Context, startBlock uint64) (Waypoints, error) - RegisterMilestoneObserver(callback func(*Milestone)) polygoncommon.UnregisterFunc - RegisterSpanObserver(callback func(*Span)) polygoncommon.UnregisterFunc + Span(ctx context.Context, id uint64) (*Span, bool, error) + CheckpointsFromBlock(ctx context.Context, startBlock uint64) (Waypoints, error) + 
MilestonesFromBlock(ctx context.Context, startBlock uint64) (Waypoints, error) + Producers(ctx context.Context, blockNum uint64) (*valset.ValidatorSet, error) + RegisterMilestoneObserver(callback func(*Milestone), opts ...ObserverOption) polygoncommon.UnregisterFunc Run(ctx context.Context) error + Synchronize(ctx context.Context) } type service struct { - store ServiceStore - checkpointScraper *scraper[*Checkpoint] - milestoneScraper *scraper[*Milestone] - spanScraper *scraper[*Span] + store ServiceStore + checkpointScraper *scraper[*Checkpoint] + milestoneScraper *scraper[*Milestone] + spanScraper *scraper[*Span] + spanBlockProducersTracker *spanBlockProducersTracker } -func AssembleService(heimdallUrl string, dataDir string, tmpDir string, logger log.Logger) Service { +func AssembleService(borConfig *borcfg.BorConfig, heimdallUrl string, dataDir string, tmpDir string, logger log.Logger) Service { store := NewMdbxServiceStore(logger, dataDir, tmpDir) client := NewHeimdallClient(heimdallUrl, logger) - return NewService(client, store, logger) + return NewService(borConfig, client, store, logger) } -func NewService(client HeimdallClient, store ServiceStore, logger log.Logger) Service { +func NewService(borConfig *borcfg.BorConfig, client HeimdallClient, store ServiceStore, logger log.Logger) Service { checkpointFetcher := newCheckpointFetcher(client, logger) milestoneFetcher := newMilestoneFetcher(client, logger) spanFetcher := newSpanFetcher(client, logger) @@ -78,17 +82,20 @@ func NewService(client HeimdallClient, store ServiceStore, logger log.Logger) Se ) return &service{ - store: store, - checkpointScraper: checkpointScraper, - milestoneScraper: milestoneScraper, - spanScraper: spanScraper, + store: store, + checkpointScraper: checkpointScraper, + milestoneScraper: milestoneScraper, + spanScraper: spanScraper, + spanBlockProducersTracker: newSpanBlockProducersTracker(logger, borConfig, store.SpanBlockProducerSelections()), } } func newCheckpointFetcher(client HeimdallClient, logger log.Logger) entityFetcher[*Checkpoint] { return newEntityFetcher( "CheckpointFetcher", - nil, + func(ctx context.Context) (int64, error) { + return 1, nil + }, client.FetchCheckpointCount, client.FetchCheckpoint, client.FetchCheckpoints, @@ -124,7 +131,9 @@ func newSpanFetcher(client HeimdallClient, logger log.Logger) entityFetcher[*Spa return newEntityFetcher( "SpanFetcher", - nil, + func(ctx context.Context) (int64, error) { + return 0, nil + }, fetchLastEntityId, fetchEntity, nil, @@ -133,81 +142,56 @@ func newSpanFetcher(client HeimdallClient, logger log.Logger) entityFetcher[*Spa ) } -func (s *service) FetchLatestSpan(ctx context.Context) (*Span, error) { - s.checkpointScraper.Synchronize(ctx) - return s.store.Spans().GetLastEntity(ctx) -} - -func (s *service) FetchLatestSpans(ctx context.Context, count uint) ([]*Span, error) { - if count == 0 { - return nil, errors.New("can't fetch 0 latest spans") +func (s *service) Span(ctx context.Context, id uint64) (*Span, bool, error) { + span, ok, err := s.store.Spans().Entity(ctx, id) + if err != nil || !ok { + return nil, ok, err } - span, err := s.FetchLatestSpan(ctx) - if err != nil { - return nil, err - } - - latestSpans := make([]*Span, 0, count) - latestSpans = append(latestSpans, span) - count-- - - for count > 0 { - prevSpanRawId := span.RawId() - if prevSpanRawId == 0 { - break - } - - span, err = s.store.Spans().GetEntity(ctx, prevSpanRawId-1) - if err != nil { - return nil, err - } - - latestSpans = append(latestSpans, span) - count-- - } - - 
slices.Reverse(latestSpans) - return latestSpans, nil + return span, ok, nil } func castEntityToWaypoint[TEntity Waypoint](entity TEntity) Waypoint { return entity } -func (s *service) synchronizeScrapers(ctx context.Context) { +func (s *service) Synchronize(ctx context.Context) { s.checkpointScraper.Synchronize(ctx) s.milestoneScraper.Synchronize(ctx) s.spanScraper.Synchronize(ctx) + s.spanBlockProducersTracker.Synchronize(ctx) } -func (s *service) FetchCheckpointsFromBlock(ctx context.Context, startBlock uint64) (Waypoints, error) { - s.synchronizeScrapers(ctx) +func (s *service) CheckpointsFromBlock(ctx context.Context, startBlock uint64) (Waypoints, error) { + s.Synchronize(ctx) entities, err := s.store.Checkpoints().RangeFromBlockNum(ctx, startBlock) return libcommon.SliceMap(entities, castEntityToWaypoint[*Checkpoint]), err } -func (s *service) FetchMilestonesFromBlock(ctx context.Context, startBlock uint64) (Waypoints, error) { - s.synchronizeScrapers(ctx) +func (s *service) MilestonesFromBlock(ctx context.Context, startBlock uint64) (Waypoints, error) { + s.Synchronize(ctx) entities, err := s.store.Milestones().RangeFromBlockNum(ctx, startBlock) return libcommon.SliceMap(entities, castEntityToWaypoint[*Milestone]), err } -// TODO: this limit is a temporary solution to avoid piping thousands of events -// during the first sync. Let's discuss alternatives. Hopefully we can remove this limit. -const maxEntityEvents = 5 +func (s *service) Producers(ctx context.Context, blockNum uint64) (*valset.ValidatorSet, error) { + s.Synchronize(ctx) + return s.spanBlockProducersTracker.Producers(ctx, blockNum) +} -func (s *service) RegisterMilestoneObserver(callback func(*Milestone)) polygoncommon.UnregisterFunc { +func (s *service) RegisterMilestoneObserver(callback func(*Milestone), opts ...ObserverOption) polygoncommon.UnregisterFunc { + options := NewObserverOptions(opts...) return s.milestoneScraper.RegisterObserver(func(entities []*Milestone) { - for _, entity := range libcommon.SliceTakeLast(entities, maxEntityEvents) { + for _, entity := range libcommon.SliceTakeLast(entities, options.eventsLimit) { callback(entity) } }) } -func (s *service) RegisterSpanObserver(callback func(*Span)) polygoncommon.UnregisterFunc { +func (s *service) RegisterSpanObserver(callback func(*Span), opts ...ObserverOption) polygoncommon.UnregisterFunc { + options := NewObserverOptions(opts...) 
return s.spanScraper.RegisterObserver(func(entities []*Span) { - for _, entity := range libcommon.SliceTakeLast(entities, maxEntityEvents) { + for _, entity := range libcommon.SliceTakeLast(entities, options.eventsLimit) { callback(entity) } }) @@ -220,9 +204,57 @@ func (s *service) Run(ctx context.Context) error { return nil } - scrapersGroup, scrapersGroupCtx := errgroup.WithContext(ctx) - scrapersGroup.Go(func() error { return s.checkpointScraper.Run(scrapersGroupCtx) }) - scrapersGroup.Go(func() error { return s.milestoneScraper.Run(scrapersGroupCtx) }) - scrapersGroup.Go(func() error { return s.spanScraper.Run(scrapersGroupCtx) }) - return scrapersGroup.Wait() + if err := s.replayUntrackedSpans(ctx); err != nil { + return err + } + + s.RegisterSpanObserver(func(span *Span) { + s.spanBlockProducersTracker.ObserveSpanAsync(span) + }) + + eg, ctx := errgroup.WithContext(ctx) + eg.Go(func() error { return s.checkpointScraper.Run(ctx) }) + eg.Go(func() error { return s.milestoneScraper.Run(ctx) }) + eg.Go(func() error { return s.spanScraper.Run(ctx) }) + eg.Go(func() error { return s.spanBlockProducersTracker.Run(ctx) }) + return eg.Wait() +} + +func (s *service) replayUntrackedSpans(ctx context.Context) error { + lastSpanId, ok, err := s.store.Spans().LastEntityId(ctx) + if err != nil { + return err + } + if !ok { + return nil + } + + lastProducerSelectionId, ok, err := s.store.SpanBlockProducerSelections().LastEntityId(ctx) + if err != nil { + return err + } + + var start uint64 + if ok { + start = lastProducerSelectionId + 1 + } else { + start = lastProducerSelectionId + } + + for id := start; id <= lastSpanId; id++ { + span, ok, err := s.store.Spans().Entity(ctx, id) + if err != nil { + return err + } + if !ok { + return fmt.Errorf("%w: %d", errors.New("can't replay missing span"), id) + } + + err = s.spanBlockProducersTracker.ObserveSpan(ctx, span) + if err != nil { + return err + } + } + + return nil } diff --git a/polygon/heimdall/service_observer_options.go b/polygon/heimdall/service_observer_options.go new file mode 100644 index 00000000000..80c889cc62e --- /dev/null +++ b/polygon/heimdall/service_observer_options.go @@ -0,0 +1,27 @@ +package heimdall + +import "math" + +func NewObserverOptions(opts ...ObserverOption) ObserverOptions { + defaultOptions := ObserverOptions{ + eventsLimit: math.MaxInt, + } + + for _, o := range opts { + o(&defaultOptions) + } + + return defaultOptions +} + +type ObserverOptions struct { + eventsLimit int +} + +type ObserverOption func(opts *ObserverOptions) + +func WithEventsLimit(eventsLimit int) ObserverOption { + return func(config *ObserverOptions) { + config.eventsLimit = eventsLimit + } +} diff --git a/polygon/heimdall/service_store.go b/polygon/heimdall/service_store.go index e7db5f83852..fbb89fef5e6 100644 --- a/polygon/heimdall/service_store.go +++ b/polygon/heimdall/service_store.go @@ -31,29 +31,32 @@ type ServiceStore interface { Checkpoints() EntityStore[*Checkpoint] Milestones() EntityStore[*Milestone] Spans() EntityStore[*Span] + SpanBlockProducerSelections() EntityStore[*SpanBlockProducerSelection] Prepare(ctx context.Context) error Close() } func NewMdbxServiceStore(logger log.Logger, dataDir string, tmpDir string) *MdbxServiceStore { - db := polygoncommon.NewDatabase(dataDir, logger) + db := polygoncommon.NewDatabase(dataDir, kv.HeimdallDB, databaseTablesCfg, logger) blockNumToIdIndexFactory := func(ctx context.Context) (*RangeIndex, error) { return NewRangeIndex(ctx, tmpDir, logger) } return &MdbxServiceStore{ - db: db, - 
checkpoints: newMdbxEntityStore(db, kv.HeimdallDB, kv.BorCheckpoints, generics.New[Checkpoint], blockNumToIdIndexFactory), - milestones: newMdbxEntityStore(db, kv.HeimdallDB, kv.BorMilestones, generics.New[Milestone], blockNumToIdIndexFactory), - spans: newMdbxEntityStore(db, kv.HeimdallDB, kv.BorSpans, generics.New[Span], blockNumToIdIndexFactory), + db: db, + checkpoints: newMdbxEntityStore(db, kv.BorCheckpoints, generics.New[Checkpoint], blockNumToIdIndexFactory), + milestones: newMdbxEntityStore(db, kv.BorMilestones, generics.New[Milestone], blockNumToIdIndexFactory), + spans: newMdbxEntityStore(db, kv.BorSpans, generics.New[Span], blockNumToIdIndexFactory), + spanBlockProducerSelections: newMdbxEntityStore(db, kv.BorProducerSelections, generics.New[SpanBlockProducerSelection], blockNumToIdIndexFactory), } } type MdbxServiceStore struct { - db *polygoncommon.Database - checkpoints EntityStore[*Checkpoint] - milestones EntityStore[*Milestone] - spans EntityStore[*Span] + db *polygoncommon.Database + checkpoints EntityStore[*Checkpoint] + milestones EntityStore[*Milestone] + spans EntityStore[*Span] + spanBlockProducerSelections EntityStore[*SpanBlockProducerSelection] } func (s *MdbxServiceStore) Checkpoints() EntityStore[*Checkpoint] { @@ -68,11 +71,16 @@ func (s *MdbxServiceStore) Spans() EntityStore[*Span] { return s.spans } +func (s *MdbxServiceStore) SpanBlockProducerSelections() EntityStore[*SpanBlockProducerSelection] { + return s.spanBlockProducerSelections +} + func (s *MdbxServiceStore) Prepare(ctx context.Context) error { eg, ctx := errgroup.WithContext(ctx) eg.Go(func() error { return s.checkpoints.Prepare(ctx) }) eg.Go(func() error { return s.milestones.Prepare(ctx) }) eg.Go(func() error { return s.spans.Prepare(ctx) }) + eg.Go(func() error { return s.spanBlockProducerSelections.Prepare(ctx) }) return eg.Wait() } diff --git a/polygon/heimdall/span.go b/polygon/heimdall/span.go index 18d8197e7f3..9a997d49b8a 100644 --- a/polygon/heimdall/span.go +++ b/polygon/heimdall/span.go @@ -37,7 +37,7 @@ func (s *Span) RawId() uint64 { return uint64(s.Id) } -func (s *Span) SetRawId(id uint64) { +func (s *Span) SetRawId(_ uint64) { panic("unimplemented") } @@ -58,15 +58,17 @@ func (s *Span) Less(other btree.Item) bool { } func (s *Span) CmpRange(n uint64) int { - if n < s.StartBlock { - return -1 - } + return cmpBlockRange(s.StartBlock, s.EndBlock, n) +} - if n > s.EndBlock { - return 1 +func (s *Span) Producers() []*valset.Validator { + res := make([]*valset.Validator, len(s.SelectedProducers)) + for i, p := range s.SelectedProducers { + pCopy := p + res[i] = &pCopy } - return 0 + return res } type SpanResponse struct { diff --git a/polygon/heimdall/span_block_producer_selection.go b/polygon/heimdall/span_block_producer_selection.go new file mode 100644 index 00000000000..562f91c8e80 --- /dev/null +++ b/polygon/heimdall/span_block_producer_selection.go @@ -0,0 +1,71 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see <http://www.gnu.org/licenses/>. + +package heimdall + +import ( + "github.com/erigontech/erigon/polygon/bor/valset" +) + +// SpanBlockProducerSelection represents the block producer selection at each epoch +// with their corresponding accumulated ProposerPriority. +// +// In the context of the bor chain, an epoch is equal to 1 span, while +// in the context of the heimdall chain, an epoch is equal to 1 checkpoint. +// This data type aims to make this distinction a bit more visible and is +// intended to be used specifically for span based epochs. +// +// The difference between SpanBlockProducerSelection and Span.SelectedProducers +// is that SpanBlockProducerSelection contains the correct accumulated +// ProposerPriority for each selected producer, while Span.SelectedProducers +// always has ProposerPriority=0. +// +// This is because the heimdall/bor/span/<spanId> +// API only tells us what the "frozen" selected producers for the next span epoch +// are. More info about how that works can be found in the "FreezeSet" logic in +// heimdall at https://github.com/maticnetwork/heimdall/tree/master/bor#how-does-it-work. +// +// However, to correctly calculate the accumulated proposer priorities, one has to start +// from span zero, create a valset.ValidatorSet, call IncrementProposerPriority(spanSprintCount) +// and at every next span call bor.GetUpdatedValidatorSet(oldValidatorSet, span.SelectedProducers) +// and repeat. +type SpanBlockProducerSelection struct { + SpanId SpanId + StartBlock uint64 + EndBlock uint64 + Producers *valset.ValidatorSet +} + +var _ Entity = (*SpanBlockProducerSelection)(nil) + +func (s *SpanBlockProducerSelection) RawId() uint64 { + return uint64(s.SpanId) +} + +func (s *SpanBlockProducerSelection) BlockNumRange() ClosedRange { + return ClosedRange{ + Start: s.StartBlock, + End: s.EndBlock, + } +} + +func (s *SpanBlockProducerSelection) SetRawId(id uint64) { + s.SpanId = SpanId(id) +} + +func (s *SpanBlockProducerSelection) CmpRange(n uint64) int { + return cmpBlockRange(s.StartBlock, s.EndBlock, n) +} diff --git a/polygon/heimdall/span_block_producers_tracker.go b/polygon/heimdall/span_block_producers_tracker.go new file mode 100644 index 00000000000..e9c52012464 --- /dev/null +++ b/polygon/heimdall/span_block_producers_tracker.go @@ -0,0 +1,188 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see <http://www.gnu.org/licenses/>.
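The replay described in the SpanBlockProducerSelection doc comment above can be made concrete with a short sketch. The helper below is hypothetical (the tracker introduced next is the real, incremental and persistent version of this loop); it assumes a non-empty slice of spans starting at span zero, and reuses only calls that appear in this changeset: valset.NewValidatorSet, valset.GetUpdatedValidatorSet, IncrementProposerPriority and BorConfig.CalculateSprintNumber.

package heimdall

import (
	"github.com/erigontech/erigon-lib/log/v3"
	"github.com/erigontech/erigon/polygon/bor/borcfg"
	"github.com/erigontech/erigon/polygon/bor/valset"
)

// accumulateProducerPriorities replays spans[0..n] in memory: span zero seeds
// the validator set, then each later span first advances priorities over the
// previous span's sprints and then folds in the next frozen producer set.
// Hypothetical helper for illustration; assumes len(spans) > 0 and spans[0]
// is span zero.
func accumulateProducerPriorities(spans []*Span, cfg *borcfg.BorConfig, logger log.Logger) (*valset.ValidatorSet, error) {
	producers := valset.NewValidatorSet(spans[0].Producers())
	for i := 1; i < len(spans); i++ {
		prev, next := spans[i-1], spans[i]
		producers.UpdateValidatorMap()
		if err := producers.UpdateTotalVotingPower(); err != nil {
			return nil, err
		}
		// one priority increment per sprint elapsed in the previous span
		increments := int(cfg.CalculateSprintNumber(prev.EndBlock) - cfg.CalculateSprintNumber(prev.StartBlock))
		if increments > 0 {
			producers.IncrementProposerPriority(increments)
		}
		producers = valset.GetUpdatedValidatorSet(producers, next.Producers(), logger)
		producers.IncrementProposerPriority(1)
	}
	return producers, nil
}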
+ +package heimdall + +import ( + "context" + "errors" + "fmt" + "sync/atomic" + + "github.com/erigontech/erigon-lib/log/v3" + "github.com/erigontech/erigon/polygon/bor/borcfg" + "github.com/erigontech/erigon/polygon/bor/valset" +) + +func newSpanBlockProducersTracker( + logger log.Logger, + borConfig *borcfg.BorConfig, + store EntityStore[*SpanBlockProducerSelection], +) *spanBlockProducersTracker { + return &spanBlockProducersTracker{ + logger: logger, + borConfig: borConfig, + store: store, + newSpans: make(chan *Span), + idleSignal: make(chan struct{}), + } +} + +type spanBlockProducersTracker struct { + logger log.Logger + borConfig *borcfg.BorConfig + store EntityStore[*SpanBlockProducerSelection] + newSpans chan *Span + queued atomic.Int32 + idleSignal chan struct{} +} + +func (t *spanBlockProducersTracker) Run(ctx context.Context) error { + defer close(t.idleSignal) + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case newSpan := <-t.newSpans: + err := t.ObserveSpan(ctx, newSpan) + if err != nil { + return err + } + + t.queued.Add(-1) + if t.queued.Load() == 0 { + select { + case t.idleSignal <- struct{}{}: + default: // continue if a signal is already queued + } + } + } + } +} + +func (t *spanBlockProducersTracker) Synchronize(ctx context.Context) { + if t.queued.Load() == 0 { + return + } + + select { + case <-ctx.Done(): + return + case <-t.idleSignal: + return + } +} + +func (t *spanBlockProducersTracker) ObserveSpanAsync(span *Span) { + t.queued.Add(1) + t.newSpans <- span +} + +func (t *spanBlockProducersTracker) ObserveSpan(ctx context.Context, newSpan *Span) error { + t.logger.Debug(heimdallLogPrefix("block producers tracker observing span"), "id", newSpan.Id) + + lastProducerSelection, ok, err := t.store.LastEntity(ctx) + if err != nil { + return err + } + if !ok { + if newSpan.Id != 0 { + return errors.New("expected first new span to be span 0") + } + + newProducerSelection := &SpanBlockProducerSelection{ + SpanId: newSpan.Id, + StartBlock: newSpan.StartBlock, + EndBlock: newSpan.EndBlock, + Producers: valset.NewValidatorSet(newSpan.Producers()), + } + err = t.store.PutEntity(ctx, uint64(newProducerSelection.SpanId), newProducerSelection) + if err != nil { + return err + } + + return nil + } + + if newSpan.Id > lastProducerSelection.SpanId+1 { + return fmt.Errorf( + "%w: last=%d,new=%d", + errors.New("unexpected span gap"), + lastProducerSelection.SpanId, + newSpan.Id, + ) + } + + if newSpan.Id <= lastProducerSelection.SpanId { + return nil + } + + producers := lastProducerSelection.Producers + producers.UpdateValidatorMap() + err = producers.UpdateTotalVotingPower() + if err != nil { + return err + } + + spanStartSprintNum := t.borConfig.CalculateSprintNumber(lastProducerSelection.StartBlock) + spanEndSprintNum := t.borConfig.CalculateSprintNumber(lastProducerSelection.EndBlock) + increments := int(spanEndSprintNum - spanStartSprintNum) + if increments > 0 { + producers.IncrementProposerPriority(increments) + } + newProducers := valset.GetUpdatedValidatorSet(producers, newSpan.Producers(), t.logger) + newProducers.IncrementProposerPriority(1) + newProducerSelection := &SpanBlockProducerSelection{ + SpanId: newSpan.Id, + StartBlock: newSpan.StartBlock, + EndBlock: newSpan.EndBlock, + Producers: newProducers, + } + + err = t.store.PutEntity(ctx, uint64(newProducerSelection.SpanId), newProducerSelection) + if err != nil { + return err + } + + return nil +} + +func (t *spanBlockProducersTracker) Producers(ctx context.Context, blockNum uint64) 
(*valset.ValidatorSet, error) { + spanId := SpanIdAt(blockNum) + producerSelection, ok, err := t.store.Entity(ctx, uint64(spanId)) + if err != nil { + return nil, err + } + if !ok { + return nil, errors.New("no producers found for block num") + } + + producers := producerSelection.Producers + producers.UpdateValidatorMap() + err = producers.UpdateTotalVotingPower() + if err != nil { + return nil, err + } + + spanStartSprintNum := t.borConfig.CalculateSprintNumber(producerSelection.StartBlock) + currentSprintNum := t.borConfig.CalculateSprintNumber(blockNum) + increments := int(currentSprintNum - spanStartSprintNum) + if increments > 0 { + producers.IncrementProposerPriority(increments) + } + return producers, nil +} diff --git a/polygon/heimdall/waypoint.go b/polygon/heimdall/waypoint.go index 75604e2a3d4..33ae938736f 100644 --- a/polygon/heimdall/waypoint.go +++ b/polygon/heimdall/waypoint.go @@ -43,19 +43,12 @@ type WaypointFields struct { Timestamp uint64 `json:"timestamp"` } -func (a *WaypointFields) Length() uint64 { - return a.EndBlock.Uint64() - a.StartBlock.Uint64() + 1 +func (wf *WaypointFields) Length() uint64 { + return wf.EndBlock.Uint64() - wf.StartBlock.Uint64() + 1 } -func (a *WaypointFields) CmpRange(n uint64) int { - num := new(big.Int).SetUint64(n) - if num.Cmp(a.StartBlock) < 0 { - return -1 - } - if num.Cmp(a.EndBlock) > 0 { - return 1 - } - return 0 +func (wf *WaypointFields) CmpRange(n uint64) int { + return cmpBlockRange(wf.StartBlock.Uint64(), wf.EndBlock.Uint64(), n) } type Waypoints []Waypoint diff --git a/polygon/p2p/message_listener.go b/polygon/p2p/message_listener.go index 0d928dba223..8440771041a 100644 --- a/polygon/p2p/message_listener.go +++ b/polygon/p2p/message_listener.go @@ -241,5 +241,5 @@ func notifyInboundMessageObservers[TPacket any]( } func messageListenerLogPrefix(message string) string { - return fmt.Sprintf("[p2p.message.listener] %s", message) + return "[p2p.message.listener] " + message } diff --git a/polygon/polygoncommon/database.go b/polygon/polygoncommon/database.go index 0c0403147cc..151e433d9ef 100644 --- a/polygon/polygoncommon/database.go +++ b/polygon/polygoncommon/database.go @@ -23,47 +23,48 @@ import ( "github.com/c2h5oh/datasize" - "github.com/erigontech/erigon-lib/log/v3" - "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon-lib/kv/mdbx" + "github.com/erigontech/erigon-lib/log/v3" ) type Database struct { - db kv.RwDB - + db kv.RwDB dataDir string + label kv.Label + tableCfg kv.TableCfg openOnce sync.Once - - logger log.Logger + logger log.Logger } -func NewDatabase( - dataDir string, - logger log.Logger, -) *Database { - return &Database{dataDir: dataDir, logger: logger} +func NewDatabase(dataDir string, label kv.Label, tableCfg kv.TableCfg, logger log.Logger) *Database { + return &Database{ + dataDir: dataDir, + label: label, + tableCfg: tableCfg, + logger: logger, + } } -func (db *Database) open(ctx context.Context, label kv.Label, tableCfg kv.TableCfg) error { - dbPath := filepath.Join(db.dataDir, label.String()) - db.logger.Info("Opening Database", "label", label.String(), "path", dbPath) +func (db *Database) open(ctx context.Context) error { + dbPath := filepath.Join(db.dataDir, db.label.String()) + db.logger.Info("Opening Database", "label", db.label.String(), "path", dbPath) var err error db.db, err = mdbx.NewMDBX(db.logger). - Label(label). + Label(db.label). Path(dbPath). - WithTableCfg(func(_ kv.TableCfg) kv.TableCfg { return tableCfg }). 
+ WithTableCfg(func(_ kv.TableCfg) kv.TableCfg { return db.tableCfg }). MapSize(16 * datasize.GB). GrowthStep(16 * datasize.MB). Open(ctx) return err } -func (db *Database) OpenOnce(ctx context.Context, label kv.Label, tableCfg kv.TableCfg) error { +func (db *Database) OpenOnce(ctx context.Context) error { var err error db.openOnce.Do(func() { - err = db.open(ctx, label, tableCfg) + err = db.open(ctx) }) return err } diff --git a/polygon/sync/block_downloader.go b/polygon/sync/block_downloader.go index 456e8dbc00d..3cd31d0a73e 100644 --- a/polygon/sync/block_downloader.go +++ b/polygon/sync/block_downloader.go @@ -112,7 +112,7 @@ type blockDownloader struct { } func (d *blockDownloader) DownloadBlocksUsingCheckpoints(ctx context.Context, start uint64) (*types.Header, error) { - waypoints, err := d.heimdall.FetchCheckpointsFromBlock(ctx, start) + waypoints, err := d.heimdall.CheckpointsFromBlock(ctx, start) if err != nil { return nil, err } @@ -121,7 +121,7 @@ func (d *blockDownloader) DownloadBlocksUsingCheckpoints(ctx context.Context, st } func (d *blockDownloader) DownloadBlocksUsingMilestones(ctx context.Context, start uint64) (*types.Header, error) { - waypoints, err := d.heimdall.FetchMilestonesFromBlock(ctx, start) + waypoints, err := d.heimdall.MilestonesFromBlock(ctx, start) if err != nil { return nil, err } @@ -202,7 +202,7 @@ func (d *blockDownloader) downloadBlocksUsingWaypoints( "peerCount", len(peers), "maxWorkers", d.maxWorkers, "blk/s", fmt.Sprintf("%.2f", float64(blockCount.Load())/time.Since(fetchStartTime).Seconds()), - "bytes/s", fmt.Sprintf("%s", common.ByteCount(uint64(float64(blocksTotalSize.Load())/time.Since(fetchStartTime).Seconds()))), + "bytes/s", common.ByteCount(uint64(float64(blocksTotalSize.Load())/time.Since(fetchStartTime).Seconds())), ) blockCount.Store(0) diff --git a/polygon/sync/block_downloader_test.go b/polygon/sync/block_downloader_test.go index 646362b14e4..c23a9469a10 100644 --- a/polygon/sync/block_downloader_test.go +++ b/polygon/sync/block_downloader_test.go @@ -236,7 +236,7 @@ func (hdt blockDownloaderTest) defaultInsertBlocksMock(capture *[]*types.Block) func TestBlockDownloaderDownloadBlocksUsingMilestones(t *testing.T) { test := newBlockDownloaderTest(t) test.heimdall.EXPECT(). - FetchMilestonesFromBlock(gomock.Any(), gomock.Any()). + MilestonesFromBlock(gomock.Any(), gomock.Any()). Return(test.fakeMilestones(4), nil). Times(1) test.p2pService.EXPECT(). @@ -272,7 +272,7 @@ func TestBlockDownloaderDownloadBlocksUsingMilestones(t *testing.T) { func TestBlockDownloaderDownloadBlocksUsingCheckpoints(t *testing.T) { test := newBlockDownloaderTest(t) test.heimdall.EXPECT(). - FetchCheckpointsFromBlock(gomock.Any(), gomock.Any()). + CheckpointsFromBlock(gomock.Any(), gomock.Any()). Return(test.fakeCheckpoints(8), nil). Times(1) test.p2pService.EXPECT(). @@ -323,7 +323,7 @@ func TestBlockDownloaderDownloadBlocksWhenInvalidHeadersThenPenalizePeerAndReDow }, }) test.heimdall.EXPECT(). - FetchCheckpointsFromBlock(gomock.Any(), gomock.Any()). + CheckpointsFromBlock(gomock.Any(), gomock.Any()). Return(test.fakeCheckpoints(6), nil). Times(1) test.p2pService.EXPECT(). @@ -378,7 +378,7 @@ func TestBlockDownloaderDownloadBlocksWhenInvalidHeadersThenPenalizePeerAndReDow func TestBlockDownloaderDownloadBlocksWhenZeroPeersTriesAgain(t *testing.T) { test := newBlockDownloaderTest(t) test.heimdall.EXPECT(). - FetchCheckpointsFromBlock(gomock.Any(), gomock.Any()). + CheckpointsFromBlock(gomock.Any(), gomock.Any()). Return(test.fakeCheckpoints(8), nil). 
Times(1) test.p2pService.EXPECT(). @@ -427,7 +427,7 @@ func TestBlockDownloaderDownloadBlocksWhenInvalidBodiesThenPenalizePeerAndReDown }, }) test.heimdall.EXPECT(). - FetchCheckpointsFromBlock(gomock.Any(), gomock.Any()). + CheckpointsFromBlock(gomock.Any(), gomock.Any()). Return(test.fakeCheckpoints(6), nil). Times(1) test.p2pService.EXPECT(). @@ -482,7 +482,7 @@ func TestBlockDownloaderDownloadBlocksWhenInvalidBodiesThenPenalizePeerAndReDown func TestBlockDownloaderDownloadBlocksWhenMissingBodiesThenPenalizePeerAndReDownload(t *testing.T) { test := newBlockDownloaderTestWithOpts(t, blockDownloaderTestOpts{}) test.heimdall.EXPECT(). - FetchCheckpointsFromBlock(gomock.Any(), gomock.Any()). + CheckpointsFromBlock(gomock.Any(), gomock.Any()). Return(test.fakeCheckpoints(6), nil). Times(1) test.p2pService.EXPECT(). @@ -545,7 +545,7 @@ func TestBlockDownloaderDownloadBlocksRespectsMaxWorkers(t *testing.T) { maxWorkers: 1, }) test.heimdall.EXPECT(). - FetchCheckpointsFromBlock(gomock.Any(), gomock.Any()). + CheckpointsFromBlock(gomock.Any(), gomock.Any()). Return(test.fakeCheckpoints(2), nil). Times(1) test.p2pService.EXPECT(). @@ -629,7 +629,7 @@ func TestBlockDownloaderDownloadBlocksRespectsBlockLimit(t *testing.T) { blockLimit: tc.blockLimit, }) test.heimdall.EXPECT(). - FetchCheckpointsFromBlock(gomock.Any(), gomock.Any()). + CheckpointsFromBlock(gomock.Any(), gomock.Any()). Return(test.fakeCheckpoints(tc.numCheckpoints), nil). Times(1) test.p2pService.EXPECT(). diff --git a/polygon/sync/validator_set_interface.go b/polygon/sync/block_producers_reader.go similarity index 69% rename from polygon/sync/validator_set_interface.go rename to polygon/sync/block_producers_reader.go index 5ae820766c5..25d72bd24e0 100644 --- a/polygon/sync/validator_set_interface.go +++ b/polygon/sync/block_producers_reader.go @@ -17,13 +17,11 @@ package sync import ( - libcommon "github.com/erigontech/erigon-lib/common" - "github.com/erigontech/erigon/polygon/bor" + "context" + + "github.com/erigontech/erigon/polygon/bor/valset" ) -// valset.ValidatorSet abstraction for unit tests -type validatorSetInterface interface { - bor.ValidateHeaderTimeSignerSuccessionNumber - IncrementProposerPriority(times int) - Difficulty(signer libcommon.Address) (uint64, error) +type blockProducersReader interface { + Producers(ctx context.Context, blockNum uint64) (*valset.ValidatorSet, error) } diff --git a/polygon/sync/canonical_chain_builder.go b/polygon/sync/canonical_chain_builder.go index 0deae306fb7..4a1c7ec5b8c 100644 --- a/polygon/sync/canonical_chain_builder.go +++ b/polygon/sync/canonical_chain_builder.go @@ -18,6 +18,7 @@ package sync import ( "bytes" + "context" "errors" "fmt" "slices" @@ -36,7 +37,7 @@ type CanonicalChainBuilder interface { Root() *types.Header HeadersInRange(start uint64, count uint64) []*types.Header Prune(newRootNum uint64) error - Connect(headers []*types.Header) error + Connect(ctx context.Context, headers []*types.Header) error } type producerSlotIndex uint64 @@ -51,25 +52,29 @@ type forkTreeNode struct { totalDifficulty uint64 } -type canonicalChainBuilder struct { - root *forkTreeNode - tip *forkTreeNode +type difficultyCalculator interface { + HeaderDifficulty(ctx context.Context, header *types.Header) (uint64, error) +} - difficultyCalc DifficultyCalculator - headerValidator HeaderValidator - spansCache *SpansCache +type headerValidator interface { + ValidateHeader(ctx context.Context, header *types.Header, parent *types.Header, now time.Time) error +} + +type canonicalChainBuilder struct { + 
root *forkTreeNode + tip *forkTreeNode + difficultyCalc difficultyCalculator + headerValidator headerValidator } func NewCanonicalChainBuilder( root *types.Header, - difficultyCalc DifficultyCalculator, - headerValidator HeaderValidator, - spansCache *SpansCache, + difficultyCalc difficultyCalculator, + headerValidator headerValidator, ) CanonicalChainBuilder { ccb := &canonicalChainBuilder{ difficultyCalc: difficultyCalc, headerValidator: headerValidator, - spansCache: spansCache, } ccb.Reset(root) return ccb @@ -82,9 +87,6 @@ func (ccb *canonicalChainBuilder) Reset(root *types.Header) { headerHash: root.Hash(), } ccb.tip = ccb.root - if ccb.spansCache != nil { - ccb.spansCache.Prune(root.Number.Uint64()) - } } // depth-first search @@ -163,11 +165,8 @@ func (ccb *canonicalChainBuilder) Prune(newRootNum uint64) error { for newRoot.header.Number.Uint64() > newRootNum { newRoot = newRoot.parent } - ccb.root = newRoot - if ccb.spansCache != nil { - ccb.spansCache.Prune(newRootNum) - } + ccb.root = newRoot return nil } @@ -196,7 +195,7 @@ func (ccb *canonicalChainBuilder) updateTipIfNeeded(tipCandidate *forkTreeNode) } } -func (ccb *canonicalChainBuilder) Connect(headers []*types.Header) error { +func (ccb *canonicalChainBuilder) Connect(ctx context.Context, headers []*types.Header) error { if (len(headers) > 0) && (headers[0].Number != nil) && (headers[0].Number.Cmp(ccb.root.header.Number) == 0) { headers = headers[1:] } @@ -249,13 +248,11 @@ func (ccb *canonicalChainBuilder) Connect(headers []*types.Header) error { return errors.New("canonicalChainBuilder.Connect: invalid header.Number") } - if ccb.headerValidator != nil { - if err := ccb.headerValidator.ValidateHeader(header, parent.header, time.Now()); err != nil { - return fmt.Errorf("canonicalChainBuilder.Connect: invalid header error %w", err) - } + if err := ccb.headerValidator.ValidateHeader(ctx, header, parent.header, time.Now()); err != nil { + return fmt.Errorf("canonicalChainBuilder.Connect: invalid header error %w", err) } - difficulty, err := ccb.difficultyCalc.HeaderDifficulty(header) + difficulty, err := ccb.difficultyCalc.HeaderDifficulty(ctx, header) if err != nil { return fmt.Errorf("canonicalChainBuilder.Connect: header difficulty error %w", err) } diff --git a/polygon/sync/canonical_chain_builder_factory.go b/polygon/sync/canonical_chain_builder_factory.go index 2741b569965..42b02f257e1 100644 --- a/polygon/sync/canonical_chain_builder_factory.go +++ b/polygon/sync/canonical_chain_builder_factory.go @@ -32,26 +32,32 @@ type CanonicalChainBuilderFactory func(root *types.Header) CanonicalChainBuilder func NewCanonicalChainBuilderFactory( chainConfig *chain.Config, borConfig *borcfg.BorConfig, - spansCache *SpansCache, + blockProducersReader blockProducersReader, ) CanonicalChainBuilderFactory { signaturesCache, err := lru.NewARC[common.Hash, common.Address](InMemorySignatures) if err != nil { panic(err) } - difficultyCalculator := NewDifficultyCalculator(borConfig, spansCache, nil, signaturesCache) - headerTimeValidator := NewHeaderTimeValidator(borConfig, spansCache, nil, signaturesCache) - headerValidator := NewHeaderValidator(chainConfig, borConfig, headerTimeValidator) + difficultyCalculator := &DifficultyCalculator{ + borConfig: borConfig, + signaturesCache: signaturesCache, + blockProducersReader: blockProducersReader, + } + + headerTimeValidator := &HeaderTimeValidator{ + borConfig: borConfig, + signaturesCache: signaturesCache, + blockProducersReader: blockProducersReader, + } + + headerValidator := 
&HeaderValidator{ + chainConfig: chainConfig, + borConfig: borConfig, + headerTimeValidator: headerTimeValidator, + } return func(root *types.Header) CanonicalChainBuilder { - if spansCache.IsEmpty() { - panic("sync.Service: ccBuilderFactory - spansCache is empty") - } - return NewCanonicalChainBuilder( - root, - difficultyCalculator, - headerValidator, - spansCache, - ) + return NewCanonicalChainBuilder(root, difficultyCalculator, headerValidator) } } diff --git a/polygon/sync/canonical_chain_builder_mock.go b/polygon/sync/canonical_chain_builder_mock.go index 9ebaf9e5c75..d6259d7cc13 100644 --- a/polygon/sync/canonical_chain_builder_mock.go +++ b/polygon/sync/canonical_chain_builder_mock.go @@ -10,6 +10,7 @@ package sync import ( + context "context" reflect "reflect" common "github.com/erigontech/erigon-lib/common" @@ -41,17 +42,17 @@ func (m *MockCanonicalChainBuilder) EXPECT() *MockCanonicalChainBuilderMockRecor } // Connect mocks base method. -func (m *MockCanonicalChainBuilder) Connect(arg0 []*types.Header) error { +func (m *MockCanonicalChainBuilder) Connect(arg0 context.Context, arg1 []*types.Header) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Connect", arg0) + ret := m.ctrl.Call(m, "Connect", arg0, arg1) ret0, _ := ret[0].(error) return ret0 } // Connect indicates an expected call of Connect. -func (mr *MockCanonicalChainBuilderMockRecorder) Connect(arg0 any) *MockCanonicalChainBuilderConnectCall { +func (mr *MockCanonicalChainBuilderMockRecorder) Connect(arg0, arg1 any) *MockCanonicalChainBuilderConnectCall { mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connect", reflect.TypeOf((*MockCanonicalChainBuilder)(nil).Connect), arg0) + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connect", reflect.TypeOf((*MockCanonicalChainBuilder)(nil).Connect), arg0, arg1) return &MockCanonicalChainBuilderConnectCall{Call: call} } @@ -67,13 +68,13 @@ func (c *MockCanonicalChainBuilderConnectCall) Return(arg0 error) *MockCanonical } // Do rewrite *gomock.Call.Do -func (c *MockCanonicalChainBuilderConnectCall) Do(f func([]*types.Header) error) *MockCanonicalChainBuilderConnectCall { +func (c *MockCanonicalChainBuilderConnectCall) Do(f func(context.Context, []*types.Header) error) *MockCanonicalChainBuilderConnectCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockCanonicalChainBuilderConnectCall) DoAndReturn(f func([]*types.Header) error) *MockCanonicalChainBuilderConnectCall { +func (c *MockCanonicalChainBuilderConnectCall) DoAndReturn(f func(context.Context, []*types.Header) error) *MockCanonicalChainBuilderConnectCall { c.Call = c.Call.DoAndReturn(f) return c } diff --git a/polygon/sync/canonical_chain_builder_test.go b/polygon/sync/canonical_chain_builder_test.go index a7d18b702eb..b306d4a8dc3 100644 --- a/polygon/sync/canonical_chain_builder_test.go +++ b/polygon/sync/canonical_chain_builder_test.go @@ -18,28 +18,32 @@ package sync import ( "bytes" + "context" "errors" "math/big" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/erigontech/erigon/core/types" - "github.com/erigontech/erigon/polygon/heimdall" ) -type testDifficultyCalculator struct { -} +type mockDifficultyCalculator struct{} -func (*testDifficultyCalculator) HeaderDifficulty(header *types.Header) (uint64, error) { +func (*mockDifficultyCalculator) HeaderDifficulty(_ context.Context, header *types.Header) (uint64, error) { if header.Difficulty == nil { return 0, 
errors.New("unset header.Difficulty") } return header.Difficulty.Uint64(), nil } -func (*testDifficultyCalculator) SetSpan(*heimdall.Span) {} +type mockHeaderValidator struct{} + +func (v *mockHeaderValidator) ValidateHeader(_ context.Context, _ *types.Header, _ *types.Header, _ time.Time) error { + return nil +} func makeRoot() *types.Header { return &types.Header{ @@ -48,8 +52,8 @@ func makeRoot() *types.Header { } func makeCCB(root *types.Header) CanonicalChainBuilder { - difficultyCalc := testDifficultyCalculator{} - builder := NewCanonicalChainBuilder(root, &difficultyCalc, nil, nil) + difficultyCalc := mockDifficultyCalculator{} + builder := NewCanonicalChainBuilder(root, &difficultyCalc, &mockHeaderValidator{}) return builder } @@ -95,6 +99,7 @@ func (test *connectCCBTest) makeHeaders(parent *types.Header, difficulties []uin } func (test *connectCCBTest) testConnect( + ctx context.Context, headers []*types.Header, expectedTip *types.Header, expectedHeaders []*types.Header, @@ -102,7 +107,7 @@ func (test *connectCCBTest) testConnect( t := test.t builder := test.builder - err := builder.Connect(headers) + err := builder.Connect(ctx, headers) require.Nil(t, err) newTip := builder.Tip() @@ -131,35 +136,45 @@ func TestCCBEmptyState(t *testing.T) { } func TestCCBConnectEmpty(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) test, root := newConnectCCBTest(t) - test.testConnect([]*types.Header{}, root, []*types.Header{root}) + test.testConnect(ctx, []*types.Header{}, root, []*types.Header{root}) } // connect 0 to 0 func TestCCBConnectRoot(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) test, root := newConnectCCBTest(t) - test.testConnect([]*types.Header{root}, root, []*types.Header{root}) + test.testConnect(ctx, []*types.Header{root}, root, []*types.Header{root}) } // connect 1 to 0 func TestCCBConnectOneToRoot(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) test, root := newConnectCCBTest(t) newTip := test.makeHeader(root, 1) - test.testConnect([]*types.Header{newTip}, newTip, []*types.Header{root, newTip}) + test.testConnect(ctx, []*types.Header{newTip}, newTip, []*types.Header{root, newTip}) } // connect 1-2-3 to 0 func TestCCBConnectSomeToRoot(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) test, root := newConnectCCBTest(t) headers := test.makeHeaders(root, []uint64{1, 2, 3}) - test.testConnect(headers, headers[len(headers)-1], append([]*types.Header{root}, headers...)) + test.testConnect(ctx, headers, headers[len(headers)-1], append([]*types.Header{root}, headers...)) } // connect any subset of 0-1-2-3 to 0-1-2-3 func TestCCBConnectOverlapsFull(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) test, root := newConnectCCBTest(t) headers := test.makeHeaders(root, []uint64{1, 2, 3}) - require.Nil(t, test.builder.Connect(headers)) + require.Nil(t, test.builder.Connect(ctx, headers)) expectedTip := headers[len(headers)-1] expectedHeaders := append([]*types.Header{root}, headers...) 
@@ -167,60 +182,70 @@ func TestCCBConnectOverlapsFull(t *testing.T) { for subsetLen := 1; subsetLen <= len(headers); subsetLen++ { for i := 0; i+subsetLen-1 < len(expectedHeaders); i++ { headers := expectedHeaders[i : i+subsetLen] - test.testConnect(headers, expectedTip, expectedHeaders) + test.testConnect(ctx, headers, expectedTip, expectedHeaders) } } } // connect 0-1 to 0 func TestCCBConnectOverlapPartialOne(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) test, root := newConnectCCBTest(t) newTip := test.makeHeader(root, 1) - test.testConnect([]*types.Header{root, newTip}, newTip, []*types.Header{root, newTip}) + test.testConnect(ctx, []*types.Header{root, newTip}, newTip, []*types.Header{root, newTip}) } // connect 2-3-4-5 to 0-1-2-3 func TestCCBConnectOverlapPartialSome(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) test, root := newConnectCCBTest(t) headers := test.makeHeaders(root, []uint64{1, 2, 3}) - require.Nil(t, test.builder.Connect(headers)) + require.Nil(t, test.builder.Connect(ctx, headers)) overlapHeaders := append(headers[1:], test.makeHeaders(headers[len(headers)-1], []uint64{4, 5})...) expectedTip := overlapHeaders[len(overlapHeaders)-1] expectedHeaders := append([]*types.Header{root, headers[0]}, overlapHeaders...) - test.testConnect(overlapHeaders, expectedTip, expectedHeaders) + test.testConnect(ctx, overlapHeaders, expectedTip, expectedHeaders) } // connect 2 to 0-1 at 0, then connect 10 to 0-1 func TestCCBConnectAltMainBecomesFork(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) test, root := newConnectCCBTest(t) header1 := test.makeHeader(root, 1) header2 := test.makeHeader(root, 2) - require.Nil(t, test.builder.Connect([]*types.Header{header1})) + require.Nil(t, test.builder.Connect(ctx, []*types.Header{header1})) // the tip changes to header2 - test.testConnect([]*types.Header{header2}, header2, []*types.Header{root, header2}) + test.testConnect(ctx, []*types.Header{header2}, header2, []*types.Header{root, header2}) header10 := test.makeHeader(header1, 10) - test.testConnect([]*types.Header{header10}, header10, []*types.Header{root, header1, header10}) + test.testConnect(ctx, []*types.Header{header10}, header10, []*types.Header{root, header1, header10}) } // connect 1 to 0-2 at 0, then connect 10 to 0-1 func TestCCBConnectAltForkBecomesMain(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) test, root := newConnectCCBTest(t) header1 := test.makeHeader(root, 1) header2 := test.makeHeader(root, 2) - require.Nil(t, test.builder.Connect([]*types.Header{header2})) + require.Nil(t, test.builder.Connect(ctx, []*types.Header{header2})) // the tip stays at header2 - test.testConnect([]*types.Header{header1}, header2, []*types.Header{root, header2}) + test.testConnect(ctx, []*types.Header{header1}, header2, []*types.Header{root, header2}) header10 := test.makeHeader(header1, 10) - test.testConnect([]*types.Header{header10}, header10, []*types.Header{root, header1, header10}) + test.testConnect(ctx, []*types.Header{header10}, header10, []*types.Header{root, header1, header10}) } // connect 10 and 11 to 1, then 20 and 22 to 2 one by one starting from a [0-1, 0-2] tree func TestCCBConnectAltForksAtLevel2(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) test, root := newConnectCCBTest(t) header1 := test.makeHeader(root, 1) header10 := 
test.makeHeader(header1, 10) @@ -228,18 +253,20 @@ func TestCCBConnectAltForksAtLevel2(t *testing.T) { header2 := test.makeHeader(root, 2) header20 := test.makeHeader(header2, 20) header22 := test.makeHeader(header2, 22) - require.Nil(t, test.builder.Connect([]*types.Header{header1})) - require.Nil(t, test.builder.Connect([]*types.Header{header2})) + require.Nil(t, test.builder.Connect(ctx, []*types.Header{header1})) + require.Nil(t, test.builder.Connect(ctx, []*types.Header{header2})) - test.testConnect([]*types.Header{header10}, header10, []*types.Header{root, header1, header10}) - test.testConnect([]*types.Header{header11}, header11, []*types.Header{root, header1, header11}) - test.testConnect([]*types.Header{header20}, header20, []*types.Header{root, header2, header20}) - test.testConnect([]*types.Header{header22}, header22, []*types.Header{root, header2, header22}) + test.testConnect(ctx, []*types.Header{header10}, header10, []*types.Header{root, header1, header10}) + test.testConnect(ctx, []*types.Header{header11}, header11, []*types.Header{root, header1, header11}) + test.testConnect(ctx, []*types.Header{header20}, header20, []*types.Header{root, header2, header20}) + test.testConnect(ctx, []*types.Header{header22}, header22, []*types.Header{root, header2, header22}) } // connect 11 and 10 to 1, then 22 and 20 to 2 one by one starting from a [0-1, 0-2] tree // then connect 100 to 10, and 200 to 20 func TestCCBConnectAltForksAtLevel2Reverse(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) test, root := newConnectCCBTest(t) header1 := test.makeHeader(root, 1) header10 := test.makeHeader(header1, 10) @@ -249,14 +276,14 @@ func TestCCBConnectAltForksAtLevel2Reverse(t *testing.T) { header22 := test.makeHeader(header2, 22) header100 := test.makeHeader(header10, 100) header200 := test.makeHeader(header20, 200) - require.Nil(t, test.builder.Connect([]*types.Header{header1})) - require.Nil(t, test.builder.Connect([]*types.Header{header2})) + require.Nil(t, test.builder.Connect(ctx, []*types.Header{header1})) + require.Nil(t, test.builder.Connect(ctx, []*types.Header{header2})) - test.testConnect([]*types.Header{header11}, header11, []*types.Header{root, header1, header11}) - test.testConnect([]*types.Header{header10}, header11, []*types.Header{root, header1, header11}) - test.testConnect([]*types.Header{header22}, header22, []*types.Header{root, header2, header22}) - test.testConnect([]*types.Header{header20}, header22, []*types.Header{root, header2, header22}) + test.testConnect(ctx, []*types.Header{header11}, header11, []*types.Header{root, header1, header11}) + test.testConnect(ctx, []*types.Header{header10}, header11, []*types.Header{root, header1, header11}) + test.testConnect(ctx, []*types.Header{header22}, header22, []*types.Header{root, header2, header22}) + test.testConnect(ctx, []*types.Header{header20}, header22, []*types.Header{root, header2, header22}) - test.testConnect([]*types.Header{header100}, header100, []*types.Header{root, header1, header10, header100}) - test.testConnect([]*types.Header{header200}, header200, []*types.Header{root, header2, header20, header200}) + test.testConnect(ctx, []*types.Header{header100}, header100, []*types.Header{root, header1, header10, header100}) + test.testConnect(ctx, []*types.Header{header200}, header200, []*types.Header{root, header2, header20, header200}) } diff --git a/polygon/sync/difficulty.go b/polygon/sync/difficulty.go index 4f087009913..c1bfe880457 100644 --- 
a/polygon/sync/difficulty.go +++ b/polygon/sync/difficulty.go @@ -17,7 +17,7 @@ package sync import ( - "fmt" + "context" lru "github.com/hashicorp/golang-lru/arc/v2" @@ -25,74 +25,32 @@ import ( "github.com/erigontech/erigon/core/types" "github.com/erigontech/erigon/polygon/bor" "github.com/erigontech/erigon/polygon/bor/borcfg" - "github.com/erigontech/erigon/polygon/bor/valset" ) -type DifficultyCalculator interface { - HeaderDifficulty(header *types.Header) (uint64, error) +type DifficultyCalculator struct { + borConfig *borcfg.BorConfig + signaturesCache *lru.ARCCache[libcommon.Hash, libcommon.Address] + blockProducersReader blockProducersReader } -type difficultyCalculator struct { - borConfig *borcfg.BorConfig - spans *SpansCache - validatorSetFactory func(headerNum uint64) validatorSetInterface - signaturesCache *lru.ARCCache[libcommon.Hash, libcommon.Address] -} - -func NewDifficultyCalculator( - borConfig *borcfg.BorConfig, - spans *SpansCache, - validatorSetFactory func(headerNum uint64) validatorSetInterface, - signaturesCache *lru.ARCCache[libcommon.Hash, libcommon.Address], -) DifficultyCalculator { - if signaturesCache == nil { - var err error - signaturesCache, err = lru.NewARC[libcommon.Hash, libcommon.Address](InMemorySignatures) - if err != nil { - panic(err) - } - } - - calc := difficultyCalculator{ - borConfig: borConfig, - spans: spans, - validatorSetFactory: validatorSetFactory, - signaturesCache: signaturesCache, - } - - if validatorSetFactory == nil { - calc.validatorSetFactory = calc.makeValidatorSet - } - - return &calc -} - -func (calc *difficultyCalculator) makeValidatorSet(headerNum uint64) validatorSetInterface { - span := calc.spans.SpanAt(headerNum) - if span == nil { - return nil - } - return valset.NewValidatorSet(span.ValidatorSet.Validators) -} - -func (calc *difficultyCalculator) HeaderDifficulty(header *types.Header) (uint64, error) { +func (calc *DifficultyCalculator) HeaderDifficulty(ctx context.Context, header *types.Header) (uint64, error) { signer, err := bor.Ecrecover(header, calc.signaturesCache, calc.borConfig) if err != nil { return 0, err } - return calc.signerDifficulty(signer, header.Number.Uint64()) -} -func (calc *difficultyCalculator) signerDifficulty(signer libcommon.Address, headerNum uint64) (uint64, error) { - validatorSet := calc.validatorSetFactory(headerNum) - if validatorSet == nil { - return 0, fmt.Errorf("difficultyCalculator.signerDifficulty: no span at %d", headerNum) - } + return calc.signerDifficulty(ctx, signer, header.Number.Uint64()) +} - sprintNum := calc.borConfig.CalculateSprintNumber(headerNum) - if sprintNum > 0 { - validatorSet.IncrementProposerPriority(int(sprintNum)) +func (calc *DifficultyCalculator) signerDifficulty( + ctx context.Context, + signer libcommon.Address, + headerNum uint64, +) (uint64, error) { + producers, err := calc.blockProducersReader.Producers(ctx, headerNum) + if err != nil { + return 0, err } - return validatorSet.Difficulty(signer) + return producers.Difficulty(signer) } diff --git a/polygon/sync/difficulty_test.go b/polygon/sync/difficulty_test.go deleted file mode 100644 index 4c1ba13a50f..00000000000 --- a/polygon/sync/difficulty_test.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2024 The Erigon Authors -// This file is part of Erigon. 
-// -// Erigon is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// Erigon is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with Erigon. If not, see <http://www.gnu.org/licenses/>. - -package sync - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/stretchr/testify/require" - - libcommon "github.com/erigontech/erigon-lib/common" - "github.com/erigontech/erigon/core/types" - "github.com/erigontech/erigon/polygon/bor/borcfg" -) - -type testValidatorSetInterface struct { - signers []libcommon.Address - sprintNum int -} - -func (v *testValidatorSetInterface) IncrementProposerPriority(times int) { - v.sprintNum = times -} - -func (v *testValidatorSetInterface) GetSignerSuccessionNumber(signer libcommon.Address, number uint64) (int, error) { - var i int - for (i < len(v.signers)) && (v.signers[i] != signer) { - i++ - } - - sprintOffset := v.sprintNum % len(v.signers) - var delta int - if i >= sprintOffset { - delta = i - sprintOffset - } else { - delta = i + len(v.signers) - sprintOffset - } - - return delta, nil -} - -func (v *testValidatorSetInterface) Difficulty(signer libcommon.Address) (uint64, error) { - delta, err := v.GetSignerSuccessionNumber(signer, 0) - if err != nil { - return 0, nil - } - return uint64(len(v.signers) - delta), nil -} - -func TestSignerDifficulty(t *testing.T) { - borConfig := borcfg.BorConfig{ - Sprint: map[string]uint64{"0": 16}, - } - signers := []libcommon.Address{ - libcommon.HexToAddress("00"), - libcommon.HexToAddress("01"), - libcommon.HexToAddress("02"), - } - validatorSetFactory := func(uint64) validatorSetInterface { return &testValidatorSetInterface{signers: signers} } - calc := NewDifficultyCalculator(&borConfig, nil, validatorSetFactory, nil).(*difficultyCalculator) - - var d uint64 - - // sprint 0 - d, _ = calc.signerDifficulty(signers[0], 0) - assert.Equal(t, uint64(3), d) - - d, _ = calc.signerDifficulty(signers[0], 1) - assert.Equal(t, uint64(3), d) - - d, _ = calc.signerDifficulty(signers[0], 15) - assert.Equal(t, uint64(3), d) - - d, _ = calc.signerDifficulty(signers[1], 0) - assert.Equal(t, uint64(2), d) - - d, _ = calc.signerDifficulty(signers[1], 1) - assert.Equal(t, uint64(2), d) - - d, _ = calc.signerDifficulty(signers[1], 15) - assert.Equal(t, uint64(2), d) - - d, _ = calc.signerDifficulty(signers[2], 0) - assert.Equal(t, uint64(1), d) - - d, _ = calc.signerDifficulty(signers[2], 1) - assert.Equal(t, uint64(1), d) - - d, _ = calc.signerDifficulty(signers[2], 15) - assert.Equal(t, uint64(1), d) - - // sprint 1 - d, _ = calc.signerDifficulty(signers[1], 16) - assert.Equal(t, uint64(3), d) - - d, _ = calc.signerDifficulty(signers[2], 16) - assert.Equal(t, uint64(2), d) - - d, _ = calc.signerDifficulty(signers[0], 16) - assert.Equal(t, uint64(1), d) - - // sprint 2 - d, _ = calc.signerDifficulty(signers[2], 32) - assert.Equal(t, uint64(3), d) - - d, _ = calc.signerDifficulty(signers[0], 32) - assert.Equal(t, uint64(2), d) - - d, _ = calc.signerDifficulty(signers[1], 32) - assert.Equal(t, uint64(1), d) - - // sprint 3 - d, _ = calc.signerDifficulty(signers[0], 48) -
assert.Equal(t, uint64(3), d) - - d, _ = calc.signerDifficulty(signers[1], 48) - assert.Equal(t, uint64(2), d) - - d, _ = calc.signerDifficulty(signers[2], 48) - assert.Equal(t, uint64(1), d) -} - -func TestHeaderDifficultyNoSignature(t *testing.T) { - borConfig := borcfg.BorConfig{} - spans := NewSpansCache() - calc := NewDifficultyCalculator(&borConfig, spans, nil, nil) - - _, err := calc.HeaderDifficulty(new(types.Header)) - require.ErrorContains(t, err, "signature suffix missing") -} - -func TestSignerDifficultyNoSpan(t *testing.T) { - borConfig := borcfg.BorConfig{} - spans := NewSpansCache() - calc := NewDifficultyCalculator(&borConfig, spans, nil, nil).(*difficultyCalculator) - - _, err := calc.signerDifficulty(libcommon.HexToAddress("00"), 0) - require.ErrorContains(t, err, "no span") -} diff --git a/polygon/sync/execution_client.go b/polygon/sync/execution_client.go index 5194f3f52c9..2558f99ea80 100644 --- a/polygon/sync/execution_client.go +++ b/polygon/sync/execution_client.go @@ -19,13 +19,12 @@ package sync import ( "context" "fmt" - "runtime" "time" "google.golang.org/protobuf/types/known/emptypb" "github.com/erigontech/erigon-lib/gointerfaces" - executionproto "github.com/erigontech/erigon-lib/gointerfaces/executionproto" + "github.com/erigontech/erigon-lib/gointerfaces/executionproto" "github.com/erigontech/erigon/turbo/execution/eth1/eth1_utils" "github.com/erigontech/erigon/core/types" @@ -76,11 +75,6 @@ func (e *executionClient) InsertBlocks(ctx context.Context, blocks []*types.Bloc } func (e *executionClient) UpdateForkChoice(ctx context.Context, tip *types.Header, finalizedHeader *types.Header) error { - // TODO - not ready for execution - missing state sync event and span data - uncomment once ready - if runtime.GOOS != "TODO" { - return nil - } - tipHash := tip.Hash() const timeout = 5 * time.Second diff --git a/polygon/sync/header_time_validator.go b/polygon/sync/header_time_validator.go index 1721df8ba1c..acd32126c96 100644 --- a/polygon/sync/header_time_validator.go +++ b/polygon/sync/header_time_validator.go @@ -17,7 +17,7 @@ package sync import ( - "fmt" + "context" "time" lru "github.com/hashicorp/golang-lru/arc/v2" @@ -26,67 +26,25 @@ import ( "github.com/erigontech/erigon/core/types" "github.com/erigontech/erigon/polygon/bor" "github.com/erigontech/erigon/polygon/bor/borcfg" - "github.com/erigontech/erigon/polygon/bor/valset" ) -type HeaderTimeValidator interface { - ValidateHeaderTime(header *types.Header, now time.Time, parent *types.Header) error +type HeaderTimeValidator struct { + borConfig *borcfg.BorConfig + signaturesCache *lru.ARCCache[libcommon.Hash, libcommon.Address] + blockProducersReader blockProducersReader } -type headerTimeValidator struct { - borConfig *borcfg.BorConfig - spans *SpansCache - validatorSetFactory func(headerNum uint64) validatorSetInterface - signaturesCache *lru.ARCCache[libcommon.Hash, libcommon.Address] -} - -func NewHeaderTimeValidator( - borConfig *borcfg.BorConfig, - spans *SpansCache, - validatorSetFactory func(headerNum uint64) validatorSetInterface, - signaturesCache *lru.ARCCache[libcommon.Hash, libcommon.Address], -) HeaderTimeValidator { - if signaturesCache == nil { - var err error - signaturesCache, err = lru.NewARC[libcommon.Hash, libcommon.Address](InMemorySignatures) - if err != nil { - panic(err) - } - } - - htv := headerTimeValidator{ - borConfig: borConfig, - spans: spans, - validatorSetFactory: validatorSetFactory, - signaturesCache: signaturesCache, - } - - if validatorSetFactory == nil { - 
htv.validatorSetFactory = htv.makeValidatorSet - } - - return &htv -} - -func (htv *headerTimeValidator) makeValidatorSet(headerNum uint64) validatorSetInterface { - span := htv.spans.SpanAt(headerNum) - if span == nil { - return nil - } - return valset.NewValidatorSet(span.ValidatorSet.Validators) -} - -func (htv *headerTimeValidator) ValidateHeaderTime(header *types.Header, now time.Time, parent *types.Header) error { +func (htv *HeaderTimeValidator) ValidateHeaderTime( + ctx context.Context, + header *types.Header, + now time.Time, + parent *types.Header, +) error { headerNum := header.Number.Uint64() - validatorSet := htv.validatorSetFactory(headerNum) - if validatorSet == nil { - return fmt.Errorf("headerTimeValidator.ValidateHeaderTime: no span at %d", headerNum) - } - - sprintNum := htv.borConfig.CalculateSprintNumber(headerNum) - if sprintNum > 0 { - validatorSet.IncrementProposerPriority(int(sprintNum)) + producers, err := htv.blockProducersReader.Producers(ctx, headerNum) + if err != nil { + return err } - return bor.ValidateHeaderTime(header, now, parent, validatorSet, htv.borConfig, htv.signaturesCache) + return bor.ValidateHeaderTime(header, now, parent, producers, htv.borConfig, htv.signaturesCache) } diff --git a/polygon/sync/header_validator.go b/polygon/sync/header_validator.go index 9935bd75f72..074eda09b76 100644 --- a/polygon/sync/header_validator.go +++ b/polygon/sync/header_validator.go @@ -17,6 +17,7 @@ package sync import ( + "context" "time" "github.com/erigontech/erigon-lib/chain" @@ -25,29 +26,18 @@ import ( "github.com/erigontech/erigon/polygon/bor/borcfg" ) -type HeaderValidator interface { - ValidateHeader(header *types.Header, parent *types.Header, now time.Time) error -} - -type headerValidator struct { +type HeaderValidator struct { chainConfig *chain.Config borConfig *borcfg.BorConfig - headerTimeValidator HeaderTimeValidator + headerTimeValidator *HeaderTimeValidator } -func NewHeaderValidator( - chainConfig *chain.Config, - borConfig *borcfg.BorConfig, - headerTimeValidator HeaderTimeValidator, -) HeaderValidator { - return &headerValidator{ - chainConfig: chainConfig, - borConfig: borConfig, - headerTimeValidator: headerTimeValidator, - } -} - -func (hv *headerValidator) ValidateHeader(header *types.Header, parent *types.Header, now time.Time) error { +func (hv *HeaderValidator) ValidateHeader( + ctx context.Context, + header *types.Header, + parent *types.Header, + now time.Time, +) error { if err := bor.ValidateHeaderUnusedFields(header); err != nil { return err } @@ -59,14 +49,13 @@ func (hv *headerValidator) ValidateHeader(header *types.Header, parent *types.He if err := bor.ValidateHeaderExtraLength(header.Extra); err != nil { return err } + if err := bor.ValidateHeaderSprintValidators(header, hv.borConfig); err != nil { return err } - if hv.headerTimeValidator != nil { - if err := hv.headerTimeValidator.ValidateHeaderTime(header, now, parent); err != nil { - return err - } + if err := hv.headerTimeValidator.ValidateHeaderTime(ctx, header, now, parent); err != nil { + return err } return nil diff --git a/polygon/sync/heimdall_waypoints_fetcher.go b/polygon/sync/heimdall_waypoints_fetcher.go index dabab322c48..b3cc6657369 100644 --- a/polygon/sync/heimdall_waypoints_fetcher.go +++ b/polygon/sync/heimdall_waypoints_fetcher.go @@ -24,6 +24,6 @@ import ( //go:generate mockgen -typed=true -source=./heimdall_waypoints_fetcher.go -destination=./heimdall_waypoints_fetcher_mock.go -package=sync type heimdallWaypointsFetcher interface { - 
FetchCheckpointsFromBlock(ctx context.Context, startBlock uint64) (heimdall.Waypoints, error) - FetchMilestonesFromBlock(ctx context.Context, startBlock uint64) (heimdall.Waypoints, error) + CheckpointsFromBlock(ctx context.Context, startBlock uint64) (heimdall.Waypoints, error) + MilestonesFromBlock(ctx context.Context, startBlock uint64) (heimdall.Waypoints, error) } diff --git a/polygon/sync/heimdall_waypoints_fetcher_mock.go b/polygon/sync/heimdall_waypoints_fetcher_mock.go index 654bf530768..9bfc1c0c3dd 100644 --- a/polygon/sync/heimdall_waypoints_fetcher_mock.go +++ b/polygon/sync/heimdall_waypoints_fetcher_mock.go @@ -13,9 +13,8 @@ import ( context "context" reflect "reflect" - gomock "go.uber.org/mock/gomock" - heimdall "github.com/erigontech/erigon/polygon/heimdall" + gomock "go.uber.org/mock/gomock" ) // MockheimdallWaypointsFetcher is a mock of heimdallWaypointsFetcher interface. @@ -41,80 +40,80 @@ func (m *MockheimdallWaypointsFetcher) EXPECT() *MockheimdallWaypointsFetcherMoc return m.recorder } -// FetchCheckpointsFromBlock mocks base method. -func (m *MockheimdallWaypointsFetcher) FetchCheckpointsFromBlock(ctx context.Context, startBlock uint64) (heimdall.Waypoints, error) { +// CheckpointsFromBlock mocks base method. +func (m *MockheimdallWaypointsFetcher) CheckpointsFromBlock(ctx context.Context, startBlock uint64) (heimdall.Waypoints, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchCheckpointsFromBlock", ctx, startBlock) + ret := m.ctrl.Call(m, "CheckpointsFromBlock", ctx, startBlock) ret0, _ := ret[0].(heimdall.Waypoints) ret1, _ := ret[1].(error) return ret0, ret1 } -// FetchCheckpointsFromBlock indicates an expected call of FetchCheckpointsFromBlock. -func (mr *MockheimdallWaypointsFetcherMockRecorder) FetchCheckpointsFromBlock(ctx, startBlock any) *MockheimdallWaypointsFetcherFetchCheckpointsFromBlockCall { +// CheckpointsFromBlock indicates an expected call of CheckpointsFromBlock. 
+func (mr *MockheimdallWaypointsFetcherMockRecorder) CheckpointsFromBlock(ctx, startBlock any) *MockheimdallWaypointsFetcherCheckpointsFromBlockCall { mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchCheckpointsFromBlock", reflect.TypeOf((*MockheimdallWaypointsFetcher)(nil).FetchCheckpointsFromBlock), ctx, startBlock) - return &MockheimdallWaypointsFetcherFetchCheckpointsFromBlockCall{Call: call} + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckpointsFromBlock", reflect.TypeOf((*MockheimdallWaypointsFetcher)(nil).CheckpointsFromBlock), ctx, startBlock) + return &MockheimdallWaypointsFetcherCheckpointsFromBlockCall{Call: call} } -// MockheimdallWaypointsFetcherFetchCheckpointsFromBlockCall wrap *gomock.Call -type MockheimdallWaypointsFetcherFetchCheckpointsFromBlockCall struct { +// MockheimdallWaypointsFetcherCheckpointsFromBlockCall wrap *gomock.Call +type MockheimdallWaypointsFetcherCheckpointsFromBlockCall struct { *gomock.Call } // Return rewrite *gomock.Call.Return -func (c *MockheimdallWaypointsFetcherFetchCheckpointsFromBlockCall) Return(arg0 heimdall.Waypoints, arg1 error) *MockheimdallWaypointsFetcherFetchCheckpointsFromBlockCall { +func (c *MockheimdallWaypointsFetcherCheckpointsFromBlockCall) Return(arg0 heimdall.Waypoints, arg1 error) *MockheimdallWaypointsFetcherCheckpointsFromBlockCall { c.Call = c.Call.Return(arg0, arg1) return c } // Do rewrite *gomock.Call.Do -func (c *MockheimdallWaypointsFetcherFetchCheckpointsFromBlockCall) Do(f func(context.Context, uint64) (heimdall.Waypoints, error)) *MockheimdallWaypointsFetcherFetchCheckpointsFromBlockCall { +func (c *MockheimdallWaypointsFetcherCheckpointsFromBlockCall) Do(f func(context.Context, uint64) (heimdall.Waypoints, error)) *MockheimdallWaypointsFetcherCheckpointsFromBlockCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockheimdallWaypointsFetcherFetchCheckpointsFromBlockCall) DoAndReturn(f func(context.Context, uint64) (heimdall.Waypoints, error)) *MockheimdallWaypointsFetcherFetchCheckpointsFromBlockCall { +func (c *MockheimdallWaypointsFetcherCheckpointsFromBlockCall) DoAndReturn(f func(context.Context, uint64) (heimdall.Waypoints, error)) *MockheimdallWaypointsFetcherCheckpointsFromBlockCall { c.Call = c.Call.DoAndReturn(f) return c } -// FetchMilestonesFromBlock mocks base method. -func (m *MockheimdallWaypointsFetcher) FetchMilestonesFromBlock(ctx context.Context, startBlock uint64) (heimdall.Waypoints, error) { +// MilestonesFromBlock mocks base method. +func (m *MockheimdallWaypointsFetcher) MilestonesFromBlock(ctx context.Context, startBlock uint64) (heimdall.Waypoints, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchMilestonesFromBlock", ctx, startBlock) + ret := m.ctrl.Call(m, "MilestonesFromBlock", ctx, startBlock) ret0, _ := ret[0].(heimdall.Waypoints) ret1, _ := ret[1].(error) return ret0, ret1 } -// FetchMilestonesFromBlock indicates an expected call of FetchMilestonesFromBlock. -func (mr *MockheimdallWaypointsFetcherMockRecorder) FetchMilestonesFromBlock(ctx, startBlock any) *MockheimdallWaypointsFetcherFetchMilestonesFromBlockCall { +// MilestonesFromBlock indicates an expected call of MilestonesFromBlock. 
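Aside: the regenerated mock keeps gomock's typed-call pattern, so only the method names change at call sites. A minimal usage sketch (the test function and its assertions are illustrative, not part of this PR; the NewMockheimdallWaypointsFetcher constructor is assumed from the generated file, and heimdall.Waypoints is assumed to be a slice type):

	func TestWaypointsFetcherMockUsage(t *testing.T) {
		ctrl := gomock.NewController(t)
		fetcher := NewMockheimdallWaypointsFetcher(ctrl)
		fetcher.EXPECT().
			CheckpointsFromBlock(gomock.Any(), gomock.Any()).
			Return(heimdall.Waypoints{}, nil).
			Times(1)

		waypoints, err := fetcher.CheckpointsFromBlock(context.Background(), 0)
		require.NoError(t, err)
		require.Empty(t, waypoints)
	}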
+func (mr *MockheimdallWaypointsFetcherMockRecorder) MilestonesFromBlock(ctx, startBlock any) *MockheimdallWaypointsFetcherMilestonesFromBlockCall { mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchMilestonesFromBlock", reflect.TypeOf((*MockheimdallWaypointsFetcher)(nil).FetchMilestonesFromBlock), ctx, startBlock) - return &MockheimdallWaypointsFetcherFetchMilestonesFromBlockCall{Call: call} + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MilestonesFromBlock", reflect.TypeOf((*MockheimdallWaypointsFetcher)(nil).MilestonesFromBlock), ctx, startBlock) + return &MockheimdallWaypointsFetcherMilestonesFromBlockCall{Call: call} } -// MockheimdallWaypointsFetcherFetchMilestonesFromBlockCall wrap *gomock.Call -type MockheimdallWaypointsFetcherFetchMilestonesFromBlockCall struct { +// MockheimdallWaypointsFetcherMilestonesFromBlockCall wrap *gomock.Call +type MockheimdallWaypointsFetcherMilestonesFromBlockCall struct { *gomock.Call } // Return rewrite *gomock.Call.Return -func (c *MockheimdallWaypointsFetcherFetchMilestonesFromBlockCall) Return(arg0 heimdall.Waypoints, arg1 error) *MockheimdallWaypointsFetcherFetchMilestonesFromBlockCall { +func (c *MockheimdallWaypointsFetcherMilestonesFromBlockCall) Return(arg0 heimdall.Waypoints, arg1 error) *MockheimdallWaypointsFetcherMilestonesFromBlockCall { c.Call = c.Call.Return(arg0, arg1) return c } // Do rewrite *gomock.Call.Do -func (c *MockheimdallWaypointsFetcherFetchMilestonesFromBlockCall) Do(f func(context.Context, uint64) (heimdall.Waypoints, error)) *MockheimdallWaypointsFetcherFetchMilestonesFromBlockCall { +func (c *MockheimdallWaypointsFetcherMilestonesFromBlockCall) Do(f func(context.Context, uint64) (heimdall.Waypoints, error)) *MockheimdallWaypointsFetcherMilestonesFromBlockCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockheimdallWaypointsFetcherFetchMilestonesFromBlockCall) DoAndReturn(f func(context.Context, uint64) (heimdall.Waypoints, error)) *MockheimdallWaypointsFetcherFetchMilestonesFromBlockCall { +func (c *MockheimdallWaypointsFetcherMilestonesFromBlockCall) DoAndReturn(f func(context.Context, uint64) (heimdall.Waypoints, error)) *MockheimdallWaypointsFetcherMilestonesFromBlockCall { c.Call = c.Call.DoAndReturn(f) return c } diff --git a/polygon/sync/log_prefix.go b/polygon/sync/log_prefix.go index 5cc8fb8e651..81b2e1d939a 100644 --- a/polygon/sync/log_prefix.go +++ b/polygon/sync/log_prefix.go @@ -16,8 +16,6 @@ package sync -import "fmt" - func syncLogPrefix(message string) string { - return fmt.Sprintf("[sync] %s", message) + return "[sync] " + message } diff --git a/polygon/sync/service.go b/polygon/sync/service.go index 874572d2d63..7d76ed5a544 100644 --- a/polygon/sync/service.go +++ b/polygon/sync/service.go @@ -44,30 +44,27 @@ type service struct { events *TipEvents heimdallService heimdall.Service - bridge bridge.Service + bridgeService bridge.Service } func NewService( logger log.Logger, chainConfig *chain.Config, - dataDir string, - tmpDir string, sentryClient direct.SentryClient, maxPeers int, statusDataProvider *sentry.StatusDataProvider, - heimdallUrl string, executionClient executionproto.ExecutionClient, blockLimit uint, - polygonBridge bridge.Service, + bridgeService bridge.Service, + heimdallService heimdall.Service, ) Service { borConfig := chainConfig.Bor.(*borcfg.BorConfig) checkpointVerifier := VerifyCheckpointHeaders milestoneVerifier := VerifyMilestoneHeaders blocksVerifier := VerifyBlocks p2pService := 
p2p.NewService(maxPeers, logger, sentryClient, statusDataProvider.GetStatusData) - heimdallService := heimdall.AssembleService(heimdallUrl, dataDir, tmpDir, logger) execution := NewExecutionClient(executionClient) - store := NewStore(logger, execution, polygonBridge) + store := NewStore(logger, execution, bridgeService) blockDownloader := NewBlockDownloader( logger, p2pService, @@ -78,8 +75,7 @@ store, blockLimit, ) - spansCache := NewSpansCache() - ccBuilderFactory := NewCanonicalChainBuilderFactory(chainConfig, borConfig, spansCache) + ccBuilderFactory := NewCanonicalChainBuilderFactory(chainConfig, borConfig, heimdallService) events := NewTipEvents(logger, p2pService, heimdallService) sync := NewSync( store, @@ -89,8 +85,6 @@ p2pService, blockDownloader, ccBuilderFactory, - spansCache, - heimdallService.FetchLatestSpans, events.Events(), logger, ) @@ -100,7 +94,7 @@ store: store, events: events, heimdallService: heimdallService, - bridge: polygonBridge, + bridgeService: bridgeService, } } @@ -111,7 +105,7 @@ func (s *service) Run(parentCtx context.Context) error { group.Go(func() error { return s.store.Run(ctx) }) group.Go(func() error { return s.events.Run(ctx) }) group.Go(func() error { return s.heimdallService.Run(ctx) }) - group.Go(func() error { return s.bridge.Run(ctx) }) + group.Go(func() error { return s.bridgeService.Run(ctx) }) group.Go(func() error { return s.sync.Run(ctx) }) return group.Wait() diff --git a/polygon/sync/spans_cache.go b/polygon/sync/spans_cache.go deleted file mode 100644 index 3deab2e6d89..00000000000 --- a/polygon/sync/spans_cache.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2024 The Erigon Authors -// This file is part of Erigon. -// -// Erigon is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// Erigon is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with Erigon. If not, see <http://www.gnu.org/licenses/>. - -package sync - -import "github.com/erigontech/erigon/polygon/heimdall" - -type SpansCache struct { - spans map[uint64]*heimdall.Span -} - -func NewSpansCache() *SpansCache { - return &SpansCache{ - spans: make(map[uint64]*heimdall.Span), - } -} - -func (cache *SpansCache) Add(span *heimdall.Span) { - cache.spans[span.StartBlock] = span -} - -// SpanAt finds a span that contains blockNum. -func (cache *SpansCache) SpanAt(blockNum uint64) *heimdall.Span { - for _, span := range cache.spans { - if (span.StartBlock <= blockNum) && (blockNum <= span.EndBlock) { - return span - } - } - return nil -} - -// Prune removes spans that ended before blockNum.
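Stepping back to the service.go hunk above: the heimdall and bridge services are now constructed by the caller and injected, which is why dataDir, tmpDir and heimdallUrl drop out of NewService. A hypothetical wiring under the new signature (heimdall.AssembleService and its arguments are taken from the removed line; the bridge constructor name and the polygonsync package alias are assumptions):

	heimdallService := heimdall.AssembleService(heimdallUrl, dataDir, tmpDir, logger) // the call NewService used to make itself
	bridgeService := bridge.Assemble(dataDir, logger)                                 // assumed constructor, not shown in this diff
	syncService := polygonsync.NewService(
		logger, chainConfig, sentryClient, maxPeers, statusDataProvider,
		executionClient, blockLimit, bridgeService, heimdallService,
	)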
-func (cache *SpansCache) Prune(blockNum uint64) { - for key, span := range cache.spans { - if span.EndBlock < blockNum { - delete(cache.spans, key) - } - } -} - -func (cache *SpansCache) IsEmpty() bool { - return len(cache.spans) == 0 -} diff --git a/polygon/sync/sync.go b/polygon/sync/sync.go index 26d704d65e2..9dac4f49910 100644 --- a/polygon/sync/sync.go +++ b/polygon/sync/sync.go @@ -23,12 +23,9 @@ import ( "github.com/erigontech/erigon-lib/log/v3" "github.com/erigontech/erigon/core/types" - "github.com/erigontech/erigon/polygon/heimdall" "github.com/erigontech/erigon/polygon/p2p" ) -type latestSpanFetcher func(ctx context.Context, count uint) ([]*heimdall.Span, error) - type Sync struct { store Store execution ExecutionClient @@ -37,8 +34,6 @@ type Sync struct { p2pService p2p.Service blockDownloader BlockDownloader ccBuilderFactory CanonicalChainBuilderFactory - spansCache *SpansCache - fetchLatestSpans latestSpanFetcher events <-chan Event logger log.Logger } @@ -51,8 +46,6 @@ func NewSync( p2pService p2p.Service, blockDownloader BlockDownloader, ccBuilderFactory CanonicalChainBuilderFactory, - spansCache *SpansCache, - fetchLatestSpans latestSpanFetcher, events <-chan Event, logger log.Logger, ) *Sync { @@ -64,8 +57,6 @@ func NewSync( p2pService: p2pService, blockDownloader: blockDownloader, ccBuilderFactory: ccBuilderFactory, - spansCache: spansCache, - fetchLatestSpans: fetchLatestSpans, events: events, logger: logger, } @@ -178,7 +169,7 @@ func (s *Sync) onNewBlockEvent( } oldTip := ccBuilder.Tip() - if err = ccBuilder.Connect(newHeaders); err != nil { + if err = ccBuilder.Connect(ctx, newHeaders); err != nil { s.logger.Debug(syncLogPrefix("onNewBlockEvent: couldn't connect a header to the local chain tip, ignoring"), "err", err) return nil } @@ -248,15 +239,6 @@ func (s *Sync) Run(ctx context.Context) error { return err } - latestSpans, err := s.fetchLatestSpans(ctx, 2) - if err != nil { - return err - } - - for _, span := range latestSpans { - s.spansCache.Add(span) - } - ccBuilder := s.ccBuilderFactory(tip) for { @@ -275,8 +257,6 @@ func (s *Sync) Run(ctx context.Context) error { if err = s.onNewBlockHashesEvent(ctx, event.AsNewBlockHashes(), ccBuilder); err != nil { return err } - case EventTypeNewSpan: - s.spansCache.Add(event.AsNewSpan()) } case <-ctx.Done(): return ctx.Err() diff --git a/polygon/sync/tip_events.go b/polygon/sync/tip_events.go index e593db812d0..4713d6dccd2 100644 --- a/polygon/sync/tip_events.go +++ b/polygon/sync/tip_events.go @@ -30,7 +30,6 @@ import ( const EventTypeNewBlock = "new-block" const EventTypeNewBlockHashes = "new-block-hashes" const EventTypeNewMilestone = "new-milestone" -const EventTypeNewSpan = "new-span" type EventNewBlock struct { NewBlock *types.Block @@ -44,15 +43,12 @@ type EventNewBlockHashes struct { type EventNewMilestone = *heimdall.Milestone -type EventNewSpan = *heimdall.Span - type Event struct { Type string newBlock EventNewBlock newBlockHashes EventNewBlockHashes newMilestone EventNewMilestone - newSpan EventNewSpan } func (e Event) AsNewBlock() EventNewBlock { @@ -76,21 +72,13 @@ func (e Event) AsNewMilestone() EventNewMilestone { return e.newMilestone } -func (e Event) AsNewSpan() EventNewSpan { - if e.Type != EventTypeNewSpan { - panic("Event type mismatch") - } - return e.newSpan -} - type p2pObserverRegistrar interface { RegisterNewBlockObserver(polygoncommon.Observer[*p2p.DecodedInboundMessage[*eth.NewBlockPacket]]) polygoncommon.UnregisterFunc 
RegisterNewBlockHashesObserver(polygoncommon.Observer[*p2p.DecodedInboundMessage[*eth.NewBlockHashesPacket]]) polygoncommon.UnregisterFunc } type heimdallObserverRegistrar interface { - RegisterMilestoneObserver(func(*heimdall.Milestone)) polygoncommon.UnregisterFunc - RegisterSpanObserver(func(*heimdall.Span)) polygoncommon.UnregisterFunc + RegisterMilestoneObserver(callback func(*heimdall.Milestone), opts ...heimdall.ObserverOption) polygoncommon.UnregisterFunc } type TipEvents struct { @@ -149,16 +137,8 @@ func (te *TipEvents) Run(ctx context.Context) error { Type: EventTypeNewMilestone, newMilestone: milestone, }) - }) + }, heimdall.WithEventsLimit(5)) defer milestoneObserverCancel() - spanObserverCancel := te.heimdallObserverRegistrar.RegisterSpanObserver(func(span *heimdall.Span) { - te.events.PushEvent(Event{ - Type: EventTypeNewSpan, - newSpan: span, - }) - }) - defer spanObserverCancel() - return te.events.Run(ctx) } diff --git a/rlp/doc.go b/rlp/doc.go index bb0e1552801..a40bf827a9a 100644 --- a/rlp/doc.go +++ b/rlp/doc.go @@ -39,7 +39,7 @@ call EncodeRLP on nil pointer values. To encode a pointer, the value being pointed to is encoded. A nil pointer to a struct type, slice or array always encodes as an empty RLP list unless the slice or array has -elememt type byte. A nil pointer to any other value encodes as the empty string. +element type byte. A nil pointer to any other value encodes as the empty string. Struct values are encoded as an RLP list of all their encoded public fields. Recursive struct types are supported. diff --git a/rlp/encode.go b/rlp/encode.go index 3a817eefd2c..75c14a9913b 100644 --- a/rlp/encode.go +++ b/rlp/encode.go @@ -233,7 +233,7 @@ const wordBytes = (32 << (uint64(^big.Word(0)) >> 63)) / 8 func writeBigInt(i *big.Int, w *encBuffer) error { if i.Sign() == -1 { - return fmt.Errorf("rlp: cannot encode negative *big.Int") + return errors.New("rlp: cannot encode negative *big.Int") } bitlen := i.BitLen() if bitlen <= 64 { diff --git a/rpc/handler.go b/rpc/handler.go index 0073bfee00d..0ae3dbc1b24 100644 --- a/rpc/handler.go +++ b/rpc/handler.go @@ -342,7 +342,6 @@ func (h *handler) startCallProc(fn func(*callProc)) { // handleImmediate executes non-call messages. It returns false if the message is a // call or requires a reply. 
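A pattern recurs from here to the end of the diff: fmt.Errorf with a constant message becomes errors.New, and fmt.Sprintf("%x", b) becomes hex.EncodeToString(b). Both swaps drop format-string parsing at call sites that never interpolate; a minimal side-by-side sketch:

	errOld := fmt.Errorf("rlp: cannot encode negative *big.Int") // parses a format string for nothing
	errNew := errors.New("rlp: cannot encode negative *big.Int") // same message, no formatting machinery
	b := []byte{0xde, 0xad}
	sOld := fmt.Sprintf("%x", b)  // "dead"
	sNew := hex.EncodeToString(b) // "dead", without going through fmt's reflection
	_, _, _, _ = errOld, errNew, sOld, sNew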
func (h *handler) handleImmediate(msg *jsonrpcMessage) bool { - start := time.Now() switch { case msg.isNotification(): if strings.HasSuffix(msg.Method, notificationMethodSuffix) { @@ -352,7 +351,6 @@ func (h *handler) handleImmediate(msg *jsonrpcMessage) bool { return false case msg.isResponse(): h.handleResponse(msg) - h.logger.Trace("[rpc] handled response", "reqid", idForLog(msg.ID), "t", time.Since(start)) return true default: return false diff --git a/rpc/handler_test.go b/rpc/handler_test.go index 9dc15c2997f..ea8ea06d1cf 100644 --- a/rpc/handler_test.go +++ b/rpc/handler_test.go @@ -19,7 +19,7 @@ package rpc import ( "bytes" "context" - "fmt" + "errors" "reflect" "testing" @@ -65,10 +65,10 @@ func TestHandlerDoesNotDoubleWriteNull(t *testing.T) { dummyFunc := func(id int, stream *jsoniter.Stream) error { if id == 1 { stream.WriteNil() - return fmt.Errorf("id 1") + return errors.New("id 1") } if id == 2 { - return fmt.Errorf("id 2") + return errors.New("id 2") } if id == 3 { stream.WriteEmptyObject() @@ -79,7 +79,7 @@ func TestHandlerDoesNotDoubleWriteNull(t *testing.T) { stream.WriteObjectField("structLogs") stream.WriteEmptyArray() stream.WriteObjectEnd() - return fmt.Errorf("id 4") + return errors.New("id 4") } return nil } diff --git a/rpc/server.go b/rpc/server.go index 7e1c7239a33..fa9f5a7bdf8 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -190,7 +190,7 @@ type PeerInfo struct { // Address of client. This will usually contain the IP address and port. RemoteAddr string - // Addditional information for HTTP and WebSocket connections. + // Additional information for HTTP and WebSocket connections. HTTP struct { // Protocol version, i.e. "HTTP/1.1". This is not set for WebSocket. Version string diff --git a/rpc/types.go b/rpc/types.go index f9ff7d0fa4a..fec2d2d7123 100644 --- a/rpc/types.go +++ b/rpc/types.go @@ -22,6 +22,7 @@ package rpc import ( "context" "encoding/json" + "errors" "fmt" "math" "math/big" @@ -139,7 +140,7 @@ func (bn *BlockNumber) UnmarshalJSON(data []byte) error { } } if blckNum > math.MaxInt64 { - return fmt.Errorf("block number larger than int64") + return errors.New("block number larger than int64") } *bn = BlockNumber(blckNum) return nil @@ -236,10 +237,10 @@ func (bnh *BlockNumberOrHash) UnmarshalJSON(data []byte) error { err := json.Unmarshal(data, &e) if err == nil { if e.BlockNumber != nil && e.BlockHash != nil { - return fmt.Errorf("cannot specify both BlockHash and BlockNumber, choose one or the other") + return errors.New("cannot specify both BlockHash and BlockNumber, choose one or the other") } if e.BlockNumber == nil && e.BlockHash == nil { - return fmt.Errorf("at least one of BlockNumber or BlockHash is needed if a dictionary is provided") + return errors.New("at least one of BlockNumber or BlockHash is needed if a dictionary is provided") } bnh.BlockNumber = e.BlockNumber bnh.BlockHash = e.BlockHash @@ -250,7 +251,7 @@ func (bnh *BlockNumberOrHash) UnmarshalJSON(data []byte) error { blckNum, err := strconv.ParseUint(string(data), 10, 64) if err == nil { if blckNum > math.MaxInt64 { - return fmt.Errorf("blocknumber too high") + return errors.New("blocknumber too high") } bn := BlockNumber(blckNum) bnh.BlockNumber = &bn @@ -295,7 +296,7 @@ func (bnh *BlockNumberOrHash) UnmarshalJSON(data []byte) error { return err } if blckNum > math.MaxInt64 { - return fmt.Errorf("blocknumber too high") + return errors.New("blocknumber too high") } bn := BlockNumber(blckNum) bnh.BlockNumber = &bn diff --git a/tests/block_test.go b/tests/block_test.go index 
3adc20bf4f1..bf6237b8125 100644 --- a/tests/block_test.go +++ b/tests/block_test.go @@ -42,7 +42,7 @@ func TestBlockchain(t *testing.T) { // Currently it fails because SpawnStageHeaders doesn't accept any PoW blocks after PoS transition // TODO(yperbasis): make it work - bt.skipLoad(`^TransitionTests/bcArrowGlacierToMerge/powToPosBlockRejection\.json`) + bt.skipLoad(`^TransitionTests/bcArrowGlacierToParis/powToPosBlockRejection\.json`) bt.skipLoad(`^TransitionTests/bcFrontierToHomestead/blockChainFrontierWithLargerTDvsHomesteadBlockchain\.json`) // TODO: HistoryV3: doesn't produce receipts on execution by design. But maybe we can Generate them on-the fly (on history) and enable this tests @@ -61,8 +61,6 @@ func TestBlockchain(t *testing.T) { } func TestBlockchainEIP(t *testing.T) { - t.Skip("TODO(yperbasis): fix me") - defer log.Root().SetHandler(log.Root().GetHandler()) log.Root().SetHandler(log.LvlFilterHandler(log.LvlError, log.StderrHandler)) @@ -70,6 +68,7 @@ func TestBlockchainEIP(t *testing.T) { // EOF is not supported yet bt.skipLoad(`^StateTests/stEOF/`) + bt.skipLoad(`^StateTests/stEIP2537/`) checkStateRoot := true diff --git a/tests/block_test_util.go b/tests/block_test_util.go index ae056b86913..dc03306d241 100644 --- a/tests/block_test_util.go +++ b/tests/block_test_util.go @@ -25,6 +25,7 @@ import ( "context" "encoding/hex" "encoding/json" + "errors" "fmt" "math/big" "reflect" @@ -178,6 +179,7 @@ func (bt *BlockTest) genesis(config *chain.Config) *types.Genesis { BlobGasUsed: bt.json.Genesis.BlobGasUsed, ExcessBlobGas: bt.json.Genesis.ExcessBlobGas, ParentBeaconBlockRoot: bt.json.Genesis.ParentBeaconBlockRoot, + RequestsRoot: bt.json.Genesis.RequestsRoot, } } @@ -244,10 +246,10 @@ func (bt *BlockTest) insertBlocks(m *mock.MockSentry) ([]btBlock, error) { func validateHeader(h *btHeader, h2 *types.Header) error { if h == nil { - return fmt.Errorf("validateHeader: h == nil") + return errors.New("validateHeader: h == nil") } if h2 == nil { - return fmt.Errorf("validateHeader: h2 == nil") + return errors.New("validateHeader: h2 == nil") } if h.Bloom != h2.Bloom { return fmt.Errorf("bloom: want: %x have: %x", h.Bloom, h2.Bloom) diff --git a/tests/gen_stenv.go b/tests/gen_stenv.go index 8909970eb30..ce25eedb587 100644 --- a/tests/gen_stenv.go +++ b/tests/gen_stenv.go @@ -17,13 +17,14 @@ var _ = (*stEnvMarshaling)(nil) // MarshalJSON marshals as JSON. 
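Note: the regenerated codec below gains an optional currentExcessBlobGas field on both the marshaling and unmarshaling side. A hypothetical state-test env fragment exercising it (every value here is invented for illustration):

	var env stEnv
	blob := []byte(`{
		"currentCoinbase":      "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
		"currentDifficulty":    "0x20000",
		"currentGasLimit":      "0x05f5e100",
		"currentNumber":        "0x01",
		"currentTimestamp":     "0x03e8",
		"currentExcessBlobGas": "0x60000"
	}`)
	if err := json.Unmarshal(blob, &env); err != nil {
		panic(err) // all required fields are present; the optional field populates env.ExcessBlobGas
	}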
func (s stEnv) MarshalJSON() ([]byte, error) { type stEnv struct { - Coinbase common0.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` - Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` - Random *math.HexOrDecimal256 `json:"currentRandom" gencodec:"optional"` - GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` - Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` - Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` - BaseFee *math.HexOrDecimal256 `json:"currentBaseFee" gencodec:"optional"` + Coinbase common0.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` + Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` + Random *math.HexOrDecimal256 `json:"currentRandom" gencodec:"optional"` + GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` + Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` + Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` + BaseFee *math.HexOrDecimal256 `json:"currentBaseFee" gencodec:"optional"` + ExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas" gencodec:"optional"` } var enc stEnv enc.Coinbase = common0.UnprefixedAddress(s.Coinbase) @@ -33,19 +34,21 @@ func (s stEnv) MarshalJSON() ([]byte, error) { enc.Number = math.HexOrDecimal64(s.Number) enc.Timestamp = math.HexOrDecimal64(s.Timestamp) enc.BaseFee = (*math.HexOrDecimal256)(s.BaseFee) + enc.ExcessBlobGas = (*math.HexOrDecimal64)(s.ExcessBlobGas) return json.Marshal(&enc) } // UnmarshalJSON unmarshals from JSON. func (s *stEnv) UnmarshalJSON(input []byte) error { type stEnv struct { - Coinbase *common0.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` - Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` - Random *math.HexOrDecimal256 `json:"currentRandom" gencodec:"optional"` - GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` - Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` - Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` - BaseFee *math.HexOrDecimal256 `json:"currentBaseFee" gencodec:"optional"` + Coinbase *common0.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` + Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` + Random *math.HexOrDecimal256 `json:"currentRandom" gencodec:"optional"` + GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` + Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` + Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` + BaseFee *math.HexOrDecimal256 `json:"currentBaseFee" gencodec:"optional"` + ExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas" gencodec:"optional"` } var dec stEnv if err := json.Unmarshal(input, &dec); err != nil { @@ -77,5 +80,8 @@ func (s *stEnv) UnmarshalJSON(input []byte) error { if dec.BaseFee != nil { s.BaseFee = (*big.Int)(dec.BaseFee) } + if dec.ExcessBlobGas != nil { + s.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas) + } return nil } diff --git a/tests/init_test.go b/tests/init_test.go index f919d195397..441e223e944 100644 --- a/tests/init_test.go +++ b/tests/init_test.go @@ -21,6 +21,7 @@ package tests import ( "encoding/json" + "errors" "fmt" "io" "os" @@ -185,7 +186,7 @@ func (tm *testMatcher) checkFailureWithName(t *testing.T, name string, err error t.Logf("error: %v", err) return nil } - 
return fmt.Errorf("test succeeded unexpectedly") + return errors.New("test succeeded unexpectedly") } return err } diff --git a/tests/rlp_test_util.go b/tests/rlp_test_util.go index e28094c3b04..7af1af689a9 100644 --- a/tests/rlp_test_util.go +++ b/tests/rlp_test_util.go @@ -62,7 +62,7 @@ func FromHex(s string) ([]byte, error) { func (t *RLPTest) Run() error { outb, err := FromHex(t.Out) if err != nil { - return fmt.Errorf("invalid hex in Out") + return errors.New("invalid hex in Out") } // Handle simple decoding tests with no actual In value. @@ -90,7 +90,7 @@ func checkDecodeInterface(b []byte, isValid bool) error { case isValid && err != nil: return fmt.Errorf("decoding failed: %w", err) case !isValid && err == nil: - return fmt.Errorf("decoding of invalid value succeeded") + return errors.New("decoding of invalid value succeeded") } return nil } diff --git a/tests/state_test.go b/tests/state_test.go index 5a1cc735e67..e931b37da21 100644 --- a/tests/state_test.go +++ b/tests/state_test.go @@ -60,8 +60,15 @@ func TestState(t *testing.T) { st.skipLoad(`.*vmPerformance/loop.*`) //if ethconfig.EnableHistoryV3InTest { //} + // these need to implement eip-7610 + st.skipLoad(`InitCollisionParis.json`) + st.skipLoad(`RevertInCreateInInit_Paris.json`) + st.skipLoad(`RevertInCreateInInitCreate2Paris.json`) + st.skipLoad(`create2collisionStorageParis.json`) + st.skipLoad(`dynamicAccountOverwriteEmpty_Paris.json`) - db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) + dirs := datadir.New(t.TempDir()) + db, _ := temporaltest.NewTestDB(t, dirs) st.walk(t, stateTestDir, func(t *testing.T, name string, test *StateTest) { for _, subtest := range test.Subtests() { subtest := subtest @@ -73,7 +80,7 @@ func TestState(t *testing.T) { t.Fatal(err) } defer tx.Rollback() - _, _, err = test.Run(tx, subtest, vmconfig) + _, _, err = test.Run(tx, subtest, vmconfig, dirs) tx.Rollback() if err != nil && len(test.json.Post[subtest.Fork][subtest.Index].ExpectException) > 0 { // Ignore expected errors diff --git a/tests/state_test_util.go b/tests/state_test_util.go index 5b8079692f9..bd438e0707a 100644 --- a/tests/state_test_util.go +++ b/tests/state_test_util.go @@ -24,16 +24,17 @@ import ( "encoding/binary" "encoding/hex" "encoding/json" + "errors" "fmt" "math/big" "strconv" "strings" + "github.com/erigontech/erigon-lib/common/datadir" + "github.com/holiman/uint256" "golang.org/x/crypto/sha3" - "github.com/erigontech/erigon-lib/config3" - "github.com/erigontech/erigon-lib/chain" libcommon "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/common/hexutility" @@ -45,6 +46,7 @@ import ( "github.com/erigontech/erigon-lib/log/v3" "github.com/erigontech/erigon/common" "github.com/erigontech/erigon/common/math" + "github.com/erigontech/erigon/consensus/misc" "github.com/erigontech/erigon/core" "github.com/erigontech/erigon/core/state" "github.com/erigontech/erigon/core/tracing" @@ -108,23 +110,25 @@ type stTransaction struct { //go:generate gencodec -type stEnv -field-override stEnvMarshaling -out gen_stenv.go type stEnv struct { - Coinbase libcommon.Address `json:"currentCoinbase" gencodec:"required"` - Difficulty *big.Int `json:"currentDifficulty" gencodec:"required"` - Random *big.Int `json:"currentRandom" gencodec:"optional"` - GasLimit uint64 `json:"currentGasLimit" gencodec:"required"` - Number uint64 `json:"currentNumber" gencodec:"required"` - Timestamp uint64 `json:"currentTimestamp" gencodec:"required"` - BaseFee *big.Int `json:"currentBaseFee" gencodec:"optional"` + Coinbase 
libcommon.Address `json:"currentCoinbase" gencodec:"required"` + Difficulty *big.Int `json:"currentDifficulty" gencodec:"required"` + Random *big.Int `json:"currentRandom" gencodec:"optional"` + GasLimit uint64 `json:"currentGasLimit" gencodec:"required"` + Number uint64 `json:"currentNumber" gencodec:"required"` + Timestamp uint64 `json:"currentTimestamp" gencodec:"required"` + BaseFee *big.Int `json:"currentBaseFee" gencodec:"optional"` + ExcessBlobGas *uint64 `json:"currentExcessBlobGas" gencodec:"optional"` } type stEnvMarshaling struct { - Coinbase common.UnprefixedAddress - Difficulty *math.HexOrDecimal256 - Random *math.HexOrDecimal256 - GasLimit math.HexOrDecimal64 - Number math.HexOrDecimal64 - Timestamp math.HexOrDecimal64 - BaseFee *math.HexOrDecimal256 + Coinbase common.UnprefixedAddress + Difficulty *math.HexOrDecimal256 + Random *math.HexOrDecimal256 + GasLimit math.HexOrDecimal64 + Number math.HexOrDecimal64 + Timestamp math.HexOrDecimal64 + BaseFee *math.HexOrDecimal256 + ExcessBlobGas *math.HexOrDecimal64 } // GetChainConfig takes a fork definition and returns a chain config. @@ -165,8 +169,8 @@ func (t *StateTest) Subtests() []StateSubtest { } // Run executes a specific subtest and verifies the post-state and logs -func (t *StateTest) Run(tx kv.RwTx, subtest StateSubtest, vmconfig vm.Config) (*state.IntraBlockState, libcommon.Hash, error) { - state, root, err := t.RunNoVerify(tx, subtest, vmconfig) +func (t *StateTest) Run(tx kv.RwTx, subtest StateSubtest, vmconfig vm.Config, dirs datadir.Dirs) (*state.IntraBlockState, libcommon.Hash, error) { + state, root, err := t.RunNoVerify(tx, subtest, vmconfig, dirs) if err != nil { return state, types.EmptyRootHash, err } @@ -183,13 +187,13 @@ func (t *StateTest) Run(tx kv.RwTx, subtest StateSubtest, vmconfig vm.Config) (* } // RunNoVerify runs a specific subtest and returns the statedb and post-state root -func (t *StateTest) RunNoVerify(tx kv.RwTx, subtest StateSubtest, vmconfig vm.Config) (*state.IntraBlockState, libcommon.Hash, error) { +func (t *StateTest) RunNoVerify(tx kv.RwTx, subtest StateSubtest, vmconfig vm.Config, dirs datadir.Dirs) (*state.IntraBlockState, libcommon.Hash, error) { config, eips, err := GetChainConfig(subtest.Fork) if err != nil { return nil, libcommon.Hash{}, UnsupportedForkError{subtest.Fork} } vmconfig.ExtraEips = eips - block, _, err := core.GenesisToBlock(t.genesis(config), "", log.Root()) + block, _, err := core.GenesisToBlock(t.genesis(config), dirs, log.Root()) if err != nil { return nil, libcommon.Hash{}, UnsupportedForkError{subtest.Fork} } @@ -197,26 +201,21 @@ func (t *StateTest) RunNoVerify(tx kv.RwTx, subtest StateSubtest, vmconfig vm.Co readBlockNr := block.NumberU64() writeBlockNr := readBlockNr + 1 - _, err = MakePreState(&chain.Rules{}, tx, t.json.Pre, readBlockNr, config3.EnableHistoryV4InTest) + _, err = MakePreState(&chain.Rules{}, tx, t.json.Pre, readBlockNr) if err != nil { return nil, libcommon.Hash{}, UnsupportedForkError{subtest.Fork} } - var r state.StateReader - var w state.StateWriter - var domains *state2.SharedDomains var txc wrap.TxContainer txc.Tx = tx - if config3.EnableHistoryV4InTest { - domains, err = state2.NewSharedDomains(tx, log.New()) - if err != nil { - return nil, libcommon.Hash{}, UnsupportedForkError{subtest.Fork} - } - defer domains.Close() - txc.Doms = domains + domains, err := state2.NewSharedDomains(tx, log.New()) + if err != nil { + return nil, libcommon.Hash{}, UnsupportedForkError{subtest.Fork} } - r = rpchelper.NewLatestStateReader(tx) - w = 
rpchelper.NewLatestStateWriter(txc, writeBlockNr) + defer domains.Close() + txc.Doms = domains + r := rpchelper.NewLatestStateReader(tx) + w := rpchelper.NewLatestStateWriter(txc, writeBlockNr) statedb := state.New(r) var baseFee *big.Int @@ -253,9 +252,19 @@ func (t *StateTest) RunNoVerify(tx kv.RwTx, subtest StateSubtest, vmconfig vm.Co context.BaseFee = new(uint256.Int) context.BaseFee.SetFromBig(baseFee) } - if t.json.Env.Random != nil { + if t.json.Env.Difficulty != nil { + context.Difficulty = new(big.Int).Set(t.json.Env.Difficulty) + } + if config.IsLondon(0) && t.json.Env.Random != nil { rnd := libcommon.BigToHash(t.json.Env.Random) context.PrevRanDao = &rnd + context.Difficulty = big.NewInt(0) + } + if config.IsCancun(block.NumberU64(), block.Time()) && t.json.Env.ExcessBlobGas != nil { + context.BlobBaseFee, err = misc.GetBlobGasPrice(config, *t.json.Env.ExcessBlobGas) + if err != nil { + return nil, libcommon.Hash{}, err + } } evm := vm.NewEVM(context, txContext, statedb, config, vmconfig) @@ -282,7 +291,7 @@ func (t *StateTest) RunNoVerify(tx kv.RwTx, subtest StateSubtest, vmconfig vm.Co return statedb, libcommon.BytesToHash(rootBytes), nil } -func MakePreState(rules *chain.Rules, tx kv.RwTx, accounts types.GenesisAlloc, blockNr uint64, histV3 bool) (*state.IntraBlockState, error) { +func MakePreState(rules *chain.Rules, tx kv.RwTx, accounts types.GenesisAlloc, blockNr uint64) (*state.IntraBlockState, error) { r := rpchelper.NewLatestStateReader(tx) statedb := state.New(r) for addr, a := range accounts { @@ -310,21 +319,18 @@ func MakePreState(rules *chain.Rules, tx kv.RwTx, accounts types.GenesisAlloc, b } } - var w state.StateWriter - var domains *state2.SharedDomains var txc wrap.TxContainer txc.Tx = tx - if config3.EnableHistoryV4InTest { - var err error - domains, err = state2.NewSharedDomains(tx, log.New()) - if err != nil { - return nil, err - } - defer domains.Close() - defer domains.Flush(context2.Background(), tx) - txc.Doms = domains + + domains, err := state2.NewSharedDomains(tx, log.New()) + if err != nil { + return nil, err } - w = rpchelper.NewLatestStateWriter(txc, blockNr-1) + defer domains.Close() + defer domains.Flush(context2.Background(), tx) + txc.Doms = domains + + w := rpchelper.NewLatestStateWriter(txc, blockNr-1) // Commit and re-open to start with a clean state. 
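Note: with the history-v4 gating removed, RunNoVerify and MakePreState above always open a SharedDomains session, and the env's excess blob gas now reaches the EVM context on Cancun forks. Roughly, as it would sit inside RunNoVerify (the signature is the one used in the hunk; the zero-excess result of one wei is EIP-4844's stated minimum, not something this diff shows):

	blobBaseFee, err := misc.GetBlobGasPrice(config, 0) // excess == 0 yields MIN_BLOB_GASPRICE, 1 wei
	if err != nil {
		return nil, libcommon.Hash{}, err
	}
	context.BlobBaseFee = blobBaseFee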
if err := statedb.FinalizeTx(rules, w); err != nil { @@ -431,14 +437,17 @@ func toMessage(tx stTransaction, ps stPostState, baseFee *big.Int) (core.Message tx.MaxPriorityFeePerGas = tx.MaxFeePerGas } - feeCap = big.Int(*tx.MaxPriorityFeePerGas) - tipCap = big.Int(*tx.MaxFeePerGas) + //feeCap = big.Int(*tx.MaxPriorityFeePerGas) + //tipCap = big.Int(*tx.MaxFeePerGas) + + tipCap = big.Int(*tx.MaxPriorityFeePerGas) + feeCap = big.Int(*tx.MaxFeePerGas) - gp := math.BigMin(new(big.Int).Add(&feeCap, baseFee), &tipCap) + gp := math.BigMin(new(big.Int).Add(&tipCap, baseFee), &feeCap) gasPrice = math.NewHexOrDecimal256(gp.Int64()) } if gasPrice == nil { - return nil, fmt.Errorf("no gas price provided") + return nil, errors.New("no gas price provided") } gpi := big.Int(*gasPrice) diff --git a/tests/testdata b/tests/testdata index 853b1e03b10..e46e1db503e 160000 --- a/tests/testdata +++ b/tests/testdata @@ -1 +1 @@ -Subproject commit 853b1e03b1078d370614002851ba1ee9803d9fcf +Subproject commit e46e1db503ee2711ad02e1f5b3ea45d43e9cd8cb diff --git a/turbo/adapter/ethapi/api.go b/turbo/adapter/ethapi/api.go index 3791c3db9f0..901489a4f5c 100644 --- a/turbo/adapter/ethapi/api.go +++ b/turbo/adapter/ethapi/api.go @@ -20,6 +20,7 @@ package ethapi import ( + "encoding/hex" "errors" "fmt" "math/big" @@ -98,7 +99,7 @@ func (args *CallArgs) ToMessage(globalGasCap uint64, baseFee *uint256.Int) (type if args.GasPrice != nil { overflow := gasPrice.SetFromBig(args.GasPrice.ToInt()) if overflow { - return types.Message{}, fmt.Errorf("args.GasPrice higher than 2^256-1") + return types.Message{}, errors.New("args.GasPrice higher than 2^256-1") } } gasFeeCap, gasTipCap = gasPrice, gasPrice @@ -109,7 +110,7 @@ func (args *CallArgs) ToMessage(globalGasCap uint64, baseFee *uint256.Int) (type gasPrice = new(uint256.Int) overflow := gasPrice.SetFromBig(args.GasPrice.ToInt()) if overflow { - return types.Message{}, fmt.Errorf("args.GasPrice higher than 2^256-1") + return types.Message{}, errors.New("args.GasPrice higher than 2^256-1") } gasFeeCap, gasTipCap = gasPrice, gasPrice } else { @@ -118,14 +119,14 @@ func (args *CallArgs) ToMessage(globalGasCap uint64, baseFee *uint256.Int) (type if args.MaxFeePerGas != nil { overflow := gasFeeCap.SetFromBig(args.MaxFeePerGas.ToInt()) if overflow { - return types.Message{}, fmt.Errorf("args.GasPrice higher than 2^256-1") + return types.Message{}, errors.New("args.GasPrice higher than 2^256-1") } } gasTipCap = new(uint256.Int) if args.MaxPriorityFeePerGas != nil { overflow := gasTipCap.SetFromBig(args.MaxPriorityFeePerGas.ToInt()) if overflow { - return types.Message{}, fmt.Errorf("args.GasPrice higher than 2^256-1") + return types.Message{}, errors.New("args.GasPrice higher than 2^256-1") } } // Backfill the legacy gasPrice for EVM execution, unless we're all zeroes @@ -137,7 +138,7 @@ func (args *CallArgs) ToMessage(globalGasCap uint64, baseFee *uint256.Int) (type if args.MaxFeePerBlobGas != nil { blobFee, overflow := uint256.FromBig(args.MaxFeePerBlobGas.ToInt()) if overflow { - return types.Message{}, fmt.Errorf("args.MaxFeePerBlobGas higher than 2^256-1") + return types.Message{}, errors.New("args.MaxFeePerBlobGas higher than 2^256-1") } maxFeePerBlobGas = blobFee } @@ -147,7 +148,7 @@ func (args *CallArgs) ToMessage(globalGasCap uint64, baseFee *uint256.Int) (type if args.Value != nil { overflow := value.SetFromBig(args.Value.ToInt()) if overflow { - return types.Message{}, fmt.Errorf("args.Value higher than 2^256-1") + return types.Message{}, errors.New("args.Value higher than 
2^256-1") } } var data []byte @@ -172,11 +173,11 @@ func (args *CallArgs) ToMessage(globalGasCap uint64, baseFee *uint256.Int) (type // if statDiff is set, all diff will be applied first and then execute the call // message. type Account struct { - Nonce *hexutil.Uint64 `json:"nonce"` - Code *hexutility.Bytes `json:"code"` - Balance **hexutil.Big `json:"balance"` - State *map[libcommon.Hash]uint256.Int `json:"state"` - StateDiff *map[libcommon.Hash]uint256.Int `json:"stateDiff"` + Nonce *hexutil.Uint64 `json:"nonce"` + Code *hexutility.Bytes `json:"code"` + Balance **hexutil.Big `json:"balance"` + State *map[libcommon.Hash]libcommon.Hash `json:"state"` + StateDiff *map[libcommon.Hash]libcommon.Hash `json:"stateDiff"` } func NewRevertError(result *evmtypes.ExecutionResult) *RevertError { @@ -248,14 +249,14 @@ func FormatLogs(logs []logger.StructLog) []StructLogRes { if trace.Stack != nil { stack := make([]string, len(trace.Stack)) for i, stackValue := range trace.Stack { - stack[i] = fmt.Sprintf("%x", math.PaddedBigBytes(stackValue, 32)) + stack[i] = hex.EncodeToString(math.PaddedBigBytes(stackValue, 32)) } formatted[index].Stack = &stack } if trace.Memory != nil { memory := make([]string, 0, (len(trace.Memory)+31)/32) for i := 0; i+32 <= len(trace.Memory); i += 32 { - memory = append(memory, fmt.Sprintf("%x", trace.Memory[i:i+32])) + memory = append(memory, hex.EncodeToString(trace.Memory[i:i+32])) } formatted[index].Memory = &memory } diff --git a/turbo/adapter/ethapi/state_overrides.go b/turbo/adapter/ethapi/state_overrides.go index 7fed8647623..77f782b6183 100644 --- a/turbo/adapter/ethapi/state_overrides.go +++ b/turbo/adapter/ethapi/state_overrides.go @@ -17,6 +17,7 @@ package ethapi import ( + "errors" "fmt" "math/big" @@ -45,7 +46,7 @@ func (overrides *StateOverrides) Override(state *state.IntraBlockState) error { if account.Balance != nil { balance, overflow := uint256.FromBig((*big.Int)(*account.Balance)) if overflow { - return fmt.Errorf("account.Balance higher than 2^256-1") + return errors.New("account.Balance higher than 2^256-1") } state.SetBalance(addr, balance, tracing.BalanceChangeUnspecified) } @@ -54,13 +55,19 @@ func (overrides *StateOverrides) Override(state *state.IntraBlockState) error { } // Replace entire state if caller requires. if account.State != nil { - state.SetStorage(addr, *account.State) + intState := map[libcommon.Hash]uint256.Int{} + for key, value := range *account.State { + intValue := new(uint256.Int).SetBytes32(value.Bytes()) + intState[key] = *intValue + } + state.SetStorage(addr, intState) } // Apply state diff into specified accounts. if account.StateDiff != nil { for key, value := range *account.StateDiff { key := key - state.SetState(addr, &key, value) + intValue := new(uint256.Int).SetBytes32(value.Bytes()) + state.SetState(addr, &key, *intValue) } } } diff --git a/turbo/app/import_cmd.go b/turbo/app/import_cmd.go index 453cabf8727..b9549189c67 100644 --- a/turbo/app/import_cmd.go +++ b/turbo/app/import_cmd.go @@ -150,7 +150,7 @@ func ImportChain(ethereum *eth.Ethereum, chainDB kv.RwDB, fn string, logger log. for batch := 0; ; batch++ { // Load a batch of RLP blocks. if checkInterrupt() { - return fmt.Errorf("interrupted") + return errors.New("interrupted") } i := 0 for ; i < importBatchSize; i++ { @@ -173,7 +173,7 @@ func ImportChain(ethereum *eth.Ethereum, chainDB kv.RwDB, fn string, logger log. } // Import the batch. 
if checkInterrupt() { - return fmt.Errorf("interrupted") + return errors.New("interrupted") } br, _ := ethereum.BlockIO() diff --git a/turbo/app/init_cmd.go b/turbo/app/init_cmd.go index e329b08ac6c..269af34b13c 100644 --- a/turbo/app/init_cmd.go +++ b/turbo/app/init_cmd.go @@ -20,6 +20,8 @@ import ( "encoding/json" "os" + "github.com/erigontech/erigon-lib/common/datadir" + "github.com/urfave/cli/v2" "github.com/erigontech/erigon-lib/log/v3" @@ -82,7 +84,7 @@ func initGenesis(cliCtx *cli.Context) error { if err != nil { utils.Fatalf("Failed to open database: %v", err) } - _, hash, err := core.CommitGenesisBlock(chaindb, genesis, "", logger) + _, hash, err := core.CommitGenesisBlock(chaindb, genesis, datadir.New(cliCtx.String(utils.DataDirFlag.Name)), logger) if err != nil { utils.Fatalf("Failed to write genesis block: %v", err) } diff --git a/turbo/app/snapshots_cmd.go b/turbo/app/snapshots_cmd.go index b9a06951528..a286afce2d7 100644 --- a/turbo/app/snapshots_cmd.go +++ b/turbo/app/snapshots_cmd.go @@ -475,7 +475,7 @@ func doIntegrity(cliCtx *cli.Context) error { chainDB := dbCfg(kv.ChainDB, dirs.Chaindata).MustOpen() defer chainDB.Close() - cfg := ethconfig.NewSnapCfg(true, false, true, true) + cfg := ethconfig.NewSnapCfg(false, true, true) _, _, _, blockRetire, agg, clean, err := openSnaps(ctx, cfg, dirs, chainDB, logger) if err != nil { @@ -594,7 +594,7 @@ func doDecompressSpeed(cliCtx *cli.Context) error { } args := cliCtx.Args() if args.Len() < 1 { - return fmt.Errorf("expecting file path as a first argument") + return errors.New("expecting file path as a first argument") } f := args.First() @@ -647,7 +647,7 @@ func doIndicesCommand(cliCtx *cli.Context, dirs datadir.Dirs) error { return err } - cfg := ethconfig.NewSnapCfg(true, false, true, true) + cfg := ethconfig.NewSnapCfg(false, true, true) chainConfig := fromdb.ChainConfig(chainDB) _, _, caplinSnaps, br, agg, clean, err := openSnaps(ctx, cfg, dirs, chainDB, logger) if err != nil { @@ -678,7 +678,7 @@ func doLS(cliCtx *cli.Context, dirs datadir.Dirs) error { chainDB := dbCfg(kv.ChainDB, dirs.Chaindata).MustOpen() defer chainDB.Close() - cfg := ethconfig.NewSnapCfg(true, false, true, true) + cfg := ethconfig.NewSnapCfg(false, true, true) blockSnaps, borSnaps, caplinSnaps, _, agg, clean, err := openSnaps(ctx, cfg, dirs, chainDB, logger) if err != nil { return err @@ -767,7 +767,7 @@ func doUncompress(cliCtx *cli.Context) error { args := cliCtx.Args() if args.Len() < 1 { - return fmt.Errorf("expecting file path as a first argument") + return errors.New("expecting file path as a first argument") } f := args.First() @@ -820,7 +820,7 @@ func doCompress(cliCtx *cli.Context) error { args := cliCtx.Args() if args.Len() < 1 { - return fmt.Errorf("expecting file path as a first argument") + return errors.New("expecting file path as a first argument") } f := args.First() dirs := datadir.New(cliCtx.String(utils.DataDirFlag.Name)) @@ -875,7 +875,7 @@ func doRetireCommand(cliCtx *cli.Context, dirs datadir.Dirs) error { db := dbCfg(kv.ChainDB, dirs.Chaindata).MustOpen() defer db.Close() - cfg := ethconfig.NewSnapCfg(true, false, true, true) + cfg := ethconfig.NewSnapCfg(false, true, true) blockSnaps, _, caplinSnaps, br, agg, clean, err := openSnaps(ctx, cfg, dirs, db, logger) if err != nil { return err diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index 7e0a4e7ad63..618a8afa487 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -213,8 +213,10 @@ var DefaultFlags = []cli.Flag{ 
&utils.CaplinBackfillingFlag, &utils.CaplinBlobBackfillingFlag, &utils.CaplinDisableBlobPruningFlag, + &utils.CaplinDisableCheckpointSyncFlag, &utils.CaplinArchiveFlag, &utils.CaplinMevRelayUrl, + &utils.CaplinValidatorMonitorFlag, &utils.TrustedSetupFile, &utils.RPCSlowFlag, diff --git a/turbo/cli/flags.go b/turbo/cli/flags.go index 66c8413bf23..8ca78ab237f 100644 --- a/turbo/cli/flags.go +++ b/turbo/cli/flags.go @@ -274,7 +274,7 @@ func ApplyFlagsForEthConfig(ctx *cli.Context, cfg *ethconfig.Config, logger log. } // Sanitize prune flag if ctx.String(PruneModeFlag.Name) != "archive" && (ctx.IsSet(PruneBlocksDistanceFlag.Name) || ctx.IsSet(PruneDistanceFlag.Name)) { - utils.Fatalf(fmt.Sprintf("error: --prune.distance and --prune.distance.blocks are only allowed with --prune.mode=archive")) + utils.Fatalf("error: --prune.distance and --prune.distance.blocks are only allowed with --prune.mode=archive") } distance := ctx.Uint64(PruneDistanceFlag.Name) blockDistance := ctx.Uint64(PruneBlocksDistanceFlag.Name) @@ -399,7 +399,7 @@ func ApplyFlagsForEthConfigCobra(f *pflag.FlagSet, cfg *ethconfig.Config) { chainId := cfg.NetworkID if *pruneMode != "archive" && (pruneBlockDistance != nil || pruneDistance != nil) { - utils.Fatalf(fmt.Sprintf("error: --prune.distance and --prune.distance.blocks are only allowed with --prune.mode=archive")) + utils.Fatalf("error: --prune.distance and --prune.distance.blocks are only allowed with --prune.mode=archive") } var distance, blockDistance uint64 = math.MaxUint64, math.MaxUint64 if pruneBlockDistance != nil { diff --git a/turbo/cmdtest/test_cmd.go b/turbo/cmdtest/test_cmd.go index 62e1205c297..3a846315ad6 100644 --- a/turbo/cmdtest/test_cmd.go +++ b/turbo/cmdtest/test_cmd.go @@ -27,6 +27,7 @@ import ( "os" "os/exec" "regexp" + "strconv" "strings" "sync" "sync/atomic" @@ -36,8 +37,7 @@ import ( "time" "github.com/erigontech/erigon-lib/log/v3" - - "github.com/docker/docker/pkg/reexec" + "github.com/erigontech/erigon/internal/reexec" ) func NewTestCmd(t *testing.T, data interface{}) *TestCmd { @@ -66,7 +66,7 @@ var id int32 // reexec init function for that name (e.g. 
"geth-test" in cmd/geth/run_test.go) func (tt *TestCmd) Run(name string, args ...string) { id := atomic.AddInt32(&id, 1) - tt.stderr = &testlogger{t: tt.T, name: fmt.Sprintf("%d", id)} + tt.stderr = &testlogger{t: tt.T, name: strconv.FormatUint(uint64(id), 10)} tt.cmd = &exec.Cmd{ Path: reexec.Self(), Args: append([]string{name}, args...), diff --git a/turbo/debug/flags.go b/turbo/debug/flags.go index 80c4537defd..c2946ec1315 100644 --- a/turbo/debug/flags.go +++ b/turbo/debug/flags.go @@ -58,7 +58,7 @@ var ( metricsAddrFlag = cli.StringFlag{ Name: "metrics.addr", Usage: "Prometheus HTTP server listening interface", - Value: "0.0.0.0", + Value: "127.0.0.1", } metricsPortFlag = cli.UintFlag{ Name: "metrics.port", @@ -76,7 +76,7 @@ var ( pprofAddrFlag = cli.StringFlag{ Name: "pprof.addr", Usage: "pprof HTTP server listening interface", - Value: "0.0.0.0", + Value: "127.0.0.1", } cpuprofileFlag = cli.StringFlag{ Name: "pprof.cpuprofile", diff --git a/turbo/engineapi/engine_block_downloader/block_downloader.go b/turbo/engineapi/engine_block_downloader/block_downloader.go index db2b2e29f37..50283851ffc 100644 --- a/turbo/engineapi/engine_block_downloader/block_downloader.go +++ b/turbo/engineapi/engine_block_downloader/block_downloader.go @@ -193,7 +193,7 @@ func (e *EngineBlockDownloader) loadDownloadedHeaders(tx kv.RwTx) (fromBlock uin return saveHeader(tx, &h, h.Hash()) } - foundPow = h.Difficulty.Cmp(libcommon.Big0) != 0 + foundPow = h.Difficulty.Sign() != 0 if foundPow { if (fromHash == libcommon.Hash{}) { fromHash = h.Hash() diff --git a/turbo/engineapi/engine_helpers/fork_validator.go b/turbo/engineapi/engine_helpers/fork_validator.go index bc1cfa73d71..ccaa1025152 100644 --- a/turbo/engineapi/engine_helpers/fork_validator.go +++ b/turbo/engineapi/engine_helpers/fork_validator.go @@ -41,6 +41,13 @@ import ( "github.com/erigontech/erigon/turbo/shards" ) +type BlockTimings [2]time.Duration + +const ( + BlockTimingsValidationIndex = 0 + BlockTimingsFlushExtendingFork = 1 +) + const timingsCacheSize = 16 // the maximum point from the current head, past which side forks are not validated anymore. 
@@ -70,7 +77,7 @@ type ForkValidator struct { // we want fork validator to be thread safe so let lock sync.Mutex - timingsCache *lru.Cache[libcommon.Hash, []interface{}] + timingsCache *lru.Cache[libcommon.Hash, BlockTimings] } func NewForkValidatorMock(currentHeight uint64) *ForkValidator { @@ -78,7 +85,7 @@ func NewForkValidatorMock(currentHeight uint64) *ForkValidator { if err != nil { panic(err) } - timingsCache, err := lru.New[libcommon.Hash, []interface{}]("timingsCache", timingsCacheSize) + timingsCache, err := lru.New[libcommon.Hash, BlockTimings]("timingsCache", timingsCacheSize) if err != nil { panic(err) } @@ -95,7 +102,7 @@ func NewForkValidator(ctx context.Context, currentHeight uint64, validatePayload panic(err) } - timingsCache, err := lru.New[libcommon.Hash, []interface{}]("timingsCache", timingsCacheSize) + timingsCache, err := lru.New[libcommon.Hash, BlockTimings]("timingsCache", timingsCacheSize) if err != nil { panic(err) } @@ -152,7 +159,8 @@ func (fv *ForkValidator) FlushExtendingFork(tx kv.RwTx, accumulator *shards.Accu } } timings, _ := fv.timingsCache.Get(fv.extendingForkHeadHash) - fv.timingsCache.Add(fv.extendingForkHeadHash, append(timings, "FlushExtendingFork", time.Since(start))) + timings[BlockTimingsFlushExtendingFork] = time.Since(start) + fv.timingsCache.Add(fv.extendingForkHeadHash, timings) fv.extendingForkNotifications.Accumulator.CopyAndReset(accumulator) // Clean extending fork data fv.sharedDom = nil @@ -302,7 +310,7 @@ func (fv *ForkValidator) validateAndStorePayload(txc wrap.TxContainer, header *t return } } - fv.timingsCache.Add(header.Hash(), []interface{}{"BlockValidation", time.Since(start)}) + fv.timingsCache.Add(header.Hash(), BlockTimings{time.Since(start), 0}) latestValidHash = header.Hash() fv.extendingForkHeadHash = header.Hash() @@ -341,11 +349,11 @@ func (fv *ForkValidator) validateAndStorePayload(txc wrap.TxContainer, header *t } // GetTimings returns the timings of the last block validation. 
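Replacing the `[]interface{}` timings value with the fixed-size `BlockTimings` array defined above makes the cache type-safe and removes the type assertions on the read path. A minimal sketch of the read-modify-write pattern these hunks adopt, substituting hashicorp's `golang-lru/v2` for the repo's named LRU wrapper and using a stand-in string key:

```go
// Sketch only: hashicorp/golang-lru/v2 stands in for the repo's LRU wrapper,
// and "0xhead" stands in for a libcommon.Hash key.
package main

import (
	"fmt"
	"time"

	lru "github.com/hashicorp/golang-lru/v2"
)

type BlockTimings [2]time.Duration

const (
	BlockTimingsValidationIndex    = 0
	BlockTimingsFlushExtendingFork = 1
)

func main() {
	cache, err := lru.New[string, BlockTimings](16)
	if err != nil {
		panic(err)
	}

	// validateAndStorePayload-style write: only the validation time is known yet.
	cache.Add("0xhead", BlockTimings{250 * time.Millisecond, 0})

	// FlushExtendingFork-style update: read, fill the second slot, write back.
	timings, _ := cache.Get("0xhead")
	timings[BlockTimingsFlushExtendingFork] = 40 * time.Millisecond
	cache.Add("0xhead", timings)

	fmt.Println(timings[BlockTimingsValidationIndex], timings[BlockTimingsFlushExtendingFork])
}
```

Because Go arrays are values, the updated copy has to be re-added, which is exactly what the `FlushExtendingFork` hunk does; a cache miss now yields a zero `BlockTimings` rather than a `nil` slice, which is why the rewritten `GetTimings` below can drop its `nil` return.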
-func (fv *ForkValidator) GetTimings(hash libcommon.Hash) []interface{} { +func (fv *ForkValidator) GetTimings(hash libcommon.Hash) BlockTimings { fv.lock.Lock() defer fv.lock.Unlock() if timings, ok := fv.timingsCache.Get(hash); ok { return timings } - return nil + return BlockTimings{} } diff --git a/turbo/engineapi/engine_server.go b/turbo/engineapi/engine_server.go index ce17c257a02..d1df8446eb5 100644 --- a/turbo/engineapi/engine_server.go +++ b/turbo/engineapi/engine_server.go @@ -104,7 +104,7 @@ func (e *EngineServer) Start( txPool txpool.TxpoolClient, mining txpool.MiningClient, ) { - base := jsonrpc.NewBaseApi(filters, stateCache, blockReader, httpConfig.WithDatadir, httpConfig.EvmCallTimeout, engineReader, httpConfig.Dirs) + base := jsonrpc.NewBaseApi(filters, stateCache, blockReader, httpConfig.WithDatadir, httpConfig.EvmCallTimeout, engineReader, httpConfig.Dirs, nil) ethImpl := jsonrpc.NewEthAPI(base, db, eth, txPool, mining, httpConfig.Gascap, httpConfig.Feecap, httpConfig.ReturnDataLimit, httpConfig.AllowUnprotectedTxs, httpConfig.MaxGetProofRewindBlockCount, httpConfig.WebsocketSubscribeLogsChannelSize, e.logger) @@ -128,7 +128,7 @@ func (e *EngineServer) Start( } } -func (s *EngineServer) checkWithdrawalsPresence(number uint64, time uint64, withdrawals []*types.Withdrawal) error { +func (s *EngineServer) checkWithdrawalsPresence(number uint64, time uint64, withdrawals types.Withdrawals) error { if !s.config.IsShanghai(number, time) && withdrawals != nil { return &rpc.InvalidParamsError{Message: "withdrawals before shanghai"} } @@ -212,9 +212,6 @@ func (s *EngineServer) newPayload(ctx context.Context, req *engine_types.Executi requests = append(requests, req.DepositRequests.Requests()...) requests = append(requests, req.WithdrawalRequests.Requests()...) requests = append(requests, req.ConsolidationRequests.Requests()...) - } - - if requests != nil { rh := types.DeriveSha(requests) header.RequestsRoot = &rh } @@ -340,11 +337,11 @@ func (s *EngineServer) getQuickPayloadStatusIfPossible(ctx context.Context, bloc } if s.config.TerminalTotalDifficulty == nil { s.logger.Error(fmt.Sprintf("[%s] not a proof-of-stake chain", prefix)) - return nil, fmt.Errorf("not a proof-of-stake chain") + return nil, errors.New("not a proof-of-stake chain") } if s.hd == nil { - return nil, fmt.Errorf("headerdownload is nil") + return nil, errors.New("headerdownload is nil") } headHash, finalizedHash, safeHash, err := s.chainRW.GetForkChoice(ctx) @@ -453,11 +450,11 @@ func (s *EngineServer) getPayload(ctx context.Context, payloadId uint64, version return nil, errCaplinEnabled } if !s.proposing { - return nil, fmt.Errorf("execution layer not running as a proposer. enable proposer by taking out the --proposer.disable flag on startup") + return nil, errors.New("execution layer not running as a proposer. enable proposer by taking out the --proposer.disable flag on startup") } if s.config.TerminalTotalDifficulty == nil { - return nil, fmt.Errorf("not a proof-of-stake chain") + return nil, errors.New("not a proof-of-stake chain") } s.logger.Debug("[GetPayload] acquiring lock") @@ -555,7 +552,7 @@ func (s *EngineServer) forkchoiceUpdated(ctx context.Context, forkchoiceState *e } if !s.proposing { - return nil, fmt.Errorf("execution layer not running as a proposer. enable proposer by taking out the --proposer.disable flag on startup") + return nil, errors.New("execution layer not running as a proposer. 
enable proposer by taking out the --proposer.disable flag on startup") } headHeader := s.chainRW.GetHeaderByHash(ctx, forkchoiceState.HeadHash) diff --git a/turbo/engineapi/engine_types/jsonrpc.go b/turbo/engineapi/engine_types/jsonrpc.go index eb6d767731c..dde2ec5aadf 100644 --- a/turbo/engineapi/engine_types/jsonrpc.go +++ b/turbo/engineapi/engine_types/jsonrpc.go @@ -171,6 +171,12 @@ func ConvertRpcBlockToExecutionPayload(payload *execution.Block) *ExecutionPaylo excessBlobGas := *header.ExcessBlobGas res.ExcessBlobGas = (*hexutil.Uint64)(&excessBlobGas) } + if header.RequestsRoot != nil { + reqs, _ := types.UnmarshalRequestsFromBinary(body.Requests) + res.DepositRequests = reqs.Deposits() + res.WithdrawalRequests = reqs.Withdrawals() + res.ConsolidationRequests = reqs.Consolidations() + } return res } @@ -209,6 +215,11 @@ func ConvertPayloadFromRpc(payload *types2.ExecutionPayload) *ExecutionPayload { excessBlobGas := *payload.ExcessBlobGas res.ExcessBlobGas = (*hexutil.Uint64)(&excessBlobGas) } + if payload.Version >= 4 { + res.DepositRequests = ConvertDepositRequestsFromRpc(payload.DepositRequests) + res.WithdrawalRequests = ConvertWithdrawalRequestsFromRpc(payload.WithdrawalRequests) + res.ConsolidationRequests = ConvertConsolidationRequestsFromRpc(payload.ConsolidationRequests) + } return res } @@ -265,6 +276,100 @@ func ConvertWithdrawalsFromRpc(in []*types2.Withdrawal) []*types.Withdrawal { return out } +func ConvertDepositRequestsToRpc(in []*types.DepositRequest) []*types2.DepositRequest { + if in == nil { + return nil + } + out := make([]*types2.DepositRequest, 0, len(in)) + for _, w := range in { + out = append(out, &types2.DepositRequest{ + Pubkey: w.Pubkey[:], + WithdrawalCredentials: gointerfaces.ConvertHashToH256(w.WithdrawalCredentials), + Amount: w.Amount, + Signature: w.Signature[:], + Index: w.Index, + }) + } + return out +} + +func ConvertDepositRequestsFromRpc(in []*types2.DepositRequest) []*types.DepositRequest { + if in == nil { + return nil + } + out := make([]*types.DepositRequest, 0, len(in)) + for _, w := range in { + out = append(out, &types.DepositRequest{ + Pubkey: [48]byte(w.Pubkey), + WithdrawalCredentials: gointerfaces.ConvertH256ToHash(w.WithdrawalCredentials), + Amount: w.Amount, + Signature: [96]byte(w.Signature), + Index: w.Index, + }) + } + return out +} + +func ConvertWithdrawalRequestsToRpc(in []*types.WithdrawalRequest) []*types2.WithdrawalRequest { + if in == nil { + return nil + } + out := make([]*types2.WithdrawalRequest, 0, len(in)) + for _, w := range in { + out = append(out, &types2.WithdrawalRequest{ + SourceAddress: gointerfaces.ConvertAddressToH160(w.SourceAddress), + ValidatorPubkey: w.ValidatorPubkey[:], + Amount: w.Amount, + }) + } + return out +} + +func ConvertWithdrawalRequestsFromRpc(in []*types2.WithdrawalRequest) []*types.WithdrawalRequest { + if in == nil { + return nil + } + out := make([]*types.WithdrawalRequest, 0, len(in)) + for _, w := range in { + out = append(out, &types.WithdrawalRequest{ + SourceAddress: gointerfaces.ConvertH160toAddress(w.SourceAddress), + ValidatorPubkey: [48]byte(w.ValidatorPubkey), + Amount: w.Amount, + }) + } + return out +} + +func ConvertConsolidationRequestsToRpc(in []*types.ConsolidationRequest) []*types2.ConsolidationRequest { + if in == nil { + return nil + } + out := make([]*types2.ConsolidationRequest, 0, len(in)) + for _, w := range in { + out = append(out, &types2.ConsolidationRequest{ + SourceAddress: gointerfaces.ConvertAddressToH160(w.SourceAddress), + SourcePubkey: w.SourcePubKey[:], 
+ TargetPubkey: w.TargetPubKey[:], + }) + } + return out +} + +func ConvertConsolidationRequestsFromRpc(in []*types2.ConsolidationRequest) []*types.ConsolidationRequest { + if in == nil { + return nil + } + out := make([]*types.ConsolidationRequest, 0, len(in)) + for _, c := range in { + out = append(out, &types.ConsolidationRequest{ + SourceAddress: gointerfaces.ConvertH160toAddress(c.SourceAddress), + SourcePubKey: [48]byte(c.SourcePubkey), + TargetPubKey: [48]byte(c.TargetPubkey), + }) + } + return out +} + func ConvertPayloadId(payloadId uint64) *hexutility.Bytes { encodedPayloadId := make([]byte, 8) binary.BigEndian.PutUint64(encodedPayloadId, payloadId) diff --git a/turbo/execution/eth1/block_building.go b/turbo/execution/eth1/block_building.go index 3473f44b8a7..dd2d3b3b832 100644 --- a/turbo/execution/eth1/block_building.go +++ b/turbo/execution/eth1/block_building.go @@ -19,6 +19,7 @@ package eth1 import ( "context" "fmt" + "math" "reflect" "github.com/holiman/uint256" @@ -33,14 +34,15 @@ import ( "github.com/erigontech/erigon/rpc" "github.com/erigontech/erigon/turbo/builder" "github.com/erigontech/erigon/turbo/engineapi/engine_helpers" + "github.com/erigontech/erigon/turbo/engineapi/engine_types" "github.com/erigontech/erigon/turbo/execution/eth1/eth1_utils" ) func (e *EthereumExecutionModule) checkWithdrawalsPresence(time uint64, withdrawals []*types.Withdrawal) error { - if !e.config.IsShanghai(0, time) && withdrawals != nil { + if !e.config.IsShanghai(math.MaxUint64, time) && withdrawals != nil { return &rpc.InvalidParamsError{Message: "withdrawals before shanghai"} } - if e.config.IsShanghai(0, time) && withdrawals == nil { + if e.config.IsShanghai(math.MaxUint64, time) && withdrawals == nil { return &rpc.InvalidParamsError{Message: "missing withdrawals list"} } return nil @@ -58,7 +60,6 @@ func (e *EthereumExecutionModule) evictOldBuilders() { // Missing: NewPayload, AssembleBlock func (e *EthereumExecutionModule) AssembleBlock(ctx context.Context, req *execution.AssembleBlockRequest) (*execution.AssembleBlockResponse, error) { if !e.semaphore.TryAcquire(1) { - e.logger.Warn("ethereumExecutionModule.AssembleBlock: ExecutionStatus_Busy") return &execution.AssembleBlockResponse{ Id: 0, Busy: true, @@ -127,7 +128,6 @@ func blockValue(br *types.BlockWithReceipts, baseFee *uint256.Int) *uint256.Int func (e *EthereumExecutionModule) GetAssembledBlock(ctx context.Context, req *execution.GetAssembledBlockRequest) (*execution.GetAssembledBlockResponse, error) { if !e.semaphore.TryAcquire(1) { - e.logger.Warn("ethereumExecutionModule.GetAssembledBlock: ExecutionStatus_Busy") return &execution.GetAssembledBlockResponse{ Busy: true, }, nil @@ -184,6 +184,13 @@ func (e *EthereumExecutionModule) GetAssembledBlock(ctx context.Context, req *ex payload.BlobGasUsed = header.BlobGasUsed payload.ExcessBlobGas = header.ExcessBlobGas } + reqs := block.Requests() + if reqs != nil { + payload.Version = 4 + payload.DepositRequests = engine_types.ConvertDepositRequestsToRpc(reqs.Deposits()) + payload.WithdrawalRequests = engine_types.ConvertWithdrawalRequestsToRpc(reqs.Withdrawals()) + payload.ConsolidationRequests = engine_types.ConvertConsolidationRequestsToRpc(reqs.Consolidations()) + } blockValue := blockValue(blockWithReceipts, baseFee) diff --git a/turbo/execution/eth1/eth1_chain_reader.go/chain_reader.go b/turbo/execution/eth1/eth1_chain_reader.go/chain_reader.go index 43104b7fc1f..e0730a84da8 100644 --- a/turbo/execution/eth1/eth1_chain_reader.go/chain_reader.go +++ 
b/turbo/execution/eth1/eth1_chain_reader.go/chain_reader.go @@ -18,6 +18,7 @@ package eth1_chain_reader import ( "context" + "errors" "fmt" "math/big" "time" @@ -408,7 +409,7 @@ func (c ChainReaderWriterEth1) AssembleBlock(baseHash libcommon.Hash, attributes return 0, err } if resp.Busy { - return 0, fmt.Errorf("execution data is still syncing") + return 0, errors.New("execution data is still syncing") } return resp.Id, nil } @@ -421,7 +422,7 @@ func (c ChainReaderWriterEth1) GetAssembledBlock(id uint64) (*cltypes.Eth1Block, return nil, nil, nil, err } if resp.Busy { - return nil, nil, nil, fmt.Errorf("execution data is still syncing") + return nil, nil, nil, errors.New("execution data is still syncing") } if resp.Data == nil { return nil, nil, nil, nil diff --git a/turbo/execution/eth1/ethereum_execution.go b/turbo/execution/eth1/ethereum_execution.go index 26997319e2b..04e954d61c4 100644 --- a/turbo/execution/eth1/ethereum_execution.go +++ b/turbo/execution/eth1/ethereum_execution.go @@ -85,6 +85,10 @@ type EthereumExecutionModule struct { doingPostForkchoice atomic.Bool + // metrics for average mgas/sec + avgMgasSec float64 + recordedMgasSec uint64 + execution.UnimplementedExecutionServer } diff --git a/turbo/execution/eth1/forkchoice.go b/turbo/execution/eth1/forkchoice.go index fbb0d720807..6d5673b277e 100644 --- a/turbo/execution/eth1/forkchoice.go +++ b/turbo/execution/eth1/forkchoice.go @@ -21,7 +21,6 @@ import ( "errors" "fmt" "runtime" - "slices" "time" "github.com/erigontech/erigon-lib/common" @@ -37,6 +36,7 @@ import ( "github.com/erigontech/erigon/eth/consensuschain" "github.com/erigontech/erigon/eth/stagedsync" "github.com/erigontech/erigon/eth/stagedsync/stages" + "github.com/erigontech/erigon/turbo/engineapi/engine_helpers" ) const startPruneFrom = 1024 @@ -46,14 +46,22 @@ type forkchoiceOutcome struct { err error } -func sendForkchoiceReceiptWithoutWaiting(ch chan forkchoiceOutcome, receipt *execution.ForkChoiceReceipt) { +func sendForkchoiceReceiptWithoutWaiting(ch chan forkchoiceOutcome, receipt *execution.ForkChoiceReceipt, alreadySent bool) { + if alreadySent { + return + } select { case ch <- forkchoiceOutcome{receipt: receipt}: default: } } -func sendForkchoiceErrorWithoutWaiting(ch chan forkchoiceOutcome, err error) { +func sendForkchoiceErrorWithoutWaiting(logger log.Logger, ch chan forkchoiceOutcome, err error, alreadySent bool) { + if alreadySent { + logger.Warn("forkchoice: error received after result was sent", "error", err) + return + } + select { case ch <- forkchoiceOutcome{err: err}: default: @@ -142,13 +150,13 @@ func (e *EthereumExecutionModule) updateForkChoice(ctx context.Context, original sendForkchoiceReceiptWithoutWaiting(outcomeCh, &execution.ForkChoiceReceipt{ LatestValidHash: gointerfaces.ConvertHashToH256(common.Hash{}), Status: execution.ExecutionStatus_Busy, - }) + }, false) return } defer e.semaphore.Release(1) //if err := stages2.ProcessFrozenBlocks(ctx, e.db, e.blockReader, e.executionPipeline); err != nil { - // sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + // sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, false) // e.logger.Warn("ProcessFrozenBlocks", "error", err) // return //} @@ -163,7 +171,7 @@ func (e *EthereumExecutionModule) updateForkChoice(ctx context.Context, original } return rawdb.WriteLastNewBlockSeen(tx, *num) }); err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, false) return } @@ -174,7 +182,7 @@ func (e *EthereumExecutionModule) 
updateForkChoice(ctx context.Context, original } tx, err := e.db.BeginRwNosync(ctx) if err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, false) return } defer tx.Rollback() @@ -183,23 +191,23 @@ func (e *EthereumExecutionModule) updateForkChoice(ctx context.Context, original finishProgressBefore, err := stages.GetStageProgress(tx, stages.Finish) if err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, false) return } headersProgressBefore, err := stages.GetStageProgress(tx, stages.Headers) if err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, false) return } // Step one, find reconnection point, and mark all of those headers as canonical. fcuHeader, err := e.blockReader.HeaderByHash(ctx, tx, originalBlockHash) if err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, false) return } if fcuHeader == nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, fmt.Errorf("forkchoice: block %x not found or was marked invalid", blockHash)) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, fmt.Errorf("forkchoice: block %x not found or was marked invalid", blockHash), false) return } @@ -212,7 +220,7 @@ func (e *EthereumExecutionModule) updateForkChoice(ctx context.Context, original canonicalHash, err := rawdb.ReadCanonicalHash(tx, fcuHeader.Number.Uint64()) if err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, false) return } if fcuHeader.Number.Uint64() > 0 { @@ -221,20 +229,20 @@ func (e *EthereumExecutionModule) updateForkChoice(ctx context.Context, original writeForkChoiceHashes(tx, blockHash, safeHash, finalizedHash) valid, err := e.verifyForkchoiceHashes(ctx, tx, blockHash, finalizedHash, safeHash) if err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, false) return } if !valid { sendForkchoiceReceiptWithoutWaiting(outcomeCh, &execution.ForkChoiceReceipt{ LatestValidHash: gointerfaces.ConvertHashToH256(common.Hash{}), Status: execution.ExecutionStatus_InvalidForkchoice, - }) + }, false) return } sendForkchoiceReceiptWithoutWaiting(outcomeCh, &execution.ForkChoiceReceipt{ LatestValidHash: gointerfaces.ConvertHashToH256(blockHash), Status: execution.ExecutionStatus_Success, - }) + }, false) return } @@ -243,7 +251,7 @@ func (e *EthereumExecutionModule) updateForkChoice(ctx context.Context, original sendForkchoiceReceiptWithoutWaiting(outcomeCh, &execution.ForkChoiceReceipt{ LatestValidHash: gointerfaces.ConvertHashToH256(common.Hash{}), Status: execution.ExecutionStatus_MissingSegment, - }) + }, false) return } @@ -251,7 +259,7 @@ func (e *EthereumExecutionModule) updateForkChoice(ctx context.Context, original currentParentNumber := fcuHeader.Number.Uint64() - 1 isCanonicalHash, err := rawdb.IsCanonicalHash(tx, currentParentHash, currentParentNumber) if err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, false) return } // Find such point, and collect all hashes @@ -267,14 +275,14 @@ func (e *EthereumExecutionModule) updateForkChoice(ctx context.Context, original }) currentHeader, err := e.blockReader.Header(ctx, tx, currentParentHash, currentParentNumber) if err != nil { - 
sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, false) return } if currentHeader == nil { sendForkchoiceReceiptWithoutWaiting(outcomeCh, &execution.ForkChoiceReceipt{ LatestValidHash: gointerfaces.ConvertHashToH256(common.Hash{}), Status: execution.ExecutionStatus_MissingSegment, - }) + }, false) return } currentParentHash = currentHeader.ParentHash @@ -284,30 +292,30 @@ func (e *EthereumExecutionModule) updateForkChoice(ctx context.Context, original currentParentNumber = currentHeader.Number.Uint64() - 1 isCanonicalHash, err = rawdb.IsCanonicalHash(tx, currentParentHash, currentParentNumber) if err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, false) return } } if err := e.executionPipeline.UnwindTo(currentParentNumber, stagedsync.ForkChoice, tx); err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, false) return } if e.hook != nil { if err = e.hook.BeforeRun(tx, isSynced); err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, false) return } } // Run the unwind if err := e.executionPipeline.RunUnwind(e.db, wrap.TxContainer{Tx: tx}); err != nil { err = fmt.Errorf("updateForkChoice: %w", err) - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, false) return } if err := rawdbv3.TxNums.Truncate(tx, currentParentNumber+1); err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, false) return } // Mark all new canonicals as canonicals @@ -318,70 +326,78 @@ func (e *EthereumExecutionModule) updateForkChoice(ctx context.Context, original h := rawdb.ReadHeader(tx, canonicalSegment.hash, canonicalSegment.number) if b == nil || h == nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, fmt.Errorf("unexpected chain cap: %d", canonicalSegment.number)) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, fmt.Errorf("unexpected chain cap: %d", canonicalSegment.number), false) return } if err := e.engine.VerifyHeader(chainReader, h, true); err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, false) return } if err := e.engine.VerifyUncles(chainReader, h, b.Uncles); err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, false) return } if err := rawdb.WriteCanonicalHash(tx, canonicalSegment.hash, canonicalSegment.number); err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, false) return } } if len(newCanonicals) > 0 { if err := rawdbv3.TxNums.Truncate(tx, newCanonicals[0].number); err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, false) return } if err := rawdb.AppendCanonicalTxNums(tx, newCanonicals[len(newCanonicals)-1].number); err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, false) return } } } if isDomainAheadOfBlocks(tx) { if err := tx.Commit(); err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, false) return } 
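All of these call sites now route through the same guarded, non-blocking send: the buffered channel plus `select`/`default` keeps the forkchoice goroutine from ever blocking on a departed receiver, and the new `alreadySent` flag keeps a late error from racing in after an outcome has already been delivered (used below, where success is sent early while the extending fork is still being flushed). A compact sketch of the pattern, with illustrative types and messages rather than Erigon's:

```go
// Sketch of the send-once, non-blocking outcome delivery; the outcome type
// and messages are illustrative, not Erigon's.
package main

import (
	"errors"
	"fmt"
)

type outcome struct {
	status string
	err    error
}

// sendWithoutWaiting delivers at most one outcome and never blocks the caller;
// once alreadySent is true, later errors are only logged, never sent.
func sendWithoutWaiting(ch chan outcome, o outcome, alreadySent bool) {
	if alreadySent {
		fmt.Println("late outcome suppressed:", o.err)
		return
	}
	select {
	case ch <- o:
	default: // receiver already gone or buffer full: drop instead of blocking
	}
}

func main() {
	ch := make(chan outcome, 1)
	// Early success is sent as soon as the fork is known to be valid...
	sendWithoutWaiting(ch, outcome{status: "success"}, false)
	// ...so a failure during the later flush must not race a second value in.
	sendWithoutWaiting(ch, outcome{err: errors.New("flush failed")}, true)
	fmt.Println(<-ch)
}
```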
sendForkchoiceReceiptWithoutWaiting(outcomeCh, &execution.ForkChoiceReceipt{ LatestValidHash: gointerfaces.ConvertHashToH256(common.Hash{}), Status: execution.ExecutionStatus_TooFarAway, ValidationError: "domain ahead of blocks", - }) + }, false) return } // Set Progress for headers and bodies accordingly. if err := stages.SaveStageProgress(tx, stages.Headers, fcuHeader.Number.Uint64()); err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, false) return } if err := stages.SaveStageProgress(tx, stages.BlockHashes, fcuHeader.Number.Uint64()); err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, false) return } if err := stages.SaveStageProgress(tx, stages.Bodies, fcuHeader.Number.Uint64()); err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, false) return } if err = rawdb.WriteHeadHeaderHash(tx, blockHash); err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, false) return } - if blockHash == e.forkValidator.ExtendingForkHeadHash() { - e.logger.Info("[updateForkchoice] Fork choice update: flushing in-memory state (built by previous newPayload)") + + flushExtendingFork := blockHash == e.forkValidator.ExtendingForkHeadHash() + if flushExtendingFork { + e.logger.Debug("[updateForkchoice] Fork choice update: flushing in-memory state (built by previous newPayload)") + // Send forkchoice early (We already know the fork is valid) + sendForkchoiceReceiptWithoutWaiting(outcomeCh, &execution.ForkChoiceReceipt{ + LatestValidHash: gointerfaces.ConvertHashToH256(blockHash), + Status: execution.ExecutionStatus_Success, + ValidationError: validationError, + }, false) if err := e.forkValidator.FlushExtendingFork(tx, e.accumulator); err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, flushExtendingFork) return } } @@ -391,12 +407,10 @@ func (e *EthereumExecutionModule) updateForkChoice(ctx context.Context, original if _, err := e.executionPipeline.Run(e.db, wrap.TxContainer{Tx: tx}, initialCycle, firstCycle); err != nil { err = fmt.Errorf("updateForkChoice: %w", err) e.logger.Warn("Cannot update chain head", "hash", blockHash, "err", err) - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, flushExtendingFork) return } - timings := slices.Clone(e.executionPipeline.PrintTimings()) - // if head hash was set then success otherwise no headHash := rawdb.ReadHeadBlockHash(tx) headNumber := rawdb.ReadHeaderNumber(tx, headHash) @@ -414,28 +428,28 @@ func (e *EthereumExecutionModule) updateForkChoice(ctx context.Context, original } else { valid, err := e.verifyForkchoiceHashes(ctx, tx, blockHash, finalizedHash, safeHash) if err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, flushExtendingFork) return } if !valid { sendForkchoiceReceiptWithoutWaiting(outcomeCh, &execution.ForkChoiceReceipt{ Status: execution.ExecutionStatus_InvalidForkchoice, LatestValidHash: gointerfaces.ConvertHashToH256(common.Hash{}), - }) + }, flushExtendingFork) return } if err := rawdb.TruncateCanonicalChain(ctx, tx, *headNumber+1); err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, 
outcomeCh, err, flushExtendingFork) return } if err := rawdbv3.TxNums.Truncate(tx, *headNumber+1); err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, flushExtendingFork) return } commitStart := time.Now() if err := tx.Commit(); err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, flushExtendingFork) return } commitTime := time.Since(commitStart) @@ -444,37 +458,45 @@ func (e *EthereumExecutionModule) updateForkChoice(ctx context.Context, original if err := e.db.View(ctx, func(tx kv.Tx) error { return e.hook.AfterRun(tx, finishProgressBefore) }); err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, flushExtendingFork) return } } // force fsync after notifications are sent if err := e.db.Update(ctx, func(tx kv.RwTx) error { - return kv.IncrementKey(tx, kv.DatabaseInfo, []byte("alex")) + return kv.IncrementKey(tx, kv.DatabaseInfo, []byte("chaindata_force")) }); err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + sendForkchoiceErrorWithoutWaiting(e.logger, outcomeCh, err, flushExtendingFork) return } - if log { - e.logger.Info("head updated", "number", *headNumber, "hash", headHash) - } - var m runtime.MemStats dbg.ReadMemStats(&m) - timings = append(timings, e.forkValidator.GetTimings(headHash)...) - timings = append(timings, "commit", commitTime, "alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys)) - e.logger.Info("Timings (slower than 50ms)", timings...) + blockTimings := e.forkValidator.GetTimings(blockHash) + logArgs := []interface{}{"head", headHash, "hash", blockHash} + if flushExtendingFork { + totalTime := blockTimings[engine_helpers.BlockTimingsValidationIndex] + gasUsedMgas := float64(fcuHeader.GasUsed) / 1e6 + mgasPerSec := gasUsedMgas / totalTime.Seconds() + e.avgMgasSec = ((e.avgMgasSec * (float64(e.recordedMgasSec))) + mgasPerSec) / float64(e.recordedMgasSec+1) + e.recordedMgasSec++ + logArgs = append(logArgs, "number", fcuHeader.Number.Uint64(), "execution", blockTimings[engine_helpers.BlockTimingsValidationIndex], "mgas/s", fmt.Sprintf("%.2f", mgasPerSec), "average mgas/s", fmt.Sprintf("%.2f", e.avgMgasSec)) + } + logArgs = append(logArgs, "commit", commitTime, "alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys)) + if log { + e.logger.Info("head updated", logArgs...) + } } if *headNumber >= startPruneFrom { e.runPostForkchoiceInBackground(initialCycle) } + sendForkchoiceReceiptWithoutWaiting(outcomeCh, &execution.ForkChoiceReceipt{ LatestValidHash: gointerfaces.ConvertHashToH256(headHash), Status: status, ValidationError: validationError, - }) + }, flushExtendingFork) } func (e *EthereumExecutionModule) runPostForkchoiceInBackground(initialCycle bool) { diff --git a/turbo/execution/eth1/getters.go b/turbo/execution/eth1/getters.go index a0e5964c2f8..e9865424f2d 100644 --- a/turbo/execution/eth1/getters.go +++ b/turbo/execution/eth1/getters.go @@ -292,7 +292,7 @@ func (e *EthereumExecutionModule) CurrentHeader(ctx context.Context, _ *emptypb. 
return nil, fmt.Errorf("ethereumExecutionModule.CurrentHeader: blockReader.Header error %w", err) } if h == nil { - return nil, fmt.Errorf("ethereumExecutionModule.CurrentHeader: no current header yet - probabably node not synced yet") + return nil, errors.New("ethereumExecutionModule.CurrentHeader: no current header yet - probably node not synced yet") } return &execution.GetHeaderResponse{ Header: eth1_utils.HeaderToHeaderRPC(h), diff --git a/turbo/jsonrpc/daemon.go b/turbo/jsonrpc/daemon.go index 3978ba850b3..588bfa01562 100644 --- a/turbo/jsonrpc/daemon.go +++ b/turbo/jsonrpc/daemon.go @@ -17,12 +17,12 @@ package jsonrpc import ( - "github.com/erigontech/erigon-lib/log/v3" "github.com/erigontech/erigon/consensus/parlia" txpool "github.com/erigontech/erigon-lib/gointerfaces/txpoolproto" "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon-lib/kv/kvcache" + "github.com/erigontech/erigon-lib/log/v3" "github.com/erigontech/erigon/cmd/rpcdaemon/cli/httpcfg" "github.com/erigontech/erigon/consensus" "github.com/erigontech/erigon/consensus/clique" @@ -36,9 +36,9 @@ import ( func APIList(db kv.RoDB, eth rpchelper.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, filters *rpchelper.Filters, stateCache kvcache.Cache, blockReader services.FullBlockReader, cfg *httpcfg.HttpCfg, engine consensus.EngineReader, - logger log.Logger, + logger log.Logger, bridgeReader bridgeReader, ) (list []rpc.API) { - base := NewBaseApi(filters, stateCache, blockReader, cfg.WithDatadir, cfg.EvmCallTimeout, engine, cfg.Dirs) + base := NewBaseApi(filters, stateCache, blockReader, cfg.WithDatadir, cfg.EvmCallTimeout, engine, cfg.Dirs, bridgeReader) ethImpl := NewEthAPI(base, db, eth, txPool, mining, cfg.Gascap, cfg.Feecap, cfg.ReturnDataLimit, cfg.AllowUnprotectedTxs, cfg.MaxGetProofRewindBlockCount, cfg.WebsocketSubscribeLogsChannelSize, logger) erigonImpl := NewErigonAPI(base, db, eth) txpoolImpl := NewTxPoolAPI(base, db, txPool) diff --git a/turbo/jsonrpc/debug_api.go b/turbo/jsonrpc/debug_api.go index 8e53809e37c..cad67631c87 100644 --- a/turbo/jsonrpc/debug_api.go +++ b/turbo/jsonrpc/debug_api.go @@ -18,6 +18,7 @@ package jsonrpc import ( "context" + "errors" "fmt" "github.com/erigontech/erigon/consensus" "github.com/erigontech/erigon/eth/stagedsync" @@ -91,7 +92,7 @@ func (api *PrivateDebugAPIImpl) StorageRangeAt(ctx context.Context, blockHash co number := rawdb.ReadHeaderNumber(tx, blockHash) if number == nil { - return StorageRangeResult{}, fmt.Errorf("block not found") + return StorageRangeResult{}, errors.New("block not found") } minTxNum, err := rawdbv3.TxNums.Min(tx, *number) if err != nil { @@ -112,7 +113,7 @@ func (api *PrivateDebugAPIImpl) AccountRange(ctx context.Context, blockNrOrHash if number, ok := blockNrOrHash.Number(); ok { if number == rpc.PendingBlockNumber { - return state.IteratorDump{}, fmt.Errorf("accountRange for pending block not supported") + return state.IteratorDump{}, errors.New("accountRange for pending block not supported") } if number == rpc.LatestBlockNumber { var err error @@ -343,7 +344,7 @@ func (api *PrivateDebugAPIImpl) AccountAt(ctx context.Context, blockHash common.
canonicalHash, _ := api._blockReader.CanonicalHash(ctx, tx, *number) isCanonical := canonicalHash == blockHash if !isCanonical { - return nil, fmt.Errorf("block hash is not canonical") + return nil, errors.New("block hash is not canonical") } minTxNum, err := rawdbv3.TxNums.Min(tx, *number) @@ -398,7 +399,7 @@ func (api *PrivateDebugAPIImpl) GetRawHeader(ctx context.Context, blockNrOrHash return nil, err } if header == nil { - return nil, fmt.Errorf("header not found") + return nil, errors.New("header not found") } return rlp.EncodeToBytes(header) } @@ -418,7 +419,7 @@ func (api *PrivateDebugAPIImpl) GetRawBlock(ctx context.Context, blockNrOrHash r return nil, err } if block == nil { - return nil, fmt.Errorf("block not found") + return nil, errors.New("block not found") } return rlp.EncodeToBytes(block) } diff --git a/turbo/jsonrpc/debug_api_test.go b/turbo/jsonrpc/debug_api_test.go index 9e3ba152be8..100295a6a5c 100644 --- a/turbo/jsonrpc/debug_api_test.go +++ b/turbo/jsonrpc/debug_api_test.go @@ -69,7 +69,7 @@ var debugTraceTransactionNoRefundTests = []struct { func TestTraceBlockByNumber(t *testing.T) { m, _, _ := rpcdaemontest.CreateTestSentry(t) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - baseApi := NewBaseApi(nil, stateCache, m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs) + baseApi := NewBaseApi(nil, stateCache, m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs, nil) ethApi := NewEthAPI(baseApi, m.DB, nil, nil, nil, 5000000, 1e18, 100_000, false, 100_000, 128, log.New()) api := NewPrivateDebugAPI(baseApi, m.DB, 0) for _, tt := range debugTraceTransactionTests { diff --git a/turbo/jsonrpc/erigon_receipts.go b/turbo/jsonrpc/erigon_receipts.go index 51bd791d345..48c52f09b2b 100644 --- a/turbo/jsonrpc/erigon_receipts.go +++ b/turbo/jsonrpc/erigon_receipts.go @@ -18,6 +18,7 @@ package jsonrpc import ( "context" + "errors" "fmt" "github.com/RoaringBitmap/roaring" @@ -52,7 +53,7 @@ func (api *ErigonImpl) GetLogsByHash(ctx context.Context, hash common.Hash) ([][ if block == nil { return nil, nil } - receipts, err := api.getReceipts(ctx, tx, block, block.Body().SendersFromTxs()) + receipts, err := api.getReceipts(ctx, tx, block) if err != nil { return nil, fmt.Errorf("getReceipts error: %w", err) } @@ -130,7 +131,7 @@ func (api *ErigonImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) // {{A}} matches topic A in any positions. 
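A recurring cleanup throughout this patch: `fmt.Errorf` with no format verbs becomes `errors.New`, `fmt.Sprintf` around a verb-free literal disappears (the `utils.Fatalf` hunks), and plain integer formatting moves to `strconv` (the `testlogger` change earlier). A standalone sketch of the three rewrites, with illustrative strings, presumably driven by a perfsprint-style lint:

```go
// Sketch of the three perfsprint-style rewrites; strings and id are illustrative.
package main

import (
	"errors"
	"fmt"
	"strconv"
)

func main() {
	// before: utils.Fatalf(fmt.Sprintf("error: ...")) -- Sprintf adds nothing
	// when the string contains no format verbs.
	msg := "error: --prune.distance is only allowed with --prune.mode=archive"
	fmt.Println(msg)

	// before: fmt.Errorf("headerdownload is nil") -- no verbs, so errors.New
	// is equivalent and skips scanning the format string.
	err := errors.New("headerdownload is nil")
	fmt.Println(err)

	// before: fmt.Sprintf("%d", id) -- strconv converts the integer directly,
	// without fmt's reflection-based formatting machinery.
	id := int32(7)
	fmt.Println(strconv.FormatUint(uint64(id), 10))
}
```

Beyond the small allocation win, `errors.New` makes it obvious at a glance that no format arguments were forgotten.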
Logs with {{B}, {A}} will be matched func (api *ErigonImpl) GetLatestLogs(ctx context.Context, crit filters.FilterCriteria, logOptions filters.LogFilterOptions) (types.ErigonLogs, error) { if logOptions.LogCount != 0 && logOptions.BlockCount != 0 { - return nil, fmt.Errorf("logs count & block count are ambigious") + return nil, errors.New("logs count & block count are ambiguous") } if logOptions.LogCount == 0 && logOptions.BlockCount == 0 { logOptions = filters.DefaultLogFilterOptions() } @@ -357,7 +358,7 @@ func (api *ErigonImpl) GetBlockReceiptsByBlockHash(ctx context.Context, cannonic if err != nil { return nil, err } - receipts, err := api.getReceipts(ctx, tx, block, block.Body().SendersFromTxs()) + receipts, err := api.getReceipts(ctx, tx, block) if err != nil { return nil, fmt.Errorf("getReceipts error: %w", err) } diff --git a/turbo/jsonrpc/eth_api.go b/turbo/jsonrpc/eth_api.go index 2fcbf8de72c..a00c8b6877d 100644 --- a/turbo/jsonrpc/eth_api.go +++ b/turbo/jsonrpc/eth_api.go @@ -19,7 +19,7 @@ package jsonrpc import ( "bytes" "context" - "fmt" + "errors" "math/big" "sync" "sync/atomic" @@ -127,9 +127,8 @@ type EthAPI interface { type BaseAPI struct { // all caches are thread-safe - stateCache kvcache.Cache - blocksLRU *lru.Cache[common.Hash, *types.Block] - receiptsCache *lru.Cache[common.Hash, []*types.Receipt] + stateCache kvcache.Cache + blocksLRU *lru.Cache[common.Hash, *types.Block] filters *rpchelper.Filters _chainConfig atomic.Pointer[chain.Config] @@ -140,12 +139,14 @@ type BaseAPI struct { _txnReader services.TxnReader _engine consensus.EngineReader + bridgeReader bridgeReader + evmCallTimeout time.Duration dirs datadir.Dirs receiptsGenerator *receipts.Generator } -func NewBaseApi(f *rpchelper.Filters, stateCache kvcache.Cache, blockReader services.FullBlockReader, singleNodeMode bool, evmCallTimeout time.Duration, engine consensus.EngineReader, dirs datadir.Dirs) *BaseAPI { +func NewBaseApi(f *rpchelper.Filters, stateCache kvcache.Cache, blockReader services.FullBlockReader, singleNodeMode bool, evmCallTimeout time.Duration, engine consensus.EngineReader, dirs datadir.Dirs, bridgeReader bridgeReader) *BaseAPI { var ( blocksLRUSize = 128 // ~32Mb receiptsCacheLimit = 32 @@ -159,24 +160,18 @@ func NewBaseApi(f *rpchelper.Filters, stateCache kvcache.Cache, blockReader serv if err != nil { panic(err) } - receiptsCache, err := lru.New[common.Hash, []*types.Receipt](receiptsCacheLimit) - if err != nil { - panic(err) - } - - receiptsGenerator := receipts.NewGenerator(receiptsCache, blockReader, engine) return &BaseAPI{ filters: f, stateCache: stateCache, blocksLRU: blocksLRU, - receiptsCache: receiptsCache, _blockReader: blockReader, _txnReader: blockReader, evmCallTimeout: evmCallTimeout, _engine: engine, - receiptsGenerator: receiptsGenerator, + receiptsGenerator: receipts.NewGenerator(receiptsCacheLimit, blockReader, engine), dirs: dirs, + bridgeReader: bridgeReader, } } @@ -261,7 +256,7 @@ func (api *BaseAPI) chainConfigWithGenesis(ctx context.Context, tx kv.Tx) (*chai return nil, nil, err } if genesisBlock == nil { - return nil, nil, fmt.Errorf("genesis block not found in database") + return nil, nil, errors.New("genesis block not found in database") } cc, err = rawdb.ReadChainConfig(tx, genesisBlock.Hash()) if err != nil { @@ -320,7 +315,7 @@ func (api *BaseAPI) checkPruneHistory(tx kv.Tx, block uint64) error { } prunedTo := p.History.PruneTo(latest) if block < prunedTo { - return fmt.Errorf("history has been pruned for this block") + return errors.New("history has been
pruned for this block") } } @@ -343,6 +338,11 @@ func (api *BaseAPI) pruneMode(tx kv.Tx) (*prune.Mode, error) { return p, nil } +type bridgeReader interface { + Events(ctx context.Context, blockNum uint64) ([]*types.Message, error) + EventTxnLookup(ctx context.Context, borTxHash common.Hash) (uint64, bool, error) +} + // APIImpl is implementation of the EthAPI interface based on remote Db access type APIImpl struct { *BaseAPI @@ -366,6 +366,10 @@ func NewEthAPI(base *BaseAPI, db kv.RoDB, eth rpchelper.ApiBackend, txPool txpoo gascap = uint64(math.MaxUint64 / 2) } + if base.bridgeReader != nil { + logger.Info("starting rpc with polygon bridge") + } + return &APIImpl{ BaseAPI: base, db: db, diff --git a/turbo/jsonrpc/eth_api_test.go b/turbo/jsonrpc/eth_api_test.go index 65b8d48dc67..d8973d8fbc0 100644 --- a/turbo/jsonrpc/eth_api_test.go +++ b/turbo/jsonrpc/eth_api_test.go @@ -42,7 +42,7 @@ import ( func newBaseApiForTest(m *mock.MockSentry) *BaseAPI { stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - return NewBaseApi(nil, stateCache, m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs) + return NewBaseApi(nil, stateCache, m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs, nil) } func TestGetBalanceChangesInBlock(t *testing.T) { @@ -71,7 +71,7 @@ func TestGetTransactionReceipt(t *testing.T) { m, _, _ := rpcdaemontest.CreateTestSentry(t) db := m.DB stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), db, nil, nil, nil, 5000000, 1e18, 100_000, false, 100_000, 128, log.New()) + api := NewEthAPI(NewBaseApi(nil, stateCache, m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs, nil), db, nil, nil, nil, 5000000, 1e18, 100_000, false, 100_000, 128, log.New()) // Call GetTransactionReceipt for transaction which is not in the database if _, err := api.GetTransactionReceipt(context.Background(), common.Hash{}); err != nil { t.Errorf("calling GetTransactionReceipt with empty hash: %v", err) diff --git a/turbo/jsonrpc/eth_block.go b/turbo/jsonrpc/eth_block.go index fbab1391182..79c0ec6a1fb 100644 --- a/turbo/jsonrpc/eth_block.go +++ b/turbo/jsonrpc/eth_block.go @@ -22,12 +22,11 @@ import ( "math/big" "time" - "github.com/erigontech/erigon-lib/log/v3" - "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/common/hexutil" "github.com/erigontech/erigon-lib/common/hexutility" "github.com/erigontech/erigon-lib/kv" + "github.com/erigontech/erigon-lib/log/v3" "github.com/erigontech/erigon/cl/clparams" "github.com/erigontech/erigon/common/math" "github.com/erigontech/erigon/core" @@ -367,7 +366,16 @@ func (api *APIImpl) GetBlockTransactionCountByNumber(ctx context.Context, blockN if chainConfig.Bor != nil { borStateSyncTxHash := bortypes.ComputeBorTxHash(blockNum, blockHash) - _, ok, err := api._blockReader.EventLookup(ctx, tx, borStateSyncTxHash) + + var ok bool + var err error + + if api.bridgeReader != nil { + _, ok, err = api.bridgeReader.EventTxnLookup(ctx, borStateSyncTxHash) + } else { + _, ok, err = api._blockReader.EventLookup(ctx, tx, borStateSyncTxHash) + } + if err != nil { return nil, err } @@ -408,7 +416,15 @@ func (api *APIImpl) GetBlockTransactionCountByHash(ctx context.Context, blockHas if chainConfig.Bor != nil { borStateSyncTxHash := bortypes.ComputeBorTxHash(blockNum, blockHash) - _, ok, err := api._blockReader.EventLookup(ctx, tx, borStateSyncTxHash) + + var ok bool + var err error + + if 
api.bridgeReader != nil { + _, ok, err = api.bridgeReader.EventTxnLookup(ctx, borStateSyncTxHash) + } else { + _, ok, err = api._blockReader.EventLookup(ctx, tx, borStateSyncTxHash) + } if err != nil { return nil, err } diff --git a/turbo/jsonrpc/eth_block_test.go b/turbo/jsonrpc/eth_block_test.go index 1d5aaed14e2..0b155d3cf32 100644 --- a/turbo/jsonrpc/eth_block_test.go +++ b/turbo/jsonrpc/eth_block_test.go @@ -104,7 +104,7 @@ func TestGetBlockByNumberWithPendingTag(t *testing.T) { RplBlock: rlpBlock, }) - api := NewEthAPI(NewBaseApi(ff, stateCache, m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, nil, nil, nil, 5000000, 1e18, 100_000, false, 100_000, 128, log.New()) + api := NewEthAPI(NewBaseApi(ff, stateCache, m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs, nil), m.DB, nil, nil, nil, 5000000, 1e18, 100_000, false, 100_000, 128, log.New()) b, err := api.GetBlockByNumber(context.Background(), rpc.PendingBlockNumber, false) if err != nil { t.Errorf("error getting block number with pending tag: %s", err) diff --git a/turbo/jsonrpc/eth_call.go b/turbo/jsonrpc/eth_call.go index e638eae19b8..e5e226a6da7 100644 --- a/turbo/jsonrpc/eth_call.go +++ b/turbo/jsonrpc/eth_call.go @@ -22,9 +22,6 @@ import ( "fmt" "math/big" - "github.com/holiman/uint256" - "google.golang.org/grpc" - libcommon "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/common/hexutil" "github.com/erigontech/erigon-lib/common/hexutility" @@ -33,6 +30,8 @@ import ( "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon-lib/log/v3" types2 "github.com/erigontech/erigon-lib/types" + "github.com/holiman/uint256" + "google.golang.org/grpc" "github.com/erigontech/erigon/core" "github.com/erigontech/erigon/core/state" @@ -202,7 +201,7 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi2.CallArgs stateReader := rpchelper.CreateLatestCachedStateReader(cacheView, dbtx) state := state.New(stateReader) if state == nil { - return 0, fmt.Errorf("can't get the current state") + return 0, errors.New("can't get the current state") } balance := state.GetBalance(*args.From) // from can't be nil @@ -254,7 +253,7 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi2.CallArgs } } if block == nil { - return 0, fmt.Errorf("could not find latest block in cache or db") + return 0, errors.New("could not find latest block in cache or db") } stateReader, err := rpchelper.CreateStateReaderFromBlockNumber(ctx, dbtx, latestCanBlockNumber, isLatest, 0, api.stateCache, chainConfig.ChainName) @@ -332,7 +331,7 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi2.CallArgs // GetProof is partially implemented; no Storage proofs, and proofs must be for // blocks within maxGetProofRewindBlockCount blocks of the head. 
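These lookup sites now prefer the bridge-backed path whenever a `bridgeReader` was injected, falling back to `_blockReader.EventLookup` otherwise. A runnable sketch of that optional-dependency pattern, with the hash type simplified to a string and the fallback body elided:

```go
// Sketch of the nil-checked optional dependency; the method signature is
// simplified (string instead of common.Hash) and the fallback is a stub.
package main

import (
	"context"
	"fmt"
)

// bridgeReader mirrors the shape of the small interface added in eth_api.go.
type bridgeReader interface {
	EventTxnLookup(ctx context.Context, borTxHash string) (uint64, bool, error)
}

type api struct {
	bridgeReader bridgeReader // nil when the polygon bridge is not wired in
}

func (a *api) eventLookup(ctx context.Context, hash string) (uint64, bool, error) {
	if a.bridgeReader != nil {
		return a.bridgeReader.EventTxnLookup(ctx, hash) // bridge-backed path
	}
	// Fallback path; stands in for the _blockReader.EventLookup call.
	return 0, false, nil
}

func main() {
	a := &api{} // constructed with bridgeReader == nil, as the updated tests do
	num, ok, err := a.eventLookup(context.Background(), "0xborTx")
	fmt.Println(num, ok, err)
}
```

Declaring the interface locally, at the consumer, keeps the jsonrpc package decoupled from any concrete bridge implementation and lets every existing test pass `nil`.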
func (api *APIImpl) GetProof(ctx context.Context, address libcommon.Address, storageKeys []libcommon.Hash, blockNrOrHash rpc.BlockNumberOrHash) (*accounts.AccProofResult, error) { - return nil, fmt.Errorf("not supported by Erigon3") + return nil, errors.New("not supported by Erigon3") /* tx, err := api.db.BeginRo(ctx) if err != nil { diff --git a/turbo/jsonrpc/eth_callMany.go b/turbo/jsonrpc/eth_callMany.go index 9b2bab097ec..8e36057e34c 100644 --- a/turbo/jsonrpc/eth_callMany.go +++ b/turbo/jsonrpc/eth_callMany.go @@ -19,6 +19,7 @@ package jsonrpc import ( "context" "encoding/hex" + "errors" "fmt" "math/big" "time" @@ -108,7 +109,7 @@ func (api *APIImpl) CallMany(ctx context.Context, bundles []Bundle, simulateCont return nil, err } if len(bundles) == 0 { - return nil, fmt.Errorf("empty bundles") + return nil, errors.New("empty bundles") } empty := true for _, bundle := range bundles { @@ -118,7 +119,7 @@ func (api *APIImpl) CallMany(ctx context.Context, bundles []Bundle, simulateCont } if empty { - return nil, fmt.Errorf("empty bundles") + return nil, errors.New("empty bundles") } defer func(start time.Time) { log.Trace("Executing EVM callMany finished", "runtime", time.Since(start)) }(time.Now()) diff --git a/turbo/jsonrpc/eth_callMany_test.go b/turbo/jsonrpc/eth_callMany_test.go index b6647929752..4c4f3947bc9 100644 --- a/turbo/jsonrpc/eth_callMany_test.go +++ b/turbo/jsonrpc/eth_callMany_test.go @@ -101,7 +101,7 @@ func TestCallMany(t *testing.T) { db := contractBackend.DB() engine := contractBackend.Engine() - api := NewEthAPI(NewBaseApi(nil, stateCache, contractBackend.BlockReader(), false, rpccfg.DefaultEvmCallTimeout, engine, datadir.New(t.TempDir())), db, nil, nil, nil, 5000000, 1e18, 100_000, false, 100_000, 128, log.New()) + api := NewEthAPI(NewBaseApi(nil, stateCache, contractBackend.BlockReader(), false, rpccfg.DefaultEvmCallTimeout, engine, datadir.New(t.TempDir()), nil), db, nil, nil, nil, 5000000, 1e18, 100_000, false, 100_000, 128, log.New()) callArgAddr1 := ethapi.CallArgs{From: &address, To: &tokenAddr, Nonce: &nonce, MaxPriorityFeePerGas: (*hexutil.Big)(big.NewInt(1e9)), diff --git a/turbo/jsonrpc/eth_call_test.go b/turbo/jsonrpc/eth_call_test.go index ed4d1c59d96..a6f0ff87c25 100644 --- a/turbo/jsonrpc/eth_call_test.go +++ b/turbo/jsonrpc/eth_call_test.go @@ -59,7 +59,7 @@ func TestEstimateGas(t *testing.T) { ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, mock.Mock(t)) mining := txpool.NewMiningClient(conn) ff := rpchelper.New(ctx, rpchelper.DefaultFiltersConfig, nil, nil, mining, func() {}, m.Log) - api := NewEthAPI(NewBaseApi(ff, stateCache, m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, nil, nil, nil, 5000000, 1e18, 100_000, false, 100_000, 128, log.New()) + api := NewEthAPI(NewBaseApi(ff, stateCache, m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs, nil), m.DB, nil, nil, nil, 5000000, 1e18, 100_000, false, 100_000, 128, log.New()) var from = libcommon.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") var to = libcommon.HexToAddress("0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e") if _, err := api.EstimateGas(context.Background(), ðapi.CallArgs{ @@ -73,7 +73,7 @@ func TestEstimateGas(t *testing.T) { func TestEthCallNonCanonical(t *testing.T) { m, _, _ := rpcdaemontest.CreateTestSentry(t) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, nil, nil, nil, 5000000, 1e18, 100_000, 
false, 100_000, 128, log.New()) + api := NewEthAPI(NewBaseApi(nil, stateCache, m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs, nil), m.DB, nil, nil, nil, 5000000, 1e18, 100_000, false, 100_000, 128, log.New()) var from = libcommon.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") var to = libcommon.HexToAddress("0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e") if _, err := api.Call(context.Background(), ethapi.CallArgs{ diff --git a/turbo/jsonrpc/eth_filters_test.go b/turbo/jsonrpc/eth_filters_test.go index a62fc1ee312..cde6732ea50 100644 --- a/turbo/jsonrpc/eth_filters_test.go +++ b/turbo/jsonrpc/eth_filters_test.go @@ -47,7 +47,7 @@ func TestNewFilters(t *testing.T) { ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, mock.Mock(t)) mining := txpool.NewMiningClient(conn) ff := rpchelper.New(ctx, rpchelper.DefaultFiltersConfig, nil, nil, mining, func() {}, m.Log) - api := NewEthAPI(NewBaseApi(ff, stateCache, m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, nil, nil, nil, 5000000, 1e18, 100_000, false, 100_000, 128, log.New()) + api := NewEthAPI(NewBaseApi(ff, stateCache, m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs, nil), m.DB, nil, nil, nil, 5000000, 1e18, 100_000, false, 100_000, 128, log.New()) ptf, err := api.NewPendingTransactionFilter(ctx) assert.Nil(err) diff --git a/turbo/jsonrpc/eth_mining_test.go b/turbo/jsonrpc/eth_mining_test.go index da09320fbd7..f51e3c178cf 100644 --- a/turbo/jsonrpc/eth_mining_test.go +++ b/turbo/jsonrpc/eth_mining_test.go @@ -44,7 +44,7 @@ func TestPendingBlock(t *testing.T) { ff := rpchelper.New(ctx, rpchelper.DefaultFiltersConfig, nil, nil, mining, func() {}, m.Log) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) engine := ethash.NewFaker() - api := NewEthAPI(NewBaseApi(ff, stateCache, m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, engine, m.Dirs), nil, nil, nil, mining, 5000000, 1e18, 100_000, false, 100_000, 128, log.New()) + api := NewEthAPI(NewBaseApi(ff, stateCache, m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, engine, m.Dirs, nil), nil, nil, nil, mining, 5000000, 1e18, 100_000, false, 100_000, 128, log.New()) expect := uint64(12345) b, err := rlp.EncodeToBytes(types.NewBlockWithHeader(&types.Header{Number: new(big.Int).SetUint64(expect)})) require.NoError(t, err) diff --git a/turbo/jsonrpc/eth_receipts.go b/turbo/jsonrpc/eth_receipts.go index a8c1fc52a46..ae22c6f50e9 100644 --- a/turbo/jsonrpc/eth_receipts.go +++ b/turbo/jsonrpc/eth_receipts.go @@ -42,13 +42,13 @@ import ( ) // getReceipts - checking in-mem cache, or else fallback to db, or else fallback to re-exec of block to re-gen receipts -func (api *BaseAPI) getReceipts(ctx context.Context, tx kv.Tx, block *types.Block, senders []common.Address) (types.Receipts, error) { +func (api *BaseAPI) getReceipts(ctx context.Context, tx kv.Tx, block *types.Block) (types.Receipts, error) { chainConfig, err := api.chainConfig(ctx, tx) if err != nil { return nil, err } - return api.receiptsGenerator.GetReceipts(ctx, chainConfig, tx, block, senders) + return api.receiptsGenerator.GetReceipts(ctx, chainConfig, tx, block) } // GetLogs implements eth_getLogs. Returns an array of logs matching a given filter object. 
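With this change `getReceipts` drops the `senders` parameter, and (per the `eth_api.go` hunks above) `BaseAPI` no longer holds a `receiptsCache` of its own: the generator is constructed from a size limit and owns its cache outright, so no caller can hold a second reference to it. A sketch of that encapsulation, again using hashicorp's `golang-lru/v2` as a stand-in for the repo's LRU and stub types for receipts:

```go
// Sketch only: stub receipt type, string keys, hashicorp LRU in place of
// the repo's cache; getReceipts' regeneration path is reduced to a stub.
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

type receipt struct{ status uint64 }

type generator struct {
	cache *lru.Cache[string, []*receipt]
}

// newGenerator takes only a size and builds the cache itself.
func newGenerator(cacheLimit int) *generator {
	c, err := lru.New[string, []*receipt](cacheLimit)
	if err != nil {
		panic(err) // mirrors the constructor-panic style used in BaseAPI
	}
	return &generator{cache: c}
}

func (g *generator) getReceipts(blockHash string) []*receipt {
	if cached, ok := g.cache.Get(blockHash); ok {
		return cached
	}
	regenerated := []*receipt{{status: 1}} // stand-in for re-executing the block
	g.cache.Add(blockHash, regenerated)
	return regenerated
}

func main() {
	g := newGenerator(32)
	fmt.Println(len(g.getReceipts("0xblock"))) // 1: regenerated, then cached
}
```

Dropping the `senders` argument follows from the same encapsulation: presumably the generator can now recover everything it needs from the block itself, so callers no longer thread `block.Body().SendersFromTxs()` through every call site.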
@@ -339,10 +339,9 @@ func (api *BaseAPI) getLogsV3(ctx context.Context, tx kv.TemporalTx, begin, end log.BlockHash = blockHash log.TxHash = txn.Hash() } - filteredErigonLogs := make(types.ErigonLogs, len(rawLogs)) //TODO: maybe Logs by default and enreach them with - for i, filteredLog := range filtered { - filteredErigonLogs[i] = &types.ErigonLog{ + for _, filteredLog := range filtered { + logs = append(logs, &types.ErigonLog{ Address: filteredLog.Address, Topics: filteredLog.Topics, Data: filteredLog.Data, @@ -353,9 +352,8 @@ func (api *BaseAPI) getLogsV3(ctx context.Context, tx kv.TemporalTx, begin, end Index: filteredLog.Index, Removed: filteredLog.Removed, Timestamp: timestamp, - } + }) } - logs = append(logs, filteredErigonLogs...) } //stats := api._agg.GetAndResetStats() @@ -428,7 +426,11 @@ func (api *APIImpl) GetTransactionReceipt(ctx context.Context, txnHash common.Ha } // Private API returns 0 if transaction is not found. if blockNum == 0 && cc.Bor != nil { - blockNum, ok, err = api._blockReader.EventLookup(ctx, tx, txnHash) + if api.bridgeReader != nil { + blockNum, ok, err = api.bridgeReader.EventTxnLookup(ctx, txnHash) + } else { + blockNum, ok, err = api._blockReader.EventLookup(ctx, tx, txnHash) + } if err != nil { return nil, err } @@ -463,7 +465,7 @@ func (api *APIImpl) GetTransactionReceipt(ctx context.Context, txnHash common.Ha borTx = bortypes.NewBorTransaction() } } - receipts, err := api.getReceipts(ctx, tx, block, block.Body().SendersFromTxs()) + receipts, err := api.getReceipts(ctx, tx, block) if err != nil { return nil, fmt.Errorf("getReceipts error: %w", err) } @@ -509,7 +511,7 @@ func (api *APIImpl) GetBlockReceipts(ctx context.Context, numberOrHash rpc.Block if err != nil { return nil, err } - receipts, err := api.getReceipts(ctx, tx, block, block.Body().SendersFromTxs()) + receipts, err := api.getReceipts(ctx, tx, block) if err != nil { return nil, fmt.Errorf("getReceipts error: %w", err) } diff --git a/turbo/jsonrpc/eth_system.go b/turbo/jsonrpc/eth_system.go index 30c0a4b9873..fa1f10302fa 100644 --- a/turbo/jsonrpc/eth_system.go +++ b/turbo/jsonrpc/eth_system.go @@ -244,7 +244,7 @@ func (b *GasPriceOracleBackend) ChainConfig() *chain.Config { return cc } func (b *GasPriceOracleBackend) GetReceipts(ctx context.Context, block *types.Block) (types.Receipts, error) { - return b.baseApi.getReceipts(ctx, b.tx, block, nil) + return b.baseApi.getReceipts(ctx, b.tx, block) } func (b *GasPriceOracleBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) { return nil, nil diff --git a/turbo/jsonrpc/eth_txs.go b/turbo/jsonrpc/eth_txs.go index c8dab63955d..878b72e0498 100644 --- a/turbo/jsonrpc/eth_txs.go +++ b/turbo/jsonrpc/eth_txs.go @@ -55,7 +55,12 @@ func (api *APIImpl) GetTransactionByHash(ctx context.Context, txnHash common.Has } // Private API returns 0 if transaction is not found. 
if blockNum == 0 && chainConfig.Bor != nil { - blockNum, ok, err = api._blockReader.EventLookup(ctx, tx, txnHash) + if api.bridgeReader != nil { + blockNum, ok, err = api.bridgeReader.EventTxnLookup(ctx, txnHash) + } else { + blockNum, ok, err = api._blockReader.EventLookup(ctx, tx, txnHash) + } + if err != nil { return nil, err } diff --git a/turbo/jsonrpc/gen_traces_test.go b/turbo/jsonrpc/gen_traces_test.go index 0af519aa32f..9463ee73ee9 100644 --- a/turbo/jsonrpc/gen_traces_test.go +++ b/turbo/jsonrpc/gen_traces_test.go @@ -45,7 +45,7 @@ Testing tracing RPC API by generating patters of contracts invoking one another func TestGeneratedDebugApi(t *testing.T) { m := rpcdaemontest.CreateTestSentryForTraces(t) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - baseApi := NewBaseApi(nil, stateCache, m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs) + baseApi := NewBaseApi(nil, stateCache, m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs, nil) api := NewPrivateDebugAPI(baseApi, m.DB, 0) var buf bytes.Buffer stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 4096) @@ -132,7 +132,7 @@ func TestGeneratedDebugApi(t *testing.T) { func TestGeneratedTraceApi(t *testing.T) { m := rpcdaemontest.CreateTestSentryForTraces(t) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - baseApi := NewBaseApi(nil, stateCache, m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs) + baseApi := NewBaseApi(nil, stateCache, m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs, nil) api := NewTraceAPI(baseApi, m.DB, &httpcfg.HttpCfg{}) traces, err := api.Block(context.Background(), rpc.BlockNumber(1), new(bool), nil) if err != nil { diff --git a/turbo/jsonrpc/graphql_api.go b/turbo/jsonrpc/graphql_api.go index 5d51cd74938..ae95a4640b9 100644 --- a/turbo/jsonrpc/graphql_api.go +++ b/turbo/jsonrpc/graphql_api.go @@ -72,7 +72,7 @@ func (api *GraphQLAPIImpl) GetBlockDetails(ctx context.Context, blockNumber rpc. } defer tx.Rollback() - block, senders, err := api.getBlockWithSenders(ctx, blockNumber, tx) + block, _, err := api.getBlockWithSenders(ctx, blockNumber, tx) if err != nil { return nil, err } @@ -90,7 +90,7 @@ func (api *GraphQLAPIImpl) GetBlockDetails(ctx context.Context, blockNumber rpc. 
return nil, err } - receipts, err := api.getReceipts(ctx, tx, block, senders) + receipts, err := api.getReceipts(ctx, tx, block) if err != nil { return nil, fmt.Errorf("getReceipts error: %w", err) } diff --git a/turbo/jsonrpc/otterscan_api.go b/turbo/jsonrpc/otterscan_api.go index 7c1960fa5ef..a7f941e7a98 100644 --- a/turbo/jsonrpc/otterscan_api.go +++ b/turbo/jsonrpc/otterscan_api.go @@ -333,7 +333,7 @@ func delegateBlockFees(ctx context.Context, tx kv.Tx, block *types.Block, sender totalFees := big.NewInt(0) for _, receipt := range receipts { txn := block.Transactions()[receipt.TransactionIndex] - effectiveGasPrice := uint64(0) + var effectiveGasPrice uint64 if !chainConfig.IsLondon(block.NumberU64()) { effectiveGasPrice = txn.GetPrice().Uint64() } else { @@ -379,7 +379,7 @@ func (api *OtterscanAPIImpl) GetBlockTransactions(ctx context.Context, number rp } defer tx.Rollback() - b, senders, err := api.getBlockWithSenders(ctx, number, tx) + b, _, err := api.getBlockWithSenders(ctx, number, tx) if err != nil { return nil, err } @@ -398,7 +398,7 @@ func (api *OtterscanAPIImpl) GetBlockTransactions(ctx context.Context, number rp } // Receipts - receipts, err := api.getReceipts(ctx, tx, b, senders) + receipts, err := api.getReceipts(ctx, tx, b) if err != nil { return nil, fmt.Errorf("getReceipts error: %v", err) } diff --git a/turbo/jsonrpc/otterscan_block_details.go b/turbo/jsonrpc/otterscan_block_details.go index bd1d16d8011..e96a21eb3d5 100644 --- a/turbo/jsonrpc/otterscan_block_details.go +++ b/turbo/jsonrpc/otterscan_block_details.go @@ -86,7 +86,7 @@ func (api *OtterscanAPIImpl) getBlockDetailsImpl(ctx context.Context, tx kv.Tx, if err != nil { return nil, err } - receipts, err := api.getReceipts(ctx, tx, b, senders) + receipts, err := api.getReceipts(ctx, tx, b) if err != nil { return nil, fmt.Errorf("getReceipts error: %v", err) } diff --git a/turbo/jsonrpc/otterscan_search_trace.go b/turbo/jsonrpc/otterscan_search_trace.go index 18e75cf2df2..6191c50ad55 100644 --- a/turbo/jsonrpc/otterscan_search_trace.go +++ b/turbo/jsonrpc/otterscan_search_trace.go @@ -93,7 +93,7 @@ func (api *OtterscanAPIImpl) traceBlock(dbtx kv.Tx, ctx context.Context, blockNu } engine := api.engine() - blockReceipts, err := api.getReceipts(ctx, dbtx, block, block.Body().SendersFromTxs()) + blockReceipts, err := api.getReceipts(ctx, dbtx, block) if err != nil { return false, nil, err } diff --git a/turbo/jsonrpc/otterscan_transaction_by_sender_and_nonce_test.go b/turbo/jsonrpc/otterscan_transaction_by_sender_and_nonce_test.go index 75501512c4b..2cfc7fa2781 100644 --- a/turbo/jsonrpc/otterscan_transaction_by_sender_and_nonce_test.go +++ b/turbo/jsonrpc/otterscan_transaction_by_sender_and_nonce_test.go @@ -19,15 +19,16 @@ package jsonrpc import ( "testing" + "github.com/stretchr/testify/require" + "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon/cmd/rpcdaemon/rpcdaemontest" "github.com/erigontech/erigon/rpc/rpccfg" - "github.com/stretchr/testify/require" ) func TestGetTransactionBySenderAndNonce(t *testing.T) { m, _, _ := rpcdaemontest.CreateTestSentry(t) - api := NewOtterscanAPI(NewBaseApi(nil, nil, m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, 25) + api := NewOtterscanAPI(NewBaseApi(nil, nil, m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs, nil), m.DB, 25) addr := common.HexToAddress("0x537e697c7ab75a26f9ecf0ce810e3154dfcaaf44") expectCreator := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") diff --git 
a/turbo/jsonrpc/overlay_api.go b/turbo/jsonrpc/overlay_api.go index e48579170cb..39943891c23 100644 --- a/turbo/jsonrpc/overlay_api.go +++ b/turbo/jsonrpc/overlay_api.go @@ -18,6 +18,7 @@ package jsonrpc import ( "context" + "errors" "fmt" "runtime" "sync" @@ -120,7 +121,7 @@ func (api *OverlayAPIImpl) CallConstructor(ctx context.Context, address common.A } if !ok { - return nil, fmt.Errorf("contract construction txn not found") + return nil, errors.New("contract construction txn not found") } err = api.BaseAPI.checkPruneHistory(tx, blockNum) @@ -144,7 +145,7 @@ func (api *OverlayAPIImpl) CallConstructor(ctx context.Context, address common.A } if transactionIndex == -1 { - return nil, fmt.Errorf("could not find txn hash") + return nil, errors.New("could not find txn hash") } replayTransactions = block.Transactions()[:transactionIndex] @@ -479,7 +480,7 @@ func (api *OverlayAPIImpl) replayBlock(ctx context.Context, blockNum uint64, sta gp := new(core.GasPool).AddGas(math.MaxUint64).AddBlobGas(math.MaxUint64) vmConfig := vm.Config{Debug: false} evm = vm.NewEVM(blockCtx, evmtypes.TxContext{}, statedb, chainConfig, vmConfig) - receipts, err := api.getReceipts(ctx, tx, block, block.Body().SendersFromTxs()) + receipts, err := api.getReceipts(ctx, tx, block) if err != nil { return nil, err } diff --git a/turbo/jsonrpc/parity_api.go b/turbo/jsonrpc/parity_api.go index c47c1be644a..6ade56eed38 100644 --- a/turbo/jsonrpc/parity_api.go +++ b/turbo/jsonrpc/parity_api.go @@ -18,6 +18,7 @@ package jsonrpc import ( "context" + "errors" "fmt" libcommon "github.com/erigontech/erigon-lib/common" @@ -70,7 +71,7 @@ func (api *ParityAPIImpl) ListStorageKeys(ctx context.Context, account libcommon if err != nil { return nil, err } else if a == nil { - return nil, fmt.Errorf("acc not found") + return nil, errors.New("acc not found") } bn := rawdb.ReadCurrentBlockNumber(tx) diff --git a/turbo/jsonrpc/parity_api_test.go b/turbo/jsonrpc/parity_api_test.go index 721dcb80803..0d91fbc47bb 100644 --- a/turbo/jsonrpc/parity_api_test.go +++ b/turbo/jsonrpc/parity_api_test.go @@ -18,12 +18,13 @@ package jsonrpc import ( "context" - "fmt" + "errors" "testing" - "github.com/erigontech/erigon/rpc/rpccfg" "github.com/stretchr/testify/assert" + "github.com/erigontech/erigon/rpc/rpccfg" + libcommon "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/common/hexutility" @@ -37,7 +38,7 @@ var latestBlock = rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber) func TestParityAPIImpl_ListStorageKeys_NoOffset(t *testing.T) { assert := assert.New(t) m, _, _ := rpcdaemontest.CreateTestSentry(t) - baseApi := NewBaseApi(nil, nil, m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs) + baseApi := NewBaseApi(nil, nil, m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs, nil) api := NewParityAPIImpl(baseApi, m.DB) answers := []string{ "0000000000000000000000000000000000000000000000000000000000000000", @@ -122,5 +123,5 @@ func TestParityAPIImpl_ListStorageKeys_AccNotFound(t *testing.T) { api := NewParityAPIImpl(newBaseApiForTest(m), m.DB) addr := libcommon.HexToAddress("0x920fd5070602feaea2e251e9e7238b6c376bcaef") _, err := api.ListStorageKeys(context.Background(), addr, 2, nil, latestBlock) - assert.Error(err, fmt.Errorf("acc not found")) + assert.Error(err, errors.New("acc not found")) } diff --git a/turbo/jsonrpc/receipts/receipts_generator.go b/turbo/jsonrpc/receipts/receipts_generator.go index a69bad72115..f3bc38bd228 100644 --- a/turbo/jsonrpc/receipts/receipts_generator.go +++ 
b/turbo/jsonrpc/receipts/receipts_generator.go @@ -11,7 +11,6 @@ import ( "github.com/erigontech/erigon-lib/log/v3" "github.com/erigontech/erigon/consensus" "github.com/erigontech/erigon/core" - "github.com/erigontech/erigon/core/rawdb" "github.com/erigontech/erigon/core/state" "github.com/erigontech/erigon/core/types" "github.com/erigontech/erigon/core/vm" @@ -25,8 +24,13 @@ type Generator struct { engine consensus.EngineReader } -func NewGenerator(receiptsCache *lru.Cache[common.Hash, []*types.Receipt], blockReader services.FullBlockReader, +func NewGenerator(cacheSize int, blockReader services.FullBlockReader, engine consensus.EngineReader) *Generator { + receiptsCache, err := lru.New[common.Hash, []*types.Receipt](cacheSize) + if err != nil { + panic(err) + } + return &Generator{ receiptsCache: receiptsCache, blockReader: blockReader, @@ -34,16 +38,11 @@ func NewGenerator(receiptsCache *lru.Cache[common.Hash, []*types.Receipt], block } } -func (g *Generator) GetReceipts(ctx context.Context, cfg *chain.Config, tx kv.Tx, block *types.Block, senders []common.Address) (types.Receipts, error) { +func (g *Generator) GetReceipts(ctx context.Context, cfg *chain.Config, tx kv.Tx, block *types.Block) (types.Receipts, error) { if receipts, ok := g.receiptsCache.Get(block.Hash()); ok { return receipts, nil } - if receipts := rawdb.ReadReceipts(tx, block, senders); receipts != nil { - g.receiptsCache.Add(block.Hash(), receipts) - return receipts, nil - } - engine := g.engine _, _, _, ibs, _, err := transactions.ComputeTxEnv(ctx, engine, block, cfg, g.blockReader, tx, 0) diff --git a/turbo/jsonrpc/send_transaction_test.go b/turbo/jsonrpc/send_transaction_test.go index 5a11dc667f0..7fe046d7f2a 100644 --- a/turbo/jsonrpc/send_transaction_test.go +++ b/turbo/jsonrpc/send_transaction_test.go @@ -54,7 +54,7 @@ import ( func newBaseApiForTest(m *mock.MockSentry) *jsonrpc.BaseAPI { stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - return jsonrpc.NewBaseApi(nil, stateCache, m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs) + return jsonrpc.NewBaseApi(nil, stateCache, m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs, nil) } // Do 1 step to start txPool diff --git a/turbo/jsonrpc/trace_adhoc.go b/turbo/jsonrpc/trace_adhoc.go index 42b316899bc..543e0d64aa8 100644 --- a/turbo/jsonrpc/trace_adhoc.go +++ b/turbo/jsonrpc/trace_adhoc.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "math" "strings" @@ -178,7 +179,7 @@ func (args *TraceCallParam) ToMessage(globalGasCap uint64, baseFee *uint256.Int) if args.GasPrice != nil { overflow := gasPrice.SetFromBig(args.GasPrice.ToInt()) if overflow { - return types.Message{}, fmt.Errorf("args.GasPrice higher than 2^256-1") + return types.Message{}, errors.New("args.GasPrice higher than 2^256-1") } } gasFeeCap, gasTipCap = gasPrice, gasPrice @@ -189,7 +190,7 @@ func (args *TraceCallParam) ToMessage(globalGasCap uint64, baseFee *uint256.Int) // User specified the legacy gas field, convert to 1559 gas typing gasPrice, overflow = uint256.FromBig(args.GasPrice.ToInt()) if overflow { - return types.Message{}, fmt.Errorf("args.GasPrice higher than 2^256-1") + return types.Message{}, errors.New("args.GasPrice higher than 2^256-1") } gasFeeCap, gasTipCap = gasPrice, gasPrice } else { @@ -198,14 +199,14 @@ func (args *TraceCallParam) ToMessage(globalGasCap uint64, baseFee *uint256.Int) if args.MaxFeePerGas != nil { overflow := gasFeeCap.SetFromBig(args.MaxFeePerGas.ToInt()) if overflow { - return 
types.Message{}, fmt.Errorf("args.GasPrice higher than 2^256-1") + return types.Message{}, errors.New("args.GasPrice higher than 2^256-1") } } gasTipCap = new(uint256.Int) if args.MaxPriorityFeePerGas != nil { overflow := gasTipCap.SetFromBig(args.MaxPriorityFeePerGas.ToInt()) if overflow { - return types.Message{}, fmt.Errorf("args.GasPrice higher than 2^256-1") + return types.Message{}, errors.New("args.GasPrice higher than 2^256-1") } } // Backfill the legacy gasPrice for EVM execution, unless we're all zeroes @@ -226,7 +227,7 @@ func (args *TraceCallParam) ToMessage(globalGasCap uint64, baseFee *uint256.Int) if args.Value != nil { overflow := value.SetFromBig(args.Value.ToInt()) if overflow { - return types.Message{}, fmt.Errorf("args.Value higher than 2^256-1") + return types.Message{}, errors.New("args.Value higher than 2^256-1") } } var data []byte @@ -777,7 +778,12 @@ func (api *TraceAPIImpl) ReplayTransaction(ctx context.Context, txHash libcommon } // otherwise this may be a bor state sync transaction - check - blockNum, ok, err = api._blockReader.EventLookup(ctx, tx, txHash) + if api.bridgeReader != nil { + blockNum, ok, err = api.bridgeReader.EventTxnLookup(ctx, txHash) + } else { + blockNum, ok, err = api._blockReader.EventLookup(ctx, tx, txHash) + } + if err != nil { return nil, err } @@ -1009,7 +1015,7 @@ func (api *TraceAPIImpl) Call(ctx context.Context, args TraceCallParam, traceTyp var overflow bool baseFee, overflow = uint256.FromBig(header.BaseFee) if overflow { - return nil, fmt.Errorf("header.BaseFee uint256 overflow") + return nil, errors.New("header.BaseFee uint256 overflow") } } msg, err := args.ToMessage(api.gasCap, baseFee) @@ -1075,7 +1081,7 @@ func (api *TraceAPIImpl) CallMany(ctx context.Context, calls json.RawMessage, pa return nil, err } if tok != json.Delim('[') { - return nil, fmt.Errorf("expected array of [callparam, tracetypes]") + return nil, errors.New("expected array of [callparam, tracetypes]") } for dec.More() { tok, err = dec.Token() @@ -1083,7 +1089,7 @@ func (api *TraceAPIImpl) CallMany(ctx context.Context, calls json.RawMessage, pa return nil, err } if tok != json.Delim('[') { - return nil, fmt.Errorf("expected [callparam, tracetypes]") + return nil, errors.New("expected [callparam, tracetypes]") } callParams = append(callParams, TraceCallParam{}) args := &callParams[len(callParams)-1] @@ -1098,7 +1104,7 @@ func (api *TraceAPIImpl) CallMany(ctx context.Context, calls json.RawMessage, pa return nil, err } if tok != json.Delim(']') { - return nil, fmt.Errorf("expected end of [callparam, tracetypes]") + return nil, errors.New("expected end of [callparam, tracetypes]") } } tok, err = dec.Token() @@ -1106,7 +1112,7 @@ func (api *TraceAPIImpl) CallMany(ctx context.Context, calls json.RawMessage, pa return nil, err } if tok != json.Delim(']') { - return nil, fmt.Errorf("expected end of array of [callparam, tracetypes]") + return nil, errors.New("expected end of array of [callparam, tracetypes]") } var baseFee *uint256.Int if parentNrOrHash == nil { @@ -1131,7 +1137,7 @@ func (api *TraceAPIImpl) CallMany(ctx context.Context, calls json.RawMessage, pa var overflow bool baseFee, overflow = uint256.FromBig(parentHeader.BaseFee) if overflow { - return nil, fmt.Errorf("header.BaseFee uint256 overflow") + return nil, errors.New("header.BaseFee uint256 overflow") } } msgs := make([]types.Message, len(callParams)) diff --git a/turbo/jsonrpc/trace_adhoc_test.go b/turbo/jsonrpc/trace_adhoc_test.go index 1d3adcdaaf0..082c38d388a 100644 --- 
a/turbo/jsonrpc/trace_adhoc_test.go +++ b/turbo/jsonrpc/trace_adhoc_test.go @@ -194,7 +194,7 @@ func TestOeTracer(t *testing.T) { require.NoError(t, err) defer dbTx.Rollback() - statedb, _ := tests.MakePreState(rules, dbTx, test.Genesis.Alloc, context.BlockNumber, m.HistoryV3) + statedb, _ := tests.MakePreState(rules, dbTx, test.Genesis.Alloc, context.BlockNumber) msg, err := tx.AsMessage(*signer, (*big.Int)(test.Context.BaseFee), rules) require.NoError(t, err) txContext := core.NewEVMTxContext(msg) diff --git a/turbo/jsonrpc/trace_filtering.go b/turbo/jsonrpc/trace_filtering.go index c3e87683019..e39b031fea7 100644 --- a/turbo/jsonrpc/trace_filtering.go +++ b/turbo/jsonrpc/trace_filtering.go @@ -74,7 +74,11 @@ func (api *TraceAPIImpl) Transaction(ctx context.Context, txHash common.Hash, ga } // otherwise this may be a bor state sync transaction - check - blockNumber, ok, err = api._blockReader.EventLookup(ctx, tx, txHash) + if api.bridgeReader != nil { + blockNumber, ok, err = api.bridgeReader.EventTxnLookup(ctx, txHash) + } else { + blockNumber, ok, err = api._blockReader.EventLookup(ctx, tx, txHash) + } if err != nil { return nil, err } @@ -325,7 +329,7 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, gas toBlock = uint64(*req.ToBlock) } if fromBlock > toBlock { - return fmt.Errorf("invalid parameters: fromBlock cannot be greater than toBlock") + return errors.New("invalid parameters: fromBlock cannot be greater than toBlock") } return api.filterV3(ctx, dbtx.(kv.TemporalTx), fromBlock, toBlock, req, stream, *gasBailOut, traceConfig) @@ -425,7 +429,7 @@ func (api *TraceAPIImpl) filterV3(ctx context.Context, dbtx kv.TemporalTx, fromB if !isPos && chainConfig.TerminalTotalDifficulty != nil { header := lastHeader - isPos = header.Difficulty.Cmp(common.Big0) == 0 || header.Difficulty.Cmp(chainConfig.TerminalTotalDifficulty) >= 0 + isPos = header.Difficulty.Sign() == 0 || header.Difficulty.Cmp(chainConfig.TerminalTotalDifficulty) >= 0 } lastBlockHash = lastHeader.Hash() @@ -726,7 +730,16 @@ func (api *TraceAPIImpl) callManyTransactions( // check if this block has state sync txn blockHash := block.Hash() borStateSyncTxnHash = bortypes.ComputeBorTxHash(blockNumber, blockHash) - _, ok, err := api._blockReader.EventLookup(ctx, dbtx, borStateSyncTxnHash) + + var ok bool + var err error + + if api.bridgeReader != nil { + _, ok, err = api.bridgeReader.EventTxnLookup(ctx, borStateSyncTxnHash) + + } else { + _, ok, err = api._blockReader.EventLookup(ctx, dbtx, borStateSyncTxnHash) + } if err != nil { return nil, nil, err } diff --git a/turbo/jsonrpc/tracing.go b/turbo/jsonrpc/tracing.go index 6491ae16bdc..dfbe6aa0b95 100644 --- a/turbo/jsonrpc/tracing.go +++ b/turbo/jsonrpc/tracing.go @@ -18,6 +18,7 @@ package jsonrpc import ( "context" + "errors" "fmt" "time" @@ -117,7 +118,13 @@ func (api *PrivateDebugAPIImpl) traceBlock(ctx context.Context, blockNrOrHash rp var borStateSyncTxn types.Transaction if *config.BorTraceEnabled { borStateSyncTxHash := bortypes.ComputeBorTxHash(block.NumberU64(), block.Hash()) - _, ok, err := api._blockReader.EventLookup(ctx, tx, borStateSyncTxHash) + + var ok bool + if api.bridgeReader != nil { + _, ok, err = api.bridgeReader.EventTxnLookup(ctx, borStateSyncTxHash) + } else { + _, ok, err = api._blockReader.EventLookup(ctx, tx, borStateSyncTxHash) + } if err != nil { stream.WriteArrayEnd() return err @@ -245,7 +252,11 @@ func (api *PrivateDebugAPIImpl) TraceTransaction(ctx context.Context, hash commo } // otherwise this may be a bor state 
sync transaction - check - blockNum, ok, err = api._blockReader.EventLookup(ctx, tx, hash) + if api.bridgeReader != nil { + blockNum, ok, err = api.bridgeReader.EventTxnLookup(ctx, hash) + } else { + blockNum, ok, err = api._blockReader.EventLookup(ctx, tx, hash) + } if err != nil { stream.WriteNil() return err @@ -385,7 +396,7 @@ func (api *PrivateDebugAPIImpl) TraceCall(ctx context.Context, args ethapi.CallA var overflow bool baseFee, overflow = uint256.FromBig(header.BaseFee) if overflow { - return fmt.Errorf("header.BaseFee uint256 overflow") + return errors.New("header.BaseFee uint256 overflow") } } msg, err := args.ToMessage(api.GasCap, baseFee) @@ -427,7 +438,7 @@ func (api *PrivateDebugAPIImpl) TraceCallMany(ctx context.Context, bundles []Bun } if len(bundles) == 0 { stream.WriteNil() - return fmt.Errorf("empty bundles") + return errors.New("empty bundles") } empty := true for _, bundle := range bundles { @@ -438,7 +449,7 @@ func (api *PrivateDebugAPIImpl) TraceCallMany(ctx context.Context, bundles []Bun if empty { stream.WriteNil() - return fmt.Errorf("empty bundles") + return errors.New("empty bundles") } defer func(start time.Time) { log.Trace("Tracing CallMany finished", "runtime", time.Since(start)) }(time.Now()) diff --git a/turbo/jsonrpc/txpool_api.go b/turbo/jsonrpc/txpool_api.go index b0e47f54835..cb2ac72bed0 100644 --- a/turbo/jsonrpc/txpool_api.go +++ b/turbo/jsonrpc/txpool_api.go @@ -19,6 +19,7 @@ package jsonrpc import ( "context" "fmt" + "strconv" "github.com/erigontech/erigon-lib/common/hexutil" @@ -111,7 +112,7 @@ func (api *TxPoolAPIImpl) Content(ctx context.Context) (map[string]map[string]ma for account, txs := range pending { dump := make(map[string]*RPCTransaction) for _, txn := range txs { - dump[fmt.Sprintf("%d", txn.GetNonce())] = newRPCPendingTransaction(txn, curHeader, cc) + dump[strconv.FormatUint(txn.GetNonce(), 10)] = newRPCPendingTransaction(txn, curHeader, cc) } content["pending"][account.Hex()] = dump } @@ -119,7 +120,7 @@ func (api *TxPoolAPIImpl) Content(ctx context.Context) (map[string]map[string]ma for account, txs := range baseFee { dump := make(map[string]*RPCTransaction) for _, txn := range txs { - dump[fmt.Sprintf("%d", txn.GetNonce())] = newRPCPendingTransaction(txn, curHeader, cc) + dump[strconv.FormatUint(txn.GetNonce(), 10)] = newRPCPendingTransaction(txn, curHeader, cc) } content["baseFee"][account.Hex()] = dump } @@ -127,7 +128,7 @@ func (api *TxPoolAPIImpl) Content(ctx context.Context) (map[string]map[string]ma for account, txs := range queued { dump := make(map[string]*RPCTransaction) for _, txn := range txs { - dump[fmt.Sprintf("%d", txn.GetNonce())] = newRPCPendingTransaction(txn, curHeader, cc) + dump[strconv.FormatUint(txn.GetNonce(), 10)] = newRPCPendingTransaction(txn, curHeader, cc) } content["queued"][account.Hex()] = dump } @@ -186,19 +187,19 @@ func (api *TxPoolAPIImpl) ContentFrom(ctx context.Context, addr libcommon.Addres // Flatten the pending transactions dump := make(map[string]*RPCTransaction) for _, txn := range pending { - dump[fmt.Sprintf("%d", txn.GetNonce())] = newRPCPendingTransaction(txn, curHeader, cc) + dump[strconv.FormatUint(txn.GetNonce(), 10)] = newRPCPendingTransaction(txn, curHeader, cc) } content["pending"] = dump // Flatten the baseFee transactions dump = make(map[string]*RPCTransaction) for _, txn := range baseFee { - dump[fmt.Sprintf("%d", txn.GetNonce())] = newRPCPendingTransaction(txn, curHeader, cc) + dump[strconv.FormatUint(txn.GetNonce(), 10)] = newRPCPendingTransaction(txn, curHeader, cc) } 
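The `txpool_api.go` rewrites here (the flattening loops continue just below) are mechanical `perfsprint` fixes: `fmt.Sprintf("%d", n)` boxes the integer into an interface and parses a format string, while `strconv.FormatUint(n, 10)` converts digits directly. The `header.Difficulty.Sign() == 0` change in `trace_filtering.go` is the same spirit: ask the value directly instead of comparing against a zero constant. A benchmark pair (in a `_test.go` file) that makes the strconv difference measurable:

```go
package txpool

import (
	"fmt"
	"strconv"
	"testing"
)

func BenchmarkSprintf(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_ = fmt.Sprintf("%d", uint64(i)) // interface boxing + verb parsing
	}
}

func BenchmarkFormatUint(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_ = strconv.FormatUint(uint64(i), 10) // direct conversion, no fmt machinery
	}
}
```

Run with `go test -bench=. -benchmem`; `FormatUint` is usually several times faster and avoids the boxing allocation.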
content["baseFee"] = dump // Flatten the queued transactions dump = make(map[string]*RPCTransaction) for _, txn := range queued { - dump[fmt.Sprintf("%d", txn.GetNonce())] = newRPCPendingTransaction(txn, curHeader, cc) + dump[strconv.FormatUint(txn.GetNonce(), 10)] = newRPCPendingTransaction(txn, curHeader, cc) } content["queued"] = dump return content, nil diff --git a/turbo/jsonrpc/txpool_api_test.go b/turbo/jsonrpc/txpool_api_test.go index c46cb1a67fa..ee6c9ca05dc 100644 --- a/turbo/jsonrpc/txpool_api_test.go +++ b/turbo/jsonrpc/txpool_api_test.go @@ -26,7 +26,6 @@ import ( libcommon "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/common/hexutil" - "github.com/erigontech/erigon-lib/config3" txpool "github.com/erigontech/erigon-lib/gointerfaces/txpoolproto" "github.com/erigontech/erigon-lib/kv/kvcache" @@ -40,10 +39,6 @@ import ( ) func TestTxPoolContent(t *testing.T) { - if config3.EnableHistoryV4InTest { - t.Skip("TODO: [e4] implement me") - } - m, require := mock.MockWithTxPool(t), require.New(t) chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 1, func(i int, b *core.BlockGen) { b.SetCoinbase(libcommon.Address{1}) @@ -55,7 +50,7 @@ func TestTxPoolContent(t *testing.T) { ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, m) txPool := txpool.NewTxpoolClient(conn) ff := rpchelper.New(ctx, rpchelper.DefaultFiltersConfig, nil, txPool, txpool.NewMiningClient(conn), func() {}, m.Log) - api := NewTxPoolAPI(NewBaseApi(ff, kvcache.New(kvcache.DefaultCoherentConfig), m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, txPool) + api := NewTxPoolAPI(NewBaseApi(ff, kvcache.New(kvcache.DefaultCoherentConfig), m.BlockReader, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs, nil), m.DB, txPool) expectValue := uint64(1234) txn, err := types.SignTx(types.NewTransaction(0, libcommon.Address{1}, uint256.NewInt(expectValue), params.TxGas, uint256.NewInt(10*params.GWei), nil), *types.LatestSignerForChainID(m.ChainConfig.ChainID), m.Key) diff --git a/turbo/node/node.go b/turbo/node/node.go index 788b631a8bc..24a1abec9a2 100644 --- a/turbo/node/node.go +++ b/turbo/node/node.go @@ -91,8 +91,6 @@ func NewNodConfigUrfave(ctx *cli.Context, logger log.Logger) *nodecfg.Config { logger.Info("Starting Erigon on Sepolia testnet...") case networkname.DevChainName: logger.Info("Starting Erigon in ephemeral dev mode...") - case networkname.MumbaiChainName: - logger.Info("Starting Erigon on Mumbai testnet...") case networkname.AmoyChainName: logger.Info("Starting Erigon on Amoy testnet...") case networkname.BorMainnetChainName: diff --git a/turbo/rpchelper/filters.go b/turbo/rpchelper/filters.go index 20a17da7965..b6896fbad7d 100644 --- a/turbo/rpchelper/filters.go +++ b/turbo/rpchelper/filters.go @@ -574,7 +574,7 @@ func (ff *Filters) onNewEvent(event *remote.SubscribeReply) error { case remote.Event_PENDING_BLOCK: return ff.onPendingBlock(event) default: - return fmt.Errorf("unsupported event type") + return errors.New("unsupported event type") } } diff --git a/turbo/snapshotsync/freezeblocks/block_reader.go b/turbo/snapshotsync/freezeblocks/block_reader.go index 58adf3a8716..c55d7f6be7c 100644 --- a/turbo/snapshotsync/freezeblocks/block_reader.go +++ b/turbo/snapshotsync/freezeblocks/block_reader.go @@ -49,6 +49,8 @@ import ( ) var ErrSpanNotFound = errors.New("span not found") +var ErrMilestoneNotFound = errors.New("milestone not found") +var ErrCheckpointNotFound = errors.New("checkpoint not found") type RemoteBlockReader struct { 
client remote.ETHBACKENDClient @@ -258,7 +260,7 @@ func (r *RemoteBlockReader) BodyRlp(ctx context.Context, tx kv.Getter, hash comm } func (r *RemoteBlockReader) LastEventId(ctx context.Context, tx kv.Tx) (uint64, bool, error) { - return 0, false, fmt.Errorf("not implemented") + return 0, false, errors.New("not implemented") } func (r *RemoteBlockReader) EventLookup(ctx context.Context, tx kv.Getter, txnHash common.Hash) (uint64, bool, error) { @@ -305,7 +307,7 @@ func (r *RemoteBlockReader) LastFrozenSpanId() uint64 { } func (r *RemoteBlockReader) LastMilestoneId(ctx context.Context, tx kv.Tx) (uint64, bool, error) { - return 0, false, fmt.Errorf("not implemented") + return 0, false, errors.New("not implemented") } func (r *RemoteBlockReader) Milestone(ctx context.Context, tx kv.Getter, spanId uint64) ([]byte, error) { @@ -313,7 +315,7 @@ func (r *RemoteBlockReader) Milestone(ctx context.Context, tx kv.Getter, spanId } func (r *RemoteBlockReader) LastCheckpointId(ctx context.Context, tx kv.Tx) (uint64, bool, error) { - return 0, false, fmt.Errorf("not implemented") + return 0, false, errors.New("not implemented") } func (r *RemoteBlockReader) Checkpoint(ctx context.Context, tx kv.Getter, spanId uint64) ([]byte, error) { @@ -380,6 +382,12 @@ func (r *BlockReader) HeadersRange(ctx context.Context, walker func(header *type } func (r *BlockReader) HeaderByNumber(ctx context.Context, tx kv.Getter, blockHeight uint64) (h *types.Header, err error) { + var dbgPrefix string + dbgLogs := dbg.Enabled(ctx) + if dbgLogs { + dbgPrefix = fmt.Sprintf("[dbg] BlockReader(idxMax=%d,segMax=%d).HeaderByNumber(blk=%d) -> ", r.sn.idxMax.Load(), r.sn.segmentsMax.Load(), blockHeight) + } + if tx != nil { blockHash, err := rawdb.ReadCanonicalHash(tx, blockHeight) if err != nil { @@ -390,12 +398,27 @@ func (r *BlockReader) HeaderByNumber(ctx context.Context, tx kv.Getter, blockHei h = rawdb.ReadHeader(tx, blockHash, blockHeight) if h != nil { return h, nil + } else { + if dbgLogs { + log.Info(dbgPrefix + "not found in db") + } } + } else { + if dbgLogs { + log.Info(dbgPrefix + "canonical hash is empty") + } + } + } else { + if dbgLogs { + log.Info(dbgPrefix + "tx is nil") } } seg, ok, release := r.sn.ViewSingleFile(coresnaptype.Headers, blockHeight) if !ok { + if dbgLogs { + log.Info(dbgPrefix + "not found file for such blockHeight") + } return } defer release() @@ -404,6 +427,11 @@ func (r *BlockReader) HeaderByNumber(ctx context.Context, tx kv.Getter, blockHei if err != nil { return nil, err } + if h == nil { + if dbgLogs { + log.Info(dbgPrefix + "got nil from file") + } + } return h, nil } @@ -1647,7 +1675,7 @@ func (r *BlockReader) Milestone(ctx context.Context, tx kv.Getter, milestoneId u } if v == nil { - return nil, fmt.Errorf("milestone %d not found (db)", milestoneId) + return nil, fmt.Errorf("%w, id: %d (db)", ErrMilestoneNotFound, milestoneId) } return common.Copy(v), nil @@ -1696,7 +1724,7 @@ func (r *BlockReader) Checkpoint(ctx context.Context, tx kv.Getter, checkpointId return common.Copy(result), nil } - return nil, fmt.Errorf("checkpoint %d not found (db)", checkpointId) + return nil, fmt.Errorf("%w, id: %d (db)", ErrCheckpointNotFound, checkpointId) } func (r *BlockReader) LastFrozenCheckpointId() uint64 { diff --git a/turbo/snapshotsync/freezeblocks/block_reader_test.go b/turbo/snapshotsync/freezeblocks/block_reader_test.go index 6fe79bb1d43..4e8fd30d1cc 100644 --- a/turbo/snapshotsync/freezeblocks/block_reader_test.go +++ b/turbo/snapshotsync/freezeblocks/block_reader_test.go @@ -43,7 +43,7 @@ 
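`block_reader.go` above also promotes two ad-hoc messages to exported sentinels (`ErrMilestoneNotFound`, `ErrCheckpointNotFound`) and wraps them with `%w`, so callers can match the error class with `errors.Is` instead of comparing strings. A compact, runnable illustration of why the `%w` form matters:

```go
package main

import (
	"errors"
	"fmt"
)

var ErrMilestoneNotFound = errors.New("milestone not found")

func readMilestone(id uint64) error {
	// %w keeps the sentinel in the chain while adding context, matching
	// the fmt.Errorf("%w, id: %d (db)", ...) form used in the diff
	return fmt.Errorf("%w, id: %d (db)", ErrMilestoneNotFound, id)
}

func main() {
	err := readMilestone(42)
	fmt.Println(errors.Is(err, ErrMilestoneNotFound)) // true
	fmt.Println(err)                                  // milestone not found, id: 42 (db)
}
```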
func TestBlockReaderLastFrozenSpanIdWhenSegmentFilesArePresent(t *testing.T) { dir := t.TempDir() createTestBorEventSegmentFile(t, 0, 500_000, 132, dir, logger) createTestSegmentFile(t, 0, 500_000, borsnaptype.Enums.BorSpans, dir, 1, logger) - borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 0, logger) + borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{}, dir, 0, logger) defer borRoSnapshots.Close() err := borRoSnapshots.ReopenFolder() require.NoError(t, err) @@ -57,7 +57,7 @@ func TestBlockReaderLastFrozenSpanIdWhenSegmentFilesAreNotPresent(t *testing.T) logger := testlog.Logger(t, log.LvlInfo) dir := t.TempDir() - borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 0, logger) + borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{}, dir, 0, logger) defer borRoSnapshots.Close() err := borRoSnapshots.ReopenFolder() require.NoError(t, err) @@ -81,7 +81,7 @@ func TestBlockReaderLastFrozenSpanIdReturnsLastSegWithIdx(t *testing.T) { idxFileToDelete := filepath.Join(dir, snaptype.IdxFileName(1, 1_000_000, 1_500_000, borsnaptype.BorSpans.Name())) err := os.Remove(idxFileToDelete) require.NoError(t, err) - borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 0, logger) + borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{}, dir, 0, logger) defer borRoSnapshots.Close() err = borRoSnapshots.ReopenFolder() require.NoError(t, err) @@ -111,7 +111,7 @@ func TestBlockReaderLastFrozenSpanIdReturnsZeroWhenAllSegmentsDoNotHaveIdx(t *te idxFileToDelete = filepath.Join(dir, snaptype.IdxFileName(1, 1_000_000, 1_500_000, borsnaptype.BorSpans.Name())) err = os.Remove(idxFileToDelete) require.NoError(t, err) - borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 0, logger) + borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{}, dir, 0, logger) defer borRoSnapshots.Close() err = borRoSnapshots.ReopenFolder() require.NoError(t, err) @@ -127,7 +127,7 @@ func TestBlockReaderLastFrozenEventIdWhenSegmentFilesArePresent(t *testing.T) { dir := t.TempDir() createTestBorEventSegmentFile(t, 0, 500_000, 132, dir, logger) createTestSegmentFile(t, 0, 500_000, borsnaptype.Enums.BorSpans, dir, 1, logger) - borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 0, logger) + borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{}, dir, 0, logger) defer borRoSnapshots.Close() err := borRoSnapshots.ReopenFolder() require.NoError(t, err) @@ -141,7 +141,7 @@ func TestBlockReaderLastFrozenEventIdWhenSegmentFilesAreNotPresent(t *testing.T) logger := testlog.Logger(t, log.LvlInfo) dir := t.TempDir() - borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 0, logger) + borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{}, dir, 0, logger) defer borRoSnapshots.Close() err := borRoSnapshots.ReopenFolder() require.NoError(t, err) @@ -165,7 +165,7 @@ func TestBlockReaderLastFrozenEventIdReturnsLastSegWithIdx(t *testing.T) { idxFileToDelete := filepath.Join(dir, snaptype.IdxFileName(1, 1_000_000, 1_500_000, borsnaptype.BorEvents.Name())) err := os.Remove(idxFileToDelete) require.NoError(t, err) - borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 0, logger) + borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{}, dir, 0, logger) defer borRoSnapshots.Close() err = borRoSnapshots.ReopenFolder() require.NoError(t, err) @@ -195,7 +195,7 @@ func 
TestBlockReaderLastFrozenEventIdReturnsZeroWhenAllSegmentsDoNotHaveIdx(t *t idxFileToDelete = filepath.Join(dir, snaptype.IdxFileName(1, 1_000_000, 1_500_000, borsnaptype.BorEvents.Name())) err = os.Remove(idxFileToDelete) require.NoError(t, err) - borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 0, logger) + borRoSnapshots := NewBorRoSnapshots(ethconfig.BlocksFreezing{}, dir, 0, logger) defer borRoSnapshots.Close() err = borRoSnapshots.ReopenFolder() require.NoError(t, err) diff --git a/turbo/snapshotsync/freezeblocks/block_snapshots.go b/turbo/snapshotsync/freezeblocks/block_snapshots.go index 93c85b2dfda..c094ebc1ca2 100644 --- a/turbo/snapshotsync/freezeblocks/block_snapshots.go +++ b/turbo/snapshotsync/freezeblocks/block_snapshots.go @@ -428,6 +428,7 @@ func (s *RoSnapshots) idxAvailability() uint64 { if len(maximums) == 0 { return 0 } + return slices.Min(maximums) } @@ -435,13 +436,15 @@ func (s *RoSnapshots) idxAvailability() uint64 { // - user must be able: delete any snapshot file and Erigon will self-heal by re-downloading // - RPC return Nil for historical blocks if snapshots are not open func (s *RoSnapshots) OptimisticReopenWithDB(db kv.RoDB) { - _ = db.View(context.Background(), func(tx kv.Tx) error { - snList, _, err := rawdb.ReadSnapshots(tx) + var snList []string + _ = db.View(context.Background(), func(tx kv.Tx) (err error) { + snList, _, err = rawdb.ReadSnapshots(tx) if err != nil { return err } - return s.ReopenList(snList, true) + return nil }) + _ = s.ReopenList(snList, true) } func (s *RoSnapshots) LS() { @@ -483,6 +486,8 @@ func (s *RoSnapshots) Files() (list []string) { } func (s *RoSnapshots) OpenFiles() (list []string) { + log.Warn("[dbg] OpenFiles") + defer log.Warn("[dbg] OpenFiles end") s.segments.Scan(func(segtype snaptype.Enum, value *segments) bool { value.lock.RLock() defer value.lock.RUnlock() @@ -498,6 +503,9 @@ func (s *RoSnapshots) OpenFiles() (list []string) { // ReopenList stops on optimistic=false, continue opening files on optimistic=true func (s *RoSnapshots) ReopenList(fileNames []string, optimistic bool) error { + s.lockSegments() + defer s.unlockSegments() + s.closeWhatNotInList(fileNames) if err := s.rebuildSegments(fileNames, true, optimistic); err != nil { return err } @@ -505,6 +513,9 @@ func (s *RoSnapshots) ReopenList(fileNames []string, optimistic bool) error { } func (s *RoSnapshots) InitSegments(fileNames []string) error { + s.lockSegments() + defer s.unlockSegments() + s.closeWhatNotInList(fileNames) if err := s.rebuildSegments(fileNames, false, true); err != nil { return err } @@ -526,10 +537,6 @@ func (s *RoSnapshots) unlockSegments() { } func (s *RoSnapshots) rebuildSegments(fileNames []string, open bool, optimistic bool) error { - s.lockSegments() - defer s.unlockSegments() - - s.closeWhatNotInList(fileNames) var segmentsMax uint64 var segmentsMaxSet bool @@ -623,10 +630,17 @@ func (s *RoSnapshots) Ranges() []Range { func (s *RoSnapshots) OptimisticalyReopenFolder() { _ = s.ReopenFolder() } func (s *RoSnapshots) OptimisticalyReopenWithDB(db kv.RoDB) { _ = s.ReopenWithDB(db) } func (s *RoSnapshots) ReopenFolder() error { - if err := s.ReopenSegments(s.Types(), false); err != nil { - return fmt.Errorf("ReopenSegments: %w", err) + files, _, err := typedSegments(s.dir, s.segmentsMin.Load(), s.Types(), false) + if err != nil { + return err } - return nil + + list := make([]string, 0, len(files)) + for _, f := range files { + _, fName := filepath.Split(f.Path) + list = append(list, fName) + } + return 
s.ReopenList(list, false) } func (s *RoSnapshots) ReopenSegments(types []snaptype.Type, allowGaps bool) error { @@ -640,7 +654,14 @@ func (s *RoSnapshots) ReopenSegments(types []snaptype.Type, allowGaps bool) erro _, fName := filepath.Split(f.Path) list = append(list, fName) } - return s.ReopenList(list, false) + + s.lockSegments() + defer s.unlockSegments() + // no need to close already-opened files + if err := s.rebuildSegments(list, true, false); err != nil { + return err + } + return nil } func (s *RoSnapshots) ReopenWithDB(db kv.RoDB) error { @@ -728,13 +749,13 @@ func (s *RoSnapshots) buildMissedIndicesIfNeed(ctx context.Context, logPrefix st return nil } if !s.Cfg().ProduceE2 && s.IndicesMax() == 0 { - return fmt.Errorf("please remove --snap.stop, erigon can't work without creating basic indices") + return errors.New("please remove --snap.stop, erigon can't work without creating basic indices") } if !s.Cfg().ProduceE2 { return nil } if !s.SegmentsReady() { - return fmt.Errorf("not all snapshot segments are available") + return errors.New("not all snapshot segments are available") } s.LogStat("missed-idx") @@ -944,7 +965,7 @@ func (s *RoSnapshots) AddSnapshotsToSilkworm(silkwormInstance *silkworm.Silkworm } if len(mappedHeaderSnapshots) != len(mappedBodySnapshots) || len(mappedBodySnapshots) != len(mappedTxnSnapshots) { - return fmt.Errorf("addSnapshots: the number of headers/bodies/txs snapshots must be the same") + return errors.New("addSnapshots: the number of headers/bodies/txs snapshots must be the same") } for i := 0; i < len(mappedHeaderSnapshots); i++ { @@ -972,11 +993,18 @@ func buildIdx(ctx context.Context, sn snaptype.FileInfo, chainConfig *chain.Conf } func notifySegmentIndexingFinished(name string) { - diagnostics.Send( - diagnostics.SnapshotSegmentIndexingFinishedUpdate{ + dts := []diagnostics.SnapshotSegmentIndexingStatistics{ + { SegmentName: name, + Percent: 100, + Alloc: 0, + Sys: 0, }, - ) + } + diagnostics.Send(diagnostics.SnapshotIndexingStatistics{ + Segments: dts, + TimeElapsed: -1, + }) } func sendDiagnostics(startIndexingTime time.Time, indexPercent map[string]int, alloc uint64, sys uint64) { @@ -1595,13 +1623,21 @@ func dumpRange(ctx context.Context, f snaptype.FileInfo, dumper dumpFunc, firstK var lastKeyValue uint64 sn, err := seg.NewCompressor(ctx, "Snapshot "+f.Type.Name(), f.Path, tmpDir, seg.MinPatternScore, workers, log.LvlTrace, logger) - if err != nil { return lastKeyValue, err } defer sn.Close() + // E3 needs to keep the db smaller: earlier retire -> earlier prune. 
+ // Means: + // - build must be fast + // - merge can be slow and expensive + noCompress := (f.To - f.From) < (snaptype.Erigon2MergeLimit - 1) + lastKeyValue, err = dumper(ctx, chainDB, chainConfig, f.From, f.To, firstKey, func(v []byte) error { + if noCompress { + return sn.AddUncompressedWord(v) + } return sn.AddWord(v) }, workers, lvl, logger) @@ -2036,7 +2072,7 @@ func (m *Merger) filesByRange(snapshots *RoSnapshots, from, to uint64) (map[snap func (m *Merger) filesByRangeOfType(view *View, from, to uint64, snapshotType snaptype.Type) []string { paths := make([]string, 0) - for _, sn := range view.Segments(snapshotType) { + for _, sn := range view.segments(snapshotType) { if sn.from < from { continue } @@ -2273,16 +2309,16 @@ func (s *RoSnapshots) ViewSingleFile(t snaptype.Type, blockNum uint64) (segment return nil, false, noop } -func (v *View) Segments(t snaptype.Type) []*Segment { +func (v *View) segments(t snaptype.Type) []*Segment { if s, ok := v.s.segments.Get(t.Enum()); ok { return s.segments } return nil } -func (v *View) Headers() []*Segment { return v.Segments(coresnaptype.Headers) } -func (v *View) Bodies() []*Segment { return v.Segments(coresnaptype.Bodies) } -func (v *View) Txs() []*Segment { return v.Segments(coresnaptype.Transactions) } +func (v *View) Headers() []*Segment { return v.segments(coresnaptype.Headers) } +func (v *View) Bodies() []*Segment { return v.segments(coresnaptype.Bodies) } +func (v *View) Txs() []*Segment { return v.segments(coresnaptype.Transactions) } func (v *View) Segment(t snaptype.Type, blockNum uint64) (*Segment, bool) { if s, ok := v.s.segments.Get(t.Enum()); ok { @@ -2297,7 +2333,7 @@ func (v *View) Segment(t snaptype.Type, blockNum uint64) (*Segment, bool) { } func (v *View) Ranges() (ranges []Range) { - for _, sn := range v.Segments(v.baseSegType) { + for _, sn := range v.segments(v.baseSegType) { ranges = append(ranges, sn.Range) } diff --git a/turbo/snapshotsync/freezeblocks/block_snapshots_test.go b/turbo/snapshotsync/freezeblocks/block_snapshots_test.go index e0cad88290f..a4f5284dc7a 100644 --- a/turbo/snapshotsync/freezeblocks/block_snapshots_test.go +++ b/turbo/snapshotsync/freezeblocks/block_snapshots_test.go @@ -149,7 +149,7 @@ func TestMergeSnapshots(t *testing.T) { for i := uint64(0); i < N; i++ { createFile(i*10_000, (i+1)*10_000) } - s := NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 0, logger) + s := NewRoSnapshots(ethconfig.BlocksFreezing{}, dir, 0, logger) defer s.Close() require.NoError(s.ReopenFolder()) { @@ -157,7 +157,7 @@ func TestMergeSnapshots(t *testing.T) { merger.DisableFsync() s.ReopenSegments(coresnaptype.BlockSnapshotTypes, false) ranges := merger.FindMergeRanges(s.Ranges(), s.SegmentsMax()) - require.True(len(ranges) > 0) + require.Equal(3, len(ranges)) err := merger.Merge(context.Background(), s, coresnaptype.BlockSnapshotTypes, ranges, s.Dir(), false, nil, nil) require.NoError(err) } @@ -172,9 +172,9 @@ func TestMergeSnapshots(t *testing.T) { { merger := NewMerger(dir, 1, log.LvlInfo, nil, params.MainnetChainConfig, logger) merger.DisableFsync() - s.ReopenSegments(coresnaptype.BlockSnapshotTypes, false) + s.ReopenFolder() ranges := merger.FindMergeRanges(s.Ranges(), s.SegmentsMax()) - require.True(len(ranges) == 0) + require.Equal(0, len(ranges)) err := merger.Merge(context.Background(), s, coresnaptype.BlockSnapshotTypes, ranges, s.Dir(), false, nil, nil) require.NoError(err) } @@ -245,7 +245,7 @@ func TestDeleteSnapshots(t *testing.T) { for i := uint64(0); i < N; i++ { createFile(i*10_000, 
(i+1)*10_000) } - s := NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 0, logger) + s := NewRoSnapshots(ethconfig.BlocksFreezing{}, dir, 0, logger) defer s.Close() retireFiles := []string{ "v1-000000-000010-bodies.seg", @@ -284,7 +284,7 @@ func TestRemoveOverlaps(t *testing.T) { createFile(200_000+i*10_000, 200_000+(i+1)*10_000) } - s := NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 0, logger) + s := NewRoSnapshots(ethconfig.BlocksFreezing{}, dir, 0, logger) defer s.Close() require.NoError(s.ReopenSegments(coresnaptype.BlockSnapshotTypes, false)) @@ -332,11 +332,11 @@ func TestOpenAllSnapshot(t *testing.T) { logger := log.New() baseDir, require := t.TempDir(), require.New(t) - for _, chain := range []string{networkname.MainnetChainName, networkname.MumbaiChainName} { + for _, chain := range []string{networkname.MainnetChainName, networkname.AmoyChainName} { dir := filepath.Join(baseDir, chain) chainSnapshotCfg := snapcfg.KnownCfg(chain) chainSnapshotCfg.ExpectBlocks = math.MaxUint64 - cfg := ethconfig.BlocksFreezing{Enabled: true} + cfg := ethconfig.BlocksFreezing{} createFile := func(from, to uint64, name snaptype.Type) { createTestSegmentFile(t, from, to, name.Enum(), dir, 1, logger) } diff --git a/turbo/snapshotsync/freezeblocks/bor_snapshots.go b/turbo/snapshotsync/freezeblocks/bor_snapshots.go index a3e41d991bc..396241bc392 100644 --- a/turbo/snapshotsync/freezeblocks/bor_snapshots.go +++ b/turbo/snapshotsync/freezeblocks/bor_snapshots.go @@ -23,6 +23,7 @@ import ( "path/filepath" "reflect" + "github.com/erigontech/erigon-lib/common/dbg" "github.com/erigontech/erigon-lib/downloader/snaptype" "github.com/erigontech/erigon-lib/log/v3" "github.com/erigontech/erigon/cmd/hack/tool/fromdb" @@ -31,11 +32,17 @@ import ( "github.com/erigontech/erigon/turbo/services" ) +var BorProduceFiles = dbg.EnvBool("BOR_PRODUCE_FILES", false) + func (br *BlockRetire) dbHasEnoughDataForBorRetire(ctx context.Context) (bool, error) { return true, nil } func (br *BlockRetire) retireBorBlocks(ctx context.Context, minBlockNum uint64, maxBlockNum uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []services.DownloadRequest) error, onDelete func(l []string) error) (bool, error) { + if !BorProduceFiles { + return false, nil + } + select { case <-ctx.Done(): return false, ctx.Err() @@ -250,10 +257,10 @@ func (v *BorView) Close() { v.base.Close() } -func (v *BorView) Events() []*Segment { return v.base.Segments(borsnaptype.BorEvents) } -func (v *BorView) Spans() []*Segment { return v.base.Segments(borsnaptype.BorSpans) } -func (v *BorView) Checkpoints() []*Segment { return v.base.Segments(borsnaptype.BorCheckpoints) } -func (v *BorView) Milestones() []*Segment { return v.base.Segments(borsnaptype.BorMilestones) } +func (v *BorView) Events() []*Segment { return v.base.segments(borsnaptype.BorEvents) } +func (v *BorView) Spans() []*Segment { return v.base.segments(borsnaptype.BorSpans) } +func (v *BorView) Checkpoints() []*Segment { return v.base.segments(borsnaptype.BorCheckpoints) } +func (v *BorView) Milestones() []*Segment { return v.base.segments(borsnaptype.BorMilestones) } func (v *BorView) EventsSegment(blockNum uint64) (*Segment, bool) { return v.base.Segment(borsnaptype.BorEvents, blockNum) diff --git a/turbo/snapshotsync/freezeblocks/caplin_snapshots.go b/turbo/snapshotsync/freezeblocks/caplin_snapshots.go index edf460fb36d..befffdcb58a 100644 --- a/turbo/snapshotsync/freezeblocks/caplin_snapshots.go +++ b/turbo/snapshotsync/freezeblocks/caplin_snapshots.go @@ -56,7 +56,7 
@@ func BeaconSimpleIdx(ctx context.Context, sn snaptype.FileInfo, salt uint32, tmp num := make([]byte, binary.MaxVarintLen64) if err := snaptype.BuildIndex(ctx, sn, salt, sn.From, tmpDir, log.LvlDebug, p, func(idx *recsplit.RecSplit, i, offset uint64, word []byte) error { if i%20_000 == 0 { - logger.Log(lvl, fmt.Sprintf("Generating idx for %s", sn.Type.Name()), "progress", i) + logger.Log(lvl, "Generating idx for "+sn.Type.Name(), "progress", i) } p.Processed.Add(1) n := binary.PutUvarint(num, i) @@ -569,6 +569,7 @@ func (s *CaplinSnapshots) BuildMissingIndices(ctx context.Context, logger log.Lo if err != nil { return err } + noneDone := true for index := range segments { segment := segments[index] // The same slot=>offset mapping is used for both beacon blocks and blob sidecars. @@ -579,11 +580,14 @@ func (s *CaplinSnapshots) BuildMissingIndices(ctx context.Context, logger log.Lo continue } p := &background.Progress{} - + noneDone = false if err := BeaconSimpleIdx(ctx, segment, s.Salt, s.tmpdir, p, log.LvlDebug, logger); err != nil { return err } } + if noneDone { + return nil + } return s.ReopenFolder() } @@ -665,7 +669,7 @@ func (s *CaplinSnapshots) ReadBlobSidecars(slot uint64) ([]*cltypes.BlobSidecar, return nil, nil } if len(buf)%sidecarSSZSize != 0 { - return nil, fmt.Errorf("invalid sidecar list length") + return nil, errors.New("invalid sidecar list length") } sidecars := make([]*cltypes.BlobSidecar, len(buf)/sidecarSSZSize) for i := 0; i < len(buf); i += sidecarSSZSize { diff --git a/turbo/snapshotsync/snap/flags.go b/turbo/snapshotsync/snap/flags.go deleted file mode 100644 index 1102cf10abf..00000000000 --- a/turbo/snapshotsync/snap/flags.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2024 The Erigon Authors -// This file is part of Erigon. -// -// Erigon is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// Erigon is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with Erigon. If not, see . 
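Two flavors of feature gating change in this stretch. The new `BorProduceFiles = dbg.EnvBool("BOR_PRODUCE_FILES", false)` guard above is a plain environment switch, while the deletion beginning here removes the old persisted flag: `snap/flags.go` (its remaining body follows) kept a `blocksSnapshotEnabled` key in the DB, which is also why the test hunks above drop `Enabled: true` from every `ethconfig.BlocksFreezing` literal. A plausible reading of the `dbg.EnvBool` helper, sketched rather than copied from erigon-lib:

```go
package dbg

import (
	"os"
	"strconv"
)

// EnvBool-style helper: read a boolean kill switch once, falling back to a
// default on absence or parse failure. The real erigon-lib implementation
// may differ; this is an assumption based on the call site above.
func envBool(name string, defaultVal bool) bool {
	v, ok := os.LookupEnv(name)
	if !ok {
		return defaultVal
	}
	b, err := strconv.ParseBool(v)
	if err != nil {
		return defaultVal
	}
	return b
}

var borProduceFiles = envBool("BOR_PRODUCE_FILES", false)
```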
- -package snap - -import ( - "github.com/erigontech/erigon-lib/kv" - "github.com/erigontech/erigon/eth/ethconfig" -) - -var ( - blockSnapshotEnabledKey = []byte("blocksSnapshotEnabled") -) - -func Enabled(tx kv.Getter) (bool, error) { - return kv.GetBool(tx, kv.DatabaseInfo, blockSnapshotEnabledKey) -} - -func EnsureNotChanged(tx kv.GetPut, cfg ethconfig.BlocksFreezing) (bool, bool, error) { - ok, v, err := kv.EnsureNotChangedBool(tx, kv.DatabaseInfo, blockSnapshotEnabledKey, cfg.Enabled) - if err != nil { - return false, false, err - } - if !ok { - return false, v, nil - } - return true, false, nil -} - -// ForceSetFlags - if you know what you are doing -func ForceSetFlags(tx kv.GetPut, cfg ethconfig.BlocksFreezing) error { - if cfg.Enabled { - if err := tx.Put(kv.DatabaseInfo, blockSnapshotEnabledKey, []byte{1}); err != nil { - return err - } - } else { - if err := tx.Put(kv.DatabaseInfo, blockSnapshotEnabledKey, []byte{0}); err != nil { - return err - } - } - return nil -} diff --git a/turbo/snapshotsync/snapshotsync.go b/turbo/snapshotsync/snapshotsync.go index e22cda4b884..75ef5d55c83 100644 --- a/turbo/snapshotsync/snapshotsync.go +++ b/turbo/snapshotsync/snapshotsync.go @@ -29,6 +29,7 @@ import ( "github.com/erigontech/erigon-lib/chain" "github.com/erigontech/erigon-lib/chain/snapcfg" "github.com/erigontech/erigon-lib/common" + "github.com/erigontech/erigon-lib/common/datadir" "github.com/erigontech/erigon-lib/common/dbg" "github.com/erigontech/erigon-lib/config3" "github.com/erigontech/erigon-lib/diagnostics" @@ -47,6 +48,46 @@ import ( "github.com/erigontech/erigon/turbo/services" ) +var greatOtterBanner = ` + _____ _ _ _ ____ _ _ + / ____| | | | (_) / __ \| | | | + | (___ | |_ __ _ _ __| |_ _ _ __ __ _ | | | | |_| |_ ___ _ __ ___ _ _ _ __ ___ + \___ \| __/ _ | '__| __| | '_ \ / _ | | | | | __| __/ _ \ '__/ __| | | | '_ \ / __| + ____) | || (_| | | | |_| | | | | (_| | | |__| | |_| || __/ | \__ \ |_| | | | | (__ _ _ _ + |_____/ \__\__,_|_| \__|_|_| |_|\__, | \____/ \__|\__\___|_| |___/\__, |_| |_|\___(_|_|_) + __/ | __/ | + |___/ |___/ + + + .:-===++**++===-: + :=##%@@@@@@@@@@@@@@@@@@%#*=. + .=#@@@@@@%##+====--====+##@@@@@@@#=. ... + .=**###*=:+#@@@@%*=:. .:=#%@@@@#==#@@@@@%#- + -#@@@@%%@@@@@@%+-. .=*%@@@@#*+*#@@@%= + =@@@*: -%%+: -#@+. =@@@- + %@@# +@#. :%%- %@@* + @@@+ +%=. -+= :=- .#@- %@@# + *@@%: #@- =@@@* +@@@%. =@= -*@@@: + #@@@##@+ #@@@@. %@@@@= #@%@@@#- + :#@@@@: +@@@# :=++++==-. *@@@@: =@@@@- + =%@@%= +#*. =#%#+==-==+#%%=: .+#*: .#@@@#. + +@@%+. .+%+-. :=##- :#@@@- + -@@@= -%#: ..::. +@* +@@%. + .::-========*@@@.. -@# +%@@@@%. -@# .-@@@+=======- +.:-====----:::::#@@%:--=::::.. #@: *@@@@@%: *@= ..:-:-=--:@@@+::::---- + =@@@:....... @@ :+@#=. -@+ .......-@@@: + .:=++####*%@@%=--::::.. @@ %# %* :@* -@+ ...::---+@@@#*#*##+=-: + ..--==::.. :%@@@- ..:::.. @@ +@*:.-#@@+-.-#@- -@+ ..:::.. .+@@@#. ..:- + .#@@@##-:. @@ :+#@%=.:+@@#=. -@+ .-=#@@@@+ + -=+++=--+%@@%+=. @@ +%*=+#%- -@+ :=#@@@%+--++++=: + .=**=:. .=*@@@@@#=:. @@ :--. -@+ .-+#@@@@%+: .:=*+-. + ::. .=*@@@@@@%#@@+=-:.. ..::=+#@%#@@@@@@%+-. ..-. + ..=*#@@@@@@@@@@@@@@@%%@@@@@@@@@@@@@@%#+-. 
+ .:-==++*#######%######**+==-: + + +` + type CaplinMode int const ( @@ -227,7 +268,7 @@ func computeBlocksToPrune(blockReader services.FullBlockReader, p prune.Mode) (b // WaitForDownloader - wait for Downloader service to download all expected snapshots // for MVP we sync with Downloader only once, in future will send new snapshots also -func WaitForDownloader(ctx context.Context, logPrefix string, headerchain, blobs bool, prune prune.Mode, caplin CaplinMode, agg *state.Aggregator, tx kv.RwTx, blockReader services.FullBlockReader, cc *chain.Config, snapshotDownloader proto_downloader.DownloaderClient, stagesIdsList []string) error { +func WaitForDownloader(ctx context.Context, logPrefix string, dirs datadir.Dirs, headerchain, blobs bool, prune prune.Mode, caplin CaplinMode, agg *state.Aggregator, tx kv.RwTx, blockReader services.FullBlockReader, cc *chain.Config, snapshotDownloader proto_downloader.DownloaderClient, stagesIdsList []string) error { snapshots := blockReader.Snapshots() borSnapshots := blockReader.BorSnapshots() @@ -244,13 +285,6 @@ func WaitForDownloader(ctx context.Context, logPrefix string, headerchain, blobs return nil } - if headerchain { - snapshots.Close() - if cc.Bor != nil { - borSnapshots.Close() - } - } - //Corner cases: // - Erigon generated file X with hash H1. User upgraded Erigon. New version has preverified file X with hash H2. Must ignore H2 (don't send to Downloader) // - Erigon "download once": means restart/upgrade/downgrade must not download files (and will be fast) @@ -317,9 +351,6 @@ func WaitForDownloader(ctx context.Context, logPrefix string, headerchain, blobs } - // TODO: https://github.com/erigontech/erigon/issues/11271 - time.Sleep(10 * time.Second) - downloadStartTime := time.Now() const logInterval = 20 * time.Second logEvery := time.NewTicker(logInterval) @@ -333,11 +364,17 @@ func WaitForDownloader(ctx context.Context, logPrefix string, headerchain, blobs // Print download progress until all segments are available + firstLog := true for !stats.Completed { select { case <-ctx.Done(): return ctx.Err() case <-logEvery.C: + if firstLog && headerchain { + log.Info("[OtterSync] Starting OtterSync") + log.Info(greatOtterBanner) + firstLog = false + } if stats, err = snapshotDownloader.Stats(ctx, &proto_downloader.StatsRequest{}); err != nil { log.Warn("Error while waiting for snapshots progress", "err", err) } else { @@ -448,7 +485,8 @@ func WaitForDownloader(ctx context.Context, logPrefix string, headerchain, blobs if firstNonGenesis != nil { firstNonGenesisBlockNumber := binary.BigEndian.Uint64(firstNonGenesis) if snapshots.SegmentsMax()+1 < firstNonGenesisBlockNumber { - log.Warn(fmt.Sprintf("[%s] Some blocks are not in snapshots and not in db", logPrefix), "max_in_snapshots", snapshots.SegmentsMax(), "min_in_db", firstNonGenesisBlockNumber) + log.Warn(fmt.Sprintf("[%s] Some blocks are not in snapshots and not in db. this could have happened due to the node being stopped at the wrong time, you can fix this with 'rm -rf %s' (this is not equivalent to a full resync)", logPrefix, dirs.Chaindata), "max_in_snapshots", snapshots.SegmentsMax(), "min_in_db", firstNonGenesisBlockNumber) + return fmt.Errorf("some blocks are not in snapshots and not in db. 
this could have happened due to the node being stopped at the wrong time, you can fix this with 'rm -rf %s' (this is not equivalent to a full resync)", dirs.Chaindata) } } return nil @@ -497,7 +535,7 @@ func logStats(ctx context.Context, stats *proto_downloader.StatsReply, startTime remainingBytes = stats.BytesTotal - stats.BytesCompleted } - downloadTimeLeft := calculateTime(remainingBytes, stats.DownloadRate) + downloadTimeLeft := calculateTime(remainingBytes, stats.CompletionRate) log.Info(fmt.Sprintf("[%s] %s", logPrefix, logReason), "progress", fmt.Sprintf("%.2f%% %s/%s", stats.Progress, common.ByteCount(stats.BytesCompleted), common.ByteCount(stats.BytesTotal)), @@ -505,6 +543,9 @@ func logStats(ctx context.Context, stats *proto_downloader.StatsReply, startTime "time-left", downloadTimeLeft, "total-time", time.Since(startTime).Round(time.Second).String(), "download", common.ByteCount(stats.DownloadRate)+"/s", + "flush", common.ByteCount(stats.FlushRate)+"/s", + "hash", common.ByteCount(stats.HashRate)+"/s", + "complete", common.ByteCount(stats.CompletionRate)+"/s", "upload", common.ByteCount(stats.UploadRate)+"/s", "peers", stats.PeersUnique, "files", stats.FilesTotal, diff --git a/turbo/stages/blockchain_test.go b/turbo/stages/blockchain_test.go index 9141009c422..ab9798a530a 100644 --- a/turbo/stages/blockchain_test.go +++ b/turbo/stages/blockchain_test.go @@ -56,7 +56,6 @@ import ( "github.com/erigontech/erigon/crypto" "github.com/erigontech/erigon/ethdb/prune" "github.com/erigontech/erigon/params" - "github.com/erigontech/erigon/turbo/services" "github.com/erigontech/erigon/turbo/stages/mock" ) @@ -546,7 +545,7 @@ func TestChainTxReorgs(t *testing.T) { if bn, _ := rawdb.ReadTxLookupEntry(tx, txn.Hash()); bn != nil { t.Errorf("drop %d: tx %v found while shouldn't have been", i, txn) } - if rcpt, _, _, _, _ := readReceipt(tx, txn.Hash(), m.BlockReader); rcpt != nil { + if rcpt, _, _, _, _ := readReceipt(tx, txn.Hash(), m); rcpt != nil { t.Errorf("drop %d: receipt %v found while shouldn't have been", i, rcpt) } } @@ -558,12 +557,8 @@ func TestChainTxReorgs(t *testing.T) { require.NoError(t, err) require.True(t, found) - if m.HistoryV3 { - // m.HistoryV3 doesn't store - } else { - if rcpt, _, _, _, _ := readReceipt(tx, txn.Hash(), m.BlockReader); rcpt == nil { - t.Errorf("add %d: expected receipt to be found", i) - } + if rcpt, _, _, _, _ := readReceipt(tx, txn.Hash(), m); rcpt == nil { + t.Errorf("add %d: expected receipt to be found", i) } } // shared tx @@ -572,17 +567,14 @@ func TestChainTxReorgs(t *testing.T) { if bn, _ := rawdb.ReadTxLookupEntry(tx, txn.Hash()); bn == nil { t.Errorf("drop %d: tx %v found while shouldn't have been", i, txn) } - if m.HistoryV3 { - // m.HistoryV3 doesn't store - } else { - if rcpt, _, _, _, _ := readReceipt(tx, txn.Hash(), m.BlockReader); rcpt == nil { - t.Errorf("share %d: expected receipt to be found", i) - } + + if rcpt, _, _, _, _ := readReceipt(tx, txn.Hash(), m); rcpt == nil { + t.Errorf("share %d: expected receipt to be found", i) } } } -func readReceipt(db kv.Tx, txHash libcommon.Hash, br services.FullBlockReader) (*types.Receipt, libcommon.Hash, uint64, uint64, error) { +func readReceipt(db kv.Tx, txHash libcommon.Hash, m *mock.MockSentry) (*types.Receipt, libcommon.Hash, uint64, uint64, error) { // Retrieve the context of the receipt based on the transaction hash blockNumber, err := rawdb.ReadTxLookupEntry(db, txHash) if err != nil { @@ -591,19 +583,23 @@ func readReceipt(db kv.Tx, txHash libcommon.Hash, br services.FullBlockReader) ( if 
blockNumber == nil { return nil, libcommon.Hash{}, 0, 0, nil } - blockHash, err := br.CanonicalHash(context.Background(), db, *blockNumber) + blockHash, err := m.BlockReader.CanonicalHash(context.Background(), db, *blockNumber) if err != nil { return nil, libcommon.Hash{}, 0, 0, err } if blockHash == (libcommon.Hash{}) { return nil, libcommon.Hash{}, 0, 0, nil } - b, senders, err := br.BlockWithSenders(context.Background(), db, blockHash, *blockNumber) + b, _, err := m.BlockReader.BlockWithSenders(context.Background(), db, blockHash, *blockNumber) if err != nil { return nil, libcommon.Hash{}, 0, 0, err } + // Read all the receipts from the block and return the one with the matching hash - receipts := rawdb.ReadReceipts(db, b, senders) + receipts, err := m.ReceiptsReader.GetReceipts(context.Background(), m.ChainConfig, db, b) + if err != nil { + return nil, libcommon.Hash{}, 0, 0, err + } for receiptIndex, receipt := range receipts { if receipt.TxHash == txHash { return receipt, blockHash, *blockNumber, uint64(receiptIndex), nil diff --git a/turbo/stages/bodydownload/body_algos.go b/turbo/stages/bodydownload/body_algos.go index c7b879612b3..97c38215ead 100644 --- a/turbo/stages/bodydownload/body_algos.go +++ b/turbo/stages/bodydownload/body_algos.go @@ -19,6 +19,7 @@ package bodydownload import ( "bytes" "context" + "errors" "fmt" "github.com/erigontech/erigon/params" "math/big" @@ -76,7 +77,7 @@ func (bd *BodyDownload) UpdateFromDb(db kv.Tx) (headHeight, headTime uint64, hea headTd256 = new(uint256.Int) overflow := headTd256.SetFromBig(headTd) if overflow { - return 0, 0, libcommon.Hash{}, nil, fmt.Errorf("headTd higher than 2^256-1") + return 0, 0, libcommon.Hash{}, nil, errors.New("headTd higher than 2^256-1") } headTime = 0 headHeader, err := bd.br.Header(context.Background(), db, headHash, headHeight) diff --git a/turbo/stages/genesis_test.go b/turbo/stages/genesis_test.go index 87616166cf5..4a8dcd5f7fb 100644 --- a/turbo/stages/genesis_test.go +++ b/turbo/stages/genesis_test.go @@ -68,7 +68,7 @@ func TestSetupGenesis(t *testing.T) { { name: "genesis without ChainConfig", fn: func(t *testing.T, db kv.RwDB) (*chain.Config, *types.Block, error) { - return core.CommitGenesisBlock(db, new(types.Genesis), tmpdir, logger) + return core.CommitGenesisBlock(db, new(types.Genesis), datadir.New(tmpdir), logger) }, wantErr: types.ErrGenesisNoConfig, wantConfig: params.AllProtocolChanges, @@ -76,7 +76,7 @@ func TestSetupGenesis(t *testing.T) { { name: "no block in DB, genesis == nil", fn: func(t *testing.T, db kv.RwDB) (*chain.Config, *types.Block, error) { - return core.CommitGenesisBlock(db, nil, tmpdir, logger) + return core.CommitGenesisBlock(db, nil, datadir.New(tmpdir), logger) }, wantHash: params.MainnetGenesisHash, wantConfig: params.MainnetChainConfig, @@ -84,7 +84,7 @@ func TestSetupGenesis(t *testing.T) { { name: "mainnet block in DB, genesis == nil", fn: func(t *testing.T, db kv.RwDB) (*chain.Config, *types.Block, error) { - return core.CommitGenesisBlock(db, nil, tmpdir, logger) + return core.CommitGenesisBlock(db, nil, datadir.New(tmpdir), logger) }, wantHash: params.MainnetGenesisHash, wantConfig: params.MainnetChainConfig, @@ -92,8 +92,8 @@ func TestSetupGenesis(t *testing.T) { { name: "custom block in DB, genesis == nil", fn: func(t *testing.T, db kv.RwDB) (*chain.Config, *types.Block, error) { - core.MustCommitGenesis(&customg, db, tmpdir, logger) - return core.CommitGenesisBlock(db, nil, tmpdir, logger) + core.MustCommitGenesis(&customg, db, datadir.New(tmpdir), logger) + return 
core.CommitGenesisBlock(db, nil, datadir.New(tmpdir), logger) }, wantHash: customghash, wantConfig: customg.Config, @@ -101,8 +101,8 @@ func TestSetupGenesis(t *testing.T) { { name: "custom block in DB, genesis == sepolia", fn: func(t *testing.T, db kv.RwDB) (*chain.Config, *types.Block, error) { - core.MustCommitGenesis(&customg, db, tmpdir, logger) - return core.CommitGenesisBlock(db, core.SepoliaGenesisBlock(), tmpdir, logger) + core.MustCommitGenesis(&customg, db, datadir.New(tmpdir), logger) + return core.CommitGenesisBlock(db, core.SepoliaGenesisBlock(), datadir.New(tmpdir), logger) }, wantErr: &types.GenesisMismatchError{Stored: customghash, New: params.SepoliaGenesisHash}, wantHash: params.SepoliaGenesisHash, @@ -111,28 +111,18 @@ func TestSetupGenesis(t *testing.T) { { name: "custom block in DB, genesis == bor-mainnet", fn: func(t *testing.T, db kv.RwDB) (*chain.Config, *types.Block, error) { - core.MustCommitGenesis(&customg, db, tmpdir, logger) - return core.CommitGenesisBlock(db, core.BorMainnetGenesisBlock(), tmpdir, logger) + core.MustCommitGenesis(&customg, db, datadir.New(tmpdir), logger) + return core.CommitGenesisBlock(db, core.BorMainnetGenesisBlock(), datadir.New(tmpdir), logger) }, wantErr: &types.GenesisMismatchError{Stored: customghash, New: params.BorMainnetGenesisHash}, wantHash: params.BorMainnetGenesisHash, wantConfig: params.BorMainnetChainConfig, }, - { - name: "custom block in DB, genesis == mumbai", - fn: func(t *testing.T, db kv.RwDB) (*chain.Config, *types.Block, error) { - core.MustCommitGenesis(&customg, db, tmpdir, logger) - return core.CommitGenesisBlock(db, core.MumbaiGenesisBlock(), tmpdir, logger) - }, - wantErr: &types.GenesisMismatchError{Stored: customghash, New: params.MumbaiGenesisHash}, - wantHash: params.MumbaiGenesisHash, - wantConfig: params.MumbaiChainConfig, - }, { name: "custom block in DB, genesis == amoy", fn: func(t *testing.T, db kv.RwDB) (*chain.Config, *types.Block, error) { - core.MustCommitGenesis(&customg, db, tmpdir, logger) - return core.CommitGenesisBlock(db, core.AmoyGenesisBlock(), tmpdir, logger) + core.MustCommitGenesis(&customg, db, datadir.New(tmpdir), logger) + return core.CommitGenesisBlock(db, core.AmoyGenesisBlock(), datadir.New(tmpdir), logger) }, wantErr: &types.GenesisMismatchError{Stored: customghash, New: params.AmoyGenesisHash}, wantHash: params.AmoyGenesisHash, @@ -141,8 +131,8 @@ func TestSetupGenesis(t *testing.T) { { name: "compatible config in DB", fn: func(t *testing.T, db kv.RwDB) (*chain.Config, *types.Block, error) { - core.MustCommitGenesis(&oldcustomg, db, tmpdir, logger) - return core.CommitGenesisBlock(db, &customg, tmpdir, logger) + core.MustCommitGenesis(&oldcustomg, db, datadir.New(tmpdir), logger) + return core.CommitGenesisBlock(db, &customg, datadir.New(tmpdir), logger) }, wantHash: customghash, wantConfig: customg.Config, @@ -166,7 +156,7 @@ func TestSetupGenesis(t *testing.T) { return nil, nil, err } // This should return a compatibility error. 
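// Editor's note: the change running through this test file replaces the bare
// tmpdir string with datadir.New(tmpdir), so CommitGenesisBlock now receives
// a typed datadir.Dirs value instead of a raw path. A minimal sketch of that
// call shape, assuming the erigon-lib import path and using only the Dirs
// fields already visible elsewhere in this diff (Chaindata, Snap, Tmp):

package main

import (
	"fmt"
	"os"

	"github.com/erigontech/erigon-lib/common/datadir"
)

func main() {
	tmpdir, _ := os.MkdirTemp("", "genesis-test") // stand-in for t.TempDir()
	defer os.RemoveAll(tmpdir)

	// One root yields every standard node sub-directory, so a callee such as
	// CommitGenesisBlock cannot be handed an unrelated path by accident.
	dirs := datadir.New(tmpdir)
	fmt.Println(dirs.Chaindata, dirs.Snap, dirs.Tmp)
}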
- return core.CommitGenesisBlock(m.DB, &customg, tmpdir, logger) + return core.CommitGenesisBlock(m.DB, &customg, datadir.New(tmpdir), logger) }, wantHash: customghash, wantConfig: customg.Config, @@ -185,7 +175,7 @@ func TestSetupGenesis(t *testing.T) { t.Parallel() dirs := datadir.New(tmpdir) db, _ := temporaltest.NewTestDB(t, dirs) - blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, dirs.Snap, 0, log.New()), freezeblocks.NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, dirs.Snap, 0, log.New())) + blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{}, dirs.Snap, 0, log.New()), freezeblocks.NewBorRoSnapshots(ethconfig.BlocksFreezing{}, dirs.Snap, 0, log.New())) config, genesis, err := test.fn(t, db) // Check the return values. if !reflect.DeepEqual(err, test.wantErr) { diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index 86dff5e76c6..af141b610de 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -29,6 +29,7 @@ import ( "math/big" "slices" "sort" + "strconv" "strings" "time" @@ -277,7 +278,7 @@ func (hd *HeaderDownload) logAnchorState() { slices.Sort(bs) for j, b := range bs { if j == 0 { - sbb.WriteString(fmt.Sprintf("%d", b)) + sbb.WriteString(strconv.Itoa(b)) } else if j == len(bs)-1 { if bs[j-1]+1 == b { // Close interval diff --git a/turbo/stages/mock/mock_sentry.go b/turbo/stages/mock/mock_sentry.go index 36aa86cfc98..5531cde488a 100644 --- a/turbo/stages/mock/mock_sentry.go +++ b/turbo/stages/mock/mock_sentry.go @@ -28,6 +28,7 @@ import ( "time" "github.com/c2h5oh/datasize" + "github.com/erigontech/erigon/turbo/jsonrpc/receipts" lru "github.com/hashicorp/golang-lru/arc/v2" "github.com/holiman/uint256" "go.uber.org/mock/gomock" @@ -134,6 +135,7 @@ type MockSentry struct { agg *libstate.Aggregator BlockSnapshots *freezeblocks.RoSnapshots BlockReader services.FullBlockReader + ReceiptsReader *receipts.Generator posStagedSync *stagedsync.Sync } @@ -277,6 +279,7 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK cfg.Sync.BodyDownloadTimeoutSeconds = 10 cfg.DeprecatedTxPool.Disable = !withTxPool cfg.DeprecatedTxPool.StartOnInit = true + cfg.Dirs = dirs logger := log.Root() logger.SetHandler(log.LvlFilterHandler(log.LvlError, log.StderrHandler)) @@ -287,6 +290,8 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK erigonGrpcServeer := remotedbserver.NewKvServer(ctx, db, nil, nil, nil, logger) allSnapshots := freezeblocks.NewRoSnapshots(ethconfig.Defaults.Snapshot, dirs.Snap, 0, logger) allBorSnapshots := freezeblocks.NewBorRoSnapshots(ethconfig.Defaults.Snapshot, dirs.Snap, 0, logger) + br := freezeblocks.NewBlockReader(allSnapshots, allBorSnapshots) + mock := &MockSentry{ Ctx: ctx, cancel: ctxCancel, DB: db, agg: agg, tb: tb, @@ -303,9 +308,11 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK }, PeerId: gointerfaces.ConvertHashToH512([64]byte{0x12, 0x34, 0x50}), // "12345" BlockSnapshots: allSnapshots, - BlockReader: freezeblocks.NewBlockReader(allSnapshots, allBorSnapshots), + BlockReader: br, + ReceiptsReader: receipts.NewGenerator(16, br, engine), HistoryV3: true, } + if tb != nil { tb.Cleanup(mock.Close) } @@ -357,7 +364,7 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK } // Committed genesis will be shared between download 
and mock sentry - _, mock.Genesis, err = core.CommitGenesisBlock(mock.DB, gspec, "", mock.Log) + _, mock.Genesis, err = core.CommitGenesisBlock(mock.DB, gspec, datadir.New(tmpdir), mock.Log) if _, ok := err.(*chain.ConfigCompatError); err != nil && !ok { if tb != nil { tb.Fatal(err) @@ -473,7 +480,7 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK cfg.Sync, stagedsync.MiningStages(mock.Ctx, stagedsync.StageMiningCreateBlockCfg(mock.DB, miner, *mock.ChainConfig, mock.Engine, nil, nil, dirs.Tmp, mock.BlockReader), - stagedsync.StageBorHeimdallCfg(mock.DB, snapDb, miningStatePos, *mock.ChainConfig, nil, mock.BlockReader, nil, nil, nil, recents, signatures, false, nil), + stagedsync.StageBorHeimdallCfg(mock.DB, snapDb, miningStatePos, *mock.ChainConfig, nil, mock.BlockReader, nil, nil, recents, signatures, false, nil), stagedsync.StageExecuteBlocksCfg( mock.DB, prune, @@ -489,10 +496,9 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK mock.sentriesClient.Hd, mock.gspec, ethconfig.Defaults.Sync, - mock.agg, nil, ), - stagedsync.StageSendersCfg(mock.DB, mock.ChainConfig, cfg.Sync, false, dirs.Tmp, prune, mock.BlockReader, mock.sentriesClient.Hd, nil), + stagedsync.StageSendersCfg(mock.DB, mock.ChainConfig, cfg.Sync, false, dirs.Tmp, prune, mock.BlockReader, mock.sentriesClient.Hd), stagedsync.StageMiningExecCfg(mock.DB, miner, nil, *mock.ChainConfig, mock.Engine, &vm.Config{}, dirs.Tmp, nil, 0, mock.TxPool, nil, mock.BlockReader), stagedsync.StageMiningFinishCfg(mock.DB, *mock.ChainConfig, mock.Engine, miner, miningCancel, mock.BlockReader, latestBlockBuiltStore), ), stagedsync.MiningUnwindOrder, stagedsync.MiningPruneOrder, @@ -512,7 +518,7 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK mock.agg.SetProduceMod(mock.BlockReader.FreezingCfg().ProduceE3) mock.Sync = stagedsync.New( cfg.Sync, - stagedsync.DefaultStages(mock.Ctx, stagedsync.StageSnapshotsCfg(mock.DB, *mock.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, mock.BlockReader, mock.Notifications, mock.Engine, mock.agg, false, false, nil, prune), stagedsync.StageHeadersCfg(mock.DB, mock.sentriesClient.Hd, mock.sentriesClient.Bd, *mock.ChainConfig, cfg.Sync, sendHeaderRequest, propagateNewBlockHashes, penalize, cfg.BatchSize, false, mock.BlockReader, blockWriter, dirs.Tmp, mock.Notifications, nil), stagedsync.StageBorHeimdallCfg(mock.DB, snapDb, stagedsync.MiningState{}, *mock.ChainConfig, nil /* heimdallClient */, mock.BlockReader, nil, nil, nil, recents, signatures, false, nil), stagedsync.StageBlockHashesCfg(mock.DB, mock.Dirs.Tmp, mock.ChainConfig, blockWriter), stagedsync.StageBodiesCfg(mock.DB, nil, mock.sentriesClient.Bd, sendBodyRequest, penalize, blockPropagator, cfg.Sync.BodyDownloadTimeoutSeconds, *mock.ChainConfig, mock.BlockReader, blockWriter, nil), stagedsync.StageSendersCfg(mock.DB, mock.ChainConfig, cfg.Sync, false, dirs.Tmp, prune, mock.BlockReader, mock.sentriesClient.Hd, nil), stagedsync.StageExecuteBlocksCfg( + stagedsync.DefaultStages(mock.Ctx, stagedsync.StageSnapshotsCfg(mock.DB, *mock.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, mock.BlockReader, mock.Notifications, mock.Engine, mock.agg, false, false, nil, prune), stagedsync.StageHeadersCfg(mock.DB, mock.sentriesClient.Hd, mock.sentriesClient.Bd, *mock.ChainConfig, cfg.Sync, sendHeaderRequest, propagateNewBlockHashes, penalize, cfg.BatchSize, false, mock.BlockReader, blockWriter, dirs.Tmp, mock.Notifications), 
stagedsync.StageBorHeimdallCfg(mock.DB, snapDb, stagedsync.MiningState{}, *mock.ChainConfig, nil, mock.BlockReader, nil, nil, recents, signatures, false, nil), stagedsync.StageBlockHashesCfg(mock.DB, mock.Dirs.Tmp, mock.ChainConfig, blockWriter), stagedsync.StageBodiesCfg(mock.DB, nil, mock.sentriesClient.Bd, sendBodyRequest, penalize, blockPropagator, cfg.Sync.BodyDownloadTimeoutSeconds, *mock.ChainConfig, mock.BlockReader, blockWriter), stagedsync.StageSendersCfg(mock.DB, mock.ChainConfig, cfg.Sync, false, dirs.Tmp, prune, mock.BlockReader, mock.sentriesClient.Hd), stagedsync.StageExecuteBlocksCfg( mock.DB, prune, cfg.BatchSize, @@ -527,7 +533,6 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK mock.sentriesClient.Hd, mock.gspec, ethconfig.Defaults.Sync, - mock.agg, nil, ), stagedsync.StageTxLookupCfg(mock.DB, prune, dirs.Tmp, mock.ChainConfig.Bor, mock.BlockReader), stagedsync.StageFinishCfg(mock.DB, dirs.Tmp, forkValidator), !withPosDownloader), stagedsync.DefaultUnwindOrder, @@ -548,7 +553,7 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK cfg.Sync, stagedsync.MiningStages(mock.Ctx, stagedsync.StageMiningCreateBlockCfg(mock.DB, miner, *mock.ChainConfig, mock.Engine, nil, nil, dirs.Tmp, mock.BlockReader), - stagedsync.StageBorHeimdallCfg(mock.DB, snapDb, miner, *mock.ChainConfig, nil /*heimdallClient*/, mock.BlockReader, nil, nil, nil, recents, signatures, false, nil), + stagedsync.StageBorHeimdallCfg(mock.DB, snapDb, miner, *mock.ChainConfig, nil, mock.BlockReader, nil, nil, recents, signatures, false, nil), stagedsync.StageExecuteBlocksCfg( mock.DB, prune, @@ -564,10 +569,9 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK mock.sentriesClient.Hd, mock.gspec, ethconfig.Defaults.Sync, - mock.agg, nil, ), - stagedsync.StageSendersCfg(mock.DB, mock.ChainConfig, cfg.Sync, false, dirs.Tmp, prune, mock.BlockReader, mock.sentriesClient.Hd, nil), + stagedsync.StageSendersCfg(mock.DB, mock.ChainConfig, cfg.Sync, false, dirs.Tmp, prune, mock.BlockReader, mock.sentriesClient.Hd), stagedsync.StageMiningExecCfg(mock.DB, miner, nil, *mock.ChainConfig, mock.Engine, &vm.Config{}, dirs.Tmp, nil, 0, mock.TxPool, nil, mock.BlockReader), stagedsync.StageMiningFinishCfg(mock.DB, *mock.ChainConfig, mock.Engine, miner, miningCancel, mock.BlockReader, latestBlockBuiltStore), ), diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index 5cd9bf33e14..bcf6fe20539 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -51,8 +51,6 @@ import ( "github.com/erigontech/erigon/p2p/sentry" "github.com/erigontech/erigon/p2p/sentry/sentry_multi_client" "github.com/erigontech/erigon/polygon/bor" - "github.com/erigontech/erigon/polygon/bor/finality" - "github.com/erigontech/erigon/polygon/bor/finality/flags" "github.com/erigontech/erigon/polygon/heimdall" "github.com/erigontech/erigon/turbo/engineapi/engine_helpers" "github.com/erigontech/erigon/turbo/services" @@ -598,37 +596,14 @@ func NewDefaultStages(ctx context.Context, // Hence we run it in the test mode. 
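// Editor's note: the hunk just below (together with the removal of
// NewLoopBreakCheck at the end of this file) drops the loop-break plumbing
// from stage construction. The deleted code composed break conditions as
// predicates over the loop count; a generic, self-contained version of that
// pattern follows, with illustrative names that are not part of the erigon
// API:

package main

import "fmt"

// withLoopLimit wraps an optional predicate so that exceeding a block limit
// also requests a break, mirroring the removed cfg.Sync.LoopBlockLimit logic.
func withLoopLimit(prev func(int) bool, limit int) func(int) bool {
	return func(loopCount int) bool {
		if limit > 0 && loopCount > limit {
			return true
		}
		if prev != nil {
			return prev(loopCount)
		}
		return false
	}
}

func main() {
	check := withLoopLimit(nil, 3)
	fmt.Println(check(2), check(4)) // false true
}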
runInTestMode := cfg.ImportMode - loopBreakCheck := NewLoopBreakCheck(cfg, heimdallClient) - - if heimdallClient != nil && flags.Milestone { - loopBreakCheck = func(int) bool { - return finality.IsMilestoneRewindPending() - } - } - - if cfg.Sync.LoopBlockLimit > 0 { - previousBreakCheck := loopBreakCheck - loopBreakCheck = func(loopCount int) bool { - if loopCount > int(cfg.Sync.LoopBlockLimit) { - return true - } - - if previousBreakCheck != nil { - return previousBreakCheck(loopCount) - } - - return false - } - } - return stagedsync.DefaultStages(ctx, stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, engine, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling, silkworm, cfg.Prune), - stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, dirs.Tmp, notifications, loopBreakCheck), - stagedsync.StageBorHeimdallCfg(db, snapDb, stagedsync.MiningState{}, *controlServer.ChainConfig, heimdallClient, blockReader, controlServer.Hd, controlServer.Penalize, loopBreakCheck, recents, signatures, cfg.WithHeimdallWaypointRecording, nil), + stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, dirs.Tmp, notifications), + stagedsync.StageBorHeimdallCfg(db, snapDb, stagedsync.MiningState{}, *controlServer.ChainConfig, heimdallClient, blockReader, controlServer.Hd, controlServer.Penalize, recents, signatures, cfg.WithHeimdallWaypointRecording, nil), stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter), - stagedsync.StageBodiesCfg(db, blobStore, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, blockWriter, loopBreakCheck), - stagedsync.StageSendersCfg(db, controlServer.ChainConfig, cfg.Sync, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd, loopBreakCheck), - stagedsync.StageExecuteBlocksCfg(db, cfg.Prune, cfg.BatchSize, controlServer.ChainConfig, controlServer.Engine, &vm.Config{}, notifications.Accumulator, cfg.StateStream, false, dirs, blockReader, controlServer.Hd, cfg.Genesis, cfg.Sync, agg, SilkwormForExecutionStage(silkworm, cfg)), + stagedsync.StageBodiesCfg(db, blobStore, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, blockWriter), + stagedsync.StageSendersCfg(db, controlServer.ChainConfig, cfg.Sync, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd), + stagedsync.StageExecuteBlocksCfg(db, cfg.Prune, cfg.BatchSize, controlServer.ChainConfig, controlServer.Engine, &vm.Config{}, notifications.Accumulator, cfg.StateStream, false, dirs, blockReader, controlServer.Hd, cfg.Genesis, cfg.Sync, SilkwormForExecutionStage(silkworm, cfg)), stagedsync.StageTxLookupCfg(db, cfg.Prune, dirs.Tmp, controlServer.ChainConfig.Bor, blockReader), stagedsync.StageFinishCfg(db, dirs.Tmp, forkValidator), runInTestMode) } @@ -656,7 +631,6 @@ func NewPipelineStages(ctx context.Context, // 
During Import we don't want other services like header requests, body requests etc. to be running. // Hence we run it in the test mode. runInTestMode := cfg.ImportMode - loopBreakCheck := NewLoopBreakCheck(cfg, nil) var depositContract libcommon.Address if cfg.Genesis != nil { @@ -668,19 +642,19 @@ func NewPipelineStages(ctx context.Context, return stagedsync.PipelineStages(ctx, stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, engine, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling, silkworm, cfg.Prune), stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter), - stagedsync.StageSendersCfg(db, controlServer.ChainConfig, cfg.Sync, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd, loopBreakCheck), - stagedsync.StageExecuteBlocksCfg(db, cfg.Prune, cfg.BatchSize, controlServer.ChainConfig, controlServer.Engine, &vm.Config{}, notifications.Accumulator, cfg.StateStream, false, dirs, blockReader, controlServer.Hd, cfg.Genesis, cfg.Sync, agg, SilkwormForExecutionStage(silkworm, cfg)), + stagedsync.StageSendersCfg(db, controlServer.ChainConfig, cfg.Sync, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd), + stagedsync.StageExecuteBlocksCfg(db, cfg.Prune, cfg.BatchSize, controlServer.ChainConfig, controlServer.Engine, &vm.Config{}, notifications.Accumulator, cfg.StateStream, false, dirs, blockReader, controlServer.Hd, cfg.Genesis, cfg.Sync, SilkwormForExecutionStage(silkworm, cfg)), stagedsync.StageTxLookupCfg(db, cfg.Prune, dirs.Tmp, controlServer.ChainConfig.Bor, blockReader), stagedsync.StageFinishCfg(db, dirs.Tmp, forkValidator), runInTestMode) } return stagedsync.UploaderPipelineStages(ctx, stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, engine, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling, silkworm, cfg.Prune), - stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, dirs.Tmp, notifications, loopBreakCheck), + stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, dirs.Tmp, notifications), stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter), - stagedsync.StageSendersCfg(db, controlServer.ChainConfig, cfg.Sync, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd, loopBreakCheck), - stagedsync.StageBodiesCfg(db, blobStore, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, blockWriter, loopBreakCheck), - stagedsync.StageExecuteBlocksCfg(db, cfg.Prune, cfg.BatchSize, controlServer.ChainConfig, controlServer.Engine, &vm.Config{}, notifications.Accumulator, cfg.StateStream, false, dirs, blockReader, controlServer.Hd, cfg.Genesis, cfg.Sync, agg, SilkwormForExecutionStage(silkworm, cfg)), stagedsync.StageTxLookupCfg(db, cfg.Prune, dirs.Tmp, controlServer.ChainConfig.Bor, blockReader), stagedsync.StageFinishCfg(db, dirs.Tmp, 
forkValidator), runInTestMode) + stagedsync.StageSendersCfg(db, controlServer.ChainConfig, cfg.Sync, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd), + stagedsync.StageBodiesCfg(db, blobStore, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, blockWriter), + stagedsync.StageExecuteBlocksCfg(db, cfg.Prune, cfg.BatchSize, controlServer.ChainConfig, controlServer.Engine, &vm.Config{}, notifications.Accumulator, cfg.StateStream, false, dirs, blockReader, controlServer.Hd, cfg.Genesis, cfg.Sync, SilkwormForExecutionStage(silkworm, cfg)), stagedsync.StageTxLookupCfg(db, cfg.Prune, dirs.Tmp, controlServer.ChainConfig.Bor, blockReader), stagedsync.StageFinishCfg(db, dirs.Tmp, forkValidator), runInTestMode) } func NewInMemoryExecution(ctx context.Context, db kv.RwDB, blobStore services.BlobStorage, cfg *ethconfig.Config, controlServer *sentry_multi_client.MultiClient, @@ -688,7 +662,7 @@ func NewInMemoryExecution(ctx context.Context, db kv.RwDB, blobStore services.Bl silkworm *silkworm.Silkworm, logger log.Logger) *stagedsync.Sync { return stagedsync.New( cfg.Sync, - stagedsync.StateStages(ctx, stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, false, blockReader, blockWriter, dirs.Tmp, nil, nil), stagedsync.StageBodiesCfg(db, blobStore, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, blockWriter, nil), stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter), stagedsync.StageSendersCfg(db, controlServer.ChainConfig, cfg.Sync, true, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd, nil), stagedsync.StageExecuteBlocksCfg(db, cfg.Prune, cfg.BatchSize, controlServer.ChainConfig, controlServer.Engine, &vm.Config{}, notifications.Accumulator, cfg.StateStream, true, cfg.Dirs, blockReader, controlServer.Hd, cfg.Genesis, cfg.Sync, agg, SilkwormForExecutionStage(silkworm, cfg))), + stagedsync.StateStages(ctx, stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, false, blockReader, blockWriter, dirs.Tmp, nil), stagedsync.StageBodiesCfg(db, blobStore, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, blockWriter), stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter), stagedsync.StageSendersCfg(db, controlServer.ChainConfig, cfg.Sync, true, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd), stagedsync.StageExecuteBlocksCfg(db, cfg.Prune, cfg.BatchSize, controlServer.ChainConfig, controlServer.Engine, &vm.Config{}, notifications.Accumulator, cfg.StateStream, true, cfg.Dirs, blockReader, controlServer.Hd, cfg.Genesis, cfg.Sync, SilkwormForExecutionStage(silkworm, cfg))), stagedsync.StateUnwindOrder, nil, /* pruneOrder */ logger, @@ -715,7 +689,6 @@ func NewPolygonSyncStages( statusDataProvider *sentry.StatusDataProvider, stopNode func() error, ) []*stagedsync.Stage { - loopBreakCheck := NewLoopBreakCheck(config, 
heimdallClient) return stagedsync.PolygonSyncStages( ctx, stagedsync.StageSnapshotsCfg( @@ -747,18 +720,8 @@ func NewPolygonSyncStages( bor.GenesisContractStateReceiverABI(), config.LoopBlockLimit, ), - stagedsync.StageSendersCfg( - db, - chainConfig, - config.Sync, - false, /* badBlockHalt */ - config.Dirs.Tmp, - config.Prune, - blockReader, - nil, /* hd */ - loopBreakCheck, - ), - stagedsync.StageExecuteBlocksCfg(db, config.Prune, config.BatchSize, chainConfig, consensusEngine, &vm.Config{}, notifications.Accumulator, config.StateStream, false, config.Dirs, blockReader, nil, config.Genesis, config.Sync, agg, SilkwormForExecutionStage(silkworm, config)), + stagedsync.StageSendersCfg(db, chainConfig, config.Sync, false, config.Dirs.Tmp, config.Prune, blockReader, nil), + stagedsync.StageExecuteBlocksCfg(db, config.Prune, config.BatchSize, chainConfig, consensusEngine, &vm.Config{}, notifications.Accumulator, config.StateStream, false, config.Dirs, blockReader, nil, config.Genesis, config.Sync, SilkwormForExecutionStage(silkworm, config)), stagedsync.StageTxLookupCfg( db, config.Prune, @@ -773,22 +736,3 @@ func NewPolygonSyncStages( ), ) } - -func NewLoopBreakCheck(cfg *ethconfig.Config, heimdallClient heimdall.HeimdallClient) func(int) bool { - var loopBreakCheck func(int) bool - - if heimdallClient != nil && flags.Milestone { - loopBreakCheck = func(int) bool { - return finality.IsMilestoneRewindPending() - } - } - - previousBreakCheck := loopBreakCheck - return func(loopCount int) bool { - if previousBreakCheck != nil { - return previousBreakCheck(loopCount) - } - - return false - } -} diff --git a/turbo/transactions/call.go b/turbo/transactions/call.go index 7baeeff2437..ccfebaa001d 100644 --- a/turbo/transactions/call.go +++ b/turbo/transactions/call.go @@ -18,6 +18,7 @@ package transactions import ( "context" + "errors" "fmt" "time" @@ -89,7 +90,7 @@ func DoCall( var overflow bool baseFee, overflow = uint256.FromBig(header.BaseFee) if overflow { - return nil, fmt.Errorf("header.BaseFee uint256 overflow") + return nil, errors.New("header.BaseFee uint256 overflow") } } msg, err := args.ToMessage(gasCap, baseFee) @@ -221,7 +222,7 @@ func NewReusableCaller( var overflow bool baseFee, overflow = uint256.FromBig(header.BaseFee) if overflow { - return nil, fmt.Errorf("header.BaseFee uint256 overflow") + return nil, errors.New("header.BaseFee uint256 overflow") } } diff --git a/turbo/trie/debug.go b/turbo/trie/debug.go index 4d115a97e5d..ecc340338c6 100644 --- a/turbo/trie/debug.go +++ b/turbo/trie/debug.go @@ -23,6 +23,7 @@ package trie import ( "bytes" + "encoding/hex" "fmt" "io" @@ -44,12 +45,12 @@ func (n *fullNode) fstring(ind string) string { resp := fmt.Sprintf("full\n%s ", ind) for i, node := range &n.Children { if node == nil { - resp += fmt.Sprintf("%s: ", indices[i]) + resp += indices[i] + ": " } else { - resp += fmt.Sprintf("%s: %v", indices[i], node.fstring(ind+" ")) + resp += indices[i] + ": " + node.fstring(ind+" ") } } - return resp + fmt.Sprintf("\n%s] ", ind) + return resp + "\n" + ind + "]" } func (n *fullNode) print(w io.Writer) { fmt.Fprintf(w, "f(") @@ -113,9 +114,9 @@ func (an accountNode) fstring(ind string) string { encodedAccount := make([]byte, an.EncodingLengthForHashing()) an.EncodeForHashing(encodedAccount) if an.storage == nil { - return fmt.Sprintf("%x", encodedAccount) + return hex.EncodeToString(encodedAccount) } - return fmt.Sprintf("%x %v", encodedAccount, an.storage.fstring(ind+" ")) + return hex.EncodeToString(encodedAccount) + " " + 
an.storage.fstring(ind+" ") } func (an accountNode) print(w io.Writer) { diff --git a/turbo/trie/hashbuilder.go b/turbo/trie/hashbuilder.go index 0d0a738e92a..16217c17049 100644 --- a/turbo/trie/hashbuilder.go +++ b/turbo/trie/hashbuilder.go @@ -18,6 +18,7 @@ package trie import ( "bytes" + "errors" "fmt" "io" "math/bits" @@ -36,7 +37,7 @@ import ( const hashStackStride = length2.Hash + 1 // + 1 byte for RLP encoding -var EmptyCodeHash = crypto.Keccak256Hash(nil) +var EmptyCodeHash = crypto.Keccak256Hash(nil) //c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470 // HashBuilder implements the interface `structInfoReceiver` and opcodes that the structural information of the trie // is comprised of @@ -488,7 +489,7 @@ func (hb *HashBuilder) extensionHash(key []byte) error { fmt.Printf("extensionHash [%x]=>[%x]\nHash [%x]\n", key, capture, hb.hashStack[len(hb.hashStack)-hashStackStride:len(hb.hashStack)]) } if _, ok := hb.nodeStack[len(hb.nodeStack)-1].(*fullNode); ok { - return fmt.Errorf("extensionHash cannot be emitted when a node is on top of the stack") + return errors.New("extensionHash cannot be emitted when a node is on top of the stack") } return nil } @@ -670,7 +671,7 @@ func (hb *HashBuilder) emptyRoot() { func (hb *HashBuilder) RootHash() (libcommon.Hash, error) { if !hb.hasRoot() { - return libcommon.Hash{}, fmt.Errorf("no root in the tree") + return libcommon.Hash{}, errors.New("no root in the tree") } return hb.rootHash(), nil } diff --git a/turbo/trie/proof.go b/turbo/trie/proof.go index 38c26ed6b6d..12fb58b667f 100644 --- a/turbo/trie/proof.go +++ b/turbo/trie/proof.go @@ -18,6 +18,7 @@ package trie import ( "bytes" + "errors" "fmt" libcommon "github.com/erigontech/erigon-lib/common" @@ -127,7 +128,7 @@ func decodeRef(buf []byte) (node, []byte, error) { switch { case kind == rlp.List: if len(buf)-len(rest) >= length.Hash { - return nil, nil, fmt.Errorf("embedded nodes must be less than hash size") + return nil, nil, errors.New("embedded nodes must be less than hash size") } n, err := decodeNode(buf) if err != nil { @@ -192,7 +193,7 @@ func decodeShort(elems []byte) (*shortNode, error) { func decodeNode(encoded []byte) (node, error) { if len(encoded) == 0 { - return nil, fmt.Errorf("nodes must not be zero length") + return nil, errors.New("nodes must not be zero length") } elems, _, err := rlp.SplitList(encoded) if err != nil { @@ -240,7 +241,7 @@ func verifyProof(root libcommon.Hash, key []byte, proofs map[libcommon.Hash]node switch nt := node.(type) { case *fullNode: if len(key) == 0 { - return nil, fmt.Errorf("full nodes should not have values") + return nil, errors.New("full nodes should not have values") } node, key = nt.Children[key[0]], key[1:] if node == nil { @@ -307,13 +308,13 @@ func VerifyAccountProofByHash(stateRoot libcommon.Hash, accountKey libcommon.Has // A nil value proves the account does not exist. 
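// Editor's note: the switch below, like the rest of turbo/trie in this diff,
// swaps fmt.Errorf for errors.New wherever the message is a constant.
// errors.New avoids the format-string parsing and allocation that fmt.Errorf
// performs even when no verbs are present; fmt.Errorf stays wherever values
// are actually interpolated. A minimal sketch of the rule being applied:

package main

import (
	"errors"
	"fmt"
)

// Constant message: errors.New is the cheaper, lint-clean form.
var errNoRoot = errors.New("no root in the tree")

// Formatted message: fmt.Errorf is still the right tool.
func badNodeCount(n int) error {
	return fmt.Errorf("unexpected node count: %d", n)
}

func main() {
	fmt.Println(errNoRoot)
	fmt.Println(badNodeCount(3))
}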
switch { case proof.Nonce != 0: - return fmt.Errorf("account is not in state, but has non-zero nonce") + return errors.New("account is not in state, but has non-zero nonce") case proof.Balance.ToInt().Sign() != 0: - return fmt.Errorf("account is not in state, but has balance") + return errors.New("account is not in state, but has balance") case proof.StorageHash != libcommon.Hash{}: - return fmt.Errorf("account is not in state, but has non-empty storage hash") + return errors.New("account is not in state, but has non-empty storage hash") case proof.CodeHash != libcommon.Hash{}: - return fmt.Errorf("account is not in state, but has non-empty code hash") + return errors.New("account is not in state, but has non-empty code hash") default: return nil } @@ -347,7 +348,7 @@ func VerifyStorageProof(storageRoot libcommon.Hash, proof accounts.StorProofResu func VerifyStorageProofByHash(storageRoot libcommon.Hash, keyHash libcommon.Hash, proof accounts.StorProofResult) error { if storageRoot == EmptyRoot || storageRoot == (libcommon.Hash{}) { if proof.Value.ToInt().Sign() != 0 { - return fmt.Errorf("empty storage root cannot have non-zero values") + return errors.New("empty storage root cannot have non-zero values") } // The spec here is a bit unclear. The yellow paper makes it clear that the // EmptyRoot hash is a special case where the trie is empty. Since the trie @@ -357,7 +358,7 @@ func VerifyStorageProofByHash(storageRoot libcommon.Hash, keyHash libcommon.Hash // pre-image of the EmptyRoot) should be included. This implementation // chooses to require the proof be empty. if len(proof.Proof) > 0 { - return fmt.Errorf("empty storage root should not have proof nodes") + return errors.New("empty storage root should not have proof nodes") } return nil } diff --git a/turbo/trie/retain_list.go b/turbo/trie/retain_list.go index 98087593a62..1bc58892915 100644 --- a/turbo/trie/retain_list.go +++ b/turbo/trie/retain_list.go @@ -22,6 +22,7 @@ package trie import ( "bytes" "encoding/binary" + "errors" "fmt" "math/big" "sort" @@ -151,7 +152,7 @@ func (pr *ProofRetainer) ProofResult() (*accounts.AccProofResult, error) { } if pr.acc.Initialised && result.StorageHash == (libcommon.Hash{}) { - return nil, fmt.Errorf("did not find storage root in proof elements") + return nil, errors.New("did not find storage root in proof elements") } result.StorageProof = make([]accounts.StorProofResult, len(pr.storageKeys)) diff --git a/turbo/trie/structural_test.go b/turbo/trie/structural_test.go index b50458bf234..4c1ff56c538 100644 --- a/turbo/trie/structural_test.go +++ b/turbo/trie/structural_test.go @@ -24,6 +24,7 @@ package trie import ( "bytes" "encoding/binary" + "errors" "fmt" "slices" "testing" @@ -550,7 +551,7 @@ func TestStorageOnly(t *testing.T) { require.Equal(t, fmt.Sprintf("%b", uint16(0b100000)), fmt.Sprintf("%b", hasTree)) require.NotNil(t, hashes) case 5: - require.NoError(t, fmt.Errorf("not expected")) + require.NoError(t, errors.New("not expected")) } return nil
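// Editor's note: the remaining fmt.Sprintf rewrites in turbo/trie/debug.go
// and turbo/stages/headerdownload/header_algos.go follow the same reasoning:
// for a lone %x or %d verb, hex.EncodeToString and strconv.Itoa produce
// byte-identical output without fmt's reflection over interface arguments.
// A small self-contained check of that equivalence:

package main

import (
	"encoding/hex"
	"fmt"
	"strconv"
)

func main() {
	b := []byte{0xc5, 0xd2}
	// hex.EncodeToString(b) matches fmt.Sprintf("%x", b) exactly, minus the
	// format-string machinery.
	fmt.Println(hex.EncodeToString(b) == fmt.Sprintf("%x", b)) // true

	n := 42
	// Likewise strconv.Itoa(n) matches fmt.Sprintf("%d", n), which is why the
	// anchor-state logging was rewritten to use it.
	fmt.Println(strconv.Itoa(n) == fmt.Sprintf("%d", n)) // true
}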