diff --git a/mpt-witness-generator/.github/workflows/go.yml b/mpt-witness-generator/.github/workflows/go.yml new file mode 100644 index 0000000000..35860814a3 --- /dev/null +++ b/mpt-witness-generator/.github/workflows/go.yml @@ -0,0 +1,33 @@ +# This workflow will build a golang project +# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go + +name: Go + +on: + push: + branches: [ "main" ] + pull_request: + branches: [ "main" ] + +jobs: + + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: '1.20' + + - name: Format + run: go fmt ./... + + - name: Build + run: go build -v ./... + + - name: Test + env: + NO_GETH: true + run: go test -v ./... diff --git a/mpt-witness-generator/.gitignore b/mpt-witness-generator/.gitignore new file mode 100644 index 0000000000..2fc01ebc1c --- /dev/null +++ b/mpt-witness-generator/.gitignore @@ -0,0 +1,7 @@ +rust_call/target +.DS_Store +.vscode +mpt-witness-generator +mpt +generated_witnesses +rust_call/proof.json diff --git a/mpt-witness-generator/README.md b/mpt-witness-generator/README.md new file mode 100644 index 0000000000..064fede2a3 --- /dev/null +++ b/mpt-witness-generator/README.md @@ -0,0 +1,124 @@ +# Merkle Patricia Trie witness generator + +This project aims to prepare witness generator for Merkle Patricia Trie circuit which is part of +[zkevm-circuits](https://github.com/appliedzkp/zkevm-circuits). + +It is based on [geth](https://github.com/ethereum/go-ethereum). +It takes `eth_getProof` output and it transforms it into the MPT circuit witness. + +MPT circuit checks that the modification of the trie state happened correctly. + +Let us assume there are two proofs (as returned by `eth getProof`): + +- A proof that there exists value `val1` at key `key1` for address `addr` in the state trie with root `root1`. 
+- A proof that there exists value `val2` at key `key1` for address `addr` in the state trie with root `root2`. + +The circuit checks the transition from `val1` to `val2` at `key1` that led to the change +of trie root from `root1` to `root2`. + +For this reason, there are two parallel proofs for each trie modification. +There is `S` (as `State`) proof which presents the state of the trie +before the modification. And there is `C` (as `Change`) proof which presents the state +of the trie after modification. + +An example of `eth_getProof` output is given below: + +``` +[248 81 128 128 128 160 32 34 39 131 73 65 47 37 211 142 206 231 172 16 11 203 33 107 30 7 213 226 2 174 55 216 4 117 220 10 186 68 128 128 128 128 128 128 128 160 55 235 85 86 230 197 53 159 28 141 120 87 82 57 4 132 185 12 24 158 142 210 106 188 12 87 179 231 52 16 126 229 128 128 128 128 128] +[226 160 59 138 106 70 105 186 37 13 38 205 122 69 158 202 157 33 95 131 7 227 58 235 229 3 121 188 90 54 23 236 52 68 1] +``` + +The first element of the proof is a branch containing two children. The second element of the proof is +a storage leaf contained in the branch (from the first proof element). + +To simplify the MPT circuit, the `eth_getProof` elements are not directly used as rows. 
+Instead, the rows are prepared as: + +``` +[1 0 1 0 248 81 0 248 81 0 11 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] +[0 0 128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1] +[0 0 128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1] +[0 0 128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1] +[0 160 32 34 39 131 73 65 47 37 211 142 206 231 172 16 11 203 33 107 30 7 213 226 2 174 55 216 4 117 220 10 186 68 0 160 32 34 39 131 73 65 47 37 211 142 206 231 172 16 11 203 33 107 30 7 213 226 2 174 55 216 4 117 220 10 186 68 1] +[0 0 128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1] +[0 0 128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1] +[0 0 128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1] +[0 0 128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1] +[0 0 128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1] +[0 0 128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1] +[0 0 128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1] +[0 160 55 235 85 86 230 197 53 159 28 141 120 87 82 57 4 132 185 12 24 158 142 210 106 188 12 87 179 231 52 16 126 229 0 160 
88 197 127 237 244 146 28 57 104 36 96 69 159 84 254 170 28 196 41 183 253 107 213 32 170 141 111 191 30 100 117 55 1] +[0 0 128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1] +[0 0 128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1] +[0 0 128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1] +[0 0 128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 128 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1] +[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 16] +[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 17] +[226 160 59 138 106 70 105 186 37 13 38 205 122 69 158 202 157 33 95 131 7 227 58 235 229 3 121 188 90 54 23 236 52 68 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2] +[1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 13] +[226 160 59 138 106 70 105 186 37 13 38 205 122 69 158 202 157 33 95 131 7 227 58 235 229 3 121 188 90 54 23 236 52 68 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3] +[17 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 14] +[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 15] +[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 19] +``` + +... 
+
+
+
+## Generate witnesses
+
+To generate witnesses for the MPT circuit, go into the `witness` folder and execute
+
+```
+go test gen_witness_from_infura_blockchain_test.go prepare_witness.go leaf.go extension_node.go modified_extension_node.go nodes.go test_tools.go branch.go util.go
+```
+
+to generate the tests that use the Infura blockchain.
+
+To generate the tests that use a local blockchain you need a local `geth`. You would
+need to run something like:
+```
+geth --dev --http --ipcpath ~/Library/Ethereum/geth.ipc
+
+```
+The local `geth` is used to generate some tests that have a small number of accounts so that
+these accounts appear in the first or second level of the trie. You might need to remove the
+database if you already have some accounts:
+
+```
+geth removedb
+```
+
+And to generate the tests:
+
+```
+go test gen_witness_from_local_blockchain_test.go prepare_witness.go leaf.go extension_node.go modified_extension_node.go nodes.go test_tools.go branch.go util.go
+```
+
+The witness files will appear in the `generated_witnesses` folder.
+ +## Calling from Rust + +Build: + +``` +go build -buildmode=c-archive -o libmpt.a witness_gen_wrapper.go +``` + +Copy libmpt.a and libmpt.h to rust_call/build: + +``` +mv libmpt.* rust_call/build +``` + +Note: to avoid the problem described [](https://github.com/golang/go/issues/42459), +the following has been set in rust_call/.cargo/config: + +``` +[build] +rustflags = ["-C", "link-args=-framework CoreFoundation -framework Security"] +``` diff --git a/mpt-witness-generator/go.mod b/mpt-witness-generator/go.mod new file mode 100644 index 0000000000..e7a910f810 --- /dev/null +++ b/mpt-witness-generator/go.mod @@ -0,0 +1,10 @@ +module github.com/privacy-scaling-explorations/mpt-witness-generator + +go 1.16 + +require ( + github.com/VictoriaMetrics/fastcache v1.6.0 // indirect + github.com/ethereum/go-ethereum v1.10.8 // indirect + github.com/holiman/uint256 v1.2.0 // indirect + golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 // indirect +) diff --git a/mpt-witness-generator/go.sum b/mpt-witness-generator/go.sum new file mode 100644 index 0000000000..5a8c30548f --- /dev/null +++ b/mpt-witness-generator/go.sum @@ -0,0 +1,578 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= 
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= +github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= +github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= 
+github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8= +github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o= +github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= +github.com/aws/aws-sdk-go-v2 v1.2.0/go.mod 
h1:zEQs02YRBw1DjK0PoJv3ygDYOFTre1ejlJWl8FwAuQo= +github.com/aws/aws-sdk-go-v2/config v1.1.1/go.mod h1:0XsVy9lBI/BCXm+2Tuvt39YmdHwS5unDQmxZOYe8F5Y= +github.com/aws/aws-sdk-go-v2/credentials v1.1.1/go.mod h1:mM2iIjwl7LULWtS6JCACyInboHirisUUdkBPoTHMOUo= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.2/go.mod h1:3hGg3PpiEjHnrkrlasTfxFqUsZ2GCk/fMUn4CbKgSkM= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.2/go.mod h1:45MfaXZ0cNbeuT0KQ1XJylq8A6+OpVV2E5kvY/Kq+u8= +github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1/go.mod h1:rLiOUrPLW/Er5kRcQ7NkwbjlijluLsrIbu/iyl35RO4= +github.com/aws/aws-sdk-go-v2/service/sso v1.1.1/go.mod h1:SuZJxklHxLAXgLTc1iFXbEWkXs7QRTQpCLGaKIprQW0= +github.com/aws/aws-sdk-go-v2/service/sts v1.1.1/go.mod h1:Wi0EBZwiz/K44YliU0EKxqTCJGUfYTWXrrBwkq736bM= +github.com/aws/smithy-go v1.1.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/snappy-go 
v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304= +github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ= +github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= +github.com/dave/jennifer v1.2.0/go.mod 
h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0= +github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= +github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= +github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA= +github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= +github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/ethereum/go-ethereum v1.10.8 
h1:0UP5WUR8hh46ffbjJV7PK499+uGEyasRIfffS0vy06o= +github.com/ethereum/go-ethereum v1.10.8/go.mod h1:pJNuIUYfX5+JKzSD/BTdNsvJSZ1TJqmz0dVyXMAbf6M= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= +github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= +github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= +github.com/go-ole/go-ole 
v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= +github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/holiman/bloomfilter/v2 
v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= +github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= +github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM= +github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM= +github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= +github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= +github.com/influxdata/influxdb-client-go/v2 v2.4.0/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8= +github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= +github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= +github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= +github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= +github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= 
+github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= +github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0 h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= +github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod 
h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= +github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= +github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= +github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= +github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= +github.com/mattn/go-colorable 
v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= 
+github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= +github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= +github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= +github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= +github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod 
h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/retailnext/hllpp 
v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= +github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= +github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU= +github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954 h1:xQdMZ1WLrgkkvOZ/LDQxjVxMLdby7osSh4ZEVa5sIjs= +github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= +github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4= +github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= +github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA= +github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= +github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= +github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod 
h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/exp 
v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912 h1:uCLL3g5wH2xjxVREVuAbP9JM5PPKjRbXKRa6IBjkzmU= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools 
v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum 
v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod 
h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= +gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible/go.mod 
h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/mpt-witness-generator/oracle/apitypes.go b/mpt-witness-generator/oracle/apitypes.go new file mode 100644 index 0000000000..7459fd9b05 --- /dev/null +++ b/mpt-witness-generator/oracle/apitypes.go @@ -0,0 +1,153 @@ +package oracle + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" +) + +// SendTxArgs represents the arguments to submit a transaction +// This struct is identical to ethapi.TransactionArgs, except for the usage of +// common.MixedcaseAddress in From and To +type SendTxArgs struct { + From common.MixedcaseAddress `json:"from"` + To *common.MixedcaseAddress `json:"to"` + Gas hexutil.Uint64 `json:"gas"` + GasPrice *hexutil.Big `json:"gasPrice"` + MaxFeePerGas *hexutil.Big `json:"maxFeePerGas"` + MaxPriorityFeePerGas *hexutil.Big `json:"maxPriorityFeePerGas"` + Value hexutil.Big `json:"value"` + Nonce hexutil.Uint64 `json:"nonce"` + + // We accept "data" and "input" for backwards-compatibility reasons. + // "input" is the newer name and should be preferred by clients. 
+ // Issue detail: https://github.com/ethereum/go-ethereum/issues/15628 + Data *hexutil.Bytes `json:"data"` + Input *hexutil.Bytes `json:"input,omitempty"` + + // For non-legacy transactions + AccessList *types.AccessList `json:"accessList,omitempty"` + ChainID *hexutil.Big `json:"chainId,omitempty"` + + // Signature values + V *hexutil.Big `json:"v" gencodec:"required"` + R *hexutil.Big `json:"r" gencodec:"required"` + S *hexutil.Big `json:"s" gencodec:"required"` +} + +type Header struct { + ParentHash *common.Hash `json:"parentHash" gencodec:"required"` + UncleHash *common.Hash `json:"sha3Uncles" gencodec:"required"` + Coinbase *common.Address `json:"miner" gencodec:"required"` + Root *common.Hash `json:"stateRoot" gencodec:"required"` + TxHash *common.Hash `json:"transactionsRoot" gencodec:"required"` + ReceiptHash *common.Hash `json:"receiptsRoot" gencodec:"required"` + Bloom *types.Bloom `json:"logsBloom" gencodec:"required"` + Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"` + Number *hexutil.Big `json:"number" gencodec:"required"` + GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"` + GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"` + Time *hexutil.Uint64 `json:"timestamp" gencodec:"required"` + Extra *hexutil.Bytes `json:"extraData" gencodec:"required"` + MixDigest *common.Hash `json:"mixHash"` + Nonce *types.BlockNonce `json:"nonce"` + BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"` + // transactions + Transactions []SendTxArgs `json:"transactions"` +} + +func (dec *Header) ToHeader() types.Header { + var h types.Header + h.ParentHash = *dec.ParentHash + h.UncleHash = *dec.UncleHash + h.Coinbase = *dec.Coinbase + h.Root = *dec.Root + h.TxHash = *dec.TxHash + h.ReceiptHash = *dec.ReceiptHash + h.Bloom = *dec.Bloom + h.Difficulty = (*big.Int)(dec.Difficulty) + h.Number = (*big.Int)(dec.Number) + h.GasLimit = uint64(*dec.GasLimit) + h.GasUsed = uint64(*dec.GasUsed) + h.Time = uint64(*dec.Time) + h.Extra = 
*dec.Extra + if dec.MixDigest != nil { + h.MixDigest = *dec.MixDigest + } + if dec.Nonce != nil { + h.Nonce = *dec.Nonce + } + if dec.BaseFee != nil { + h.BaseFee = (*big.Int)(dec.BaseFee) + } + return h +} + +// ToTransaction converts the arguments to a transaction. +func (args *SendTxArgs) ToTransaction() *types.Transaction { + // Add the To-field, if specified + var to *common.Address + if args.To != nil { + dstAddr := args.To.Address() + to = &dstAddr + } + + var input []byte + if args.Input != nil { + input = *args.Input + } else if args.Data != nil { + input = *args.Data + } + + var data types.TxData + switch { + case args.MaxFeePerGas != nil: + al := types.AccessList{} + if args.AccessList != nil { + al = *args.AccessList + } + data = &types.DynamicFeeTx{ + To: to, + ChainID: (*big.Int)(args.ChainID), + Nonce: uint64(args.Nonce), + Gas: uint64(args.Gas), + GasFeeCap: (*big.Int)(args.MaxFeePerGas), + GasTipCap: (*big.Int)(args.MaxPriorityFeePerGas), + Value: (*big.Int)(&args.Value), + Data: input, + AccessList: al, + V: (*big.Int)(args.V), + R: (*big.Int)(args.R), + S: (*big.Int)(args.S), + } + case args.AccessList != nil: + data = &types.AccessListTx{ + To: to, + ChainID: (*big.Int)(args.ChainID), + Nonce: uint64(args.Nonce), + Gas: uint64(args.Gas), + GasPrice: (*big.Int)(args.GasPrice), + Value: (*big.Int)(&args.Value), + Data: input, + AccessList: *args.AccessList, + V: (*big.Int)(args.V), + R: (*big.Int)(args.R), + S: (*big.Int)(args.S), + } + default: + data = &types.LegacyTx{ + To: to, + Nonce: uint64(args.Nonce), + Gas: uint64(args.Gas), + GasPrice: (*big.Int)(args.GasPrice), + Value: (*big.Int)(&args.Value), + Data: input, + V: (*big.Int)(args.V), + R: (*big.Int)(args.R), + S: (*big.Int)(args.S), + } + } + return types.NewTx(data) +} diff --git a/mpt-witness-generator/oracle/prefetch.go b/mpt-witness-generator/oracle/prefetch.go new file mode 100644 index 0000000000..cd2a2f9676 --- /dev/null +++ b/mpt-witness-generator/oracle/prefetch.go @@ -0,0 
+1,335 @@ +package oracle + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "math/big" + "net/http" + "os" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/rlp" +) + +type jsonreq struct { + Jsonrpc string `json:"jsonrpc"` + Method string `json:"method"` + Params []interface{} `json:"params"` + Id uint64 `json:"id"` +} + +type jsonresp struct { + Jsonrpc string `json:"jsonrpc"` + Id uint64 `json:"id"` + Result AccountResult `json:"result"` +} + +type jsonresps struct { + Jsonrpc string `json:"jsonrpc"` + Id uint64 `json:"id"` + Result string `json:"result"` +} + +type jsonrespi struct { + Jsonrpc string `json:"jsonrpc"` + Id uint64 `json:"id"` + Result hexutil.Uint64 `json:"result"` +} + +type jsonrespt struct { + Jsonrpc string `json:"jsonrpc"` + Id uint64 `json:"id"` + Result Header `json:"result"` +} + +// Result structs for GetProof +type AccountResult struct { + Address common.Address `json:"address"` + AccountProof []string `json:"accountProof"` + Balance *hexutil.Big `json:"balance"` + CodeHash common.Hash `json:"codeHash"` + Nonce hexutil.Uint64 `json:"nonce"` + StorageHash common.Hash `json:"storageHash"` + StorageProof []StorageResult `json:"storageProof"` +} + +type StorageResult struct { + Key string `json:"key"` + Value *hexutil.Big `json:"value"` + Proof []string `json:"proof"` +} + +// Account is the Ethereum consensus representation of accounts. +// These objects are stored in the main account trie. 
+type Account struct { + Nonce uint64 + Balance *big.Int + Root common.Hash // merkle root of the storage trie + CodeHash []byte +} + +var NodeUrl = "https://mainnet.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161" +var RemoteUrl = "https://mainnet.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161" +var LocalUrl = "http://localhost:8545" + +// For generating special tests for MPT circuit: +var PreventHashingInSecureTrie = false + +func toFilename(key string) string { + return fmt.Sprintf("/tmp/eth/json_%s", key) +} + +func cacheRead(key string) []byte { + dat, err := ioutil.ReadFile(toFilename(key)) + if err == nil { + return dat + } + panic("cache missing") +} + +func cacheExists(key string) bool { + _, err := os.Stat(toFilename(key)) + return err == nil +} + +func cacheWrite(key string, value []byte) { + ioutil.WriteFile(toFilename(key), value, 0644) +} + +func getAPI(jsonData []byte) io.Reader { + key := hexutil.Encode(crypto.Keccak256(jsonData)) + /* Note: switching between two testnets (to prepare tests with account in the first level) + if cacheExists(key) { + return bytes.NewReader(cacheRead(key)) + } + */ + resp, _ := http.Post(NodeUrl, "application/json", bytes.NewBuffer(jsonData)) + defer resp.Body.Close() + ret, _ := ioutil.ReadAll(resp.Body) + cacheWrite(key, ret) + return bytes.NewReader(ret) +} + +var unhashMap = make(map[common.Hash]common.Address) + +func unhash(addrHash common.Hash) common.Address { + return unhashMap[addrHash] +} + +var cached = make(map[string]bool) + +func PrefetchStorage(blockNumber *big.Int, addr common.Address, skey common.Hash, postProcess func(map[common.Hash][]byte)) []string { + key := fmt.Sprintf("proof_%d_%s_%s", blockNumber, addr, skey) + // TODO: should return proof anyway + if cached[key] { + return nil + } + cached[key] = true + + ap := getProofAccount(blockNumber, addr, skey, true) + //fmt.Println("PrefetchStorage", blockNumber, addr, skey, len(ap)) + newPreimages := make(map[common.Hash][]byte) + for _, s := range ap { 
+ ret, _ := hex.DecodeString(s[2:]) + hash := crypto.Keccak256Hash(ret) + //fmt.Println(" ", i, hash) + newPreimages[hash] = ret + } + + if postProcess != nil { + postProcess(newPreimages) + } + + for hash, val := range newPreimages { + preimages[hash] = val + } + + return ap +} + +func PrefetchAccount(blockNumber *big.Int, addr common.Address, postProcess func(map[common.Hash][]byte)) []string { + key := fmt.Sprintf("proof_%d_%s", blockNumber, addr) + if cached[key] { + return nil + } + cached[key] = true + + ap := getProofAccount(blockNumber, addr, common.Hash{}, false) + newPreimages := make(map[common.Hash][]byte) + for _, s := range ap { + ret, _ := hex.DecodeString(s[2:]) + hash := crypto.Keccak256Hash(ret) + newPreimages[hash] = ret + + /* + // just for debugging: + elems, _, err := rlp.SplitList(ret) + if err != nil { + fmt.Println("decode error", err) + } + switch c, _ := rlp.CountValues(elems); c { + case 2: + fmt.Println("2") + case 17: + fmt.Println("17") + default: + fmt.Println("invalid number of list elements") + } + */ + } + + if postProcess != nil { + postProcess(newPreimages) + } + + for hash, val := range newPreimages { + preimages[hash] = val + } + + return ap +} + +func PrefetchCode(blockNumber *big.Int, addrHash common.Hash) { + key := fmt.Sprintf("code_%d_%s", blockNumber, addrHash) + if cached[key] { + return + } + cached[key] = true + ret := getProvedCodeBytes(blockNumber, addrHash) + hash := crypto.Keccak256Hash(ret) + preimages[hash] = ret +} + +var inputs [7]common.Hash + +func Input(index int) common.Hash { + if index < 0 || index > 5 { + panic("bad input index") + } + return inputs[index] +} + +func Output(output common.Hash) { + if output == inputs[6] { + fmt.Println("good transition") + } else { + fmt.Println(output, "!=", inputs[5]) + panic("BAD transition :((") + } +} + +func check(err error) { + if err != nil { + log.Fatal(err) + } +} + +func PrefetchBlock(blockNumber *big.Int, startBlock bool, hasher types.TrieHasher) 
types.Header { + r := jsonreq{Jsonrpc: "2.0", Method: "eth_getBlockByNumber", Id: 1} + r.Params = make([]interface{}, 2) + r.Params[0] = fmt.Sprintf("0x%x", blockNumber.Int64()) + r.Params[1] = true + jsonData, _ := json.Marshal(r) + + /*dat, _ := ioutil.ReadAll(getAPI(jsonData)) + fmt.Println(string(dat))*/ + + jr := jsonrespt{} + check(json.NewDecoder(getAPI(jsonData)).Decode(&jr)) + //fmt.Println(jr.Result) + // blockHeader := types.Header(jr.Result) + blockHeader := jr.Result.ToHeader() + + // put in the start block header + if startBlock { + blockHeaderRlp, _ := rlp.EncodeToBytes(blockHeader) + hash := crypto.Keccak256Hash(blockHeaderRlp) + preimages[hash] = blockHeaderRlp + inputs[0] = hash + return blockHeader + } + + // second block + if blockHeader.ParentHash != Input(0) { + fmt.Println(blockHeader.ParentHash, Input(0)) + panic("block transition isn't correct") + } + inputs[1] = blockHeader.TxHash + inputs[2] = blockHeader.Coinbase.Hash() + inputs[3] = blockHeader.UncleHash + inputs[4] = common.BigToHash(big.NewInt(int64(blockHeader.GasLimit))) + inputs[5] = common.BigToHash(big.NewInt(int64(blockHeader.Time))) + + // secret input + inputs[6] = blockHeader.Root + + // save the inputs + saveinput := make([]byte, 0) + for i := 0; i < len(inputs); i++ { + saveinput = append(saveinput, inputs[i].Bytes()[:]...) 
+ } + key := fmt.Sprintf("/tmp/eth/%d", blockNumber.Uint64()-1) + ioutil.WriteFile(key, saveinput, 0644) + + // save the txs + txs := make([]*types.Transaction, len(jr.Result.Transactions)) + for i := 0; i < len(jr.Result.Transactions); i++ { + txs[i] = jr.Result.Transactions[i].ToTransaction() + } + fmt.Println(txs[0].To()) + testTxHash := types.DeriveSha(types.Transactions(txs), hasher) + if testTxHash != blockHeader.TxHash { + fmt.Println(testTxHash, "!=", blockHeader.TxHash) + panic("tx hash derived wrong") + } + + return blockHeader +} + +func getProofAccount(blockNumber *big.Int, addr common.Address, skey common.Hash, storage bool) []string { + addrHash := crypto.Keccak256Hash(addr[:]) + unhashMap[addrHash] = addr + + r := jsonreq{Jsonrpc: "2.0", Method: "eth_getProof", Id: 1} + r.Params = make([]interface{}, 3) + r.Params[0] = addr + r.Params[1] = [1]common.Hash{skey} + r.Params[2] = fmt.Sprintf("0x%x", blockNumber.Int64()) + jsonData, _ := json.Marshal(r) + jr := jsonresp{} + json.NewDecoder(getAPI(jsonData)).Decode(&jr) + + if storage { + return jr.Result.StorageProof[0].Proof + } else { + return jr.Result.AccountProof + } +} + +func getProvedCodeBytes(blockNumber *big.Int, addrHash common.Hash) []byte { + addr := unhash(addrHash) + + r := jsonreq{Jsonrpc: "2.0", Method: "eth_getCode", Id: 1} + r.Params = make([]interface{}, 2) + r.Params[0] = addr + r.Params[1] = fmt.Sprintf("0x%x", blockNumber.Int64()) + jsonData, _ := json.Marshal(r) + jr := jsonresps{} + json.NewDecoder(getAPI(jsonData)).Decode(&jr) + + //fmt.Println(jr.Result) + + // curl -X POST --data '{"jsonrpc":"2.0","method":"eth_getCode","params":["0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b", "0x2"],"id":1}' + + ret, _ := hex.DecodeString(jr.Result[2:]) + //fmt.Println(ret) + return ret +} diff --git a/mpt-witness-generator/oracle/preimage.go b/mpt-witness-generator/oracle/preimage.go new file mode 100644 index 0000000000..93dd8323cb --- /dev/null +++ b/mpt-witness-generator/oracle/preimage.go 
@@ -0,0 +1,49 @@ +package oracle + +import ( + "fmt" + "io/ioutil" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +var preimages = make(map[common.Hash][]byte) + +func Preimage(hash common.Hash) []byte { + val, ok := preimages[hash] + key := fmt.Sprintf("/tmp/eth/%s", hash) + ioutil.WriteFile(key, val, 0644) + if !ok { + fmt.Println("can't find preimage", hash) + } + comphash := crypto.Keccak256Hash(val) + if hash != comphash { + panic("corruption in hash " + hash.String()) + } + return val +} + +// TODO: Maybe we will want to have a seperate preimages for next block's preimages? +func Preimages() map[common.Hash][]byte { + return preimages +} + +// KeyValueWriter wraps the Put method of a backing data store. +type PreimageKeyValueWriter struct{} + +// Put inserts the given value into the key-value data store. +func (kw PreimageKeyValueWriter) Put(key []byte, value []byte) error { + hash := crypto.Keccak256Hash(value) + if hash != common.BytesToHash(key) { + panic("bad preimage value write") + } + preimages[hash] = common.CopyBytes(value) + // fmt.Println("tx preimage", hash, common.Bytes2Hex(value)) + return nil +} + +// Delete removes the key from the key-value data store. +func (kw PreimageKeyValueWriter) Delete(key []byte) error { + return nil +} diff --git a/mpt-witness-generator/rust_call/.cargo/config b/mpt-witness-generator/rust_call/.cargo/config new file mode 100644 index 0000000000..e90e761d30 --- /dev/null +++ b/mpt-witness-generator/rust_call/.cargo/config @@ -0,0 +1,2 @@ +[build] +rustflags = ["-C", "link-args=-framework CoreFoundation -framework Security"] diff --git a/mpt-witness-generator/rust_call/Cargo.lock b/mpt-witness-generator/rust_call/Cargo.lock new file mode 100644 index 0000000000..2f4d2570f8 --- /dev/null +++ b/mpt-witness-generator/rust_call/Cargo.lock @@ -0,0 +1,89 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "itoa" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" + +[[package]] +name = "proc-macro2" +version = "1.0.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" +dependencies = [ + "unicode-xid", +] + +[[package]] +name = "quote" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "864d3e96a899863136fc6e99f3d7cae289dafe43bf2c5ac19b70df7210c0a145" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rust_call" +version = "0.1.0" +dependencies = [ + "serde", + "serde_json", +] + +[[package]] +name = "ryu" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" + +[[package]] +name = "serde" +version = "1.0.136" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.136" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.79" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e8d9fa5c3b304765ce1fd9c4c8a3de2c8db365a5b91be52f186efc675681d95" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "syn" +version = "1.0.86" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a65b3f4ffa0092e9887669db0eae07941f023991ab58ea44da8fe8e2d511c6b" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + 
+[[package]] +name = "unicode-xid" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" diff --git a/mpt-witness-generator/rust_call/Cargo.toml b/mpt-witness-generator/rust_call/Cargo.toml new file mode 100644 index 0000000000..5197fe66aa --- /dev/null +++ b/mpt-witness-generator/rust_call/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "rust_call" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" \ No newline at end of file diff --git a/mpt-witness-generator/rust_call/build.rs b/mpt-witness-generator/rust_call/build.rs new file mode 100644 index 0000000000..978a982142 --- /dev/null +++ b/mpt-witness-generator/rust_call/build.rs @@ -0,0 +1,8 @@ +fn main() { + let path = "./build"; + let lib = "mpt"; + + println!("cargo:rustc-link-search=native={}", path); + println!("cargo:rustc-link-lib=static={}", lib); +} + diff --git a/mpt-witness-generator/rust_call/build/libmpt.a b/mpt-witness-generator/rust_call/build/libmpt.a new file mode 100644 index 0000000000..db38863154 Binary files /dev/null and b/mpt-witness-generator/rust_call/build/libmpt.a differ diff --git a/mpt-witness-generator/rust_call/build/libmpt.h b/mpt-witness-generator/rust_call/build/libmpt.h new file mode 100644 index 0000000000..5978e80bbb --- /dev/null +++ b/mpt-witness-generator/rust_call/build/libmpt.h @@ -0,0 +1,81 @@ +/* Code generated by cmd/cgo; DO NOT EDIT. */ + +/* package command-line-arguments */ + + +#line 1 "cgo-builtin-export-prolog" + +#include + +#ifndef GO_CGO_EXPORT_PROLOGUE_H +#define GO_CGO_EXPORT_PROLOGUE_H + +#ifndef GO_CGO_GOSTRING_TYPEDEF +typedef struct { const char *p; ptrdiff_t n; } _GoString_; +#endif + +#endif + +/* Start of preamble from import "C" comments. 
*/ + + + + +/* End of preamble from import "C" comments. */ + + +/* Start of boilerplate cgo prologue. */ +#line 1 "cgo-gcc-export-header-prolog" + +#ifndef GO_CGO_PROLOGUE_H +#define GO_CGO_PROLOGUE_H + +typedef signed char GoInt8; +typedef unsigned char GoUint8; +typedef short GoInt16; +typedef unsigned short GoUint16; +typedef int GoInt32; +typedef unsigned int GoUint32; +typedef long long GoInt64; +typedef unsigned long long GoUint64; +typedef GoInt64 GoInt; +typedef GoUint64 GoUint; +typedef size_t GoUintptr; +typedef float GoFloat32; +typedef double GoFloat64; +#ifdef _MSC_VER +#include +typedef _Fcomplex GoComplex64; +typedef _Dcomplex GoComplex128; +#else +typedef float _Complex GoComplex64; +typedef double _Complex GoComplex128; +#endif + +/* + static assertion to make sure the file is being used on architecture + at least with matching size of GoInt. +*/ +typedef char _check_for_64_bit_pointer_matching_GoInt[sizeof(void*)==64/8 ? 1:-1]; + +#ifndef GO_CGO_GOSTRING_TYPEDEF +typedef _GoString_ GoString; +#endif +typedef void *GoMap; +typedef void *GoChan; +typedef struct { void *t; void *v; } GoInterface; +typedef struct { void *data; GoInt len; GoInt cap; } GoSlice; + +#endif + +/* End of boilerplate cgo prologue. 
*/ + +#ifdef __cplusplus +extern "C" { +#endif + +extern char* GetWitness(char* proofConf); + +#ifdef __cplusplus +} +#endif diff --git a/mpt-witness-generator/rust_call/src/main.rs b/mpt-witness-generator/rust_call/src/main.rs new file mode 100644 index 0000000000..7144e39003 --- /dev/null +++ b/mpt-witness-generator/rust_call/src/main.rs @@ -0,0 +1,30 @@ +// use serde_json::{json, Value}; +use std::ffi::{CStr, CString}; +use std::os::raw::c_char; +use std::fs::File; +use std::io::Write; + +extern "C" { + fn GetWitness(str: *const c_char) -> *const c_char; +} + +fn main() { + let data = r#" + { + "NodeUrl": "https://mainnet.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161", + "BlockNum": 14359865, + "Addr": "0x4E5B2e1dc63F6b91cb6Cd759936495434C7e972F", + "Keys": ["0x12", "0x21"], + "Values": ["0x1123e2", "0xa21"] + }"#; + + let c_config = CString::new(data).expect("invalid config"); + + let result = unsafe { GetWitness(c_config.as_ptr()) }; + let c_str = unsafe { CStr::from_ptr(result) }; + let string = c_str.to_str().expect("Error translating from library"); + println!("{:?}", string); + + let mut f = File::create("proof.json").expect("Unable to create file"); + f.write_all(string.as_bytes()).expect("Unable to write data"); +} diff --git a/mpt-witness-generator/state/access_list.go b/mpt-witness-generator/state/access_list.go new file mode 100644 index 0000000000..4194691345 --- /dev/null +++ b/mpt-witness-generator/state/access_list.go @@ -0,0 +1,136 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package state + +import ( + "github.com/ethereum/go-ethereum/common" +) + +type accessList struct { + addresses map[common.Address]int + slots []map[common.Hash]struct{} +} + +// ContainsAddress returns true if the address is in the access list. +func (al *accessList) ContainsAddress(address common.Address) bool { + _, ok := al.addresses[address] + return ok +} + +// Contains checks if a slot within an account is present in the access list, returning +// separate flags for the presence of the account and the slot respectively. +func (al *accessList) Contains(address common.Address, slot common.Hash) (addressPresent bool, slotPresent bool) { + idx, ok := al.addresses[address] + if !ok { + // no such address (and hence zero slots) + return false, false + } + if idx == -1 { + // address yes, but no slots + return true, false + } + _, slotPresent = al.slots[idx][slot] + return true, slotPresent +} + +// newAccessList creates a new accessList. +func newAccessList() *accessList { + return &accessList{ + addresses: make(map[common.Address]int), + } +} + +// Copy creates an independent copy of an accessList. 
+func (a *accessList) Copy() *accessList { + cp := newAccessList() + for k, v := range a.addresses { + cp.addresses[k] = v + } + cp.slots = make([]map[common.Hash]struct{}, len(a.slots)) + for i, slotMap := range a.slots { + newSlotmap := make(map[common.Hash]struct{}, len(slotMap)) + for k := range slotMap { + newSlotmap[k] = struct{}{} + } + cp.slots[i] = newSlotmap + } + return cp +} + +// AddAddress adds an address to the access list, and returns 'true' if the operation +// caused a change (addr was not previously in the list). +func (al *accessList) AddAddress(address common.Address) bool { + if _, present := al.addresses[address]; present { + return false + } + al.addresses[address] = -1 + return true +} + +// AddSlot adds the specified (addr, slot) combo to the access list. +// Return values are: +// - address added +// - slot added +// For any 'true' value returned, a corresponding journal entry must be made. +func (al *accessList) AddSlot(address common.Address, slot common.Hash) (addrChange bool, slotChange bool) { + idx, addrPresent := al.addresses[address] + if !addrPresent || idx == -1 { + // Address not present, or addr present but no slots there + al.addresses[address] = len(al.slots) + slotmap := map[common.Hash]struct{}{slot: {}} + al.slots = append(al.slots, slotmap) + return !addrPresent, true + } + // There is already an (address,slot) mapping + slotmap := al.slots[idx] + if _, ok := slotmap[slot]; !ok { + slotmap[slot] = struct{}{} + // Journal add slot change + return false, true + } + // No changes required + return false, false +} + +// DeleteSlot removes an (address, slot)-tuple from the access list. +// This operation needs to be performed in the same order as the addition happened. +// This method is meant to be used by the journal, which maintains ordering of +// operations. 
+func (al *accessList) DeleteSlot(address common.Address, slot common.Hash) { + idx, addrOk := al.addresses[address] + // There are two ways this can fail + if !addrOk { + panic("reverting slot change, address not present in list") + } + slotmap := al.slots[idx] + delete(slotmap, slot) + // If that was the last (first) slot, remove it + // Since additions and rollbacks are always performed in order, + // we can delete the item without worrying about screwing up later indices + if len(slotmap) == 0 { + al.slots = al.slots[:idx] + al.addresses[address] = -1 + } +} + +// DeleteAddress removes an address from the access list. This operation +// needs to be performed in the same order as the addition happened. +// This method is meant to be used by the journal, which maintains ordering of +// operations. +func (al *accessList) DeleteAddress(address common.Address) { + delete(al.addresses, address) +} diff --git a/mpt-witness-generator/state/database.go b/mpt-witness-generator/state/database.go new file mode 100644 index 0000000000..89c86df0c8 --- /dev/null +++ b/mpt-witness-generator/state/database.go @@ -0,0 +1,126 @@ +package state + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/privacy-scaling-explorations/mpt-witness-generator/oracle" + "github.com/privacy-scaling-explorations/mpt-witness-generator/trie" +) + +// TODO: add oracle calls here +// wrapper for the oracle + +type Database struct { + db *trie.Database + BlockNumber *big.Int + StateRoot common.Hash +} + +func NewDatabase(header types.Header) Database { + //triedb := trie.Database{BlockNumber: header.Number, Root: header.Root} + //triedb.Preseed() + triedb := trie.NewDatabase(header) + return Database{db: &triedb, BlockNumber: header.Number, StateRoot: header.Root} +} + +// ContractCode retrieves a particular contract's code. 
+func (db *Database) ContractCode(addrHash common.Hash, codeHash common.Hash) ([]byte, error) { + oracle.PrefetchCode(db.BlockNumber, addrHash) + code := oracle.Preimage(codeHash) + return code, nil +} + +// ContractCodeSize retrieves a particular contracts code's size. +func (db *Database) ContractCodeSize(addrHash common.Hash, codeHash common.Hash) (int, error) { + oracle.PrefetchCode(db.BlockNumber, addrHash) + code := oracle.Preimage(codeHash) + return len(code), nil +} + +func (db *Database) CopyTrie(t Trie) Trie { + // panic("don't copy tries") // <- from cannon + switch t := t.(type) { + case *trie.SecureTrie: + return t.Copy() + default: + panic(fmt.Errorf("unknown trie type %T", t)) + } +} + +// OpenTrie opens the main account trie at a specific root hash. +func (db *Database) OpenTrie(root common.Hash) (Trie, error) { + tr, err := trie.NewSecure(root, db.db) + if err != nil { + return nil, err + } + return tr, nil +} + +// OpenStorageTrie opens the storage trie of an account. +func (db *Database) OpenStorageTrie(addrHash, root common.Hash) (Trie, error) { + //return SimpleTrie{db.BlockNumber, root, true, addrHash}, nil + tr, err := trie.NewSecure(root, db.db) + if err != nil { + return nil, err + } + return tr, nil +} + +type Trie interface { + // TryGet returns the value for key stored in the trie. The value bytes must + // not be modified by the caller. If a node was not found in the database, a + // trie.MissingNodeError is returned. + TryGet(key []byte) ([]byte, error) + + // TryUpdate associates key with value in the trie. If value has length zero, any + // existing value is deleted from the trie. The value bytes must not be modified + // by the caller while they are stored in the trie. If a node was not found in the + // database, a trie.MissingNodeError is returned. + TryUpdate(key, value []byte) error + + TryUpdateAlwaysHash(key, value []byte) error + + // TryDelete removes any existing value for key from the trie. 
If a node was not + // found in the database, a trie.MissingNodeError is returned. + TryDelete(key []byte) error + + // Hash returns the root hash of the trie. It does not write to the database and + // can be used even if the trie doesn't have one. + Hash() common.Hash + + // Commit writes all nodes to the trie's memory database, tracking the internal + // and external (for account tries) references. + Commit(onleaf trie.LeafCallback) (common.Hash, error) + + Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) ([]byte, [][]byte, bool, error) + + GetNodeByNibbles(key []byte) ([]byte, error) + + GetRoot() trie.Node +} + +// stubbed: we don't prefetch + +type triePrefetcher struct { +} + +func (p *triePrefetcher) prefetch(root common.Hash, keys [][]byte) { +} + +func (p *triePrefetcher) used(root common.Hash, used [][]byte) { +} + +func (p *triePrefetcher) close() { +} + +func (p *triePrefetcher) copy() *triePrefetcher { + return p +} + +func (p *triePrefetcher) trie(root common.Hash) Trie { + return nil +} diff --git a/mpt-witness-generator/state/journal.go b/mpt-witness-generator/state/journal.go new file mode 100644 index 0000000000..2070f30875 --- /dev/null +++ b/mpt-witness-generator/state/journal.go @@ -0,0 +1,269 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package state + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" +) + +// journalEntry is a modification entry in the state change journal that can be +// reverted on demand. +type journalEntry interface { + // revert undoes the changes introduced by this journal entry. + revert(*StateDB) + + // dirtied returns the Ethereum address modified by this journal entry. + dirtied() *common.Address +} + +// journal contains the list of state modifications applied since the last state +// commit. These are tracked to be able to be reverted in case of an execution +// exception or revertal request. +type journal struct { + entries []journalEntry // Current changes tracked by the journal + dirties map[common.Address]int // Dirty accounts and the number of changes +} + +// newJournal create a new initialized journal. +func newJournal() *journal { + return &journal{ + dirties: make(map[common.Address]int), + } +} + +// append inserts a new modification entry to the end of the change journal. +func (j *journal) append(entry journalEntry) { + j.entries = append(j.entries, entry) + if addr := entry.dirtied(); addr != nil { + j.dirties[*addr]++ + } +} + +// revert undoes a batch of journalled modifications along with any reverted +// dirty handling too. +func (j *journal) revert(statedb *StateDB, snapshot int) { + for i := len(j.entries) - 1; i >= snapshot; i-- { + // Undo the changes made by the operation + j.entries[i].revert(statedb) + + // Drop any dirty tracking induced by the change + if addr := j.entries[i].dirtied(); addr != nil { + if j.dirties[*addr]--; j.dirties[*addr] == 0 { + delete(j.dirties, *addr) + } + } + } + j.entries = j.entries[:snapshot] +} + +// dirty explicitly sets an address to dirty, even if the change entries would +// otherwise suggest it as clean. 
This method is an ugly hack to handle the RIPEMD +// precompile consensus exception. +func (j *journal) dirty(addr common.Address) { + j.dirties[addr]++ +} + +// length returns the current number of entries in the journal. +func (j *journal) length() int { + return len(j.entries) +} + +type ( + // Changes to the account trie. + createObjectChange struct { + account *common.Address + } + resetObjectChange struct { + prev *stateObject + prevdestruct bool + } + suicideChange struct { + account *common.Address + prev bool // whether account had already suicided + prevbalance *big.Int + } + + // Changes to individual accounts. + balanceChange struct { + account *common.Address + prev *big.Int + } + nonceChange struct { + account *common.Address + prev uint64 + } + storageChange struct { + account *common.Address + key, prevalue common.Hash + } + codeChange struct { + account *common.Address + prevcode, prevhash []byte + } + + // Changes to other state values. + refundChange struct { + prev uint64 + } + addLogChange struct { + txhash common.Hash + } + addPreimageChange struct { + hash common.Hash + } + touchChange struct { + account *common.Address + } + // Changes to the access list + accessListAddAccountChange struct { + address *common.Address + } + accessListAddSlotChange struct { + address *common.Address + slot *common.Hash + } +) + +func (ch createObjectChange) revert(s *StateDB) { + delete(s.stateObjects, *ch.account) + delete(s.stateObjectsDirty, *ch.account) +} + +func (ch createObjectChange) dirtied() *common.Address { + return ch.account +} + +func (ch resetObjectChange) revert(s *StateDB) { + s.setStateObject(ch.prev) + if !ch.prevdestruct && s.snap != nil { + delete(s.snapDestructs, ch.prev.addrHash) + } +} + +func (ch resetObjectChange) dirtied() *common.Address { + return nil +} + +func (ch suicideChange) revert(s *StateDB) { + obj := s.getStateObject(*ch.account) + if obj != nil { + obj.suicided = ch.prev + obj.setBalance(ch.prevbalance) + } +} + +func 
(ch suicideChange) dirtied() *common.Address { + return ch.account +} + +var ripemd = common.HexToAddress("0000000000000000000000000000000000000003") + +func (ch touchChange) revert(s *StateDB) { +} + +func (ch touchChange) dirtied() *common.Address { + return ch.account +} + +func (ch balanceChange) revert(s *StateDB) { + s.getStateObject(*ch.account).setBalance(ch.prev) +} + +func (ch balanceChange) dirtied() *common.Address { + return ch.account +} + +func (ch nonceChange) revert(s *StateDB) { + s.getStateObject(*ch.account).setNonce(ch.prev) +} + +func (ch nonceChange) dirtied() *common.Address { + return ch.account +} + +func (ch codeChange) revert(s *StateDB) { + s.getStateObject(*ch.account).setCode(common.BytesToHash(ch.prevhash), ch.prevcode) +} + +func (ch codeChange) dirtied() *common.Address { + return ch.account +} + +func (ch storageChange) revert(s *StateDB) { + s.getStateObject(*ch.account).setState(ch.key, ch.prevalue) +} + +func (ch storageChange) dirtied() *common.Address { + return ch.account +} + +func (ch refundChange) revert(s *StateDB) { + s.refund = ch.prev +} + +func (ch refundChange) dirtied() *common.Address { + return nil +} + +func (ch addLogChange) revert(s *StateDB) { + logs := s.logs[ch.txhash] + if len(logs) == 1 { + delete(s.logs, ch.txhash) + } else { + s.logs[ch.txhash] = logs[:len(logs)-1] + } + s.logSize-- +} + +func (ch addLogChange) dirtied() *common.Address { + return nil +} + +func (ch addPreimageChange) revert(s *StateDB) { + delete(s.preimages, ch.hash) +} + +func (ch addPreimageChange) dirtied() *common.Address { + return nil +} + +func (ch accessListAddAccountChange) revert(s *StateDB) { + /* + One important invariant here, is that whenever a (addr, slot) is added, if the + addr is not already present, the add causes two journal entries: + - one for the address, + - one for the (address,slot) + Therefore, when unrolling the change, we can always blindly delete the + (addr) at this point, since no storage adds can 
remain when come upon + a single (addr) change. + */ + s.accessList.DeleteAddress(*ch.address) +} + +func (ch accessListAddAccountChange) dirtied() *common.Address { + return nil +} + +func (ch accessListAddSlotChange) revert(s *StateDB) { + s.accessList.DeleteSlot(*ch.address, *ch.slot) +} + +func (ch accessListAddSlotChange) dirtied() *common.Address { + return nil +} diff --git a/mpt-witness-generator/state/state_object.go b/mpt-witness-generator/state/state_object.go new file mode 100644 index 0000000000..197c8b63f7 --- /dev/null +++ b/mpt-witness-generator/state/state_object.go @@ -0,0 +1,563 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package state + +import ( + "bytes" + "fmt" + "io" + "math/big" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/rlp" + "github.com/privacy-scaling-explorations/mpt-witness-generator/oracle" +) + +var emptyCodeHash = crypto.Keccak256(nil) + +type Code []byte + +func (c Code) String() string { + return string(c) //strings.Join(Disassemble(c), " ") +} + +type Storage map[common.Hash]common.Hash + +func (s Storage) String() (str string) { + for key, value := range s { + str += fmt.Sprintf("%X : %X\n", key, value) + } + + return +} + +func (s Storage) Copy() Storage { + cpy := make(Storage) + for key, value := range s { + cpy[key] = value + } + + return cpy +} + +// stateObject represents an Ethereum account which is being modified. +// +// The usage pattern is as follows: +// First you need to obtain a state object. +// Account values can be accessed and modified through the object. +// Finally, call CommitTrie to write the modified storage trie into a database. +type stateObject struct { + address common.Address + addrHash common.Hash // hash of ethereum address of the account + data Account + db *StateDB + + // DB error. + // State objects are used by the consensus core and VM which are + // unable to deal with database-level errors. Any error that occurs + // during a database read is memoized here and will eventually be returned + // by StateDB.Commit. + dbErr error + + // Write caches. 
+ Trie Trie // storage trie, which becomes non-nil on first access + code Code // contract bytecode, which gets set when code is loaded + + originStorage Storage // Storage cache of original entries to dedup rewrites, reset for every transaction + pendingStorage Storage // Storage entries that need to be flushed to disk, at the end of an entire block + dirtyStorage Storage // Storage entries that have been modified in the current transaction execution + fakeStorage Storage // Fake storage which constructed by caller for debugging purpose. + + // Cache flags. + // When an object is marked suicided it will be delete from the trie + // during the "update" phase of the state transition. + dirtyCode bool // true if the code was updated + suicided bool + deleted bool +} + +// empty returns whether the account is considered empty. +func (s *stateObject) empty() bool { + return s.data.Nonce == 0 && s.data.Balance.Sign() == 0 && bytes.Equal(s.data.CodeHash, emptyCodeHash) +} + +// Account is the Ethereum consensus representation of accounts. +// These objects are stored in the main account trie. +type Account struct { + Nonce uint64 + Balance *big.Int + Root common.Hash // merkle root of the storage trie + CodeHash []byte +} + +// newObject creates a state object. +func newObject(db *StateDB, address common.Address, data Account) *stateObject { + if data.Balance == nil { + data.Balance = new(big.Int) + } + if data.CodeHash == nil { + data.CodeHash = emptyCodeHash + } + if data.Root == (common.Hash{}) { + data.Root = emptyRoot + } + return &stateObject{ + db: db, + address: address, + addrHash: crypto.Keccak256Hash(address[:]), + data: data, + originStorage: make(Storage), + pendingStorage: make(Storage), + dirtyStorage: make(Storage), + } +} + +// EncodeRLP implements rlp.Encoder. +func (s *stateObject) EncodeRLP(w io.Writer) error { + return rlp.Encode(w, s.data) +} + +// setError remembers the first non-nil error it is called with. 
+func (s *stateObject) setError(err error) { + if s.dbErr == nil { + s.dbErr = err + } +} + +func (s *stateObject) markSuicided() { + s.suicided = true +} + +func (s *stateObject) touch() { + s.db.journal.append(touchChange{ + account: &s.address, + }) + if s.address == ripemd { + // Explicitly put it in the dirty-cache, which is otherwise generated from + // flattened journals. + s.db.journal.dirty(s.address) + } +} + +func (s *stateObject) getTrie(db Database) Trie { + if s.Trie == nil { + // Try fetching from prefetcher first + // We don't prefetch empty tries + if s.data.Root != emptyRoot && s.db.prefetcher != nil { + // When the miner is creating the pending state, there is no + // prefetcher + s.Trie = s.db.prefetcher.trie(s.data.Root) + } + if s.Trie == nil { + var err error + s.Trie, err = db.OpenStorageTrie(s.addrHash, s.data.Root) + if err != nil { + s.Trie, _ = db.OpenStorageTrie(s.addrHash, common.Hash{}) + s.setError(fmt.Errorf("can't create storage trie: %v", err)) + } + } + } + return s.Trie +} + +// GetState retrieves a value from the account storage trie. +func (s *stateObject) GetState(db Database, key common.Hash) common.Hash { + // If the fake storage is set, only lookup the state here(in the debugging mode) + if s.fakeStorage != nil { + return s.fakeStorage[key] + } + // If we have a dirty value for this state entry, return it + value, dirty := s.dirtyStorage[key] + if dirty { + return value + } + // Otherwise return the entry's original value + return s.GetCommittedState(db, key) +} + +// GetCommittedState retrieves a value from the committed account storage trie. 
+func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Hash {
+	// If the fake storage is set, only lookup the state here(in the debugging mode)
+	if s.fakeStorage != nil {
+		return s.fakeStorage[key]
+	}
+	// If we have a pending write or clean cached, return that
+	if value, pending := s.pendingStorage[key]; pending {
+		return value
+	}
+	if value, cached := s.originStorage[key]; cached {
+		return value
+	}
+	// If no live objects are available, attempt to use snapshots
+	var (
+		enc   []byte
+		err   error
+		meter *time.Duration
+	)
+	readStart := time.Now()
+	if metrics.EnabledExpensive {
+		// If the snap is 'under construction', the first lookup may fail. If that
+		// happens, we don't want to double-count the time elapsed. Thus this
+		// dance with the metering.
+		defer func() {
+			if meter != nil {
+				*meter += time.Since(readStart)
+			}
+		}()
+	}
+	if s.db.snap != nil {
+		if metrics.EnabledExpensive {
+			meter = &s.db.SnapshotStorageReads
+		}
+		// If the object was destructed in *this* block (and potentially resurrected),
+		// the storage has been cleared out, and we should *not* consult the previous
+		// snapshot about any storage values. The only possible alternatives are:
+		// 1) resurrect happened, and new slot values were set -- those should
+		// have been handled via pendingStorage above. 
+ // 2) we don't have new values, and can deliver empty response back + if _, destructed := s.db.snapDestructs[s.addrHash]; destructed { + return common.Hash{} + } + enc, err = s.db.snap.Storage(s.addrHash, crypto.Keccak256Hash(key.Bytes())) + } + // If snapshot unavailable or reading from it failed, load from the database + if s.db.snap == nil || err != nil { + if meter != nil { + // If we already spent time checking the snapshot, account for it + // and reset the readStart + *meter += time.Since(readStart) + readStart = time.Now() + } + if metrics.EnabledExpensive { + meter = &s.db.StorageReads + } + oracle.PrefetchStorage(db.BlockNumber, s.address, key, nil) + if enc, err = s.getTrie(db).TryGet(key.Bytes()); err != nil { + s.setError(err) + return common.Hash{} + } + } + var value common.Hash + if len(enc) > 0 { + _, content, _, err := rlp.Split(enc) + if err != nil { + s.setError(err) + } + value.SetBytes(content) + } + s.originStorage[key] = value + return value +} + +// SetState updates a value in account storage. +func (s *stateObject) SetState(db Database, key, value common.Hash) { + // If the fake storage is set, put the temporary state update here. + if s.fakeStorage != nil { + s.fakeStorage[key] = value + return + } + // If the new value is the same as old, don't set + prev := s.GetState(db, key) + if prev == value { + return + } + // New value is different, update and journal the change + s.db.journal.append(storageChange{ + account: &s.address, + key: key, + prevalue: prev, + }) + s.setState(key, value) +} + +// SetStorage replaces the entire state storage with the given one. +// +// After this function is called, all original state will be ignored and state +// lookup only happens in the fake state storage. +// +// Note this function should only be used for debugging purpose. +func (s *stateObject) SetStorage(storage map[common.Hash]common.Hash) { + // Allocate fake storage if it's nil. 
+ if s.fakeStorage == nil { + s.fakeStorage = make(Storage) + } + for key, value := range storage { + s.fakeStorage[key] = value + } + // Don't bother journal since this function should only be used for + // debugging and the `fake` storage won't be committed to database. +} + +func (s *stateObject) setState(key, value common.Hash) { + s.dirtyStorage[key] = value +} + +// finalise moves all dirty storage slots into the pending area to be hashed or +// committed later. It is invoked at the end of every transaction. +func (s *stateObject) finalise(prefetch bool) { + slotsToPrefetch := make([][]byte, 0, len(s.dirtyStorage)) + for key, value := range s.dirtyStorage { + s.pendingStorage[key] = value + if value != s.originStorage[key] { + slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure + } + } + if s.db.prefetcher != nil && prefetch && len(slotsToPrefetch) > 0 && s.data.Root != emptyRoot { + s.db.prefetcher.prefetch(s.data.Root, slotsToPrefetch) + } + if len(s.dirtyStorage) > 0 { + s.dirtyStorage = make(Storage) + } +} + +// updateTrie writes cached storage modifications into the object's storage trie. 
+// It will return nil if the trie has not been loaded and no changes have been made
+func (s *stateObject) updateTrie(db Database) Trie {
+	// Make sure all dirty slots are finalized into the pending storage area
+	s.finalise(false) // Don't prefetch any more, pull directly if need be
+	if len(s.pendingStorage) == 0 {
+		return s.Trie
+	}
+	// Track the amount of time wasted on updating the storage trie
+	if metrics.EnabledExpensive {
+		defer func(start time.Time) { s.db.StorageUpdates += time.Since(start) }(time.Now())
+	}
+	// The snapshot storage map for the object
+	var storage map[common.Hash][]byte
+	// Insert all the pending updates into the trie
+	tr := s.getTrie(db)
+	hasher := s.db.hasher
+
+	usedStorage := make([][]byte, 0, len(s.pendingStorage))
+	for key, value := range s.pendingStorage {
+		// Skip noop changes, persist actual changes
+		if value == s.originStorage[key] {
+			continue
+		}
+		s.originStorage[key] = value
+
+		var v []byte
+		if (value == common.Hash{}) {
+			//fmt.Println("delete", s.address, key)
+			// Get absence proof of key in case the deletion needs the sister node.
+
+			// Note: commented for now because of `ExtNodeDeleted`
+			// oracle.PrefetchStorage(big.NewInt(db.BlockNumber.Int64()+1), s.address, key, trie.GenPossibleShortNodePreimage)
+			s.setError(tr.TryDelete(key[:]))
+		} else {
+			//fmt.Println("update", s.address, key, value)
+			// Encoding []byte cannot fail, ok to ignore the error. 
+ v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:])) + s.setError(tr.TryUpdate(key[:], v)) + } + // If state snapshotting is active, cache the data til commit + if s.db.snap != nil { + if storage == nil { + // Retrieve the old storage map, if available, create a new one otherwise + if storage = s.db.snapStorage[s.addrHash]; storage == nil { + storage = make(map[common.Hash][]byte) + s.db.snapStorage[s.addrHash] = storage + } + } + storage[crypto.HashData(hasher, key[:])] = v // v will be nil if value is 0x00 + } + usedStorage = append(usedStorage, common.CopyBytes(key[:])) // Copy needed for closure + } + if s.db.prefetcher != nil { + s.db.prefetcher.used(s.data.Root, usedStorage) + } + if len(s.pendingStorage) > 0 { + s.pendingStorage = make(Storage) + } + return tr +} + +// UpdateRoot sets the trie root to the current root hash of +func (s *stateObject) updateRoot(db Database) { + // If nothing changed, don't bother with hashing anything + if s.updateTrie(db) == nil { + return + } + // Track the amount of time wasted on hashing the storage trie + if metrics.EnabledExpensive { + defer func(start time.Time) { s.db.StorageHashes += time.Since(start) }(time.Now()) + } + s.data.Root = s.Trie.Hash() +} + +// CommitTrie the storage trie of the object to db. +// This updates the trie root. +func (s *stateObject) CommitTrie(db Database) error { + // If nothing changed, don't bother with hashing anything + if s.updateTrie(db) == nil { + return nil + } + if s.dbErr != nil { + return s.dbErr + } + // Track the amount of time wasted on committing the storage trie + if metrics.EnabledExpensive { + defer func(start time.Time) { s.db.StorageCommits += time.Since(start) }(time.Now()) + } + root, err := s.Trie.Commit(nil) + if err == nil { + s.data.Root = root + } + return err +} + +// AddBalance adds amount to s's balance. +// It is used to add funds to the destination account of a transfer. 
+func (s *stateObject) AddBalance(amount *big.Int) { + // EIP161: We must check emptiness for the objects such that the account + // clearing (0,0,0 objects) can take effect. + if amount.Sign() == 0 { + if s.empty() { + s.touch() + } + return + } + s.SetBalance(new(big.Int).Add(s.Balance(), amount)) +} + +// SubBalance removes amount from s's balance. +// It is used to remove funds from the origin account of a transfer. +func (s *stateObject) SubBalance(amount *big.Int) { + if amount.Sign() == 0 { + return + } + s.SetBalance(new(big.Int).Sub(s.Balance(), amount)) +} + +func (s *stateObject) SetBalance(amount *big.Int) { + s.db.journal.append(balanceChange{ + account: &s.address, + prev: new(big.Int).Set(s.data.Balance), + }) + s.setBalance(amount) +} + +func (s *stateObject) setBalance(amount *big.Int) { + s.data.Balance = amount +} + +func (s *stateObject) deepCopy(db *StateDB) *stateObject { + stateObject := newObject(db, s.address, s.data) + if s.Trie != nil { + stateObject.Trie = db.Db.CopyTrie(s.Trie) + } + stateObject.code = s.code + stateObject.dirtyStorage = s.dirtyStorage.Copy() + stateObject.originStorage = s.originStorage.Copy() + stateObject.pendingStorage = s.pendingStorage.Copy() + stateObject.suicided = s.suicided + stateObject.dirtyCode = s.dirtyCode + stateObject.deleted = s.deleted + return stateObject +} + +// +// Attribute accessors +// + +// Returns the address of the contract/account +func (s *stateObject) Address() common.Address { + return s.address +} + +// Code returns the contract code associated with this object, if any. 
+func (s *stateObject) Code(db Database) []byte { + if s.code != nil { + return s.code + } + if bytes.Equal(s.CodeHash(), emptyCodeHash) { + return nil + } + code, err := db.ContractCode(s.addrHash, common.BytesToHash(s.CodeHash())) + if err != nil { + s.setError(fmt.Errorf("can't load code hash %x: %v", s.CodeHash(), err)) + } + s.code = code + return code +} + +// CodeSize returns the size of the contract code associated with this object, +// or zero if none. This method is an almost mirror of Code, but uses a cache +// inside the database to avoid loading codes seen recently. +func (s *stateObject) CodeSize(db Database) int { + if s.code != nil { + return len(s.code) + } + if bytes.Equal(s.CodeHash(), emptyCodeHash) { + return 0 + } + size, err := db.ContractCodeSize(s.addrHash, common.BytesToHash(s.CodeHash())) + if err != nil { + s.setError(fmt.Errorf("can't load code size %x: %v", s.CodeHash(), err)) + } + return size +} + +func (s *stateObject) SetCode(codeHash common.Hash, code []byte) { + prevcode := s.Code(s.db.Db) + s.db.journal.append(codeChange{ + account: &s.address, + prevhash: s.CodeHash(), + prevcode: prevcode, + }) + s.setCode(codeHash, code) +} + +func (s *stateObject) setCode(codeHash common.Hash, code []byte) { + s.code = code + s.data.CodeHash = codeHash[:] + s.dirtyCode = true +} + +func (s *stateObject) SetNonce(nonce uint64) { + s.db.journal.append(nonceChange{ + account: &s.address, + prev: s.data.Nonce, + }) + s.setNonce(nonce) +} + +func (s *stateObject) setNonce(nonce uint64) { + s.data.Nonce = nonce +} + +func (s *stateObject) CodeHash() []byte { + return s.data.CodeHash +} + +func (s *stateObject) Balance() *big.Int { + return s.data.Balance +} + +func (s *stateObject) Nonce() uint64 { + return s.data.Nonce +} + +// Never called, but must be present to allow stateObject to be used +// as a vm.Account interface that also satisfies the vm.ContractRef +// interface. Interfaces are awesome. 
+func (s *stateObject) Value() *big.Int { + panic("Value on stateObject should never be called") +} diff --git a/mpt-witness-generator/state/statedb.go b/mpt-witness-generator/state/statedb.go new file mode 100644 index 0000000000..0a5376f108 --- /dev/null +++ b/mpt-witness-generator/state/statedb.go @@ -0,0 +1,1110 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package state provides a caching layer atop the Ethereum state trie. +package state + +import ( + "encoding/hex" + "errors" + "fmt" + "math/big" + "sort" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/state/snapshot" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/rlp" + "github.com/privacy-scaling-explorations/mpt-witness-generator/oracle" + "github.com/privacy-scaling-explorations/mpt-witness-generator/trie" +) + +// for includes we don't have +// + +type revision struct { + id int + journalIndex int +} + +var ( + // emptyRoot is the known root hash of an empty trie. 
+ emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") +) + +type proofList [][]byte + +func (n *proofList) Put(key []byte, value []byte) error { + *n = append(*n, value) + return nil +} + +func (n *proofList) Delete(key []byte) error { + panic("not supported") +} + +// StateDB structs within the ethereum protocol are used to store anything +// within the merkle trie. StateDBs take care of caching and storing +// nested states. It's the general query interface to retrieve: +// * Contracts +// * Accounts +type StateDB struct { + Db Database + prefetcher *triePrefetcher + originalRoot common.Hash // The pre-state root, before any changes were made + trie Trie + hasher crypto.KeccakState + + snaps *snapshot.Tree + snap snapshot.Snapshot + snapDestructs map[common.Hash]struct{} + snapAccounts map[common.Hash][]byte + snapStorage map[common.Hash]map[common.Hash][]byte + + // This map holds 'live' objects, which will get modified while processing a state transition. + stateObjects map[common.Address]*stateObject + stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie + stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution + + // DB error. + // State objects are used by the consensus core and VM which are + // unable to deal with database-level errors. Any error that occurs + // during a database read is memoized here and will eventually be returned + // by StateDB.Commit. + dbErr error + + // The refund counter, also used by state transitioning. + refund uint64 + + thash common.Hash + txIndex int + logs map[common.Hash][]*types.Log + logSize uint + + preimages map[common.Hash][]byte + + // Per-transaction access list + accessList *accessList + + // Journal of state modifications. This is the backbone of + // Snapshot and RevertToSnapshot. 
+ journal *journal + validRevisions []revision + nextRevisionId int + + loadRemoteAccountsIntoStateObjects bool // for MPT generator + + // Measurements gathered during execution for debugging purposes + AccountReads time.Duration + AccountHashes time.Duration + AccountUpdates time.Duration + AccountCommits time.Duration + StorageReads time.Duration + StorageHashes time.Duration + StorageUpdates time.Duration + StorageCommits time.Duration + SnapshotAccountReads time.Duration + SnapshotStorageReads time.Duration + SnapshotCommits time.Duration +} + +func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) { + tr, err := db.OpenTrie(root) + if err != nil { + return nil, err + } + sdb := &StateDB{ + Db: db, + trie: tr, + originalRoot: root, + snaps: snaps, + stateObjects: make(map[common.Address]*stateObject), + stateObjectsPending: make(map[common.Address]struct{}), + stateObjectsDirty: make(map[common.Address]struct{}), + logs: make(map[common.Hash][]*types.Log), + preimages: make(map[common.Hash][]byte), + journal: newJournal(), + accessList: newAccessList(), + hasher: crypto.NewKeccakState(), + loadRemoteAccountsIntoStateObjects: true, + } + /*if sdb.snaps != nil { + if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap != nil { + sdb.snapDestructs = make(map[common.Hash]struct{}) + sdb.snapAccounts = make(map[common.Hash][]byte) + sdb.snapStorage = make(map[common.Hash]map[common.Hash][]byte) + } + }*/ + return sdb, nil +} + +// setError remembers the first non-nil error it is called with. 
+func (s *StateDB) setError(err error) { + if s.dbErr == nil { + s.dbErr = err + } +} + +func (s *StateDB) Error() error { + return s.dbErr +} + +func (s *StateDB) AddLog(log *types.Log) { + log.TxHash = s.thash + log.TxIndex = uint(s.txIndex) + log.Index = s.logSize + s.logs[s.thash] = append(s.logs[s.thash], log) + s.logSize++ +} + +func (s *StateDB) GetLogs(hash common.Hash, blockHash common.Hash) []*types.Log { + logs := s.logs[hash] + for _, l := range logs { + l.BlockHash = blockHash + } + return logs +} + +func (s *StateDB) Logs() []*types.Log { + var logs []*types.Log + for _, lgs := range s.logs { + logs = append(logs, lgs...) + } + return logs +} + +func (s *StateDB) GetTrie() Trie { + return s.trie +} + +func (s *StateDB) GetTrieRootElement() ([]byte, error) { + return rlp.EncodeToBytes(s.trie.GetRoot()) +} + +// AddPreimage records a SHA3 preimage seen by the VM. +func (s *StateDB) AddPreimage(hash common.Hash, preimage []byte) { + if _, ok := s.preimages[hash]; !ok { + s.journal.append(addPreimageChange{hash: hash}) + pi := make([]byte, len(preimage)) + copy(pi, preimage) + s.preimages[hash] = pi + } +} + +// Preimages returns a list of SHA3 preimages that have been submitted. +func (s *StateDB) Preimages() map[common.Hash][]byte { + return s.preimages +} + +// AddRefund adds gas to the refund counter +func (s *StateDB) AddRefund(gas uint64) { + s.refund += gas +} + +// SubRefund removes gas from the refund counter. +// This method will panic if the refund counter goes below zero +func (s *StateDB) SubRefund(gas uint64) { + if gas > s.refund { + panic(fmt.Sprintf("Refund counter below zero (gas: %d > refund: %d)", gas, s.refund)) + } + s.refund -= gas +} + +// Exist reports whether the given account address exists in the state. +// Notably this also returns true for suicided accounts. 
+func (s *StateDB) Exist(addr common.Address) bool { + return s.getStateObject(addr) != nil +} + +// Empty returns whether the state object is either non-existent +// or empty according to the EIP161 specification (balance = nonce = code = 0) +func (s *StateDB) Empty(addr common.Address) bool { + so := s.getStateObject(addr) + return so == nil || so.empty() +} + +// GetBalance retrieves the balance from the given address or 0 if object not found +func (s *StateDB) GetBalance(addr common.Address) *big.Int { + stateObject := s.getStateObject(addr) + if stateObject != nil { + return stateObject.Balance() + } + return common.Big0 +} + +func (s *StateDB) GetNonce(addr common.Address) uint64 { + stateObject := s.getStateObject(addr) + if stateObject != nil { + return stateObject.Nonce() + } + + return 0 +} + +// TxIndex returns the current transaction index set by Prepare. +func (s *StateDB) TxIndex() int { + return s.txIndex +} + +func (s *StateDB) GetCode(addr common.Address) []byte { + stateObject := s.getStateObject(addr) + if stateObject != nil { + return stateObject.Code(s.Db) + } + return nil +} + +func (s *StateDB) GetCodeSize(addr common.Address) int { + stateObject := s.getStateObject(addr) + if stateObject != nil { + return stateObject.CodeSize(s.Db) + } + return 0 +} + +func (s *StateDB) GetCodeHash(addr common.Address) common.Hash { + stateObject := s.getStateObject(addr) + if stateObject == nil { + return common.Hash{} + } + return common.BytesToHash(stateObject.CodeHash()) +} + +// GetState retrieves a value from the given account's storage trie. +func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash { + stateObject := s.getStateObject(addr) + if stateObject != nil { + return stateObject.GetState(s.Db, hash) + } + return common.Hash{} +} + +// GetProof returns the Merkle proof for a given account. 
+func (s *StateDB) GetProof(addr common.Address) ([][]byte, []byte, [][]byte, bool, error) { + return s.GetProofByHash(crypto.Keccak256Hash(addr.Bytes())) +} + +// GetProofByHash returns the Merkle proof for a given account. +func (s *StateDB) GetProofByHash(addrHash common.Hash) ([][]byte, []byte, [][]byte, bool, error) { + var proof proofList + neighbourNode, extNibbles, isLastLeaf, err := s.trie.Prove(addrHash[:], 0, &proof) + return proof, neighbourNode, extNibbles, isLastLeaf, err +} + +// GetStorageProof returns the Merkle proof for given storage slot. +func (s *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte, []byte, [][]byte, bool, error) { + var proof proofList + trie := s.StorageTrie(a) + if trie == nil { + return proof, nil, nil, false, errors.New("storage trie for requested address does not exist") + } + var newKey []byte + if !oracle.PreventHashingInSecureTrie { + newKey = crypto.Keccak256(key.Bytes()) + } else { + newKey = key.Bytes() + } + neighbourNode, extNibbles, isLastLeaf, err := trie.Prove(newKey, 0, &proof) + return proof, neighbourNode, extNibbles, isLastLeaf, err +} + +func (s *StateDB) GetNodeByNibbles(a common.Address, key []byte) ([]byte, error) { + trie := s.StorageTrie(a) + return trie.GetNodeByNibbles(key) +} + +// GetCommittedState retrieves a value from the given account's committed storage trie. +func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash { + stateObject := s.getStateObject(addr) + if stateObject != nil { + return stateObject.GetCommittedState(s.Db, hash) + } + return common.Hash{} +} + +// Database retrieves the low level database supporting the lower level trie ops. +func (s *StateDB) Database() Database { + return s.Db +} + +// StorageTrie returns the storage trie of an account. +// The return value is a copy and is nil for non-existent accounts. 
+func (s *StateDB) StorageTrie(addr common.Address) Trie { + stateObject := s.getStateObject(addr) + if stateObject == nil { + return nil + } + cpy := stateObject.deepCopy(s) + cpy.updateTrie(s.Db) + return cpy.getTrie(s.Db) +} + +func (s *StateDB) HasSuicided(addr common.Address) bool { + stateObject := s.getStateObject(addr) + if stateObject != nil { + return stateObject.suicided + } + return false +} + +/* + * SETTERS + */ + +// AddBalance adds amount to the account associated with addr. +func (s *StateDB) AddBalance(addr common.Address, amount *big.Int) { + s.SetStateObjectIfExists(addr) + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + stateObject.AddBalance(amount) + } +} + +// SubBalance subtracts amount from the account associated with addr. +func (s *StateDB) SubBalance(addr common.Address, amount *big.Int) { + s.SetStateObjectIfExists(addr) + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + stateObject.SubBalance(amount) + } +} + +func (s *StateDB) SetBalance(addr common.Address, amount *big.Int) { + s.SetStateObjectIfExists(addr) + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + stateObject.SetBalance(amount) + } +} + +func (s *StateDB) SetNonce(addr common.Address, nonce uint64) { + s.SetStateObjectIfExists(addr) + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + stateObject.SetNonce(nonce) + } +} + +func (s *StateDB) SetCode(addr common.Address, code []byte) { + s.SetStateObjectIfExists(addr) + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + stateObject.SetCode(crypto.Keccak256Hash(code), code) + } +} + +func (s *StateDB) SetState(addr common.Address, key, value common.Hash) { + s.SetStateObjectIfExists(addr) + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + stateObject.SetState(s.Db, key, value) + } +} + +// SetStorage replaces the entire storage for the specified account with given +// storage. 
This function should only be used for debugging. +func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common.Hash) { + s.SetStateObjectIfExists(addr) + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + stateObject.SetStorage(storage) + } +} + +// Retrieve an account from chain instead of +// creating a new account in GetOrNewStateObject (called from example from SetBalance). +// The reason the new account is created without this call is that the local statedb.stateObjects +// is populated only with the objects that are created locally. +func (s *StateDB) SetStateObjectIfExists(addr common.Address) { + if s.loadRemoteAccountsIntoStateObjects { + ap := oracle.PrefetchAccount(s.Db.BlockNumber, addr, nil) + if len(ap) > 0 { + ret, _ := hex.DecodeString(ap[len(ap)-1][2:]) + s.setStateObjectFromEncoding(addr, ret) + } + } +} + +// Suicide marks the given account as suicided. +// This clears the account balance. +// +// The account's state object is still available until the state is committed, +// getStateObject will return a non-nil account after Suicide. +func (s *StateDB) Suicide(addr common.Address) bool { + stateObject := s.getStateObject(addr) + if stateObject == nil { + return false + } + s.journal.append(suicideChange{ + account: &addr, + prev: stateObject.suicided, + prevbalance: new(big.Int).Set(stateObject.Balance()), + }) + stateObject.markSuicided() + stateObject.data.Balance = new(big.Int) + + return true +} + +// Added for MPT generator: +func (s *StateDB) DeleteAccount(addr common.Address) bool { + stateObject := s.getStateObject(addr) + if stateObject == nil { + return false + } + s.deleteStateObject(stateObject) + + return true +} + +// +// Setting, updating & deleting state object methods. +// + +// updateStateObject writes the given object to the trie. 
+func (s *StateDB) updateStateObject(obj *stateObject) {
+	// Track the amount of time wasted on updating the account from the trie
+	if metrics.EnabledExpensive {
+		defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now())
+	}
+	// Encode the account and update the account trie
+	addr := obj.Address()
+
+	data, err := rlp.EncodeToBytes(obj)
+	if err != nil {
+		panic(fmt.Errorf("can't encode object at %x: %v", addr[:], err))
+	}
+	if err = s.trie.TryUpdateAlwaysHash(addr[:], data); err != nil {
+		s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err))
+	}
+
+	// If state snapshotting is active, cache the data til commit. Note, this
+	// update mechanism is not symmetric to the deletion, because whereas it is
+	// enough to track account updates at commit time, deletions need tracking
+	// at transaction boundary level to ensure we capture state clearing.
+	/*if s.snap != nil {
+		s.snapAccounts[obj.addrHash] = snapshot.SlimAccountRLP(obj.data.Nonce, obj.data.Balance, obj.data.Root, obj.data.CodeHash)
+	}*/
+}
+
+// deleteStateObject removes the given object from the state trie.
+func (s *StateDB) deleteStateObject(obj *stateObject) {
+	// Track the amount of time wasted on deleting the account from the trie
+	if metrics.EnabledExpensive {
+		defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now())
+	}
+	// Delete the account from the trie
+	addr := obj.Address()
+	// Get absence proof of account in case the deletion needs the sister node.
+	oracle.PrefetchAccount(big.NewInt(s.Db.BlockNumber.Int64()+1), addr, trie.GenPossibleShortNodePreimage)
+	if err := s.trie.TryDelete(addr[:]); err != nil {
+		s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err))
+	}
+}
+
+// getStateObject retrieves a state object given by the address, returning nil if
+// the object is not found or was deleted in this execution context.
If you need +// to differentiate between non-existent/just-deleted, use getDeletedStateObject. +func (s *StateDB) getStateObject(addr common.Address) *stateObject { + if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted { + return obj + } + return nil +} + +// getDeletedStateObject is similar to getStateObject, but instead of returning +// nil for a deleted state object, it returns the actual object with the deleted +// flag set. This is needed by the state journal to revert to the correct s- +// destructed object instead of wiping all knowledge about the state object. +func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject { + // Prefer live objects if any is available + if obj := s.stateObjects[addr]; obj != nil { + return obj + } + // If no live objects are available, attempt to use snapshots + var ( + data *Account + err error + ) + /*if s.snap != nil { + if metrics.EnabledExpensive { + defer func(start time.Time) { s.SnapshotAccountReads += time.Since(start) }(time.Now()) + } + var acc *snapshot.Account + if acc, err = s.snap.Account(crypto.HashData(s.hasher, addr.Bytes())); err == nil { + if acc == nil { + return nil + } + data = &Account{ + Nonce: acc.Nonce, + Balance: acc.Balance, + CodeHash: acc.CodeHash, + Root: common.BytesToHash(acc.Root), + } + if len(data.CodeHash) == 0 { + data.CodeHash = emptyCodeHash + } + if data.Root == (common.Hash{}) { + data.Root = emptyRoot + } + } + }*/ + // If snapshot unavailable or reading from it failed, load from the database + if s.snap == nil || err != nil { + if metrics.EnabledExpensive { + defer func(start time.Time) { s.AccountReads += time.Since(start) }(time.Now()) + } + oracle.PrefetchAccount(s.Db.BlockNumber, addr, nil) + enc, err := s.trie.TryGet(addr.Bytes()) + if err != nil { + s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %v", addr.Bytes(), err)) + return nil + } + if len(enc) == 0 { + return nil + } + data = new(Account) + if err := rlp.DecodeBytes(enc, data); 
err != nil { + log.Error("Failed to decode state object", "addr", addr, "err", err) + return nil + } + } + // Insert into the live set + obj := newObject(s, addr, *data) + s.setStateObject(obj) + return obj +} + +// Added for MPT generator. This loads account into stateObjects - if an account is not +// in stateObjects, a new account is created in GetOrNewStateObject. +func (s *StateDB) setStateObjectFromEncoding(addr common.Address, enc []byte) error { + if len(enc) == 0 { + return errors.New("encoding of account is of length 0") + } + data := new(Account) + keyLen := enc[2] - 128 + accData := enc[3+keyLen+2:] + + if err := rlp.DecodeBytes(accData, data); err != nil { + // If it's not account RLP, nothing is set (in stateObjects) - this is to prevent + // the need of checking whether enc is account RLP or something else (like branch RLP). + fmt.Println("failed to decode account") + return nil + } + + obj := newObject(s, addr, *data) + s.setStateObject(obj) + + return nil +} + +func (s *StateDB) setStateObject(object *stateObject) { + s.stateObjects[object.Address()] = object +} + +// GetOrNewStateObject retrieves a state object or create a new state object if nil. +func (s *StateDB) GetOrNewStateObject(addr common.Address) *stateObject { + stateObject := s.getStateObject(addr) + if stateObject == nil { + stateObject, _ = s.createObject(addr) + } + return stateObject +} + +// createObject creates a new state object. If there is an existing account with +// the given address, it is overwritten and returned as the second return value. +func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) { + prev = s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that! 
+ + var prevdestruct bool + if s.snap != nil && prev != nil { + _, prevdestruct = s.snapDestructs[prev.addrHash] + if !prevdestruct { + s.snapDestructs[prev.addrHash] = struct{}{} + } + } + newobj = newObject(s, addr, Account{}) + if prev == nil { + s.journal.append(createObjectChange{account: &addr}) + } else { + s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct}) + } + s.setStateObject(newobj) + if prev != nil && !prev.deleted { + return newobj, prev + } + return newobj, nil +} + +// CreateAccount explicitly creates a state object. If a state object with the address +// already exists the balance is carried over to the new account. +// +// CreateAccount is called during the EVM CREATE operation. The situation might arise that +// a contract does the following: +// +// 1. sends funds to sha(account ++ (nonce + 1)) +// 2. tx_create(sha(account ++ nonce)) (note that this gets the address of 1) +// +// Carrying over the balance ensures that Ether doesn't disappear. +func (s *StateDB) CreateAccount(addr common.Address) { + newObj, prev := s.createObject(addr) + if prev != nil { + newObj.setBalance(prev.data.Balance) + } +} + +func (db *StateDB) ForEachStorage(addr common.Address, cb func(key, value common.Hash) bool) error { + /*so := db.getStateObject(addr) + if so == nil { + return nil + } + it := trie.NewIterator(so.getTrie(db.db).NodeIterator(nil)) + + for it.Next() { + key := common.BytesToHash(db.trie.GetKey(it.Key)) + if value, dirty := so.dirtyStorage[key]; dirty { + if !cb(key, value) { + return nil + } + continue + } + + if len(it.Value) > 0 { + _, content, _, err := rlp.Split(it.Value) + if err != nil { + return err + } + if !cb(key, common.BytesToHash(content)) { + return nil + } + } + }*/ + fmt.Println("ForEachStorage is BROKEN!!") + return nil +} + +// Copy creates a deep, independent copy of the state. +// Snapshots of the copied state cannot be applied to the copy. 
+func (s *StateDB) Copy() *StateDB { + // Copy all the basic fields, initialize the memory ones + state := &StateDB{ + Db: s.Db, + trie: s.Db.CopyTrie(s.trie), + stateObjects: make(map[common.Address]*stateObject, len(s.journal.dirties)), + stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)), + stateObjectsDirty: make(map[common.Address]struct{}, len(s.journal.dirties)), + refund: s.refund, + logs: make(map[common.Hash][]*types.Log, len(s.logs)), + logSize: s.logSize, + preimages: make(map[common.Hash][]byte, len(s.preimages)), + journal: newJournal(), + hasher: crypto.NewKeccakState(), + } + // Copy the dirty states, logs, and preimages + for addr := range s.journal.dirties { + // As documented [here](https://github.com/ethereum/go-ethereum/pull/16485#issuecomment-380438527), + // and in the Finalise-method, there is a case where an object is in the journal but not + // in the stateObjects: OOG after touch on ripeMD prior to Byzantium. Thus, we need to check for + // nil + if object, exist := s.stateObjects[addr]; exist { + // Even though the original object is dirty, we are not copying the journal, + // so we need to make sure that anyside effect the journal would have caused + // during a commit (or similar op) is already applied to the copy. + state.stateObjects[addr] = object.deepCopy(state) + + state.stateObjectsDirty[addr] = struct{}{} // Mark the copy dirty to force internal (code/state) commits + state.stateObjectsPending[addr] = struct{}{} // Mark the copy pending to force external (account) commits + } + } + // Above, we don't copy the actual journal. This means that if the copy is copied, the + // loop above will be a no-op, since the copy's journal is empty. 
+ // Thus, here we iterate over stateObjects, to enable copies of copies + for addr := range s.stateObjectsPending { + if _, exist := state.stateObjects[addr]; !exist { + state.stateObjects[addr] = s.stateObjects[addr].deepCopy(state) + } + state.stateObjectsPending[addr] = struct{}{} + } + for addr := range s.stateObjectsDirty { + if _, exist := state.stateObjects[addr]; !exist { + state.stateObjects[addr] = s.stateObjects[addr].deepCopy(state) + } + state.stateObjectsDirty[addr] = struct{}{} + } + for hash, logs := range s.logs { + cpy := make([]*types.Log, len(logs)) + for i, l := range logs { + cpy[i] = new(types.Log) + *cpy[i] = *l + } + state.logs[hash] = cpy + } + for hash, preimage := range s.preimages { + state.preimages[hash] = preimage + } + // Do we need to copy the access list? In practice: No. At the start of a + // transaction, the access list is empty. In practice, we only ever copy state + // _between_ transactions/blocks, never in the middle of a transaction. + // However, it doesn't cost us much to copy an empty list, so we do it anyway + // to not blow up if we ever decide copy it in the middle of a transaction + state.accessList = s.accessList.Copy() + + // If there's a prefetcher running, make an inactive copy of it that can + // only access data but does not actively preload (since the user will not + // know that they need to explicitly terminate an active copy). + if s.prefetcher != nil { + state.prefetcher = s.prefetcher.copy() + } + if s.snaps != nil { + // In order for the miner to be able to use and make additions + // to the snapshot tree, we need to copy that aswell. 
+ // Otherwise, any block mined by ourselves will cause gaps in the tree, + // and force the miner to operate trie-backed only + state.snaps = s.snaps + state.snap = s.snap + // deep copy needed + state.snapDestructs = make(map[common.Hash]struct{}) + for k, v := range s.snapDestructs { + state.snapDestructs[k] = v + } + state.snapAccounts = make(map[common.Hash][]byte) + for k, v := range s.snapAccounts { + state.snapAccounts[k] = v + } + state.snapStorage = make(map[common.Hash]map[common.Hash][]byte) + for k, v := range s.snapStorage { + temp := make(map[common.Hash][]byte) + for kk, vv := range v { + temp[kk] = vv + } + state.snapStorage[k] = temp + } + } + return state +} + +// Snapshot returns an identifier for the current revision of the state. +func (s *StateDB) Snapshot() int { + id := s.nextRevisionId + s.nextRevisionId++ + s.validRevisions = append(s.validRevisions, revision{id, s.journal.length()}) + return id +} + +// RevertToSnapshot reverts all state changes made since the given revision. +func (s *StateDB) RevertToSnapshot(revid int) { + // Find the snapshot in the stack of valid snapshots. + idx := sort.Search(len(s.validRevisions), func(i int) bool { + return s.validRevisions[i].id >= revid + }) + if idx == len(s.validRevisions) || s.validRevisions[idx].id != revid { + panic(fmt.Errorf("revision id %v cannot be reverted", revid)) + } + snapshot := s.validRevisions[idx].journalIndex + + // Replay the journal to undo changes and remove invalidated snapshots + s.journal.revert(s, snapshot) + s.validRevisions = s.validRevisions[:idx] +} + +// GetRefund returns the current value of the refund counter. +func (s *StateDB) GetRefund() uint64 { + return s.refund +} + +// Finalise finalises the state by removing the s destructed objects and clears +// the journal as well as the refunds. Finalise, however, will not push any updates +// into the tries just yet. Only IntermediateRoot or Commit will do that. 
+func (s *StateDB) Finalise(deleteEmptyObjects bool) { + addressesToPrefetch := make([][]byte, 0, len(s.journal.dirties)) + for addr := range s.journal.dirties { + obj, exist := s.stateObjects[addr] + if !exist { + // ripeMD is 'touched' at block 1714175, in tx 0x1237f737031e40bcde4a8b7e717b2d15e3ecadfe49bb1bbc71ee9deb09c6fcf2 + // That tx goes out of gas, and although the notion of 'touched' does not exist there, the + // touch-event will still be recorded in the journal. Since ripeMD is a special snowflake, + // it will persist in the journal even though the journal is reverted. In this special circumstance, + // it may exist in `s.journal.dirties` but not in `s.stateObjects`. + // Thus, we can safely ignore it here + continue + } + if obj.suicided || (deleteEmptyObjects && obj.empty()) { + obj.deleted = true + + // If state snapshotting is active, also mark the destruction there. + // Note, we can't do this only at the end of a block because multiple + // transactions within the same block might self destruct and then + // ressurrect an account; but the snapshotter needs both events. + if s.snap != nil { + s.snapDestructs[obj.addrHash] = struct{}{} // We need to maintain account deletions explicitly (will remain set indefinitely) + delete(s.snapAccounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a ressurrect) + delete(s.snapStorage, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a ressurrect) + } + } else { + obj.finalise(true) // Prefetch slots in the background + } + s.stateObjectsPending[addr] = struct{}{} + s.stateObjectsDirty[addr] = struct{}{} + + // At this point, also ship the address off to the precacher. 
The precacher + // will start loading tries, and when the change is eventually committed, + // the commit-phase will be a lot faster + addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure + } + if s.prefetcher != nil && len(addressesToPrefetch) > 0 { + s.prefetcher.prefetch(s.originalRoot, addressesToPrefetch) + } + // Invalidate journal because reverting across transactions is not allowed. + s.clearJournalAndRefund() +} + +// IntermediateRoot computes the current root hash of the state trie. +// It is called in between transactions to get the root hash that +// goes into transaction receipts. +func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { + // Finalise all the dirty storage states and write them into the tries + s.Finalise(deleteEmptyObjects) + + // If there was a trie prefetcher operating, it gets aborted and irrevocably + // modified after we start retrieving tries. Remove it from the statedb after + // this round of use. + // + // This is weird pre-byzantium since the first tx runs with a prefetcher and + // the remainder without, but pre-byzantium even the initial prefetcher is + // useless, so no sleep lost. + prefetcher := s.prefetcher + if s.prefetcher != nil { + defer func() { + s.prefetcher.close() + s.prefetcher = nil + }() + } + // Although naively it makes sense to retrieve the account trie and then do + // the contract storage and account updates sequentially, that short circuits + // the account prefetcher. Instead, let's process all the storage updates + // first, giving the account prefeches just a few more milliseconds of time + // to pull useful data from disk. + for addr := range s.stateObjectsPending { + if obj := s.stateObjects[addr]; !obj.deleted { + obj.updateRoot(s.Db) + } + } + // Now we're about to start to write changes to the trie. The trie is so far + // _untouched_. 
We can check with the prefetcher, if it can give us a trie + // which has the same root, but also has some content loaded into it. + if prefetcher != nil { + if trie := prefetcher.trie(s.originalRoot); trie != nil { + s.trie = trie + } + } + usedAddrs := make([][]byte, 0, len(s.stateObjectsPending)) + for addr := range s.stateObjectsPending { + if obj := s.stateObjects[addr]; obj.deleted { + s.deleteStateObject(obj) + } else { + s.updateStateObject(obj) + } + usedAddrs = append(usedAddrs, common.CopyBytes(addr[:])) // Copy needed for closure + } + if prefetcher != nil { + prefetcher.used(s.originalRoot, usedAddrs) + } + if len(s.stateObjectsPending) > 0 { + s.stateObjectsPending = make(map[common.Address]struct{}) + } + // Track the amount of time wasted on hashing the account trie + if metrics.EnabledExpensive { + defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now()) + } + return s.trie.Hash() +} + +// Prepare sets the current transaction hash and index which are +// used when the EVM emits new state logs. +func (s *StateDB) Prepare(thash common.Hash, ti int) { + s.thash = thash + s.txIndex = ti + s.accessList = newAccessList() +} + +func (s *StateDB) clearJournalAndRefund() { + if len(s.journal.entries) > 0 { + s.journal = newJournal() + s.refund = 0 + } + s.validRevisions = s.validRevisions[:0] // Snapshots can be created without journal entires +} + +// Commit writes the state to the underlying in-memory trie database. 
+func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { + if s.dbErr != nil { + return common.Hash{}, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr) + } + // Finalize any pending changes and merge everything into the tries + s.IntermediateRoot(deleteEmptyObjects) + + for addr := range s.stateObjectsDirty { + if obj := s.stateObjects[addr]; !obj.deleted { + fmt.Println("dirty state object", addr) + // Write any contract code associated with the state object + if obj.code != nil && obj.dirtyCode { + fmt.Println("write code", common.BytesToHash(obj.CodeHash())) + } + // Write any storage changes in the state object to its storage trie + if err := obj.CommitTrie(s.Db); err != nil { + return common.Hash{}, err + } + } + } + + // Commit objects to the trie, measuring the elapsed time + /*codeWriter := s.db.TrieDB().DiskDB().NewBatch() + for addr := range s.stateObjectsDirty { + if obj := s.stateObjects[addr]; !obj.deleted { + // Write any contract code associated with the state object + if obj.code != nil && obj.dirtyCode { + rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code) + obj.dirtyCode = false + } + // Write any storage changes in the state object to its storage trie + if err := obj.CommitTrie(s.db); err != nil { + return common.Hash{}, err + } + } + } + if len(s.stateObjectsDirty) > 0 { + s.stateObjectsDirty = make(map[common.Address]struct{}) + } + if codeWriter.ValueSize() > 0 { + if err := codeWriter.Write(); err != nil { + log.Crit("Failed to commit dirty codes", "error", err) + } + }*/ + + // Write the account trie changes, measuing the amount of wasted time + var start time.Time + if metrics.EnabledExpensive { + start = time.Now() + } + // The onleaf func is called _serially_, so we can reuse the same account + // for unmarshalling every time. 
+ var account Account + root, err := s.trie.Commit(func(_ [][]byte, _ []byte, leaf []byte, parent common.Hash) error { + if err := rlp.DecodeBytes(leaf, &account); err != nil { + return nil + } + /*if account.Root != emptyRoot { + s.db.TrieDB().Reference(account.Root, parent) + }*/ + return nil + }) + if metrics.EnabledExpensive { + s.AccountCommits += time.Since(start) + } + // If snapshotting is enabled, update the snapshot tree with this new version + /*if s.snap != nil { + if metrics.EnabledExpensive { + defer func(start time.Time) { s.SnapshotCommits += time.Since(start) }(time.Now()) + } + // Only update if there's a state transition (skip empty Clique blocks) + if parent := s.snap.Root(); parent != root { + if err := s.snaps.Update(root, parent, s.snapDestructs, s.snapAccounts, s.snapStorage); err != nil { + log.Warn("Failed to update snapshot tree", "from", parent, "to", root, "err", err) + } + // Keep 128 diff layers in the memory, persistent layer is 129th. + // - head layer is paired with HEAD state + // - head-1 layer is paired with HEAD-1 state + // - head-127 layer(bottom-most diff layer) is paired with HEAD-127 state + if err := s.snaps.Cap(root, 128); err != nil { + log.Warn("Failed to cap snapshot tree", "root", root, "layers", 128, "err", err) + } + } + s.snap, s.snapDestructs, s.snapAccounts, s.snapStorage = nil, nil, nil, nil + }*/ + //fmt.Println("Commit doesn't work!!!") + return root, err +} + +// PrepareAccessList handles the preparatory steps for executing a state transition with +// regards to both EIP-2929 and EIP-2930: +// +// - Add sender to access list (2929) +// - Add destination to access list (2929) +// - Add precompiles to access list (2929) +// - Add the contents of the optional tx access list (2930) +// +// This method should only be called if Berlin/2929+2930 is applicable at the current number. 
+func (s *StateDB) PrepareAccessList(sender common.Address, dst *common.Address, precompiles []common.Address, list types.AccessList) { + s.AddAddressToAccessList(sender) + if dst != nil { + s.AddAddressToAccessList(*dst) + // If it's a create-tx, the destination will be added inside evm.create + } + for _, addr := range precompiles { + s.AddAddressToAccessList(addr) + } + for _, el := range list { + s.AddAddressToAccessList(el.Address) + for _, key := range el.StorageKeys { + s.AddSlotToAccessList(el.Address, key) + } + } +} + +// AddAddressToAccessList adds the given address to the access list +func (s *StateDB) AddAddressToAccessList(addr common.Address) { + if s.accessList.AddAddress(addr) { + s.journal.append(accessListAddAccountChange{&addr}) + } +} + +// AddSlotToAccessList adds the given (address, slot)-tuple to the access list +func (s *StateDB) AddSlotToAccessList(addr common.Address, slot common.Hash) { + addrMod, slotMod := s.accessList.AddSlot(addr, slot) + if addrMod { + // In practice, this should not happen, since there is no way to enter the + // scope of 'address' without having the 'address' become already added + // to the access list (via call-variant, create, etc). + // Better safe than sorry, though + s.journal.append(accessListAddAccountChange{&addr}) + } + if slotMod { + s.journal.append(accessListAddSlotChange{ + address: &addr, + slot: &slot, + }) + } +} + +// AddressInAccessList returns true if the given address is in the access list. +func (s *StateDB) AddressInAccessList(addr common.Address) bool { + return s.accessList.ContainsAddress(addr) +} + +// SlotInAccessList returns true if the given (address, slot)-tuple is in the access list. 
+func (s *StateDB) SlotInAccessList(addr common.Address, slot common.Hash) (addressPresent bool, slotPresent bool) { + return s.accessList.Contains(addr, slot) +} + +// MPT generator: used only because the tests have been written by creating a new account +// for each address - changing this would mean the tests would change. +func (s *StateDB) DisableLoadingRemoteAccounts() { + s.loadRemoteAccountsIntoStateObjects = false +} diff --git a/mpt-witness-generator/trie/committer.go b/mpt-witness-generator/trie/committer.go new file mode 100644 index 0000000000..61cd82017e --- /dev/null +++ b/mpt-witness-generator/trie/committer.go @@ -0,0 +1,271 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package trie + +import ( + "errors" + "fmt" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "golang.org/x/crypto/sha3" +) + +// leafChanSize is the size of the leafCh. It's a pretty arbitrary number, to allow +// some parallelism but not incur too much memory overhead. 
+const leafChanSize = 200 + +// leaf represents a trie leaf value +type leaf struct { + size int // size of the rlp data (estimate) + hash common.Hash // hash of rlp data + node Node // the node to commit +} + +// committer is a type used for the trie Commit operation. A committer has some +// internal preallocated temp space, and also a callback that is invoked when +// leaves are committed. The leafs are passed through the `leafCh`, to allow +// some level of parallelism. +// By 'some level' of parallelism, it's still the case that all leaves will be +// processed sequentially - onleaf will never be called in parallel or out of order. +type committer struct { + tmp sliceBuffer + sha crypto.KeccakState + + onleaf LeafCallback + leafCh chan *leaf +} + +// committers live in a global sync.Pool +var committerPool = sync.Pool{ + New: func() interface{} { + return &committer{ + tmp: make(sliceBuffer, 0, 550), // cap is as large as a full fullNode. + sha: sha3.NewLegacyKeccak256().(crypto.KeccakState), + } + }, +} + +// newCommitter creates a new committer or picks one from the pool. +func newCommitter() *committer { + return committerPool.Get().(*committer) +} + +func returnCommitterToPool(h *committer) { + h.onleaf = nil + h.leafCh = nil + committerPool.Put(h) +} + +// commit collapses a node down into a hash node and inserts it into the database +func (c *committer) Commit(n Node, db *Database) (HashNode, error) { + if db == nil { + return nil, errors.New("no db provided") + } + h, err := c.commit(n, db) + if err != nil { + return nil, err + } + return h.(HashNode), nil +} + +// commit collapses a node down into a hash node and inserts it into the database +func (c *committer) commit(n Node, db *Database) (Node, error) { + // if this path is clean, use available cached data + hash, dirty := n.cache() + if hash != nil && !dirty { + return hash, nil + } + // Commit children, then parent, and remove remove the dirty flag. 
+ switch cn := n.(type) { + case *ShortNode: + // Commit child + collapsed := cn.copy() + + // If the child is fullnode, recursively commit. + // Otherwise it can only be hashNode or valueNode. + if _, ok := cn.Val.(*FullNode); ok { + childV, err := c.commit(cn.Val, db) + if err != nil { + return nil, err + } + collapsed.Val = childV + } + // The key needs to be copied, since we're delivering it to database + collapsed.Key = HexToCompact(cn.Key) + hashedNode := c.store(collapsed, db) + if hn, ok := hashedNode.(HashNode); ok { + return hn, nil + } + return collapsed, nil + case *FullNode: + hashedKids, err := c.commitChildren(cn, db) + if err != nil { + return nil, err + } + collapsed := cn.copy() + collapsed.Children = hashedKids + + hashedNode := c.store(collapsed, db) + if hn, ok := hashedNode.(HashNode); ok { + return hn, nil + } + return collapsed, nil + case HashNode: + return cn, nil + default: + // nil, valuenode shouldn't be committed + panic(fmt.Sprintf("%T: invalid node: %v", n, n)) + } +} + +// commitChildren commits the children of the given fullnode +func (c *committer) commitChildren(n *FullNode, db *Database) ([17]Node, error) { + var children [17]Node + for i := 0; i < 16; i++ { + child := n.Children[i] + if child == nil { + continue + } + // If it's the hashed child, save the hash value directly. + // Note: it's impossible that the child in range [0, 15] + // is a valuenode. + if hn, ok := child.(HashNode); ok { + children[i] = hn + continue + } + // Commit the child recursively and store the "hashed" value. + // Note the returned node can be some embedded nodes, so it's + // possible the type is not hashnode. + hashed, err := c.commit(child, db) + if err != nil { + return children, err + } + children[i] = hashed + } + // For the 17th child, it's possible the type is valuenode. 
+ if n.Children[16] != nil { + children[16] = n.Children[16] + } + return children, nil +} + +// store hashes the node n and if we have a storage layer specified, it writes +// the key/value pair to it and tracks any node->child references as well as any +// node->external trie references. +func (c *committer) store(n Node, db *Database) Node { + // Larger nodes are replaced by their hash and stored in the database. + var ( + hash, _ = n.cache() + size int + ) + if hash == nil { + // This was not generated - must be a small node stored in the parent. + // In theory we should apply the leafCall here if it's not nil(embedded + // node usually contains value). But small value(less than 32bytes) is + // not our target. + return n + } else { + // We have the hash already, estimate the RLP encoding-size of the node. + // The size is used for mem tracking, does not need to be exact + size = estimateSize(n) + } + // If we're using channel-based leaf-reporting, send to channel. + // The leaf channel will be active only when there an active leaf-callback + if c.leafCh != nil { + c.leafCh <- &leaf{ + size: size, + hash: common.BytesToHash(hash), + node: n, + } + } else if db != nil { + // No leaf-callback used, but there's still a database. Do serial + // insertion + db.lock.Lock() + db.insert(common.BytesToHash(hash), size, n) + db.lock.Unlock() + } + return hash +} + +// commitLoop does the actual insert + leaf callback for nodes. +func (c *committer) commitLoop(db *Database) { + for item := range c.leafCh { + var ( + hash = item.hash + size = item.size + n = item.node + ) + // We are pooling the trie nodes into an intermediate memory cache + db.lock.Lock() + db.insert(hash, size, n) + db.lock.Unlock() + + if c.onleaf != nil { + switch n := n.(type) { + case *ShortNode: + if child, ok := n.Val.(ValueNode); ok { + c.onleaf(nil, nil, child, hash) + } + case *FullNode: + // For children in range [0, 15], it's impossible + // to contain valuenode. Only check the 17th child. 
+ if n.Children[16] != nil { + c.onleaf(nil, nil, n.Children[16].(ValueNode), hash) + } + } + } + } +} + +func (c *committer) makeHashNode(data []byte) HashNode { + n := make(HashNode, c.sha.Size()) + c.sha.Reset() + c.sha.Write(data) + c.sha.Read(n) + return n +} + +// estimateSize estimates the size of an rlp-encoded node, without actually +// rlp-encoding it (zero allocs). This method has been experimentally tried, and with a trie +// with 1000 leafs, the only errors above 1% are on small shortnodes, where this +// method overestimates by 2 or 3 bytes (e.g. 37 instead of 35) +func estimateSize(n Node) int { + switch n := n.(type) { + case *ShortNode: + // A short node contains a compacted key, and a value. + return 3 + len(n.Key) + estimateSize(n.Val) + case *FullNode: + // A full node contains up to 16 hashes (some nils), and a key + s := 3 + for i := 0; i < 16; i++ { + if child := n.Children[i]; child != nil { + s += estimateSize(child) + } else { + s++ + } + } + return s + case ValueNode: + return 1 + len(n) + case HashNode: + return 1 + len(n) + default: + panic(fmt.Sprintf("node type %T", n)) + } +} diff --git a/mpt-witness-generator/trie/database.go b/mpt-witness-generator/trie/database.go new file mode 100644 index 0000000000..65dd5de1e9 --- /dev/null +++ b/mpt-witness-generator/trie/database.go @@ -0,0 +1,101 @@ +package trie + +import ( + "bytes" + "io" + "math/big" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/rlp" + "github.com/privacy-scaling-explorations/mpt-witness-generator/oracle" +) + +// rawNode is a simple binary blob used to differentiate between collapsed trie +// nodes and already encoded RLP binary blobs (while at the same time store them +// in the same cache fields). 
+type rawNode []byte + +func (n rawNode) cache() (HashNode, bool) { panic("this should never end up in a live trie") } +func (n rawNode) fstring(ind string) string { panic("this should never end up in a live trie") } + +func (n rawNode) EncodeRLP(w io.Writer) error { + _, err := w.Write(n) + return err +} + +type Database struct { + BlockNumber *big.Int + Root common.Hash + lock sync.RWMutex +} + +func NewDatabase(header types.Header) Database { + triedb := Database{BlockNumber: header.Number, Root: header.Root} + //triedb.preimages = make(map[common.Hash][]byte) + //fmt.Println("init database") + oracle.PrefetchAccount(header.Number, common.Address{}, nil) + + //panic("preseed") + return triedb +} + +// Node retrieves an encoded cached trie node from memory. If it cannot be found +// cached, the method queries the persistent database for the content. +func (db *Database) Node(hash common.Hash) ([]byte, error) { + panic("no Node function") +} + +// node retrieves a cached trie node from memory, or returns nil if none can be +// found in the memory cache. +func (db *Database) node(hash common.Hash) Node { + //fmt.Println("node", hash) + if val := oracle.Preimage(hash); val != nil { + return mustDecodeNode(hash[:], val) + } + return nil +} + +// insert inserts a collapsed trie node into the memory database. +// The blob size must be specified to allow proper size tracking. +// All nodes inserted by this function will be reference tracked +// and in theory should only used for **trie nodes** insertion. 
+func (db *Database) insert(hash common.Hash, size int, node Node) { + // can put things in the oracle here if we care + //fmt.Println("insert", hash, size) +} + +func GenPossibleShortNodePreimage(preimages map[common.Hash][]byte) { + newPreimages := make(map[common.Hash][]byte) + + for _, val := range preimages { + node, err := DecodeNode(nil, val) + if err != nil { + continue + } + + if node, ok := node.(*ShortNode); ok { + for i := len(node.Key) - 1; i > 0; i-- { + n := ShortNode{ + Key: HexToCompact(node.Key[i:]), + Val: node.Val, + } + buf := new(bytes.Buffer) + if err := rlp.Encode(buf, n); err != nil { + panic("encode error: " + err.Error()) + } + preimage := buf.Bytes() + if len(preimage) < 32 { + continue + } + newPreimages[crypto.Keccak256Hash(preimage)] = preimage + } + } + } + + for hash, val := range newPreimages { + preimages[hash] = val + } +} diff --git a/mpt-witness-generator/trie/encoding.go b/mpt-witness-generator/trie/encoding.go new file mode 100644 index 0000000000..1ed739984d --- /dev/null +++ b/mpt-witness-generator/trie/encoding.go @@ -0,0 +1,146 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package trie + +// Trie keys are dealt with in three distinct encodings: +// +// KEYBYTES encoding contains the actual key and nothing else. This encoding is the +// input to most API functions. +// +// HEX encoding contains one byte for each nibble of the key and an optional trailing +// 'terminator' byte of value 0x10 which indicates whether or not the node at the key +// contains a value. Hex key encoding is used for nodes loaded in memory because it's +// convenient to access. +// +// COMPACT encoding is defined by the Ethereum Yellow Paper (it's called "hex prefix +// encoding" there) and contains the bytes of the key and a flag. The high nibble of the +// first byte contains the flag; the lowest bit encoding the oddness of the length and +// the second-lowest encoding whether the node at the key is a value node. The low nibble +// of the first byte is zero in the case of an even number of nibbles and the first nibble +// in the case of an odd number. All remaining nibbles (now an even number) fit properly +// into the remaining bytes. Compact encoding is used for nodes stored on disk. 
// HexToCompact converts a hex-nibble key (optionally ending in the 0x10
// terminator) into the Yellow Paper's "hex prefix" (compact) encoding.
func HexToCompact(hex []byte) []byte {
	var flags byte
	// A trailing 0x10 nibble marks a value (leaf) node; it is encoded as a
	// flag bit rather than as key data.
	if len(hex) > 0 && hex[len(hex)-1] == 16 {
		flags = 1 << 5
		hex = hex[:len(hex)-1]
	}
	out := make([]byte, len(hex)/2+1)
	if len(hex)%2 != 0 {
		// Odd number of nibbles: the first nibble shares the flag byte.
		flags |= 1<<4 | hex[0]
		hex = hex[1:]
	}
	out[0] = flags
	for i := 0; i+1 < len(hex); i += 2 {
		out[1+i/2] = hex[i]<<4 | hex[i+1]
	}
	return out
}

// hexToCompactInPlace writes the compact encoding of the key into the input
// buffer itself and returns the number of bytes the representation needs.
func hexToCompactInPlace(hex []byte) int {
	nibbles := len(hex)
	var flags byte
	// The optional terminator nibble becomes a flag bit, not key data.
	if nibbles > 0 && hex[nibbles-1] == 16 {
		flags = 1 << 5
		nibbles--
	}
	read, write := 0, 1
	if nibbles%2 != 0 {
		// Odd length: fold the first nibble into the flag byte.
		flags |= 1<<4 | hex[0]
		read++
	}
	for read < nibbles {
		hex[write] = hex[read]<<4 | hex[read+1]
		write++
		read += 2
	}
	hex[0] = flags
	return nibbles/2 + 1
}

// compactToHex is the inverse of HexToCompact: it expands a compact key back
// into hex nibbles, re-appending the 0x10 terminator when the leaf flag is set.
func compactToHex(compact []byte) []byte {
	if len(compact) == 0 {
		return compact
	}
	nibbles := KeybytesToHex(compact)
	// A flag nibble below 2 means the terminator bit was unset: drop the
	// 0x10 terminator that KeybytesToHex unconditionally appends.
	if nibbles[0] < 2 {
		nibbles = nibbles[:len(nibbles)-1]
	}
	// Skip the flag nibble, plus the padding nibble when the key was even.
	if nibbles[0]&1 == 1 {
		return nibbles[1:]
	}
	return nibbles[2:]
}

// KeybytesToHex expands raw key bytes into hex nibbles, one byte per nibble,
// followed by the 0x10 terminator.
func KeybytesToHex(str []byte) []byte {
	nibbles := make([]byte, len(str)*2+1)
	for i, b := range str {
		nibbles[2*i] = b >> 4
		nibbles[2*i+1] = b & 0x0f
	}
	nibbles[len(nibbles)-1] = 16
	return nibbles
}

// HexToKeybytes turns hex nibbles into key bytes.
// This can only be used for keys of even length.
+func HexToKeybytes(hex []byte) []byte { + if hasTerm(hex) { + hex = hex[:len(hex)-1] + } + if len(hex)&1 != 0 { + panic("can't convert hex key of odd length") + } + key := make([]byte, len(hex)/2) + decodeNibbles(hex, key) + return key +} + +func decodeNibbles(nibbles []byte, bytes []byte) { + for bi, ni := 0, 0; ni < len(nibbles); bi, ni = bi+1, ni+2 { + bytes[bi] = nibbles[ni]<<4 | nibbles[ni+1] + } +} + +// prefixLen returns the length of the common prefix of a and b. +func prefixLen(a, b []byte) int { + var i, length = 0, len(a) + if len(b) < length { + length = len(b) + } + for ; i < length; i++ { + if a[i] != b[i] { + break + } + } + return i +} + +// hasTerm returns whether a hex key has the terminator flag. +func hasTerm(s []byte) bool { + return len(s) > 0 && s[len(s)-1] == 16 +} diff --git a/mpt-witness-generator/trie/errors.go b/mpt-witness-generator/trie/errors.go new file mode 100644 index 0000000000..567b80078c --- /dev/null +++ b/mpt-witness-generator/trie/errors.go @@ -0,0 +1,35 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package trie + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/common" +) + +// MissingNodeError is returned by the trie functions (TryGet, TryUpdate, TryDelete) +// in the case where a trie node is not present in the local database. It contains +// information necessary for retrieving the missing node. +type MissingNodeError struct { + NodeHash common.Hash // hash of the missing node + Path []byte // hex-encoded path to the missing node +} + +func (err *MissingNodeError) Error() string { + return fmt.Sprintf("missing trie node %x (path %x)", err.NodeHash, err.Path) +} diff --git a/mpt-witness-generator/trie/hasher.go b/mpt-witness-generator/trie/hasher.go new file mode 100644 index 0000000000..c96677415b --- /dev/null +++ b/mpt-witness-generator/trie/hasher.go @@ -0,0 +1,207 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package trie + +import ( + "sync" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/rlp" + "golang.org/x/crypto/sha3" +) + +type sliceBuffer []byte + +func (b *sliceBuffer) Write(data []byte) (n int, err error) { + *b = append(*b, data...) + return len(data), nil +} + +func (b *sliceBuffer) Reset() { + *b = (*b)[:0] +} + +// hasher is a type used for the trie Hash operation. 
A hasher has some +// internal preallocated temp space +type hasher struct { + sha crypto.KeccakState + tmp sliceBuffer + parallel bool // Whether to use paralallel threads when hashing +} + +// hasherPool holds pureHashers +var hasherPool = sync.Pool{ + New: func() interface{} { + return &hasher{ + tmp: make(sliceBuffer, 0, 550), // cap is as large as a full fullNode. + sha: sha3.NewLegacyKeccak256().(crypto.KeccakState), + } + }, +} + +func NewHasher(parallel bool) *hasher { + h := hasherPool.Get().(*hasher) + h.parallel = parallel + return h +} + +func returnHasherToPool(h *hasher) { + hasherPool.Put(h) +} + +// Hash collapses a node down into a Hash node, also returning a copy of the +// original node initialized with the computed Hash to replace the original one. +func (h *hasher) Hash(n Node, force bool) (hashed Node, cached Node) { + // Return the cached hash if it's available + if hash, _ := n.cache(); hash != nil { + return hash, n + } + // Trie not processed yet, walk the children + switch n := n.(type) { + case *ShortNode: + collapsed, cached := h.hashShortNodeChildren(n) + hashed := h.shortnodeToHash(collapsed, force) + // We need to retain the possibly _not_ hashed node, in case it was too + // small to be hashed + if hn, ok := hashed.(HashNode); ok { + cached.flags.hash = hn + } else { + cached.flags.hash = nil + } + return hashed, cached + case *FullNode: + collapsed, cached := h.hashFullNodeChildren(n) + hashed = h.FullnodeToHash(collapsed, force) + if hn, ok := hashed.(HashNode); ok { + cached.flags.hash = hn + } else { + cached.flags.hash = nil + } + return hashed, cached + default: + // Value and hash nodes don't have children so they're left as were + return n, n + } +} + +// hashShortNodeChildren collapses the short node. The returned collapsed node +// holds a live reference to the Key, and must not be modified. 
+// The cached +func (h *hasher) hashShortNodeChildren(n *ShortNode) (collapsed, cached *ShortNode) { + // Hash the short node's child, caching the newly hashed subtree + collapsed, cached = n.copy(), n.copy() + // Previously, we did copy this one. We don't seem to need to actually + // do that, since we don't overwrite/reuse keys + //cached.Key = common.CopyBytes(n.Key) + collapsed.Key = HexToCompact(n.Key) + // Unless the child is a valuenode or hashnode, hash it + switch n.Val.(type) { + case *FullNode, *ShortNode: + collapsed.Val, cached.Val = h.Hash(n.Val, false) + } + return collapsed, cached +} + +func (h *hasher) hashFullNodeChildren(n *FullNode) (collapsed *FullNode, cached *FullNode) { + // Hash the full node's children, caching the newly hashed subtrees + cached = n.copy() + collapsed = n.copy() + if h.parallel { + var wg sync.WaitGroup + wg.Add(16) + for i := 0; i < 16; i++ { + go func(i int) { + hasher := NewHasher(false) + if child := n.Children[i]; child != nil { + collapsed.Children[i], cached.Children[i] = hasher.Hash(child, false) + } else { + collapsed.Children[i] = nilValueNode + } + returnHasherToPool(hasher) + wg.Done() + }(i) + } + wg.Wait() + } else { + for i := 0; i < 16; i++ { + if child := n.Children[i]; child != nil { + collapsed.Children[i], cached.Children[i] = h.Hash(child, false) + } else { + collapsed.Children[i] = nilValueNode + } + } + } + return collapsed, cached +} + +// shortnodeToHash creates a hashNode from a shortNode. The supplied shortnode +// should have hex-type Key, which will be converted (without modification) +// into compact form for RLP encoding. +// If the rlp data is smaller than 32 bytes, `nil` is returned. 
+func (h *hasher) shortnodeToHash(n *ShortNode, force bool) Node { + h.tmp.Reset() + if err := rlp.Encode(&h.tmp, n); err != nil { + panic("encode error: " + err.Error()) + } + + if len(h.tmp) < 32 && !force { + return n // Nodes smaller than 32 bytes are stored inside their parent + } + return h.HashData(h.tmp) +} + +// shortnodeToHash is used to creates a hashNode from a set of hashNodes, (which +// may contain nil values) +func (h *hasher) FullnodeToHash(n *FullNode, force bool) Node { + h.tmp.Reset() + // Generate the RLP encoding of the node + if err := n.EncodeRLP(&h.tmp); err != nil { + panic("encode error: " + err.Error()) + } + + if len(h.tmp) < 32 && !force { + return n // Nodes smaller than 32 bytes are stored inside their parent + } + return h.HashData(h.tmp) +} + +// HashData hashes the provided data +func (h *hasher) HashData(data []byte) HashNode { + n := make(HashNode, 32) + h.sha.Reset() + h.sha.Write(data) + h.sha.Read(n) + return n +} + +// ProofHash is used to construct trie proofs, and returns the 'collapsed' +// node (for later RLP encoding) aswell as the hashed node -- unless the +// node is smaller than 32 bytes, in which case it will be returned as is. +// This method does not do anything on value- or hash-nodes. +func (h *hasher) ProofHash(original Node) (collapsed, hashed Node) { + switch n := original.(type) { + case *ShortNode: + sn, _ := h.hashShortNodeChildren(n) + return sn, h.shortnodeToHash(sn, false) + case *FullNode: + fn, _ := h.hashFullNodeChildren(n) + return fn, h.FullnodeToHash(fn, false) + default: + // Value and hash nodes don't have children so they're left as were + return n, n + } +} diff --git a/mpt-witness-generator/trie/iterator.go b/mpt-witness-generator/trie/iterator.go new file mode 100644 index 0000000000..6237ce0027 --- /dev/null +++ b/mpt-witness-generator/trie/iterator.go @@ -0,0 +1,714 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package trie + +import ( + "bytes" + "container/heap" + "errors" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/rlp" +) + +// Iterator is a key-value trie iterator that traverses a Trie. +type Iterator struct { + nodeIt NodeIterator + + Key []byte // Current data key on which the iterator is positioned on + Value []byte // Current data value on which the iterator is positioned on + Err error +} + +// NewIterator creates a new key-value iterator from a node iterator. +// Note that the value returned by the iterator is raw. If the content is encoded +// (e.g. storage value is RLP-encoded), it's caller's duty to decode it. +func NewIterator(it NodeIterator) *Iterator { + return &Iterator{ + nodeIt: it, + } +} + +// Next moves the iterator forward one key-value entry. +func (it *Iterator) Next() bool { + for it.nodeIt.Next(true) { + if it.nodeIt.Leaf() { + it.Key = it.nodeIt.LeafKey() + it.Value = it.nodeIt.LeafBlob() + return true + } + } + it.Key = nil + it.Value = nil + it.Err = it.nodeIt.Error() + return false +} + +// Prove generates the Merkle proof for the leaf node the iterator is currently +// positioned on. 
+func (it *Iterator) Prove() [][]byte { + return it.nodeIt.LeafProof() +} + +// NodeIterator is an iterator to traverse the trie pre-order. +type NodeIterator interface { + // Next moves the iterator to the next node. If the parameter is false, any child + // nodes will be skipped. + Next(bool) bool + + // Error returns the error status of the iterator. + Error() error + + // Hash returns the hash of the current node. + Hash() common.Hash + + // Parent returns the hash of the parent of the current node. The hash may be the one + // grandparent if the immediate parent is an internal node with no hash. + Parent() common.Hash + + // Path returns the hex-encoded path to the current node. + // Callers must not retain references to the return value after calling Next. + // For leaf nodes, the last element of the path is the 'terminator symbol' 0x10. + Path() []byte + + // Leaf returns true iff the current node is a leaf node. + Leaf() bool + + // LeafKey returns the key of the leaf. The method panics if the iterator is not + // positioned at a leaf. Callers must not retain references to the value after + // calling Next. + LeafKey() []byte + + // LeafBlob returns the content of the leaf. The method panics if the iterator + // is not positioned at a leaf. Callers must not retain references to the value + // after calling Next. + LeafBlob() []byte + + // LeafProof returns the Merkle proof of the leaf. The method panics if the + // iterator is not positioned at a leaf. Callers must not retain references + // to the value after calling Next. + LeafProof() [][]byte + + // AddResolver sets an intermediate database to use for looking up trie nodes + // before reaching into the real persistent layer. + // + // This is not required for normal operation, rather is an optimization for + // cases where trie nodes can be recovered from some external mechanism without + // reading from disk. 
In those cases, this resolver allows short circuiting + // accesses and returning them from memory. + // + // Before adding a similar mechanism to any other place in Geth, consider + // making trie.Database an interface and wrapping at that level. It's a huge + // refactor, but it could be worth it if another occurrence arises. + AddResolver(ethdb.KeyValueStore) +} + +// nodeIteratorState represents the iteration state at one particular node of the +// trie, which can be resumed at a later invocation. +type nodeIteratorState struct { + hash common.Hash // Hash of the node being iterated (nil if not standalone) + node Node // Trie node being iterated + parent common.Hash // Hash of the first full ancestor node (nil if current is the root) + index int // Child to be processed next + pathlen int // Length of the path to this node +} + +type nodeIterator struct { + trie *Trie // Trie being iterated + stack []*nodeIteratorState // Hierarchy of trie nodes persisting the iteration state + path []byte // Path to the current node + err error // Failure set in case of an internal error in the iterator + + resolver ethdb.KeyValueStore // Optional intermediate resolver above the disk layer +} + +// errIteratorEnd is stored in nodeIterator.err when iteration is done. +var errIteratorEnd = errors.New("end of iteration") + +// seekError is stored in nodeIterator.err if the initial seek has failed. 
+type seekError struct { + key []byte + err error +} + +func (e seekError) Error() string { + return "seek error: " + e.err.Error() +} + +func newNodeIterator(trie *Trie, start []byte) NodeIterator { + if trie.Hash() == emptyState { + return new(nodeIterator) + } + it := &nodeIterator{trie: trie} + it.err = it.seek(start) + return it +} + +func (it *nodeIterator) AddResolver(resolver ethdb.KeyValueStore) { + it.resolver = resolver +} + +func (it *nodeIterator) Hash() common.Hash { + if len(it.stack) == 0 { + return common.Hash{} + } + return it.stack[len(it.stack)-1].hash +} + +func (it *nodeIterator) Parent() common.Hash { + if len(it.stack) == 0 { + return common.Hash{} + } + return it.stack[len(it.stack)-1].parent +} + +func (it *nodeIterator) Leaf() bool { + return hasTerm(it.path) +} + +func (it *nodeIterator) LeafKey() []byte { + if len(it.stack) > 0 { + if _, ok := it.stack[len(it.stack)-1].node.(ValueNode); ok { + return HexToKeybytes(it.path) + } + } + panic("not at leaf") +} + +func (it *nodeIterator) LeafBlob() []byte { + if len(it.stack) > 0 { + if node, ok := it.stack[len(it.stack)-1].node.(ValueNode); ok { + return node + } + } + panic("not at leaf") +} + +func (it *nodeIterator) LeafProof() [][]byte { + if len(it.stack) > 0 { + if _, ok := it.stack[len(it.stack)-1].node.(ValueNode); ok { + hasher := NewHasher(false) + defer returnHasherToPool(hasher) + proofs := make([][]byte, 0, len(it.stack)) + + for i, item := range it.stack[:len(it.stack)-1] { + // Gather nodes that end up as hash nodes (or the root) + node, hashed := hasher.ProofHash(item.node) + if _, ok := hashed.(HashNode); ok || i == 0 { + enc, _ := rlp.EncodeToBytes(node) + proofs = append(proofs, enc) + } + } + return proofs + } + } + panic("not at leaf") +} + +func (it *nodeIterator) Path() []byte { + return it.path +} + +func (it *nodeIterator) Error() error { + if it.err == errIteratorEnd { + return nil + } + if seek, ok := it.err.(seekError); ok { + return seek.err + } + return it.err 
+} + +// Next moves the iterator to the next node, returning whether there are any +// further nodes. In case of an internal error this method returns false and +// sets the Error field to the encountered failure. If `descend` is false, +// skips iterating over any subnodes of the current node. +func (it *nodeIterator) Next(descend bool) bool { + if it.err == errIteratorEnd { + return false + } + if seek, ok := it.err.(seekError); ok { + if it.err = it.seek(seek.key); it.err != nil { + return false + } + } + // Otherwise step forward with the iterator and report any errors. + state, parentIndex, path, err := it.peek(descend) + it.err = err + if it.err != nil { + return false + } + it.push(state, parentIndex, path) + return true +} + +func (it *nodeIterator) seek(prefix []byte) error { + // The path we're looking for is the hex encoded key without terminator. + key := KeybytesToHex(prefix) + key = key[:len(key)-1] + // Move forward until we're just before the closest match to key. + for { + state, parentIndex, path, err := it.peekSeek(key) + if err == errIteratorEnd { + return errIteratorEnd + } else if err != nil { + return seekError{prefix, err} + } else if bytes.Compare(path, key) >= 0 { + return nil + } + it.push(state, parentIndex, path) + } +} + +// init initializes the the iterator. +func (it *nodeIterator) init() (*nodeIteratorState, error) { + root := it.trie.Hash() + state := &nodeIteratorState{node: it.trie.root, index: -1} + if root != emptyRoot { + state.hash = root + } + return state, state.resolve(it, nil) +} + +// peek creates the next state of the iterator. +func (it *nodeIterator) peek(descend bool) (*nodeIteratorState, *int, []byte, error) { + // Initialize the iterator if we've just started. 
+ if len(it.stack) == 0 { + state, err := it.init() + return state, nil, nil, err + } + if !descend { + // If we're skipping children, pop the current node first + it.pop() + } + + // Continue iteration to the next child + for len(it.stack) > 0 { + parent := it.stack[len(it.stack)-1] + ancestor := parent.hash + if (ancestor == common.Hash{}) { + ancestor = parent.parent + } + state, path, ok := it.nextChild(parent, ancestor) + if ok { + if err := state.resolve(it, path); err != nil { + return parent, &parent.index, path, err + } + return state, &parent.index, path, nil + } + // No more child nodes, move back up. + it.pop() + } + return nil, nil, nil, errIteratorEnd +} + +// peekSeek is like peek, but it also tries to skip resolving hashes by skipping +// over the siblings that do not lead towards the desired seek position. +func (it *nodeIterator) peekSeek(seekKey []byte) (*nodeIteratorState, *int, []byte, error) { + // Initialize the iterator if we've just started. + if len(it.stack) == 0 { + state, err := it.init() + return state, nil, nil, err + } + if !bytes.HasPrefix(seekKey, it.path) { + // If we're skipping children, pop the current node first + it.pop() + } + + // Continue iteration to the next child + for len(it.stack) > 0 { + parent := it.stack[len(it.stack)-1] + ancestor := parent.hash + if (ancestor == common.Hash{}) { + ancestor = parent.parent + } + state, path, ok := it.nextChildAt(parent, ancestor, seekKey) + if ok { + if err := state.resolve(it, path); err != nil { + return parent, &parent.index, path, err + } + return state, &parent.index, path, nil + } + // No more child nodes, move back up. 
+ it.pop() + } + return nil, nil, nil, errIteratorEnd +} + +func (it *nodeIterator) resolveHash(hash HashNode, path []byte) (Node, error) { + if it.resolver != nil { + if blob, err := it.resolver.Get(hash); err == nil && len(blob) > 0 { + if resolved, err := DecodeNode(hash, blob); err == nil { + return resolved, nil + } + } + } + resolved, err := it.trie.resolveHash(hash, path) + return resolved, err +} + +func (st *nodeIteratorState) resolve(it *nodeIterator, path []byte) error { + if hash, ok := st.node.(HashNode); ok { + resolved, err := it.resolveHash(hash, path) + if err != nil { + return err + } + st.node = resolved + st.hash = common.BytesToHash(hash) + } + return nil +} + +func findChild(n *FullNode, index int, path []byte, ancestor common.Hash) (Node, *nodeIteratorState, []byte, int) { + var ( + child Node + state *nodeIteratorState + childPath []byte + ) + for ; index < len(n.Children); index++ { + if n.Children[index] != nil { + child = n.Children[index] + hash, _ := child.cache() + state = &nodeIteratorState{ + hash: common.BytesToHash(hash), + node: child, + parent: ancestor, + index: -1, + pathlen: len(path), + } + childPath = append(childPath, path...) + childPath = append(childPath, byte(index)) + return child, state, childPath, index + } + } + return nil, nil, nil, 0 +} + +func (it *nodeIterator) nextChild(parent *nodeIteratorState, ancestor common.Hash) (*nodeIteratorState, []byte, bool) { + switch node := parent.node.(type) { + case *FullNode: + //Full node, move to the first non-nil child. 
+ if child, state, path, index := findChild(node, parent.index+1, it.path, ancestor); child != nil { + parent.index = index - 1 + return state, path, true + } + case *ShortNode: + // Short node, return the pointer singleton child + if parent.index < 0 { + hash, _ := node.Val.cache() + state := &nodeIteratorState{ + hash: common.BytesToHash(hash), + node: node.Val, + parent: ancestor, + index: -1, + pathlen: len(it.path), + } + path := append(it.path, node.Key...) + return state, path, true + } + } + return parent, it.path, false +} + +// nextChildAt is similar to nextChild, except that it targets a child as close to the +// target key as possible, thus skipping siblings. +func (it *nodeIterator) nextChildAt(parent *nodeIteratorState, ancestor common.Hash, key []byte) (*nodeIteratorState, []byte, bool) { + switch n := parent.node.(type) { + case *FullNode: + // Full node, move to the first non-nil child before the desired key position + child, state, path, index := findChild(n, parent.index+1, it.path, ancestor) + if child == nil { + // No more children in this fullnode + return parent, it.path, false + } + // If the child we found is already past the seek position, just return it. + if bytes.Compare(path, key) >= 0 { + parent.index = index - 1 + return state, path, true + } + // The child is before the seek position. 
Try advancing + for { + nextChild, nextState, nextPath, nextIndex := findChild(n, index+1, it.path, ancestor) + // If we run out of children, or skipped past the target, return the + // previous one + if nextChild == nil || bytes.Compare(nextPath, key) >= 0 { + parent.index = index - 1 + return state, path, true + } + // We found a better child closer to the target + state, path, index = nextState, nextPath, nextIndex + } + case *ShortNode: + // Short node, return the pointer singleton child + if parent.index < 0 { + hash, _ := n.Val.cache() + state := &nodeIteratorState{ + hash: common.BytesToHash(hash), + node: n.Val, + parent: ancestor, + index: -1, + pathlen: len(it.path), + } + path := append(it.path, n.Key...) + return state, path, true + } + } + return parent, it.path, false +} + +func (it *nodeIterator) push(state *nodeIteratorState, parentIndex *int, path []byte) { + it.path = path + it.stack = append(it.stack, state) + if parentIndex != nil { + *parentIndex++ + } +} + +func (it *nodeIterator) pop() { + parent := it.stack[len(it.stack)-1] + it.path = it.path[:parent.pathlen] + it.stack = it.stack[:len(it.stack)-1] +} + +func compareNodes(a, b NodeIterator) int { + if cmp := bytes.Compare(a.Path(), b.Path()); cmp != 0 { + return cmp + } + if a.Leaf() && !b.Leaf() { + return -1 + } else if b.Leaf() && !a.Leaf() { + return 1 + } + if cmp := bytes.Compare(a.Hash().Bytes(), b.Hash().Bytes()); cmp != 0 { + return cmp + } + if a.Leaf() && b.Leaf() { + return bytes.Compare(a.LeafBlob(), b.LeafBlob()) + } + return 0 +} + +type differenceIterator struct { + a, b NodeIterator // Nodes returned are those in b - a. + eof bool // Indicates a has run out of elements + count int // Number of nodes scanned on either trie +} + +// NewDifferenceIterator constructs a NodeIterator that iterates over elements in b that +// are not in a. Returns the iterator, and a pointer to an integer recording the number +// of nodes seen. 
+func NewDifferenceIterator(a, b NodeIterator) (NodeIterator, *int) { + a.Next(true) + it := &differenceIterator{ + a: a, + b: b, + } + return it, &it.count +} + +func (it *differenceIterator) Hash() common.Hash { + return it.b.Hash() +} + +func (it *differenceIterator) Parent() common.Hash { + return it.b.Parent() +} + +func (it *differenceIterator) Leaf() bool { + return it.b.Leaf() +} + +func (it *differenceIterator) LeafKey() []byte { + return it.b.LeafKey() +} + +func (it *differenceIterator) LeafBlob() []byte { + return it.b.LeafBlob() +} + +func (it *differenceIterator) LeafProof() [][]byte { + return it.b.LeafProof() +} + +func (it *differenceIterator) Path() []byte { + return it.b.Path() +} + +func (it *differenceIterator) AddResolver(resolver ethdb.KeyValueStore) { + panic("not implemented") +} + +func (it *differenceIterator) Next(bool) bool { + // Invariants: + // - We always advance at least one element in b. + // - At the start of this function, a's path is lexically greater than b's. 
+ if !it.b.Next(true) { + return false + } + it.count++ + + if it.eof { + // a has reached eof, so we just return all elements from b + return true + } + + for { + switch compareNodes(it.a, it.b) { + case -1: + // b jumped past a; advance a + if !it.a.Next(true) { + it.eof = true + return true + } + it.count++ + case 1: + // b is before a + return true + case 0: + // a and b are identical; skip this whole subtree if the nodes have hashes + hasHash := it.a.Hash() == common.Hash{} + if !it.b.Next(hasHash) { + return false + } + it.count++ + if !it.a.Next(hasHash) { + it.eof = true + return true + } + it.count++ + } + } +} + +func (it *differenceIterator) Error() error { + if err := it.a.Error(); err != nil { + return err + } + return it.b.Error() +} + +type nodeIteratorHeap []NodeIterator + +func (h nodeIteratorHeap) Len() int { return len(h) } +func (h nodeIteratorHeap) Less(i, j int) bool { return compareNodes(h[i], h[j]) < 0 } +func (h nodeIteratorHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } +func (h *nodeIteratorHeap) Push(x interface{}) { *h = append(*h, x.(NodeIterator)) } +func (h *nodeIteratorHeap) Pop() interface{} { + n := len(*h) + x := (*h)[n-1] + *h = (*h)[0 : n-1] + return x +} + +type unionIterator struct { + items *nodeIteratorHeap // Nodes returned are the union of the ones in these iterators + count int // Number of nodes scanned across all tries +} + +// NewUnionIterator constructs a NodeIterator that iterates over elements in the union +// of the provided NodeIterators. Returns the iterator, and a pointer to an integer +// recording the number of nodes visited. 
+func NewUnionIterator(iters []NodeIterator) (NodeIterator, *int) { + h := make(nodeIteratorHeap, len(iters)) + copy(h, iters) + heap.Init(&h) + + ui := &unionIterator{items: &h} + return ui, &ui.count +} + +func (it *unionIterator) Hash() common.Hash { + return (*it.items)[0].Hash() +} + +func (it *unionIterator) Parent() common.Hash { + return (*it.items)[0].Parent() +} + +func (it *unionIterator) Leaf() bool { + return (*it.items)[0].Leaf() +} + +func (it *unionIterator) LeafKey() []byte { + return (*it.items)[0].LeafKey() +} + +func (it *unionIterator) LeafBlob() []byte { + return (*it.items)[0].LeafBlob() +} + +func (it *unionIterator) LeafProof() [][]byte { + return (*it.items)[0].LeafProof() +} + +func (it *unionIterator) Path() []byte { + return (*it.items)[0].Path() +} + +func (it *unionIterator) AddResolver(resolver ethdb.KeyValueStore) { + panic("not implemented") +} + +// Next returns the next node in the union of tries being iterated over. +// +// It does this by maintaining a heap of iterators, sorted by the iteration +// order of their next elements, with one entry for each source trie. Each +// time Next() is called, it takes the least element from the heap to return, +// advancing any other iterators that also point to that same element. These +// iterators are called with descend=false, since we know that any nodes under +// these nodes will also be duplicates, found in the currently selected iterator. +// Whenever an iterator is advanced, it is pushed back into the heap if it still +// has elements remaining. +// +// In the case that descend=false - eg, we're asked to ignore all subnodes of the +// current node - we also advance any iterators in the heap that have the current +// path as a prefix. 
+func (it *unionIterator) Next(descend bool) bool { + if len(*it.items) == 0 { + return false + } + + // Get the next key from the union + least := heap.Pop(it.items).(NodeIterator) + + // Skip over other nodes as long as they're identical, or, if we're not descending, as + // long as they have the same prefix as the current node. + for len(*it.items) > 0 && ((!descend && bytes.HasPrefix((*it.items)[0].Path(), least.Path())) || compareNodes(least, (*it.items)[0]) == 0) { + skipped := heap.Pop(it.items).(NodeIterator) + // Skip the whole subtree if the nodes have hashes; otherwise just skip this node + if skipped.Next(skipped.Hash() == common.Hash{}) { + it.count++ + // If there are more elements, push the iterator back on the heap + heap.Push(it.items, skipped) + } + } + if least.Next(descend) { + it.count++ + heap.Push(it.items, least) + } + return len(*it.items) > 0 +} + +func (it *unionIterator) Error() error { + for i := 0; i < len(*it.items); i++ { + if err := (*it.items)[i].Error(); err != nil { + return err + } + } + return nil +} diff --git a/mpt-witness-generator/trie/node.go b/mpt-witness-generator/trie/node.go new file mode 100644 index 0000000000..0f4ac24277 --- /dev/null +++ b/mpt-witness-generator/trie/node.go @@ -0,0 +1,225 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package trie
+
+import (
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+var indices = []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "[17]"}
+
+type Node interface {
+ fstring(string) string
+ cache() (HashNode, bool)
+}
+
+type (
+ FullNode struct {
+ Children [17]Node // Actual trie node data to encode/decode (needs custom encoder)
+ flags nodeFlag
+ }
+ ShortNode struct {
+ Key []byte
+ Val Node
+ flags nodeFlag
+ }
+ HashNode []byte
+ ValueNode []byte
+)
+
+// nilValueNode is used when collapsing internal trie nodes for hashing, since
+// unset children need to serialize correctly.
+var nilValueNode = ValueNode(nil)
+
+// EncodeRLP encodes a full node into the consensus RLP format.
+func (n *FullNode) EncodeRLP(w io.Writer) error {
+ var nodes [17]Node
+
+ for i, child := range &n.Children {
+ if child != nil {
+ nodes[i] = child
+ } else {
+ nodes[i] = nilValueNode
+ }
+ }
+ return rlp.Encode(w, nodes)
+}
+
+func (n *FullNode) copy() *FullNode { copy := *n; return &copy }
+func (n *ShortNode) copy() *ShortNode { copy := *n; return &copy }
+
+// nodeFlag contains caching-related metadata about a node.
+type nodeFlag struct {
+ hash HashNode // cached hash of the node (may be nil)
+ dirty bool // whether the node has changes that must be written to the database
+}
+
+func (n *FullNode) cache() (HashNode, bool) { return n.flags.hash, n.flags.dirty }
+func (n *ShortNode) cache() (HashNode, bool) { return n.flags.hash, n.flags.dirty }
+func (n HashNode) cache() (HashNode, bool) { return nil, true }
+func (n ValueNode) cache() (HashNode, bool) { return nil, true }
+
+// Pretty printing. 
+func (n *FullNode) String() string { return n.fstring("") } +func (n *ShortNode) String() string { return n.fstring("") } +func (n HashNode) String() string { return n.fstring("") } +func (n ValueNode) String() string { return n.fstring("") } + +func (n *FullNode) fstring(ind string) string { + resp := fmt.Sprintf("[\n%s ", ind) + for i, node := range &n.Children { + if node == nil { + resp += fmt.Sprintf("%s: ", indices[i]) + } else { + resp += fmt.Sprintf("%s: %v", indices[i], node.fstring(ind+" ")) + } + } + return resp + fmt.Sprintf("\n%s] ", ind) +} +func (n *ShortNode) fstring(ind string) string { + return fmt.Sprintf("{%x: %v} ", n.Key, n.Val.fstring(ind+" ")) +} +func (n HashNode) fstring(ind string) string { + return fmt.Sprintf("<%x> ", []byte(n)) +} +func (n ValueNode) fstring(ind string) string { + return fmt.Sprintf("%x ", []byte(n)) +} + +func mustDecodeNode(hash, buf []byte) Node { + n, err := DecodeNode(hash, buf) + if err != nil { + panic(fmt.Sprintf("node %x: %v", hash, err)) + } + return n +} + +// DecodeNode parses the RLP encoding of a trie node. 
+func DecodeNode(hash, buf []byte) (Node, error) { + if len(buf) == 0 { + return nil, io.ErrUnexpectedEOF + } + elems, _, err := rlp.SplitList(buf) + if err != nil { + return nil, fmt.Errorf("decode error: %v", err) + } + switch c, _ := rlp.CountValues(elems); c { + case 2: + n, err := decodeShort(hash, elems) + return n, wrapError(err, "short") + case 17: + n, err := decodeFull(hash, elems) + return n, wrapError(err, "full") + default: + return nil, fmt.Errorf("invalid number of list elements: %v", c) + } +} + +func decodeShort(hash, elems []byte) (Node, error) { + kbuf, rest, err := rlp.SplitString(elems) + if err != nil { + return nil, err + } + flag := nodeFlag{hash: hash} + key := compactToHex(kbuf) + if hasTerm(key) { + // value node + val, _, err := rlp.SplitString(rest) + if err != nil { + return nil, fmt.Errorf("invalid value node: %v", err) + } + return &ShortNode{key, append(ValueNode{}, val...), flag}, nil + } + r, _, err := decodeRef(rest) + if err != nil { + return nil, wrapError(err, "val") + } + return &ShortNode{key, r, flag}, nil +} + +func decodeFull(hash, elems []byte) (*FullNode, error) { + n := &FullNode{flags: nodeFlag{hash: hash}} + for i := 0; i < 16; i++ { + cld, rest, err := decodeRef(elems) + if err != nil { + return n, wrapError(err, fmt.Sprintf("[%d]", i)) + } + n.Children[i], elems = cld, rest + } + val, _, err := rlp.SplitString(elems) + if err != nil { + return n, err + } + if len(val) > 0 { + n.Children[16] = append(ValueNode{}, val...) + } + return n, nil +} + +const hashLen = len(common.Hash{}) + +func decodeRef(buf []byte) (Node, []byte, error) { + kind, val, rest, err := rlp.Split(buf) + if err != nil { + return nil, buf, err + } + switch { + case kind == rlp.List: + // 'embedded' node reference. The encoding must be smaller + // than a hash in order to be valid. 
+ if size := len(buf) - len(rest); size > hashLen { + err := fmt.Errorf("oversized embedded node (size is %d bytes, want size < %d)", size, hashLen) + return nil, buf, err + } + n, err := DecodeNode(nil, buf) + return n, rest, err + case kind == rlp.String && len(val) == 0: + // empty node + return nil, rest, nil + case kind == rlp.String && len(val) == 32: + return append(HashNode{}, val...), rest, nil + default: + return nil, nil, fmt.Errorf("invalid RLP string size %d (want 0 or 32)", len(val)) + } +} + +// wraps a decoding error with information about the path to the +// invalid child node (for debugging encoding issues). +type decodeError struct { + what error + stack []string +} + +func wrapError(err error, ctx string) error { + if err == nil { + return nil + } + if decErr, ok := err.(*decodeError); ok { + decErr.stack = append(decErr.stack, ctx) + return decErr + } + return &decodeError{err, []string{ctx}} +} + +func (err *decodeError) Error() string { + return fmt.Sprintf("%v (decode path: %s)", err.what, strings.Join(err.stack, "<-")) +} diff --git a/mpt-witness-generator/trie/proof.go b/mpt-witness-generator/trie/proof.go new file mode 100644 index 0000000000..39ce02f401 --- /dev/null +++ b/mpt-witness-generator/trie/proof.go @@ -0,0 +1,692 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package trie + +import ( + "bytes" + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" +) + +// Prove constructs a merkle proof for key. The result contains all encoded nodes +// on the path to the value at key. The value itself is also included in the last +// node and can be retrieved by verifying the proof. +// +// If the trie does not contain a value for key, the returned proof contains all +// nodes of the longest existing prefix of the key (at least the root node), ending +// with the node that proves the absence of the key. +func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) ([]byte, [][]byte, bool, error) { + // Collect all nodes on the path to key. + key = KeybytesToHex(key) + var nodes []Node + tn := t.root + var neighbourNode Node + for len(key) > 0 && tn != nil { + switch n := tn.(type) { + case *ShortNode: + if len(key) < len(n.Key) || !bytes.Equal(n.Key, key[:len(n.Key)]) { + // The trie doesn't contain the key. + tn = nil + } else { + tn = n.Val + key = key[len(n.Key):] + } + nodes = append(nodes, n) + case *FullNode: + tn = n.Children[key[0]] + + count := 0 + neighbourIndex := -1 + // If there is only one neighbour node, store it, as it is needed when the node + // is deleted and the branch turns into leaf (that used to be neighbour node). 
+ for i, c := range n.Children { + if byte(i) == key[0] { + continue + } + if c != nil { + count++ + neighbourIndex = i + } + } + neighbourNode = n.Children[neighbourIndex] + + key = key[1:] + nodes = append(nodes, n) + case HashNode: + var err error + tn, err = t.resolveHash(n, nil) + if err != nil { + log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) + return nil, nil, false, err + } + default: + panic(fmt.Sprintf("%T: invalid node: %v", tn, tn)) + } + } + hasher := NewHasher(false) + defer returnHasherToPool(hasher) + + var extNibbles [][]byte + + // From getProof response it is not possible to see (in some cases it is though) whether + // the short node is a leaf or an extension node. + isLastLeaf := false + if len(nodes) > 0 { + n := nodes[len(nodes)-1] + if short, ok := n.(*ShortNode); ok { + isLastLeaf = hasTerm(short.Key) + } + } + + for _, n := range nodes { + if fromLevel > 0 { + fromLevel-- + continue + } + // var hn Node + + // We need nibbles in witness for extension keys. + // copy n.Key before it gets changed in ProofHash + var nCopy []byte + if short, ok := n.(*ShortNode); ok { + if !hasTerm(short.Key) { // only for extension keys + nCopy = make([]byte, len(short.Key)) + copy(nCopy, short.Key) + extNibbles = append(extNibbles, nCopy) + } + } + + // n, hn = hasher.ProofHash(n) + n, _ = hasher.ProofHash(n) + // if hash, ok := hn.(HashNode); ok || i == 0 { + // If the node's database encoding is a hash (or is the + // root node), it becomes a proof element. 
+ enc, _ := rlp.EncodeToBytes(n) + /* + if !ok { + hash = hasher.HashData(enc) + } + */ + // proofDb.Put(hash, enc) + proofDb.Put([]byte{1, 1, 1}, enc) + // } + } + + neighbourNodeRLP := []byte{} + if neighbourNode != nil { + neighbourHash, _ := hasher.ProofHash(neighbourNode) + neighbourNodeRLP, _ = rlp.EncodeToBytes(neighbourHash) + } + + return neighbourNodeRLP, extNibbles, isLastLeaf, nil +} + +func (t *Trie) GetNodeByNibbles(key []byte) ([]byte, error) { + tn := t.root + // var node Node + for len(key) > 0 && tn != nil { + switch n := tn.(type) { + case *ShortNode: + if len(key) < len(n.Key) || !bytes.Equal(n.Key, key[:len(n.Key)]) { + // The trie doesn't contain the key. + tn = nil + } else { + tn = n.Val + key = key[len(n.Key):] + } + case *FullNode: + tn = n.Children[key[0]] + key = key[1:] + if len(key) == 0 { + hasher := NewHasher(false) + s := tn.(*ShortNode) + nn, _ := hasher.ProofHash(s) + enc, _ := rlp.EncodeToBytes(nn) + return enc, nil + } + case HashNode: + var err error + tn, err = t.resolveHash(n, nil) + if err != nil { + log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) + return nil, err + } + default: + panic(fmt.Sprintf("%T: invalid node: %v", tn, tn)) + } + } + + return nil, nil +} + +// Prove constructs a merkle proof for key. The result contains all encoded nodes +// on the path to the value at key. The value itself is also included in the last +// node and can be retrieved by verifying the proof. +// +// If the trie does not contain a value for key, the returned proof contains all +// nodes of the longest existing prefix of the key (at least the root node), ending +// with the node that proves the absence of the key. +func (t *SecureTrie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) ([]byte, [][]byte, bool, error) { + return t.trie.Prove(key, fromLevel, proofDb) +} + +func (t *SecureTrie) GetNodeByNibbles(key []byte) ([]byte, error) { + return t.trie.GetNodeByNibbles(key) +} + +// VerifyProof checks merkle proofs. 
The given proof must contain the value for +// key in a trie with the given root hash. VerifyProof returns an error if the +// proof contains invalid trie nodes or the wrong value. +func VerifyProof(rootHash common.Hash, key []byte, proofDb ethdb.KeyValueReader) (value []byte, err error) { + key = KeybytesToHex(key) + wantHash := rootHash + for i := 0; ; i++ { + buf, _ := proofDb.Get(wantHash[:]) + if buf == nil { + return nil, fmt.Errorf("proof node %d (hash %064x) missing", i, wantHash) + } + n, err := DecodeNode(wantHash[:], buf) + if err != nil { + return nil, fmt.Errorf("bad proof node %d: %v", i, err) + } + keyrest, cld := get(n, key, true) + switch cld := cld.(type) { + case nil: + // The trie doesn't contain the key. + return nil, nil + case HashNode: + key = keyrest + copy(wantHash[:], cld) + case ValueNode: + return cld, nil + } + } +} + +// proofToPath converts a merkle proof to trie node path. The main purpose of +// this function is recovering a node path from the merkle proof stream. All +// necessary nodes will be resolved and leave the remaining as hashnode. +// +// The given edge proof is allowed to be an existent or non-existent proof. +func proofToPath(rootHash common.Hash, root Node, key []byte, proofDb ethdb.KeyValueReader, allowNonExistent bool) (Node, []byte, error) { + // resolveNode retrieves and resolves trie node from merkle proof stream + resolveNode := func(hash common.Hash) (Node, error) { + buf, _ := proofDb.Get(hash[:]) + if buf == nil { + return nil, fmt.Errorf("proof node (hash %064x) missing", hash) + } + n, err := DecodeNode(hash[:], buf) + if err != nil { + return nil, fmt.Errorf("bad proof node %v", err) + } + return n, err + } + // If the root node is empty, resolve it first. + // Root node must be included in the proof. 
+ if root == nil { + n, err := resolveNode(rootHash) + if err != nil { + return nil, nil, err + } + root = n + } + var ( + err error + child, parent Node + keyrest []byte + valnode []byte + ) + key, parent = KeybytesToHex(key), root + for { + keyrest, child = get(parent, key, false) + switch cld := child.(type) { + case nil: + // The trie doesn't contain the key. It's possible + // the proof is a non-existing proof, but at least + // we can prove all resolved nodes are correct, it's + // enough for us to prove range. + if allowNonExistent { + return root, nil, nil + } + return nil, nil, errors.New("the node is not contained in trie") + case *ShortNode: + key, parent = keyrest, child // Already resolved + continue + case *FullNode: + key, parent = keyrest, child // Already resolved + continue + case HashNode: + child, err = resolveNode(common.BytesToHash(cld)) + if err != nil { + return nil, nil, err + } + case ValueNode: + valnode = cld + } + // Link the parent and child. + switch pnode := parent.(type) { + case *ShortNode: + pnode.Val = child + case *FullNode: + pnode.Children[key[0]] = child + default: + panic(fmt.Sprintf("%T: invalid node: %v", pnode, pnode)) + } + if len(valnode) > 0 { + return root, valnode, nil // The whole path is resolved + } + key, parent = keyrest, child + } +} + +// unsetInternal removes all internal node references(hashnode, embedded node). +// It should be called after a trie is constructed with two edge paths. Also +// the given boundary keys must be the one used to construct the edge paths. +// +// It's the key step for range proof. All visited nodes should be marked dirty +// since the node content might be modified. Besides it can happen that some +// fullnodes only have one child which is disallowed. But if the proof is valid, +// the missing children will be filled, otherwise it will be thrown anyway. +// +// Note we have the assumption here the given boundary keys are different +// and right is larger than left. 
+func unsetInternal(n Node, left []byte, right []byte) (bool, error) { + left, right = KeybytesToHex(left), KeybytesToHex(right) + + // Step down to the fork point. There are two scenarios can happen: + // - the fork point is a shortnode: either the key of left proof or + // right proof doesn't match with shortnode's key. + // - the fork point is a fullnode: both two edge proofs are allowed + // to point to a non-existent key. + var ( + pos = 0 + parent Node + + // fork indicator, 0 means no fork, -1 means proof is less, 1 means proof is greater + shortForkLeft, shortForkRight int + ) +findFork: + for { + switch rn := (n).(type) { + case *ShortNode: + rn.flags = nodeFlag{dirty: true} + + // If either the key of left proof or right proof doesn't match with + // shortnode, stop here and the forkpoint is the shortnode. + if len(left)-pos < len(rn.Key) { + shortForkLeft = bytes.Compare(left[pos:], rn.Key) + } else { + shortForkLeft = bytes.Compare(left[pos:pos+len(rn.Key)], rn.Key) + } + if len(right)-pos < len(rn.Key) { + shortForkRight = bytes.Compare(right[pos:], rn.Key) + } else { + shortForkRight = bytes.Compare(right[pos:pos+len(rn.Key)], rn.Key) + } + if shortForkLeft != 0 || shortForkRight != 0 { + break findFork + } + parent = n + n, pos = rn.Val, pos+len(rn.Key) + case *FullNode: + rn.flags = nodeFlag{dirty: true} + + // If either the node pointed by left proof or right proof is nil, + // stop here and the forkpoint is the fullnode. 
+ leftnode, rightnode := rn.Children[left[pos]], rn.Children[right[pos]] + if leftnode == nil || rightnode == nil || leftnode != rightnode { + break findFork + } + parent = n + n, pos = rn.Children[left[pos]], pos+1 + default: + panic(fmt.Sprintf("%T: invalid node: %v", n, n)) + } + } + switch rn := n.(type) { + case *ShortNode: + // There can have these five scenarios: + // - both proofs are less than the trie path => no valid range + // - both proofs are greater than the trie path => no valid range + // - left proof is less and right proof is greater => valid range, unset the shortnode entirely + // - left proof points to the shortnode, but right proof is greater + // - right proof points to the shortnode, but left proof is less + if shortForkLeft == -1 && shortForkRight == -1 { + return false, errors.New("empty range") + } + if shortForkLeft == 1 && shortForkRight == 1 { + return false, errors.New("empty range") + } + if shortForkLeft != 0 && shortForkRight != 0 { + // The fork point is root node, unset the entire trie + if parent == nil { + return true, nil + } + parent.(*FullNode).Children[left[pos-1]] = nil + return false, nil + } + // Only one proof points to non-existent key. 
+ if shortForkRight != 0 { + if _, ok := rn.Val.(ValueNode); ok { + // The fork point is root node, unset the entire trie + if parent == nil { + return true, nil + } + parent.(*FullNode).Children[left[pos-1]] = nil + return false, nil + } + return false, unset(rn, rn.Val, left[pos:], len(rn.Key), false) + } + if shortForkLeft != 0 { + if _, ok := rn.Val.(ValueNode); ok { + // The fork point is root node, unset the entire trie + if parent == nil { + return true, nil + } + parent.(*FullNode).Children[right[pos-1]] = nil + return false, nil + } + return false, unset(rn, rn.Val, right[pos:], len(rn.Key), true) + } + return false, nil + case *FullNode: + // unset all internal nodes in the forkpoint + for i := left[pos] + 1; i < right[pos]; i++ { + rn.Children[i] = nil + } + if err := unset(rn, rn.Children[left[pos]], left[pos:], 1, false); err != nil { + return false, err + } + if err := unset(rn, rn.Children[right[pos]], right[pos:], 1, true); err != nil { + return false, err + } + return false, nil + default: + panic(fmt.Sprintf("%T: invalid node: %v", n, n)) + } +} + +// unset removes all internal node references either the left most or right most. +// It can meet these scenarios: +// +// - The given path is existent in the trie, unset the associated nodes with the +// specific direction +// - The given path is non-existent in the trie +// - the fork point is a fullnode, the corresponding child pointed by path +// is nil, return +// - the fork point is a shortnode, the shortnode is included in the range, +// keep the entire branch and return. +// - the fork point is a shortnode, the shortnode is excluded in the range, +// unset the entire branch. 
+func unset(parent Node, child Node, key []byte, pos int, removeLeft bool) error { + switch cld := child.(type) { + case *FullNode: + if removeLeft { + for i := 0; i < int(key[pos]); i++ { + cld.Children[i] = nil + } + cld.flags = nodeFlag{dirty: true} + } else { + for i := key[pos] + 1; i < 16; i++ { + cld.Children[i] = nil + } + cld.flags = nodeFlag{dirty: true} + } + return unset(cld, cld.Children[key[pos]], key, pos+1, removeLeft) + case *ShortNode: + if len(key[pos:]) < len(cld.Key) || !bytes.Equal(cld.Key, key[pos:pos+len(cld.Key)]) { + // Find the fork point, it's an non-existent branch. + if removeLeft { + if bytes.Compare(cld.Key, key[pos:]) < 0 { + // The key of fork shortnode is less than the path + // (it belongs to the range), unset the entrie + // branch. The parent must be a fullnode. + fn := parent.(*FullNode) + fn.Children[key[pos-1]] = nil + } else { + // The key of fork shortnode is greater than the + // path(it doesn't belong to the range), keep + // it with the cached hash available. + } + } else { + if bytes.Compare(cld.Key, key[pos:]) > 0 { + // The key of fork shortnode is greater than the + // path(it belongs to the range), unset the entrie + // branch. The parent must be a fullnode. + fn := parent.(*FullNode) + fn.Children[key[pos-1]] = nil + } else { + // The key of fork shortnode is less than the + // path(it doesn't belong to the range), keep + // it with the cached hash available. + } + } + return nil + } + if _, ok := cld.Val.(ValueNode); ok { + fn := parent.(*FullNode) + fn.Children[key[pos-1]] = nil + return nil + } + cld.flags = nodeFlag{dirty: true} + return unset(cld, cld.Val, key, pos+len(cld.Key), removeLeft) + case nil: + // If the node is nil, then it's a child of the fork point + // fullnode(it's a non-existent branch). + return nil + default: + panic("it shouldn't happen") // hashNode, valueNode + } +} + +// hasRightElement returns the indicator whether there exists more elements +// in the right side of the given path. 
The given path can point to an existent +// key or a non-existent one. This function has the assumption that the whole +// path should already be resolved. +func hasRightElement(node Node, key []byte) bool { + pos, key := 0, KeybytesToHex(key) + for node != nil { + switch rn := node.(type) { + case *FullNode: + for i := key[pos] + 1; i < 16; i++ { + if rn.Children[i] != nil { + return true + } + } + node, pos = rn.Children[key[pos]], pos+1 + case *ShortNode: + if len(key)-pos < len(rn.Key) || !bytes.Equal(rn.Key, key[pos:pos+len(rn.Key)]) { + return bytes.Compare(rn.Key, key[pos:]) > 0 + } + node, pos = rn.Val, pos+len(rn.Key) + case ValueNode: + return false // We have resolved the whole path + default: + panic(fmt.Sprintf("%T: invalid node: %v", node, node)) // hashnode + } + } + return false +} + +/* +// VerifyRangeProof checks whether the given leaf nodes and edge proof +// can prove the given trie leaves range is matched with the specific root. +// Besides, the range should be consecutive (no gap inside) and monotonic +// increasing. +// +// Note the given proof actually contains two edge proofs. Both of them can +// be non-existent proofs. For example the first proof is for a non-existent +// key 0x03, the last proof is for a non-existent key 0x10. The given batch +// leaves are [0x04, 0x05, .. 0x09]. It's still feasible to prove the given +// batch is valid. +// +// The firstKey is paired with firstProof, not necessarily the same as keys[0] +// (unless firstProof is an existent proof). Similarly, lastKey and lastProof +// are paired. +// +// Expect the normal case, this function can also be used to verify the following +// range proofs: +// +// - All elements proof. In this case the proof can be nil, but the range should +// be all the leaves in the trie. +// +// - One element proof. In this case no matter the edge proof is a non-existent +// proof or not, we can always verify the correctness of the proof. +// +// - Zero element proof. 
In this case a single non-existent proof is enough to prove. +// Besides, if there are still some other leaves available on the right side, then +// an error will be returned. +// +// Except returning the error to indicate the proof is valid or not, the function will +// also return a flag to indicate whether there exists more accounts/slots in the trie. +// +// Note: This method does not verify that the proof is of minimal form. If the input +// proofs are 'bloated' with neighbour leaves or random data, aside from the 'useful' +// data, then the proof will still be accepted. +func VerifyRangeProof(rootHash common.Hash, firstKey []byte, lastKey []byte, keys [][]byte, values [][]byte, proof ethdb.KeyValueReader) (bool, error) { + if len(keys) != len(values) { + return false, fmt.Errorf("inconsistent proof data, keys: %d, values: %d", len(keys), len(values)) + } + // Ensure the received batch is monotonic increasing. + for i := 0; i < len(keys)-1; i++ { + if bytes.Compare(keys[i], keys[i+1]) >= 0 { + return false, errors.New("range is not monotonically increasing") + } + } + // Special case, there is no edge proof at all. The given range is expected + // to be the whole leaf-set in the trie. + if proof == nil { + tr := NewStackTrie(nil) + for index, key := range keys { + tr.TryUpdate(key, values[index]) + } + if have, want := tr.Hash(), rootHash; have != want { + return false, fmt.Errorf("invalid proof, want hash %x, got %x", want, have) + } + return false, nil // No more elements + } + // Special case, there is a provided edge proof but zero key/value + // pairs, ensure there are no more accounts / slots in the trie. 
+ if len(keys) == 0 { + root, val, err := proofToPath(rootHash, nil, firstKey, proof, true) + if err != nil { + return false, err + } + if val != nil || hasRightElement(root, firstKey) { + return false, errors.New("more entries available") + } + return hasRightElement(root, firstKey), nil + } + // Special case, there is only one element and two edge keys are same. + // In this case, we can't construct two edge paths. So handle it here. + if len(keys) == 1 && bytes.Equal(firstKey, lastKey) { + root, val, err := proofToPath(rootHash, nil, firstKey, proof, false) + if err != nil { + return false, err + } + if !bytes.Equal(firstKey, keys[0]) { + return false, errors.New("correct proof but invalid key") + } + if !bytes.Equal(val, values[0]) { + return false, errors.New("correct proof but invalid data") + } + return hasRightElement(root, firstKey), nil + } + // Ok, in all other cases, we require two edge paths available. + // First check the validity of edge keys. + if bytes.Compare(firstKey, lastKey) >= 0 { + return false, errors.New("invalid edge keys") + } + // todo(rjl493456442) different length edge keys should be supported + if len(firstKey) != len(lastKey) { + return false, errors.New("inconsistent edge keys") + } + // Convert the edge proofs to edge trie paths. Then we can + // have the same tree architecture with the original one. + // For the first edge proof, non-existent proof is allowed. + root, _, err := proofToPath(rootHash, nil, firstKey, proof, true) + if err != nil { + return false, err + } + // Pass the root node here, the second path will be merged + // with the first one. For the last edge proof, non-existent + // proof is also allowed. + root, _, err = proofToPath(rootHash, root, lastKey, proof, true) + if err != nil { + return false, err + } + // Remove all internal references. All the removed parts should + // be re-filled(or re-constructed) by the given leaves range. 
+ empty, err := unsetInternal(root, firstKey, lastKey) + if err != nil { + return false, err + } + // Rebuild the trie with the leaf stream, the shape of trie + // should be same with the original one. + tr := &Trie{root: root, db: NewDatabase(memorydb.New())} + if empty { + tr.root = nil + } + for index, key := range keys { + tr.TryUpdate(key, values[index]) + } + if tr.Hash() != rootHash { + return false, fmt.Errorf("invalid proof, want hash %x, got %x", rootHash, tr.Hash()) + } + return hasRightElement(root, keys[len(keys)-1]), nil +} +*/ + +// get returns the child of the given node. Return nil if the +// node with specified key doesn't exist at all. +// +// There is an additional flag `skipResolved`. If it's set then +// all resolved nodes won't be returned. +func get(tn Node, key []byte, skipResolved bool) ([]byte, Node) { + for { + switch n := tn.(type) { + case *ShortNode: + if len(key) < len(n.Key) || !bytes.Equal(n.Key, key[:len(n.Key)]) { + return nil, nil + } + tn = n.Val + key = key[len(n.Key):] + if !skipResolved { + return key, tn + } + case *FullNode: + tn = n.Children[key[0]] + key = key[1:] + if !skipResolved { + return key, tn + } + case HashNode: + return key, n + case nil: + return key, nil + case ValueNode: + return nil, n + default: + panic(fmt.Sprintf("%T: invalid node: %v", tn, tn)) + } + } +} diff --git a/mpt-witness-generator/trie/secure_trie.go b/mpt-witness-generator/trie/secure_trie.go new file mode 100644 index 0000000000..3bf3a889f3 --- /dev/null +++ b/mpt-witness-generator/trie/secure_trie.go @@ -0,0 +1,231 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package trie + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/privacy-scaling-explorations/mpt-witness-generator/oracle" +) + +// SecureTrie wraps a trie with key hashing. In a secure trie, all +// access operations hash the key using keccak256. This prevents +// calling code from creating long chains of nodes that +// increase the access time. +// +// Contrary to a regular trie, a SecureTrie can only be created with +// New and must have an attached database. The database also stores +// the preimage of each key. +// +// SecureTrie is not safe for concurrent use. +type SecureTrie struct { + trie Trie + hashKeyBuf [common.HashLength]byte + secKeyCache map[string][]byte + secKeyCacheOwner *SecureTrie // Pointer to self, replace the key cache on mismatch +} + +// NewSecure creates a trie with an existing root node from a backing database +// and optional intermediate in-memory node pool. +// +// If root is the zero hash or the sha3 hash of an empty string, the +// trie is initially empty. Otherwise, New will panic if db is nil +// and returns MissingNodeError if the root node cannot be found. +// +// Accessing the trie loads nodes from the database or node pool on demand. +// Loaded nodes are kept around until their 'cache generation' expires. +// A new cache generation is created by each call to Commit. +// cachelimit sets the number of past cache generations to keep. 
+func NewSecure(root common.Hash, db *Database) (*SecureTrie, error) { + if db == nil { + panic("trie.NewSecure called without a database") + } + trie, err := New(root, db) + if err != nil { + return nil, err + } + return &SecureTrie{trie: *trie}, nil +} + +// Get returns the value for key stored in the trie. +// The value bytes must not be modified by the caller. +func (t *SecureTrie) Get(key []byte) []byte { + res, err := t.TryGet(key) + if err != nil { + log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) + } + return res +} + +// TryGet returns the value for key stored in the trie. +// The value bytes must not be modified by the caller. +// If a node was not found in the database, a MissingNodeError is returned. +func (t *SecureTrie) TryGet(key []byte) ([]byte, error) { + return t.trie.TryGet(t.hashKey(key)) +} + +// TryGetNode attempts to retrieve a trie node by compact-encoded path. It is not +// possible to use keybyte-encoding as the path might contain odd nibbles. +func (t *SecureTrie) TryGetNode(path []byte) ([]byte, int, error) { + return t.trie.TryGetNode(path) +} + +// Update associates key with value in the trie. Subsequent calls to +// Get will return value. If value has length zero, any existing value +// is deleted from the trie and calls to Get will return nil. +// +// The value bytes must not be modified by the caller while they are +// stored in the trie. +func (t *SecureTrie) Update(key, value []byte) { + if err := t.TryUpdate(key, value); err != nil { + log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) + } +} + +// TryUpdate associates key with value in the trie. Subsequent calls to +// Get will return value. If value has length zero, any existing value +// is deleted from the trie and calls to Get will return nil. +// +// The value bytes must not be modified by the caller while they are +// stored in the trie. +// +// If a node was not found in the database, a MissingNodeError is returned. 
+func (t *SecureTrie) TryUpdate(key, value []byte) error { + hk := t.hashKey(key) + err := t.trie.TryUpdate(hk, value) + if err != nil { + return err + } + t.getSecKeyCache()[string(hk)] = common.CopyBytes(key) + return nil +} + +// MPT circuit test generating. +func (t *SecureTrie) TryUpdateAlwaysHash(key, value []byte) error { + // hk := t.hashKey(key) + h := NewHasher(false) + h.sha.Reset() + h.sha.Write(key) + h.sha.Read(t.hashKeyBuf[:]) + returnHasherToPool(h) + hk := t.hashKeyBuf[:] + + err := t.trie.TryUpdate(hk, value) + if err != nil { + return err + } + t.getSecKeyCache()[string(hk)] = common.CopyBytes(key) + return nil +} + +// Delete removes any existing value for key from the trie. +func (t *SecureTrie) Delete(key []byte) { + if err := t.TryDelete(key); err != nil { + log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) + } +} + +// TryDelete removes any existing value for key from the trie. +// If a node was not found in the database, a MissingNodeError is returned. +func (t *SecureTrie) TryDelete(key []byte) error { + hk := t.hashKey(key) + delete(t.getSecKeyCache(), string(hk)) + return t.trie.TryDelete(hk) +} + +// GetKey returns the sha3 preimage of a hashed key that was +// previously used to store a value. +/*func (t *SecureTrie) GetKey(shaKey []byte) []byte { + if key, ok := t.getSecKeyCache()[string(shaKey)]; ok { + return key + } + return t.trie.db.preimage(common.BytesToHash(shaKey)) +}*/ + +// Commit writes all nodes and the secure hash pre-images to the trie's database. +// Nodes are stored with their sha3 hash as the key. +// +// Committing flushes nodes from memory. Subsequent Get calls will load nodes +// from the database. 
+func (t *SecureTrie) Commit(onleaf LeafCallback) (root common.Hash, err error) { + // Write all the pre-images to the actual disk database + /*if len(t.getSecKeyCache()) > 0 { + if t.trie.db.preimages != nil { // Ugly direct check but avoids the below write lock + t.trie.db.lock.Lock() + for hk, key := range t.secKeyCache { + t.trie.db.insertPreimage(common.BytesToHash([]byte(hk)), key) + } + t.trie.db.lock.Unlock() + } + t.secKeyCache = make(map[string][]byte) + }*/ + // Commit the trie to its intermediate node database + return t.trie.Commit(onleaf) +} + +// Hash returns the root hash of SecureTrie. It does not write to the +// database and can be used even if the trie doesn't have one. +func (t *SecureTrie) Hash() common.Hash { + return t.trie.Hash() +} + +// Copy returns a copy of SecureTrie. +func (t *SecureTrie) Copy() *SecureTrie { + cpy := *t + return &cpy +} + +// NodeIterator returns an iterator that returns nodes of the underlying trie. Iteration +// starts at the key after the given start key. +func (t *SecureTrie) NodeIterator(start []byte) NodeIterator { + return t.trie.NodeIterator(start) +} + +// hashKey returns the hash of key as an ephemeral buffer. +// The caller must not hold onto the return value because it will become +// invalid on the next call to hashKey or secKey. +func (t *SecureTrie) hashKey(key []byte) []byte { + if !oracle.PreventHashingInSecureTrie { + h := NewHasher(false) + h.sha.Reset() + h.sha.Write(key) + h.sha.Read(t.hashKeyBuf[:]) + returnHasherToPool(h) + return t.hashKeyBuf[:] + } else { + // For generating special tests for MPT circuit. + return key + } +} + +// getSecKeyCache returns the current secure key cache, creating a new one if +// ownership changed (i.e. the current secure trie is a copy of another owning +// the actual cache). 
+func (t *SecureTrie) getSecKeyCache() map[string][]byte { + if t != t.secKeyCacheOwner { + t.secKeyCacheOwner = t + t.secKeyCache = make(map[string][]byte) + } + return t.secKeyCache +} + +// For Merkle Patricia Trie generator. +func (t *SecureTrie) GetRoot() Node { + return t.trie.root +} diff --git a/mpt-witness-generator/trie/stacktrie.go b/mpt-witness-generator/trie/stacktrie.go new file mode 100644 index 0000000000..9f3a63a23f --- /dev/null +++ b/mpt-witness-generator/trie/stacktrie.go @@ -0,0 +1,772 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package trie + +import ( + "bufio" + "bytes" + "encoding/gob" + "errors" + "fmt" + "io" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/privacy-scaling-explorations/mpt-witness-generator/types" + + //"github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" +) + +var ErrCommitDisabled = errors.New("no database for committing") + +var stPool = sync.Pool{ + New: func() interface{} { + return NewStackTrie(nil) + }, +} + +func stackTrieFromPool(db ethdb.KeyValueWriter) *StackTrie { + st := stPool.Get().(*StackTrie) + st.db = db + return st +} + +func returnToPool(st *StackTrie) { + st.Reset() + stPool.Put(st) +} + +// StackTrie is a trie implementation that expects keys to be inserted +// in order. Once it determines that a subtree will no longer be inserted +// into, it will hash it and free up the memory it uses. +type StackTrie struct { + nodeType uint8 // node type (as in branch, ext, leaf) + val []byte // value contained by this node if it's a leaf + key []byte // key chunk covered by this (full|ext) node + keyOffset int // offset of the key chunk inside a full key + children [16]*StackTrie // list of children (for fullnodes and exts) + db ethdb.KeyValueWriter // Pointer to the commit db, can be nil +} + +// NewStackTrie allocates and initializes an empty trie. +func NewStackTrie(db ethdb.KeyValueWriter) *StackTrie { + return &StackTrie{ + nodeType: emptyNode, + db: db, + } +} + +// NewFromBinary initialises a serialized stacktrie with the given db. 
+func NewFromBinary(data []byte, db ethdb.KeyValueWriter) (*StackTrie, error) { + var st StackTrie + if err := st.UnmarshalBinary(data); err != nil { + return nil, err + } + // If a database is used, we need to recursively add it to every child + if db != nil { + st.setDb(db) + } + return &st, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (st *StackTrie) MarshalBinary() (data []byte, err error) { + var ( + b bytes.Buffer + w = bufio.NewWriter(&b) + ) + if err := gob.NewEncoder(w).Encode(struct { + Nodetype uint8 + Val []byte + Key []byte + KeyOffset uint8 + }{ + st.nodeType, + st.val, + st.key, + uint8(st.keyOffset), + }); err != nil { + return nil, err + } + for _, child := range st.children { + if child == nil { + w.WriteByte(0) + continue + } + w.WriteByte(1) + if childData, err := child.MarshalBinary(); err != nil { + return nil, err + } else { + w.Write(childData) + } + } + w.Flush() + return b.Bytes(), nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (st *StackTrie) UnmarshalBinary(data []byte) error { + r := bytes.NewReader(data) + return st.unmarshalBinary(r) +} + +func (st *StackTrie) unmarshalBinary(r io.Reader) error { + var dec struct { + Nodetype uint8 + Val []byte + Key []byte + KeyOffset uint8 + } + gob.NewDecoder(r).Decode(&dec) + st.nodeType = dec.Nodetype + st.val = dec.Val + st.key = dec.Key + st.keyOffset = int(dec.KeyOffset) + + var hasChild = make([]byte, 1) + for i := range st.children { + if _, err := r.Read(hasChild); err != nil { + return err + } else if hasChild[0] == 0 { + continue + } + var child StackTrie + child.unmarshalBinary(r) + st.children[i] = &child + } + return nil +} + +func (st *StackTrie) setDb(db ethdb.KeyValueWriter) { + st.db = db + for _, child := range st.children { + if child != nil { + child.setDb(db) + } + } +} + +func newLeaf(ko int, key, val []byte, db ethdb.KeyValueWriter) *StackTrie { + st := stackTrieFromPool(db) + st.nodeType = leafNode + st.keyOffset = ko + 
st.key = append(st.key, key[ko:]...) + st.val = val + return st +} + +func newExt(ko int, key []byte, child *StackTrie, db ethdb.KeyValueWriter) *StackTrie { + st := stackTrieFromPool(db) + st.nodeType = extNode + st.keyOffset = ko + st.key = append(st.key, key[ko:]...) + st.children[0] = child + return st +} + +// List all values that StackTrie#nodeType can hold +const ( + emptyNode = iota + branchNode + extNode + leafNode + hashedNode +) + +// TryUpdate inserts a (key, value) pair into the stack trie +func (st *StackTrie) TryUpdate(key, value []byte) error { + k := KeybytesToHex(key) + if len(value) == 0 { + panic("deletion not supported") + } + st.insert(k[:len(k)-1], value) + return nil +} + +func (st *StackTrie) Update(key, value []byte) { + if err := st.TryUpdate(key, value); err != nil { + log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) + } +} + +func (st *StackTrie) Reset() { + //st.db = nil + st.key = st.key[:0] + st.val = nil + for i := range st.children { + st.children[i] = nil + } + st.nodeType = emptyNode + st.keyOffset = 0 +} + +// Helper function that, given a full key, determines the index +// at which the chunk pointed by st.keyOffset is different from +// the same chunk in the full key. +func (st *StackTrie) getDiffIndex(key []byte) int { + diffindex := 0 + for ; diffindex < len(st.key) && st.key[diffindex] == key[st.keyOffset+diffindex]; diffindex++ { + } + return diffindex +} + +// Helper function to that inserts a (key, value) pair into +// the trie. 
+func (st *StackTrie) insert(key, value []byte) { + if key[0] == 1 && key[1] == 0 { + fmt.Println("d") + } + switch st.nodeType { + case branchNode: /* Branch */ + idx := int(key[st.keyOffset]) + // Unresolve elder siblings + for i := idx - 1; i >= 0; i-- { + if st.children[i] != nil { + if st.children[i].nodeType != hashedNode { + st.children[i].hash(true) + } + break + } + } + // Add new child + if st.children[idx] == nil { + st.children[idx] = stackTrieFromPool(st.db) + st.children[idx].keyOffset = st.keyOffset + 1 + } + st.children[idx].insert(key, value) + case extNode: /* Ext */ + // Compare both key chunks and see where they differ + diffidx := st.getDiffIndex(key) + + // Check if chunks are identical. If so, recurse into + // the child node. Otherwise, the key has to be split + // into 1) an optional common prefix, 2) the fullnode + // representing the two differing path, and 3) a leaf + // for each of the differentiated subtrees. + if diffidx == len(st.key) { + // Ext key and key segment are identical, recurse into + // the child node. + st.children[0].insert(key, value) + return + } + // Save the original part. Depending if the break is + // at the extension's last byte or not, create an + // intermediate extension or use the extension's child + // node directly. + var n *StackTrie + if diffidx < len(st.key)-1 { + n = newExt(diffidx+1, st.key, st.children[0], st.db) + } else { + // Break on the last byte, no need to insert + // an extension node: reuse the current node + n = st.children[0] + } + // Convert to hash + n.hash(true) + var p *StackTrie + if diffidx == 0 { + // the break is on the first byte, so + // the current node is converted into + // a branch node. + st.children[0] = nil + p = st + st.nodeType = branchNode + } else { + // the common prefix is at least one byte + // long, insert a new intermediate branch + // node. 
+ st.children[0] = stackTrieFromPool(st.db) + st.children[0].nodeType = branchNode + st.children[0].keyOffset = st.keyOffset + diffidx + p = st.children[0] + } + // Create a leaf for the inserted part + o := newLeaf(st.keyOffset+diffidx+1, key, value, st.db) + + // Insert both child leaves where they belong: + origIdx := st.key[diffidx] + newIdx := key[diffidx+st.keyOffset] + p.children[origIdx] = n + p.children[newIdx] = o + st.key = st.key[:diffidx] + + case leafNode: /* Leaf */ + // Compare both key chunks and see where they differ + diffidx := st.getDiffIndex(key) + + // Overwriting a key isn't supported, which means that + // the current leaf is expected to be split into 1) an + // optional extension for the common prefix of these 2 + // keys, 2) a fullnode selecting the path on which the + // keys differ, and 3) one leaf for the differentiated + // component of each key. + if diffidx >= len(st.key) { + panic("Trying to insert into existing key") + } + + // Check if the split occurs at the first nibble of the + // chunk. In that case, no prefix extnode is necessary. + // Otherwise, create that + var p *StackTrie + if diffidx == 0 { + // Convert current leaf into a branch + st.nodeType = branchNode + p = st + st.children[0] = nil + } else { + // Convert current node into an ext, + // and insert a child branch node. + st.nodeType = extNode + st.children[0] = NewStackTrie(st.db) + st.children[0].nodeType = branchNode + st.children[0].keyOffset = st.keyOffset + diffidx + p = st.children[0] + } + + // Create the two child leaves: the one containing the + // original value and the one containing the new value + // The child leave will be hashed directly in order to + // free up some memory. 
+ origIdx := st.key[diffidx] + p.children[origIdx] = newLeaf(diffidx+1, st.key, st.val, st.db) + p.children[origIdx].hash(true) + + newIdx := key[diffidx+st.keyOffset] + p.children[newIdx] = newLeaf(p.keyOffset+1, key, value, st.db) + + // Finally, cut off the key part that has been passed + // over to the children. + st.key = st.key[:diffidx] + st.val = nil + case emptyNode: /* Empty */ + st.nodeType = leafNode + st.key = key[st.keyOffset:] + st.val = value + case hashedNode: + panic("trying to insert into hash") + default: + panic("invalid type") + } +} + +func (st *StackTrie) branchToHasher(doUpdate bool) *hasher { + if st.nodeType != branchNode { + panic("Converting branch to RLP: wrong node") + } + var nodes [17]Node + for i, child := range st.children { + if child == nil { + nodes[i] = nilValueNode + continue + } + child.hash(doUpdate) + if len(child.val) < 32 { + nodes[i] = rawNode(child.val) + } else { + nodes[i] = HashNode(child.val) + } + if doUpdate { + st.children[i] = nil // Reclaim mem from subtree + returnToPool(child) + } + } + nodes[16] = nilValueNode + + h := NewHasher(false) + defer returnHasherToPool(h) + h.tmp.Reset() + if err := rlp.Encode(&h.tmp, nodes); err != nil { + panic(err) + } + + // h.tmp is branch RLP + return h +} + +func (st *StackTrie) extNodeToHasher(doUpdate bool) *hasher { + if st.nodeType != extNode { + panic("Converting extension node to RLP: wrong node") + } + st.children[0].hash(doUpdate) + h := NewHasher(false) + defer returnHasherToPool(h) + h.tmp.Reset() + var valuenode Node + if len(st.children[0].val) < 32 { + valuenode = rawNode(st.children[0].val) + } else { + valuenode = HashNode(st.children[0].val) + } + n := struct { + Key []byte + Val Node + }{ + Key: HexToCompact(st.key), + Val: valuenode, + } + if err := rlp.Encode(&h.tmp, n); err != nil { + panic(err) + } + if doUpdate { + returnToPool(st.children[0]) + st.children[0] = nil // Reclaim mem from subtree + } + + // h.tmp is extension node RLP + return h +} + +// 
hash() hashes the node 'st' and converts it into 'hashedNode', if possible. +// Possible outcomes: +// 1. The rlp-encoded value was >= 32 bytes: +// - Then the 32-byte `hash` will be accessible in `st.val`. +// - And the 'st.type' will be 'hashedNode' +// +// 2. The rlp-encoded value was < 32 bytes +// - Then the <32 byte rlp-encoded value will be accessible in 'st.val'. +// - And the 'st.type' will be 'hashedNode' AGAIN +// +// This method will also: +// set 'st.type' to hashedNode +// clear 'st.key' +func (st *StackTrie) hash(doUpdate bool) { + /* Shortcut if node is already hashed */ + if st.nodeType == hashedNode { + return + } + // The 'hasher' is taken from a pool, but we don't actually + // claim an instance until all children are done with their hashing, + // and we actually need one + var h *hasher + + switch st.nodeType { + case branchNode: + h = st.branchToHasher(doUpdate) + case extNode: + h = st.extNodeToHasher(doUpdate) + case leafNode: + h = NewHasher(false) + defer returnHasherToPool(h) + h.tmp.Reset() + + k := make([]byte, len(st.key)) + copy(k, st.key) + k = append(k, byte(16)) + + if doUpdate { + st.key = k + } + sz := hexToCompactInPlace(k) + n := [][]byte{k[:sz], st.val} + if err := rlp.Encode(&h.tmp, n); err != nil { + panic(err) + } + case emptyNode: + st.val = emptyRoot.Bytes() + st.key = st.key[:0] + st.nodeType = hashedNode + return + default: + panic("Invalid node type") + } + if doUpdate { + st.key = st.key[:0] + st.nodeType = hashedNode + } + if len(h.tmp) < 32 { + st.val = common.CopyBytes(h.tmp) + return + } + // Write the hash to the 'val'. We allocate a new val here to not mutate + // input values + st.val = make([]byte, 32) + h.sha.Reset() + h.sha.Write(h.tmp) + h.sha.Read(st.val) + if st.db != nil { + // TODO! Is it safe to Put the slice here? + // Do all db implementations copy the value provided? 
+ fmt.Println("into db:", st.val) + st.db.Put(st.val, h.tmp) + } +} + +// Hash returns the hash of the current node +func (st *StackTrie) Hash() (h common.Hash) { + st.hash(true) + if len(st.val) != 32 { + // If the node's RLP isn't 32 bytes long, the node will not + // be hashed, and instead contain the rlp-encoding of the + // node. For the top level node, we need to force the hashing. + ret := make([]byte, 32) + h := NewHasher(false) + defer returnHasherToPool(h) + h.sha.Reset() + h.sha.Write(st.val) + h.sha.Read(ret) + return common.BytesToHash(ret) + } + return common.BytesToHash(st.val) +} + +// Commit will firstly hash the entrie trie if it's still not hashed +// and then commit all nodes to the associated database. Actually most +// of the trie nodes MAY have been committed already. The main purpose +// here is to commit the root node. +// +// The associated database is expected, otherwise the whole commit +// functionality should be disabled. +func (st *StackTrie) Commit() (common.Hash, error) { + if st.db == nil { + return common.Hash{}, ErrCommitDisabled + } + st.hash(true) + if len(st.val) != 32 { + // If the node's RLP isn't 32 bytes long, the node will not + // be hashed (and committed), and instead contain the rlp-encoding of the + // node. For the top level node, we need to force the hashing+commit. 
+ ret := make([]byte, 32) + h := NewHasher(false) + defer returnHasherToPool(h) + h.sha.Reset() + h.sha.Write(st.val) + h.sha.Read(ret) + st.db.Put(ret, st.val) + return common.BytesToHash(ret), nil + } + return common.BytesToHash(st.val), nil +} + +func (st *StackTrie) getNodeFromBranchRLP(branch []byte, ind byte) []byte { + start := 2 // when branch[0] == 248 + if branch[0] == 249 { + start = 3 + } + + i := 0 + insideInd := -1 + cInd := byte(0) + for { + if start+i == len(branch)-1 { // -1 because of the last 128 (branch value) + return []byte{0} + } + b := branch[start+i] + if insideInd == -1 && b == 128 { + if cInd == ind { + return []byte{128} + } else { + cInd += 1 + } + } else if insideInd == -1 && b != 128 { + if b == 160 { + if cInd == ind { + return branch[start+i+1 : start+i+1+32] + } + insideInd = 32 + } else { + // non-hashed node + if cInd == ind { + return branch[start+i+1 : start+i+1+int(b)-192] + } + insideInd = int(b) - 192 + } + cInd += 1 + } else { + if insideInd == 1 { + insideInd = -1 + } else { + insideInd-- + } + } + + i++ + } +} + +type StackProof struct { + proofS [][]byte + proofC [][]byte +} + +func (st *StackTrie) UpdateAndGetProof(db ethdb.KeyValueReader, indexBuf, value []byte) (StackProof, error) { + proofS, err := st.GetProof(db, indexBuf) + if err != nil { + return StackProof{}, err + } + + st.Update(indexBuf, value) + + proofC, err := st.GetProof(db, indexBuf) + if err != nil { + return StackProof{}, err + } + + return StackProof{proofS, proofC}, nil +} + +func (st *StackTrie) UpdateAndGetProofs(db ethdb.KeyValueReader, list types.DerivableList) ([]StackProof, error) { + valueBuf := types.EncodeBufferPool.Get().(*bytes.Buffer) + defer types.EncodeBufferPool.Put(valueBuf) + + var proofs []StackProof + + // StackTrie requires values to be inserted in increasing hash order, which is not the + // order that `list` provides hashes in. This insertion sequence ensures that the + // order is correct. 
+ var indexBuf []byte + for i := 1; i < list.Len() && i <= 0x7f; i++ { + indexBuf = rlp.AppendUint64(indexBuf[:0], uint64(i)) + value := types.EncodeForDerive(list, i, valueBuf) + + proof, err := st.UpdateAndGetProof(db, indexBuf, value) + if err != nil { + return nil, err + } + + proofs = append(proofs, proof) + } + if list.Len() > 0 { + indexBuf = rlp.AppendUint64(indexBuf[:0], 0) + value := types.EncodeForDerive(list, 0, valueBuf) + // TODO: get proof + st.Update(indexBuf, value) + } + for i := 0x80; i < list.Len(); i++ { + indexBuf = rlp.AppendUint64(indexBuf[:0], uint64(i)) + value := types.EncodeForDerive(list, i, valueBuf) + // TODO: get proof + st.Update(indexBuf, value) + } + + return proofs, nil +} + +func (st *StackTrie) GetProof(db ethdb.KeyValueReader, key []byte) ([][]byte, error) { + k := KeybytesToHex(key) + i := 0 + + if st.nodeType == emptyNode { + return [][]byte{}, nil + } + + // Note that when root is a leaf, this leaf should be returned even if you ask for a different key (than the key of + // of this leaf) - this is how it works in state GetProof and how it should, because this means the second change + // of the trie. The first change is going from empty trie to the trie with only a leaf. The second change is going + // from a leaf to a branch (or extension node). That means the second change requires a placeholder branch + // and when there is a placeholder branch, the circuit checks that there are only two leaves in a branch and one + // (the one not just added) is the same as in the S proof. This wouldn't work if we would have a placeholder leaf + // in the S proof (another reason is that the S proof with a placeholder leaf would be an empty trie and thus with + // a root of an empty trie - which is not the case in S proof). 
+ if st.nodeType == leafNode { + return [][]byte{st.val}, nil + } + + var proof [][]byte + + var nodes []*StackTrie + + c := st + isHashed := false + + for i < len(k) { + if c.nodeType == extNode { + nodes = append(nodes, c) + c = st.children[0] + } else if c.nodeType == branchNode { + nodes = append(nodes, c) + c = c.children[k[i]] + if c == nil { + break + } + } else if c.nodeType == leafNode { + nodes = append(nodes, c) + break + } else if c.nodeType == hashedNode { + isHashed = true + c_rlp, error := db.Get(c.val) + if error != nil { + fmt.Println(error) + panic(error) + } + fmt.Println(c_rlp) + + proof = append(proof, c_rlp) + + for i < len(k)-1 { + node := st.getNodeFromBranchRLP(c_rlp, k[i]) + i += 1 + fmt.Println(node) + + if len(node) == 1 && node[0] == 128 { // no child at this position + break + } + + c_rlp, error = db.Get(node) + if error != nil { + fmt.Println(error) + panic(error) + } + fmt.Println(c_rlp) + + proof = append(proof, c_rlp) + } + + break + } + } + + // Differently as in the Trie, the StackTrie branch doesn't store children once it is hashed. + // For getting the proof, we need to hash the nodes, but once they are hashed we cannot add children + // to them - which is needed in MPT proof, because we need a proof for each modification (after + // the first modification, some nodes are hashed and we cannot add children to the hashed node). 
+ + if !isHashed { + lNodes := len(nodes) + for i := lNodes - 1; i >= 0; i-- { + node := nodes[i] + fmt.Println(node) + + if node.nodeType == leafNode { + rlp, error := db.Get(node.val) + if error != nil { // TODO: avoid error when RLP + proof = append(proof, node.val) // already have RLP + } else { + proof = append(proof, rlp) + } + } else if node.nodeType == branchNode || node.nodeType == extNode { + node.hash(false) + + rlp, error := db.Get(node.val) + if error != nil { + return nil, error + } + proof = append(proof, rlp) + } + + } + } + + fmt.Println("----------") + for i := 0; i < len(proof); i++ { + fmt.Println(proof[i]) + } + + return proof, nil +} diff --git a/mpt-witness-generator/trie/trie.go b/mpt-witness-generator/trie/trie.go new file mode 100644 index 0000000000..433da4a1b6 --- /dev/null +++ b/mpt-witness-generator/trie/trie.go @@ -0,0 +1,615 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package trie implements Merkle Patricia Tries. +package trie + +import ( + "bytes" + "errors" + "fmt" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" +) + +var ( + // emptyRoot is the known root hash of an empty trie. 
+ emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") + + // emptyState is the known hash of an empty state trie entry. + emptyState = crypto.Keccak256Hash(nil) +) + +// LeafCallback is a callback type invoked when a trie operation reaches a leaf +// node. +// +// The paths is a path tuple identifying a particular trie node either in a single +// trie (account) or a layered trie (account -> storage). Each path in the tuple +// is in the raw format(32 bytes). +// +// The hexpath is a composite hexary path identifying the trie node. All the key +// bytes are converted to the hexary nibbles and composited with the parent path +// if the trie node is in a layered trie. +// +// It's used by state sync and commit to allow handling external references +// between account and storage tries. And also it's used in the state healing +// for extracting the raw states(leaf nodes) with corresponding paths. +type LeafCallback func(paths [][]byte, hexpath []byte, leaf []byte, parent common.Hash) error + +// Trie is a Merkle Patricia Trie. +// The zero value is an empty trie with no database. +// Use New to create a trie that sits on top of a database. +// +// Trie is not safe for concurrent use. +type Trie struct { + db *Database + root Node + // Keep track of the number leafs which have been inserted since the last + // hashing operation. This number will not directly map to the number of + // actually unhashed nodes + unhashed int +} + +// newFlag returns the cache flag value for a newly created node. +func (t *Trie) newFlag() nodeFlag { + return nodeFlag{dirty: true} +} + +// New creates a trie with an existing root node from db. +// +// If root is the zero hash or the sha3 hash of an empty string, the +// trie is initially empty and does not require a database. Otherwise, +// New will panic if db is nil and returns a MissingNodeError if root does +// not exist in the database. Accessing the trie loads nodes from db on demand. 
+func New(root common.Hash, db *Database) (*Trie, error) {
+	if db == nil {
+		panic("trie.New called without a database")
+	}
+	trie := &Trie{
+		db: db,
+	}
+	// A zero root or the empty-trie root needs no resolution; any other
+	// root must already be present in db, otherwise resolveHash returns
+	// a MissingNodeError.
+	if root != (common.Hash{}) && root != emptyRoot {
+		rootnode, err := trie.resolveHash(root[:], nil)
+		if err != nil {
+			return nil, err
+		}
+		trie.root = rootnode
+	}
+	return trie, nil
+}
+
+// NodeIterator returns an iterator that returns nodes of the trie. Iteration starts at
+// the key after the given start key.
+func (t *Trie) NodeIterator(start []byte) NodeIterator {
+	return newNodeIterator(t, start)
+}
+
+// Get returns the value for key stored in the trie.
+// The value bytes must not be modified by the caller.
+func (t *Trie) Get(key []byte) []byte {
+	res, err := t.TryGet(key)
+	if err != nil {
+		log.Error(fmt.Sprintf("Unhandled trie error: %v", err))
+	}
+	return res
+}
+
+// GetRoot returns the in-memory root node of the trie. It exists for the
+// Merkle Patricia Trie witness generator, which needs direct access to the
+// node structure rather than only the root hash.
+func (t *Trie) GetRoot() Node {
+	return t.root
+}
+
+// TryGet returns the value for key stored in the trie.
+// The value bytes must not be modified by the caller.
+// If a node was not found in the database, a MissingNodeError is returned.
+func (t *Trie) TryGet(key []byte) ([]byte, error) {
+	value, newroot, didResolve, err := t.tryGet(t.root, KeybytesToHex(key), 0)
+	// If any hash node was expanded from the database during the walk,
+	// keep the expanded subtrie so subsequent reads avoid re-resolving.
+	if err == nil && didResolve {
+		t.root = newroot
+	}
+	return value, err
+}
+
+// tryGet walks the trie from origNode following the hex-nibble key starting
+// at offset pos. It returns the value (nil if the key is absent), a
+// possibly-updated node to store at this position, whether any hash node was
+// resolved from the database along the way (didResolve, so callers can cache
+// the expanded nodes), and any database error.
+func (t *Trie) tryGet(origNode Node, key []byte, pos int) (value []byte, newnode Node, didResolve bool, err error) {
+	switch n := (origNode).(type) {
+	case nil:
+		return nil, nil, false, nil
+	case ValueNode:
+		return n, n, false, nil
+	case *ShortNode:
+		if len(key)-pos < len(n.Key) || !bytes.Equal(n.Key, key[pos:pos+len(n.Key)]) {
+			// key not found in trie
+			return nil, n, false, nil
+		}
+		value, newnode, didResolve, err = t.tryGet(n.Val, key, pos+len(n.Key))
+		if err == nil && didResolve {
+			// Copy-on-write: only clone the node when a child was expanded.
+			n = n.copy()
+			n.Val = newnode
+		}
+		return value, n, didResolve, err
+	case *FullNode:
+		value, newnode, didResolve, err = t.tryGet(n.Children[key[pos]], key, pos+1)
+		if err == nil && didResolve {
+			n = n.copy()
+			n.Children[key[pos]] = newnode
+		}
+		return value, n, didResolve, err
+	case HashNode:
+		// Node is not loaded yet: fetch it from the database and continue
+		// the walk at the same key position within the expanded node.
+		child, err := t.resolveHash(n, key[:pos])
+		if err != nil {
+			return nil, n, true, err
+		}
+		value, newnode, _, err := t.tryGet(child, key, pos)
+		return value, newnode, true, err
+	default:
+		panic(fmt.Sprintf("%T: invalid node: %v", origNode, origNode))
+	}
+}
+
+// TryGetNode attempts to retrieve a trie node by compact-encoded path. It is not
+// possible to use keybyte-encoding as the path might contain odd nibbles.
+func (t *Trie) TryGetNode(path []byte) ([]byte, int, error) {
+	item, newroot, resolved, err := t.tryGetNode(t.root, compactToHex(path), 0)
+	if err != nil {
+		return nil, resolved, err
+	}
+	// Keep the expanded subtrie if anything was loaded from the database.
+	if resolved > 0 {
+		t.root = newroot
+	}
+	if item == nil {
+		return nil, resolved, nil
+	}
+	return item, resolved, err
+}
+
+// tryGetNode descends from origNode along the hex-nibble path and returns the
+// RLP blob of the node at that path (nil if the path does not exist), a
+// possibly-updated node for this position, and the number of hash nodes that
+// were resolved from the database during the descent.
+func (t *Trie) tryGetNode(origNode Node, path []byte, pos int) (item []byte, newnode Node, resolved int, err error) {
+	// If we reached the requested path, return the current node
+	if pos >= len(path) {
+		// Although we most probably have the original node expanded, encoding
+		// that into consensus form can be nasty (needs to cascade down) and
+		// time consuming. Instead, just pull the hash up from disk directly.
+		var hash HashNode
+		if node, ok := origNode.(HashNode); ok {
+			hash = node
+		} else {
+			hash, _ = origNode.cache()
+		}
+		if hash == nil {
+			return nil, origNode, 0, errors.New("non-consensus node")
+		}
+		blob, err := t.db.Node(common.BytesToHash(hash))
+		return blob, origNode, 1, err
+	}
+	// Path still needs to be traversed, descend into children
+	switch n := (origNode).(type) {
+	case nil:
+		// Non-existent path requested, abort
+		return nil, nil, 0, nil
+
+	case ValueNode:
+		// Path prematurely ended, abort
+		return nil, nil, 0, nil
+
+	case *ShortNode:
+		if len(path)-pos < len(n.Key) || !bytes.Equal(n.Key, path[pos:pos+len(n.Key)]) {
+			// Path branches off from short node
+			return nil, n, 0, nil
+		}
+		item, newnode, resolved, err = t.tryGetNode(n.Val, path, pos+len(n.Key))
+		if err == nil && resolved > 0 {
+			// Copy-on-write: only clone when a child below was expanded.
+			n = n.copy()
+			n.Val = newnode
+		}
+		return item, n, resolved, err
+
+	case *FullNode:
+		item, newnode, resolved, err = t.tryGetNode(n.Children[path[pos]], path, pos+1)
+		if err == nil && resolved > 0 {
+			n = n.copy()
+			n.Children[path[pos]] = newnode
+		}
+		return item, n, resolved, err
+
+	case HashNode:
+		child, err := t.resolveHash(n, path[:pos])
+		if err != nil {
+			return nil, n, 1, err
+		}
+		item, newnode, resolved, err := t.tryGetNode(child, path, pos)
+		return item, newnode, resolved + 1, err
+
+	default:
+		panic(fmt.Sprintf("%T: invalid node: %v", origNode, origNode))
+	}
+}
+
+// Update associates key with value in the trie. Subsequent calls to
+// Get will return value. If value has length zero, any existing value
+// is deleted from the trie and calls to Get will return nil.
+//
+// The value bytes must not be modified by the caller while they are
+// stored in the trie.
+func (t *Trie) Update(key, value []byte) {
+	if err := t.TryUpdate(key, value); err != nil {
+		log.Error(fmt.Sprintf("Unhandled trie error: %v", err))
+	}
+}
+
+// TryUpdate associates key with value in the trie. Subsequent calls to
+// Get will return value. If value has length zero, any existing value
+// is deleted from the trie and calls to Get will return nil.
+//
+// The value bytes must not be modified by the caller while they are
+// stored in the trie.
+//
+// If a node was not found in the database, a MissingNodeError is returned.
+func (t *Trie) TryUpdate(key, value []byte) error { + t.unhashed++ + k := KeybytesToHex(key) + + if len(value) != 0 { + _, n, err := t.insert(t.root, nil, k, ValueNode(value)) + if err != nil { + return err + } + t.root = n + } else { + _, n, err := t.delete(t.root, nil, k) + if err != nil { + return err + } + t.root = n + } + return nil +} + +func (t *Trie) TryUpdateAlwaysHash(key, value []byte) error { + t.unhashed++ + k := KeybytesToHex(key) + + if len(value) != 0 { + _, n, err := t.insert(t.root, nil, k, ValueNode(value)) + if err != nil { + return err + } + t.root = n + } else { + _, n, err := t.delete(t.root, nil, k) + if err != nil { + return err + } + t.root = n + } + return nil +} + +func (t *Trie) insert(n Node, prefix, key []byte, value Node) (bool, Node, error) { + if len(key) == 0 { + if v, ok := n.(ValueNode); ok { + return !bytes.Equal(v, value.(ValueNode)), value, nil + } + return true, value, nil + } + switch n := n.(type) { + case *ShortNode: + matchlen := prefixLen(key, n.Key) + // If the whole key matches, keep this short node as is + // and only update the value. + if matchlen == len(n.Key) { + dirty, nn, err := t.insert(n.Val, append(prefix, key[:matchlen]...), key[matchlen:], value) + if !dirty || err != nil { + return false, n, err + } + return true, &ShortNode{n.Key, nn, t.newFlag()}, nil + } + // Otherwise branch out at the index where they differ. + branch := &FullNode{flags: t.newFlag()} + var err error + _, branch.Children[n.Key[matchlen]], err = t.insert(nil, append(prefix, n.Key[:matchlen+1]...), n.Key[matchlen+1:], n.Val) + if err != nil { + return false, nil, err + } + _, branch.Children[key[matchlen]], err = t.insert(nil, append(prefix, key[:matchlen+1]...), key[matchlen+1:], value) + if err != nil { + return false, nil, err + } + // Replace this shortNode with the branch if it occurs at index 0. + if matchlen == 0 { + return true, branch, nil + } + // Otherwise, replace it with a short node leading up to the branch. 
+ // (this is extension node) + if matchlen > 2 { + fmt.Println(matchlen) + fmt.Println("====") + } + return true, &ShortNode{key[:matchlen], branch, t.newFlag()}, nil + + case *FullNode: + dirty, nn, err := t.insert(n.Children[key[0]], append(prefix, key[0]), key[1:], value) + if !dirty || err != nil { + return false, n, err + } + n = n.copy() + n.flags = t.newFlag() + + n.Children[key[0]] = nn + return true, n, nil + + case nil: + return true, &ShortNode{key, value, t.newFlag()}, nil + + case HashNode: + // We've hit a part of the trie that isn't loaded yet. Load + // the node and insert into it. This leaves all child nodes on + // the path to the value in the trie. + rn, err := t.resolveHash(n, prefix) + if err != nil { + return false, nil, err + } + dirty, nn, err := t.insert(rn, prefix, key, value) + if !dirty || err != nil { + return false, rn, err + } + return true, nn, nil + + default: + panic(fmt.Sprintf("%T: invalid node: %v", n, n)) + } +} + +// Delete removes any existing value for key from the trie. +func (t *Trie) Delete(key []byte) { + if err := t.TryDelete(key); err != nil { + log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) + } +} + +// TryDelete removes any existing value for key from the trie. +// If a node was not found in the database, a MissingNodeError is returned. +func (t *Trie) TryDelete(key []byte) error { + t.unhashed++ + k := KeybytesToHex(key) + _, n, err := t.delete(t.root, nil, k) + if err != nil { + return err + } + t.root = n + return nil +} + +// delete returns the new root of the trie with key deleted. +// It reduces the trie to minimal form by simplifying +// nodes on the way up after deleting recursively. 
+func (t *Trie) delete(n Node, prefix, key []byte) (bool, Node, error) { + switch n := n.(type) { + case *ShortNode: + matchlen := prefixLen(key, n.Key) + if matchlen < len(n.Key) { + return false, n, nil // don't replace n on mismatch + } + if matchlen == len(key) { + return true, nil, nil // remove n entirely for whole matches + } + // The key is longer than n.Key. Remove the remaining suffix + // from the subtrie. Child can never be nil here since the + // subtrie must contain at least two other values with keys + // longer than n.Key. + dirty, child, err := t.delete(n.Val, append(prefix, key[:len(n.Key)]...), key[len(n.Key):]) + if !dirty || err != nil { + return false, n, err + } + switch child := child.(type) { + case *ShortNode: + // Deleting from the subtrie reduced it to another + // short node. Merge the nodes to avoid creating a + // shortNode{..., shortNode{...}}. Use concat (which + // always creates a new slice) instead of append to + // avoid modifying n.Key since it might be shared with + // other nodes. + return true, &ShortNode{concat(n.Key, child.Key...), child.Val, t.newFlag()}, nil + default: + return true, &ShortNode{n.Key, child, t.newFlag()}, nil + } + + case *FullNode: + dirty, nn, err := t.delete(n.Children[key[0]], append(prefix, key[0]), key[1:]) + if !dirty || err != nil { + return false, n, err + } + n = n.copy() + n.flags = t.newFlag() + n.Children[key[0]] = nn + + // Because n is a full node, it must've contained at least two children + // before the delete operation. If the new child value is non-nil, n still + // has at least two children after the deletion, and cannot be reduced to + // a short node. + if nn != nil { + return true, n, nil + } + // Reduction: + // Check how many non-nil entries are left after deleting and + // reduce the full node to a short node if only one entry is + // left. 
Since n must've contained at least two children + // before deletion (otherwise it would not be a full node) n + // can never be reduced to nil. + // + // When the loop is done, pos contains the index of the single + // value that is left in n or -2 if n contains at least two + // values. + pos := -1 + for i, cld := range &n.Children { + if cld != nil { + if pos == -1 { + pos = i + } else { + pos = -2 + break + } + } + } + if pos >= 0 { + if pos != 16 { + //fmt.Println("delete fails here", pos, n.Children, prefix, n.Children[pos]) + // If the remaining entry is a short node, it replaces + // n and its key gets the missing nibble tacked to the + // front. This avoids creating an invalid + // shortNode{..., shortNode{...}}. Since the entry + // might not be loaded yet, resolve it just for this + // check. + + // When node is not resolved in next block's absence proof, + // it must be an extension node if the state transition is + // valid, so we ignore the error here. + cnode, _ := t.resolve(n.Children[pos], prefix) + if cnode, ok := cnode.(*ShortNode); ok { + k := append([]byte{byte(pos)}, cnode.Key...) + return true, &ShortNode{k, cnode.Val, t.newFlag()}, nil + } + } + // Otherwise, n is replaced by a one-nibble short node + // containing the child. + return true, &ShortNode{[]byte{byte(pos)}, n.Children[pos], t.newFlag()}, nil + } + // n still contains at least two values and cannot be reduced. + return true, n, nil + + case ValueNode: + return true, nil, nil + + case nil: + return false, nil, nil + + case HashNode: + fmt.Println("delete hashNode", prefix, key) + // We've hit a part of the trie that isn't loaded yet. Load + // the node and delete from it. This leaves all child nodes on + // the path to the value in the trie. 
+ rn, err := t.resolveHash(n, prefix) + if err != nil { + return false, nil, err + } + dirty, nn, err := t.delete(rn, prefix, key) + if !dirty || err != nil { + return false, rn, err + } + return true, nn, nil + + default: + panic(fmt.Sprintf("%T: invalid node: %v (%v)", n, n, key)) + } +} + +func concat(s1 []byte, s2 ...byte) []byte { + r := make([]byte, len(s1)+len(s2)) + copy(r, s1) + copy(r[len(s1):], s2) + return r +} + +func (t *Trie) resolve(n Node, prefix []byte) (Node, error) { + if n, ok := n.(HashNode); ok { + return t.resolveHash(n, prefix) + } + return n, nil +} + +func (t *Trie) resolveHash(n HashNode, prefix []byte) (Node, error) { + hash := common.BytesToHash(n) + if node := t.db.node(hash); node != nil { + return node, nil + } + return nil, &MissingNodeError{NodeHash: hash, Path: prefix} +} + +// Hash returns the root hash of the trie. It does not write to the +// database and can be used even if the trie doesn't have one. +func (t *Trie) Hash() common.Hash { + hash, cached, _ := t.hashRoot() + t.root = cached + return common.BytesToHash(hash.(HashNode)) +} + +// Commit writes all nodes to the trie's memory database, tracking the internal +// and external (for account tries) references. +func (t *Trie) Commit(onleaf LeafCallback) (root common.Hash, err error) { + if t.db == nil { + panic("commit called on trie with nil database") + } + if t.root == nil { + return emptyRoot, nil + } + // Derive the hash for all dirty nodes first. We hold the assumption + // in the following procedure that all nodes are hashed. + rootHash := t.Hash() + h := newCommitter() + defer returnCommitterToPool(h) + + // Do a quick check if we really need to commit, before we spin + // up goroutines. This can happen e.g. if we load a trie for reading storage + // values, but don't write to it. 
+ if _, dirty := t.root.cache(); !dirty { + return rootHash, nil + } + var wg sync.WaitGroup + if onleaf != nil { + h.onleaf = onleaf + h.leafCh = make(chan *leaf, leafChanSize) + wg.Add(1) + go func() { + defer wg.Done() + h.commitLoop(t.db) + }() + } + var newRoot HashNode + newRoot, err = h.Commit(t.root, t.db) + if onleaf != nil { + // The leafch is created in newCommitter if there was an onleaf callback + // provided. The commitLoop only _reads_ from it, and the commit + // operation was the sole writer. Therefore, it's safe to close this + // channel here. + close(h.leafCh) + wg.Wait() + } + if err != nil { + return common.Hash{}, err + } + t.root = newRoot + return rootHash, nil +} + +// hashRoot calculates the root hash of the given trie +func (t *Trie) hashRoot() (Node, Node, error) { + if t.root == nil { + return HashNode(emptyRoot.Bytes()), nil, nil + } + // If the number of changes is below 100, we let one thread handle it + h := NewHasher(t.unhashed >= 100) + defer returnHasherToPool(h) + hashed, cached := h.Hash(t.root, true) + t.unhashed = 0 + return hashed, cached, nil +} + +// Reset drops the referenced root node and cleans all internal state. +func (t *Trie) Reset() { + t.root = nil + t.unhashed = 0 +} diff --git a/mpt-witness-generator/types/access_list_tx.go b/mpt-witness-generator/types/access_list_tx.go new file mode 100644 index 0000000000..d8d08b48c5 --- /dev/null +++ b/mpt-witness-generator/types/access_list_tx.go @@ -0,0 +1,116 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package types + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" +) + +//go:generate gencodec -type AccessTuple -out gen_access_tuple.go + +// AccessList is an EIP-2930 access list. +type AccessList []AccessTuple + +// AccessTuple is the element type of an access list. +type AccessTuple struct { + Address common.Address `json:"address" gencodec:"required"` + StorageKeys []common.Hash `json:"storageKeys" gencodec:"required"` +} + +// StorageKeys returns the total number of storage keys in the access list. +func (al AccessList) StorageKeys() int { + sum := 0 + for _, tuple := range al { + sum += len(tuple.StorageKeys) + } + return sum +} + +// AccessListTx is the data of EIP-2930 access list transactions. +type AccessListTx struct { + ChainID *big.Int // destination chain ID + Nonce uint64 // nonce of sender account + GasPrice *big.Int // wei per gas + Gas uint64 // gas limit + To *common.Address `rlp:"nil"` // nil means contract creation + Value *big.Int // wei amount + Data []byte // contract invocation input data + AccessList AccessList // EIP-2930 access list + V, R, S *big.Int // signature values +} + +// copy creates a deep copy of the transaction data and initializes all fields. +func (tx *AccessListTx) copy() TxData { + cpy := &AccessListTx{ + Nonce: tx.Nonce, + To: tx.To, // TODO: copy pointed-to address + Data: common.CopyBytes(tx.Data), + Gas: tx.Gas, + // These are copied below. 
+ AccessList: make(AccessList, len(tx.AccessList)), + Value: new(big.Int), + ChainID: new(big.Int), + GasPrice: new(big.Int), + V: new(big.Int), + R: new(big.Int), + S: new(big.Int), + } + copy(cpy.AccessList, tx.AccessList) + if tx.Value != nil { + cpy.Value.Set(tx.Value) + } + if tx.ChainID != nil { + cpy.ChainID.Set(tx.ChainID) + } + if tx.GasPrice != nil { + cpy.GasPrice.Set(tx.GasPrice) + } + if tx.V != nil { + cpy.V.Set(tx.V) + } + if tx.R != nil { + cpy.R.Set(tx.R) + } + if tx.S != nil { + cpy.S.Set(tx.S) + } + return cpy +} + +// accessors for innerTx. +func (tx *AccessListTx) txType() byte { return AccessListTxType } +func (tx *AccessListTx) chainID() *big.Int { return tx.ChainID } +func (tx *AccessListTx) protected() bool { return true } +func (tx *AccessListTx) accessList() AccessList { return tx.AccessList } +func (tx *AccessListTx) data() []byte { return tx.Data } +func (tx *AccessListTx) gas() uint64 { return tx.Gas } +func (tx *AccessListTx) gasPrice() *big.Int { return tx.GasPrice } +func (tx *AccessListTx) gasTipCap() *big.Int { return tx.GasPrice } +func (tx *AccessListTx) gasFeeCap() *big.Int { return tx.GasPrice } +func (tx *AccessListTx) value() *big.Int { return tx.Value } +func (tx *AccessListTx) nonce() uint64 { return tx.Nonce } +func (tx *AccessListTx) to() *common.Address { return tx.To } + +func (tx *AccessListTx) rawSignatureValues() (v, r, s *big.Int) { + return tx.V, tx.R, tx.S +} + +func (tx *AccessListTx) setSignatureValues(chainID, v, r, s *big.Int) { + tx.ChainID, tx.V, tx.R, tx.S = chainID, v, r, s +} diff --git a/mpt-witness-generator/types/block.go b/mpt-witness-generator/types/block.go new file mode 100644 index 0000000000..360f1eb47c --- /dev/null +++ b/mpt-witness-generator/types/block.go @@ -0,0 +1,385 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package types contains data types related to Ethereum consensus. +package types + +import ( + "encoding/binary" + "fmt" + "io" + "math/big" + "reflect" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/rlp" +) + +var ( + EmptyRootHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") + EmptyUncleHash = rlpHash([]*Header(nil)) +) + +// A BlockNonce is a 64-bit hash which proves (combined with the +// mix-hash) that a sufficient amount of computation has been carried +// out on a block. +type BlockNonce [8]byte + +// EncodeNonce converts the given integer to a block nonce. +func EncodeNonce(i uint64) BlockNonce { + var n BlockNonce + binary.BigEndian.PutUint64(n[:], i) + return n +} + +// Uint64 returns the integer value of a block nonce. +func (n BlockNonce) Uint64() uint64 { + return binary.BigEndian.Uint64(n[:]) +} + +// MarshalText encodes n as a hex string with 0x prefix. +func (n BlockNonce) MarshalText() ([]byte, error) { + return hexutil.Bytes(n[:]).MarshalText() +} + +// UnmarshalText implements encoding.TextUnmarshaler. 
+func (n *BlockNonce) UnmarshalText(input []byte) error { + return hexutil.UnmarshalFixedText("BlockNonce", input, n[:]) +} + +//go:generate gencodec -type Header -field-override headerMarshaling -out gen_header_json.go + +// Header represents a block header in the Ethereum blockchain. +type Header struct { + ParentHash common.Hash `json:"parentHash" gencodec:"required"` + UncleHash common.Hash `json:"sha3Uncles" gencodec:"required"` + Coinbase common.Address `json:"miner" gencodec:"required"` + Root common.Hash `json:"stateRoot" gencodec:"required"` + TxHash common.Hash `json:"transactionsRoot" gencodec:"required"` + ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"` + Bloom Bloom `json:"logsBloom" gencodec:"required"` + Difficulty *big.Int `json:"difficulty" gencodec:"required"` + Number *big.Int `json:"number" gencodec:"required"` + GasLimit uint64 `json:"gasLimit" gencodec:"required"` + GasUsed uint64 `json:"gasUsed" gencodec:"required"` + Time uint64 `json:"timestamp" gencodec:"required"` + Extra []byte `json:"extraData" gencodec:"required"` + MixDigest common.Hash `json:"mixHash"` + Nonce BlockNonce `json:"nonce"` + + // BaseFee was added by EIP-1559 and is ignored in legacy headers. + BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"` +} + +// field type overrides for gencodec +type headerMarshaling struct { + Difficulty *hexutil.Big + Number *hexutil.Big + GasLimit hexutil.Uint64 + GasUsed hexutil.Uint64 + Time hexutil.Uint64 + Extra hexutil.Bytes + BaseFee *hexutil.Big + Hash common.Hash `json:"hash"` // adds call to Hash() in MarshalJSON +} + +// Hash returns the block hash of the header, which is simply the keccak256 hash of its +// RLP encoding. +func (h *Header) Hash() common.Hash { + return rlpHash(h) +} + +var headerSize = common.StorageSize(reflect.TypeOf(Header{}).Size()) + +// Size returns the approximate memory used by all internal contents. It is used +// to approximate and limit the memory consumption of various caches. 
+func (h *Header) Size() common.StorageSize { + return headerSize + common.StorageSize(len(h.Extra)+(h.Difficulty.BitLen()+h.Number.BitLen())/8) +} + +// SanityCheck checks a few basic things -- these checks are way beyond what +// any 'sane' production values should hold, and can mainly be used to prevent +// that the unbounded fields are stuffed with junk data to add processing +// overhead +func (h *Header) SanityCheck() error { + if h.Number != nil && !h.Number.IsUint64() { + return fmt.Errorf("too large block number: bitlen %d", h.Number.BitLen()) + } + if h.Difficulty != nil { + if diffLen := h.Difficulty.BitLen(); diffLen > 80 { + return fmt.Errorf("too large block difficulty: bitlen %d", diffLen) + } + } + if eLen := len(h.Extra); eLen > 100*1024 { + return fmt.Errorf("too large block extradata: size %d", eLen) + } + if h.BaseFee != nil { + if bfLen := h.BaseFee.BitLen(); bfLen > 256 { + return fmt.Errorf("too large base fee: bitlen %d", bfLen) + } + } + return nil +} + +// EmptyBody returns true if there is no additional 'body' to complete the header +// that is: no transactions and no uncles. +func (h *Header) EmptyBody() bool { + return h.TxHash == EmptyRootHash && h.UncleHash == EmptyUncleHash +} + +// EmptyReceipts returns true if there are no receipts for this header/block. +func (h *Header) EmptyReceipts() bool { + return h.ReceiptHash == EmptyRootHash +} + +// Body is a simple (mutable, non-safe) data container for storing and moving +// a block's data contents (transactions and uncles) together. +type Body struct { + Transactions []*Transaction + Uncles []*Header +} + +// Block represents an entire block in the Ethereum blockchain. +type Block struct { + header *Header + uncles []*Header + transactions Transactions + + // caches + hash atomic.Value + size atomic.Value + + // Td is used by package core to store the total difficulty + // of the chain up to and including the block. 
+ td *big.Int + + // These fields are used by package eth to track + // inter-peer block relay. + ReceivedAt time.Time + ReceivedFrom interface{} +} + +// "external" block encoding. used for eth protocol, etc. +type extblock struct { + Header *Header + Txs []*Transaction + Uncles []*Header +} + +// NewBlock creates a new block. The input data is copied, +// changes to header and to the field values will not affect the +// block. +// +// The values of TxHash, UncleHash, ReceiptHash and Bloom in header +// are ignored and set to values derived from the given txs, uncles +// and receipts. +func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt, hasher TrieHasher) *Block { + b := &Block{header: CopyHeader(header), td: new(big.Int)} + + // TODO: panic if len(txs) != len(receipts) + if len(txs) == 0 { + b.header.TxHash = EmptyRootHash + } else { + b.header.TxHash = DeriveSha(Transactions(txs), hasher) + b.transactions = make(Transactions, len(txs)) + copy(b.transactions, txs) + } + + if len(receipts) == 0 { + b.header.ReceiptHash = EmptyRootHash + } else { + b.header.ReceiptHash = DeriveSha(Receipts(receipts), hasher) + b.header.Bloom = CreateBloom(receipts) + } + + if len(uncles) == 0 { + b.header.UncleHash = EmptyUncleHash + } else { + b.header.UncleHash = CalcUncleHash(uncles) + b.uncles = make([]*Header, len(uncles)) + for i := range uncles { + b.uncles[i] = CopyHeader(uncles[i]) + } + } + + return b +} + +// NewBlockWithHeader creates a block with the given header data. The +// header data is copied, changes to header and to the field values +// will not affect the block. +func NewBlockWithHeader(header *Header) *Block { + return &Block{header: CopyHeader(header)} +} + +// CopyHeader creates a deep copy of a block header to prevent side effects from +// modifying a header variable. 
+func CopyHeader(h *Header) *Header { + cpy := *h + if cpy.Difficulty = new(big.Int); h.Difficulty != nil { + cpy.Difficulty.Set(h.Difficulty) + } + if cpy.Number = new(big.Int); h.Number != nil { + cpy.Number.Set(h.Number) + } + if h.BaseFee != nil { + cpy.BaseFee = new(big.Int).Set(h.BaseFee) + } + if len(h.Extra) > 0 { + cpy.Extra = make([]byte, len(h.Extra)) + copy(cpy.Extra, h.Extra) + } + return &cpy +} + +// DecodeRLP decodes the Ethereum +func (b *Block) DecodeRLP(s *rlp.Stream) error { + var eb extblock + _, size, _ := s.Kind() + if err := s.Decode(&eb); err != nil { + return err + } + b.header, b.uncles, b.transactions = eb.Header, eb.Uncles, eb.Txs + b.size.Store(common.StorageSize(rlp.ListSize(size))) + return nil +} + +// EncodeRLP serializes b into the Ethereum RLP block format. +func (b *Block) EncodeRLP(w io.Writer) error { + return rlp.Encode(w, extblock{ + Header: b.header, + Txs: b.transactions, + Uncles: b.uncles, + }) +} + +// TODO: copies + +func (b *Block) Uncles() []*Header { return b.uncles } +func (b *Block) Transactions() Transactions { return b.transactions } + +func (b *Block) Transaction(hash common.Hash) *Transaction { + for _, transaction := range b.transactions { + if transaction.Hash() == hash { + return transaction + } + } + return nil +} + +func (b *Block) Number() *big.Int { return new(big.Int).Set(b.header.Number) } +func (b *Block) GasLimit() uint64 { return b.header.GasLimit } +func (b *Block) GasUsed() uint64 { return b.header.GasUsed } +func (b *Block) Difficulty() *big.Int { return new(big.Int).Set(b.header.Difficulty) } +func (b *Block) Time() uint64 { return b.header.Time } + +func (b *Block) NumberU64() uint64 { return b.header.Number.Uint64() } +func (b *Block) MixDigest() common.Hash { return b.header.MixDigest } +func (b *Block) Nonce() uint64 { return binary.BigEndian.Uint64(b.header.Nonce[:]) } +func (b *Block) Bloom() Bloom { return b.header.Bloom } +func (b *Block) Coinbase() common.Address { return 
b.header.Coinbase } +func (b *Block) Root() common.Hash { return b.header.Root } +func (b *Block) ParentHash() common.Hash { return b.header.ParentHash } +func (b *Block) TxHash() common.Hash { return b.header.TxHash } +func (b *Block) ReceiptHash() common.Hash { return b.header.ReceiptHash } +func (b *Block) UncleHash() common.Hash { return b.header.UncleHash } +func (b *Block) Extra() []byte { return common.CopyBytes(b.header.Extra) } + +func (b *Block) BaseFee() *big.Int { + if b.header.BaseFee == nil { + return nil + } + return new(big.Int).Set(b.header.BaseFee) +} + +func (b *Block) Header() *Header { return CopyHeader(b.header) } + +// Body returns the non-header content of the block. +func (b *Block) Body() *Body { return &Body{b.transactions, b.uncles} } + +// Size returns the true RLP encoded storage size of the block, either by encoding +// and returning it, or returning a previsouly cached value. +func (b *Block) Size() common.StorageSize { + if size := b.size.Load(); size != nil { + return size.(common.StorageSize) + } + c := writeCounter(0) + rlp.Encode(&c, b) + b.size.Store(common.StorageSize(c)) + return common.StorageSize(c) +} + +// SanityCheck can be used to prevent that unbounded fields are +// stuffed with junk data to add processing overhead +func (b *Block) SanityCheck() error { + return b.header.SanityCheck() +} + +type writeCounter common.StorageSize + +func (c *writeCounter) Write(b []byte) (int, error) { + *c += writeCounter(len(b)) + return len(b), nil +} + +func CalcUncleHash(uncles []*Header) common.Hash { + if len(uncles) == 0 { + return EmptyUncleHash + } + return rlpHash(uncles) +} + +// WithSeal returns a new block with the data from b but the header replaced with +// the sealed one. +func (b *Block) WithSeal(header *Header) *Block { + cpy := *header + + return &Block{ + header: &cpy, + transactions: b.transactions, + uncles: b.uncles, + } +} + +// WithBody returns a new block with the given transaction and uncle contents. 
+func (b *Block) WithBody(transactions []*Transaction, uncles []*Header) *Block { + block := &Block{ + header: CopyHeader(b.header), + transactions: make([]*Transaction, len(transactions)), + uncles: make([]*Header, len(uncles)), + } + copy(block.transactions, transactions) + for i := range uncles { + block.uncles[i] = CopyHeader(uncles[i]) + } + return block +} + +// Hash returns the keccak256 hash of b's header. +// The hash is computed on the first call and cached thereafter. +func (b *Block) Hash() common.Hash { + if hash := b.hash.Load(); hash != nil { + return hash.(common.Hash) + } + v := b.header.Hash() + b.hash.Store(v) + return v +} + +type Blocks []*Block diff --git a/mpt-witness-generator/types/bloom9.go b/mpt-witness-generator/types/bloom9.go new file mode 100644 index 0000000000..1793c2adc7 --- /dev/null +++ b/mpt-witness-generator/types/bloom9.go @@ -0,0 +1,160 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package types + +import ( + "encoding/binary" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" +) + +type bytesBacked interface { + Bytes() []byte +} + +const ( + // BloomByteLength represents the number of bytes used in a header log bloom. 
+ BloomByteLength = 256 + + // BloomBitLength represents the number of bits used in a header log bloom. + BloomBitLength = 8 * BloomByteLength +) + +// Bloom represents a 2048 bit bloom filter. +type Bloom [BloomByteLength]byte + +// BytesToBloom converts a byte slice to a bloom filter. +// It panics if b is not of suitable size. +func BytesToBloom(b []byte) Bloom { + var bloom Bloom + bloom.SetBytes(b) + return bloom +} + +// SetBytes sets the content of b to the given bytes. +// It panics if d is not of suitable size. +func (b *Bloom) SetBytes(d []byte) { + if len(b) < len(d) { + panic(fmt.Sprintf("bloom bytes too big %d %d", len(b), len(d))) + } + copy(b[BloomByteLength-len(d):], d) +} + +// Add adds d to the filter. Future calls of Test(d) will return true. +func (b *Bloom) Add(d []byte) { + b.add(d, make([]byte, 6)) +} + +// add is internal version of Add, which takes a scratch buffer for reuse (needs to be at least 6 bytes) +func (b *Bloom) add(d []byte, buf []byte) { + i1, v1, i2, v2, i3, v3 := bloomValues(d, buf) + b[i1] |= v1 + b[i2] |= v2 + b[i3] |= v3 +} + +// Big converts b to a big integer. +// Note: Converting a bloom filter to a big.Int and then calling GetBytes +// does not return the same bytes, since big.Int will trim leading zeroes +func (b Bloom) Big() *big.Int { + return new(big.Int).SetBytes(b[:]) +} + +// Bytes returns the backing byte slice of the bloom +func (b Bloom) Bytes() []byte { + return b[:] +} + +// Test checks if the given topic is present in the bloom filter +func (b Bloom) Test(topic []byte) bool { + i1, v1, i2, v2, i3, v3 := bloomValues(topic, make([]byte, 6)) + return v1 == v1&b[i1] && + v2 == v2&b[i2] && + v3 == v3&b[i3] +} + +// MarshalText encodes b as a hex string with 0x prefix. +func (b Bloom) MarshalText() ([]byte, error) { + return hexutil.Bytes(b[:]).MarshalText() +} + +// UnmarshalText b as a hex string with 0x prefix. 
+func (b *Bloom) UnmarshalText(input []byte) error { + return hexutil.UnmarshalFixedText("Bloom", input, b[:]) +} + +// CreateBloom creates a bloom filter out of the give Receipts (+Logs) +func CreateBloom(receipts Receipts) Bloom { + buf := make([]byte, 6) + var bin Bloom + for _, receipt := range receipts { + for _, log := range receipt.Logs { + bin.add(log.Address.Bytes(), buf) + for _, b := range log.Topics { + bin.add(b[:], buf) + } + } + } + return bin +} + +// LogsBloom returns the bloom bytes for the given logs +func LogsBloom(logs []*Log) []byte { + buf := make([]byte, 6) + var bin Bloom + for _, log := range logs { + bin.add(log.Address.Bytes(), buf) + for _, b := range log.Topics { + bin.add(b[:], buf) + } + } + return bin[:] +} + +// Bloom9 returns the bloom filter for the given data +func Bloom9(data []byte) []byte { + var b Bloom + b.SetBytes(data) + return b.Bytes() +} + +// bloomValues returns the bytes (index-value pairs) to set for the given data +func bloomValues(data []byte, hashbuf []byte) (uint, byte, uint, byte, uint, byte) { + sha := hasherPool.Get().(crypto.KeccakState) + sha.Reset() + sha.Write(data) + sha.Read(hashbuf) + hasherPool.Put(sha) + // The actual bits to flip + v1 := byte(1 << (hashbuf[1] & 0x7)) + v2 := byte(1 << (hashbuf[3] & 0x7)) + v3 := byte(1 << (hashbuf[5] & 0x7)) + // The indices for the bytes to OR in + i1 := BloomByteLength - uint((binary.BigEndian.Uint16(hashbuf)&0x7ff)>>3) - 1 + i2 := BloomByteLength - uint((binary.BigEndian.Uint16(hashbuf[2:])&0x7ff)>>3) - 1 + i3 := BloomByteLength - uint((binary.BigEndian.Uint16(hashbuf[4:])&0x7ff)>>3) - 1 + + return i1, v1, i2, v2, i3, v3 +} + +// BloomLookup is a convenience-method to check presence int he bloom filter +func BloomLookup(bin Bloom, topic bytesBacked) bool { + return bin.Test(topic.Bytes()) +} diff --git a/mpt-witness-generator/types/dynamic_fee_tx.go b/mpt-witness-generator/types/dynamic_fee_tx.go new file mode 100644 index 0000000000..c6719a4089 --- /dev/null +++ 
b/mpt-witness-generator/types/dynamic_fee_tx.go @@ -0,0 +1,104 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package types + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" +) + +type DynamicFeeTx struct { + ChainID *big.Int + Nonce uint64 + GasTipCap *big.Int + GasFeeCap *big.Int + Gas uint64 + To *common.Address `rlp:"nil"` // nil means contract creation + Value *big.Int + Data []byte + AccessList AccessList + + // Signature values + V *big.Int `json:"v" gencodec:"required"` + R *big.Int `json:"r" gencodec:"required"` + S *big.Int `json:"s" gencodec:"required"` +} + +// copy creates a deep copy of the transaction data and initializes all fields. +func (tx *DynamicFeeTx) copy() TxData { + cpy := &DynamicFeeTx{ + Nonce: tx.Nonce, + To: tx.To, // TODO: copy pointed-to address + Data: common.CopyBytes(tx.Data), + Gas: tx.Gas, + // These are copied below. 
+ AccessList: make(AccessList, len(tx.AccessList)), + Value: new(big.Int), + ChainID: new(big.Int), + GasTipCap: new(big.Int), + GasFeeCap: new(big.Int), + V: new(big.Int), + R: new(big.Int), + S: new(big.Int), + } + copy(cpy.AccessList, tx.AccessList) + if tx.Value != nil { + cpy.Value.Set(tx.Value) + } + if tx.ChainID != nil { + cpy.ChainID.Set(tx.ChainID) + } + if tx.GasTipCap != nil { + cpy.GasTipCap.Set(tx.GasTipCap) + } + if tx.GasFeeCap != nil { + cpy.GasFeeCap.Set(tx.GasFeeCap) + } + if tx.V != nil { + cpy.V.Set(tx.V) + } + if tx.R != nil { + cpy.R.Set(tx.R) + } + if tx.S != nil { + cpy.S.Set(tx.S) + } + return cpy +} + +// accessors for innerTx. +func (tx *DynamicFeeTx) txType() byte { return DynamicFeeTxType } +func (tx *DynamicFeeTx) chainID() *big.Int { return tx.ChainID } +func (tx *DynamicFeeTx) protected() bool { return true } +func (tx *DynamicFeeTx) accessList() AccessList { return tx.AccessList } +func (tx *DynamicFeeTx) data() []byte { return tx.Data } +func (tx *DynamicFeeTx) gas() uint64 { return tx.Gas } +func (tx *DynamicFeeTx) gasFeeCap() *big.Int { return tx.GasFeeCap } +func (tx *DynamicFeeTx) gasTipCap() *big.Int { return tx.GasTipCap } +func (tx *DynamicFeeTx) gasPrice() *big.Int { return tx.GasFeeCap } +func (tx *DynamicFeeTx) value() *big.Int { return tx.Value } +func (tx *DynamicFeeTx) nonce() uint64 { return tx.Nonce } +func (tx *DynamicFeeTx) to() *common.Address { return tx.To } + +func (tx *DynamicFeeTx) rawSignatureValues() (v, r, s *big.Int) { + return tx.V, tx.R, tx.S +} + +func (tx *DynamicFeeTx) setSignatureValues(chainID, v, r, s *big.Int) { + tx.ChainID, tx.V, tx.R, tx.S = chainID, v, r, s +} diff --git a/mpt-witness-generator/types/hashing.go b/mpt-witness-generator/types/hashing.go new file mode 100644 index 0000000000..c47c9c90b3 --- /dev/null +++ b/mpt-witness-generator/types/hashing.go @@ -0,0 +1,118 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package types + +import ( + "bytes" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/rlp" + "golang.org/x/crypto/sha3" +) + +// hasherPool holds LegacyKeccak256 hashers for rlpHash. +var hasherPool = sync.Pool{ + New: func() interface{} { return sha3.NewLegacyKeccak256() }, +} + +// deriveBufferPool holds temporary encoder buffers for DeriveSha and TX encoding. +var EncodeBufferPool = sync.Pool{ + New: func() interface{} { return new(bytes.Buffer) }, +} + +// rlpHash encodes x and hashes the encoded bytes. +func rlpHash(x interface{}) (h common.Hash) { + sha := hasherPool.Get().(crypto.KeccakState) + defer hasherPool.Put(sha) + sha.Reset() + rlp.Encode(sha, x) + sha.Read(h[:]) + return h +} + +// prefixedRlpHash writes the prefix into the hasher before rlp-encoding x. +// It's used for typed transactions. +func prefixedRlpHash(prefix byte, x interface{}) (h common.Hash) { + sha := hasherPool.Get().(crypto.KeccakState) + defer hasherPool.Put(sha) + sha.Reset() + sha.Write([]byte{prefix}) + rlp.Encode(sha, x) + sha.Read(h[:]) + return h +} + +// TrieHasher is the tool used to calculate the hash of derivable list. +// This is internal, do not use. 
+type TrieHasher interface { + Reset() + Update([]byte, []byte) + Hash() common.Hash +} + +// DerivableList is the input to DeriveSha. +// It is implemented by the 'Transactions' and 'Receipts' types. +// This is internal, do not use these methods. +type DerivableList interface { + Len() int + EncodeIndex(int, *bytes.Buffer) +} + +func EncodeForDerive(list DerivableList, i int, buf *bytes.Buffer) []byte { + buf.Reset() + list.EncodeIndex(i, buf) + // It's really unfortunate that we need to do perform this copy. + // StackTrie holds onto the values until Hash is called, so the values + // written to it must not alias. + return common.CopyBytes(buf.Bytes()) +} + +// DeriveSha creates the tree hashes of transactions and receipts in a block header. +func DeriveSha(list DerivableList, hasher TrieHasher) common.Hash { + return UpdateStackTrie(list, hasher).Hash() +} + +func UpdateStackTrie(list DerivableList, hasher TrieHasher) TrieHasher { + hasher.Reset() + + valueBuf := EncodeBufferPool.Get().(*bytes.Buffer) + defer EncodeBufferPool.Put(valueBuf) + + // StackTrie requires values to be inserted in increasing hash order, which is not the + // order that `list` provides hashes in. This insertion sequence ensures that the + // order is correct. 
+ var indexBuf []byte + for i := 1; i < list.Len() && i <= 0x7f; i++ { + indexBuf = rlp.AppendUint64(indexBuf[:0], uint64(i)) + value := EncodeForDerive(list, i, valueBuf) + hasher.Update(indexBuf, value) + } + if list.Len() > 0 { + indexBuf = rlp.AppendUint64(indexBuf[:0], 0) + value := EncodeForDerive(list, 0, valueBuf) + hasher.Update(indexBuf, value) + } + for i := 0x80; i < list.Len(); i++ { + indexBuf = rlp.AppendUint64(indexBuf[:0], uint64(i)) + value := EncodeForDerive(list, i, valueBuf) + hasher.Update(indexBuf, value) + } + + return hasher +} diff --git a/mpt-witness-generator/types/legacy_tx.go b/mpt-witness-generator/types/legacy_tx.go new file mode 100644 index 0000000000..514010ebbd --- /dev/null +++ b/mpt-witness-generator/types/legacy_tx.go @@ -0,0 +1,112 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package types + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" +) + +// LegacyTx is the transaction data of regular Ethereum transactions. 
+type LegacyTx struct { + Nonce uint64 // nonce of sender account + GasPrice *big.Int // wei per gas + Gas uint64 // gas limit + To *common.Address `rlp:"nil"` // nil means contract creation + Value *big.Int // wei amount + Data []byte // contract invocation input data + V, R, S *big.Int // signature values +} + +// NewTransaction creates an unsigned legacy transaction. +// Deprecated: use NewTx instead. +func NewTransaction(nonce uint64, to common.Address, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *Transaction { + return NewTx(&LegacyTx{ + Nonce: nonce, + To: &to, + Value: amount, + Gas: gasLimit, + GasPrice: gasPrice, + Data: data, + }) +} + +// NewContractCreation creates an unsigned legacy transaction. +// Deprecated: use NewTx instead. +func NewContractCreation(nonce uint64, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *Transaction { + return NewTx(&LegacyTx{ + Nonce: nonce, + Value: amount, + Gas: gasLimit, + GasPrice: gasPrice, + Data: data, + }) +} + +// copy creates a deep copy of the transaction data and initializes all fields. +func (tx *LegacyTx) copy() TxData { + cpy := &LegacyTx{ + Nonce: tx.Nonce, + To: tx.To, // TODO: copy pointed-to address + Data: common.CopyBytes(tx.Data), + Gas: tx.Gas, + // These are initialized below. + Value: new(big.Int), + GasPrice: new(big.Int), + V: new(big.Int), + R: new(big.Int), + S: new(big.Int), + } + if tx.Value != nil { + cpy.Value.Set(tx.Value) + } + if tx.GasPrice != nil { + cpy.GasPrice.Set(tx.GasPrice) + } + if tx.V != nil { + cpy.V.Set(tx.V) + } + if tx.R != nil { + cpy.R.Set(tx.R) + } + if tx.S != nil { + cpy.S.Set(tx.S) + } + return cpy +} + +// accessors for innerTx. 
+func (tx *LegacyTx) txType() byte { return LegacyTxType } +func (tx *LegacyTx) chainID() *big.Int { return deriveChainId(tx.V) } +func (tx *LegacyTx) accessList() AccessList { return nil } +func (tx *LegacyTx) data() []byte { return tx.Data } +func (tx *LegacyTx) gas() uint64 { return tx.Gas } +func (tx *LegacyTx) gasPrice() *big.Int { return tx.GasPrice } +func (tx *LegacyTx) gasTipCap() *big.Int { return tx.GasPrice } +func (tx *LegacyTx) gasFeeCap() *big.Int { return tx.GasPrice } +func (tx *LegacyTx) value() *big.Int { return tx.Value } +func (tx *LegacyTx) nonce() uint64 { return tx.Nonce } +func (tx *LegacyTx) to() *common.Address { return tx.To } + +func (tx *LegacyTx) rawSignatureValues() (v, r, s *big.Int) { + return tx.V, tx.R, tx.S +} + +func (tx *LegacyTx) setSignatureValues(chainID, v, r, s *big.Int) { + tx.V, tx.R, tx.S = v, r, s +} diff --git a/mpt-witness-generator/types/log.go b/mpt-witness-generator/types/log.go new file mode 100644 index 0000000000..88274e39da --- /dev/null +++ b/mpt-witness-generator/types/log.go @@ -0,0 +1,143 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package types + +import ( + "io" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/rlp" +) + +//go:generate gencodec -type Log -field-override logMarshaling -out gen_log_json.go + +// Log represents a contract log event. These events are generated by the LOG opcode and +// stored/indexed by the node. +type Log struct { + // Consensus fields: + // address of the contract that generated the event + Address common.Address `json:"address" gencodec:"required"` + // list of topics provided by the contract. + Topics []common.Hash `json:"topics" gencodec:"required"` + // supplied by the contract, usually ABI-encoded + Data []byte `json:"data" gencodec:"required"` + + // Derived fields. These fields are filled in by the node + // but not secured by consensus. + // block in which the transaction was included + BlockNumber uint64 `json:"blockNumber"` + // hash of the transaction + TxHash common.Hash `json:"transactionHash" gencodec:"required"` + // index of the transaction in the block + TxIndex uint `json:"transactionIndex"` + // hash of the block in which the transaction was included + BlockHash common.Hash `json:"blockHash"` + // index of the log in the block + Index uint `json:"logIndex"` + + // The Removed field is true if this log was reverted due to a chain reorganisation. + // You must pay attention to this field if you receive logs through a filter query. + Removed bool `json:"removed"` +} + +type logMarshaling struct { + Data hexutil.Bytes + BlockNumber hexutil.Uint64 + TxIndex hexutil.Uint + Index hexutil.Uint +} + +type rlpLog struct { + Address common.Address + Topics []common.Hash + Data []byte +} + +// rlpStorageLog is the storage encoding of a log. +type rlpStorageLog rlpLog + +// legacyRlpStorageLog is the previous storage encoding of a log including some redundant fields. 
+type legacyRlpStorageLog struct { + Address common.Address + Topics []common.Hash + Data []byte + BlockNumber uint64 + TxHash common.Hash + TxIndex uint + BlockHash common.Hash + Index uint +} + +// EncodeRLP implements rlp.Encoder. +func (l *Log) EncodeRLP(w io.Writer) error { + return rlp.Encode(w, rlpLog{Address: l.Address, Topics: l.Topics, Data: l.Data}) +} + +// DecodeRLP implements rlp.Decoder. +func (l *Log) DecodeRLP(s *rlp.Stream) error { + var dec rlpLog + err := s.Decode(&dec) + if err == nil { + l.Address, l.Topics, l.Data = dec.Address, dec.Topics, dec.Data + } + return err +} + +// LogForStorage is a wrapper around a Log that flattens and parses the entire content of +// a log including non-consensus fields. +type LogForStorage Log + +// EncodeRLP implements rlp.Encoder. +func (l *LogForStorage) EncodeRLP(w io.Writer) error { + return rlp.Encode(w, rlpStorageLog{ + Address: l.Address, + Topics: l.Topics, + Data: l.Data, + }) +} + +// DecodeRLP implements rlp.Decoder. +// +// Note some redundant fields(e.g. block number, tx hash etc) will be assembled later. +func (l *LogForStorage) DecodeRLP(s *rlp.Stream) error { + blob, err := s.Raw() + if err != nil { + return err + } + var dec rlpStorageLog + err = rlp.DecodeBytes(blob, &dec) + if err == nil { + *l = LogForStorage{ + Address: dec.Address, + Topics: dec.Topics, + Data: dec.Data, + } + } else { + // Try to decode log with previous definition. + var dec legacyRlpStorageLog + err = rlp.DecodeBytes(blob, &dec) + if err == nil { + *l = LogForStorage{ + Address: dec.Address, + Topics: dec.Topics, + Data: dec.Data, + } + } + } + return err +} diff --git a/mpt-witness-generator/types/receipt.go b/mpt-witness-generator/types/receipt.go new file mode 100644 index 0000000000..aef09597fa --- /dev/null +++ b/mpt-witness-generator/types/receipt.go @@ -0,0 +1,397 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package types + +import ( + "bytes" + "errors" + "fmt" + "io" + "math/big" + "unsafe" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" +) + +//go:generate gencodec -type Receipt -field-override receiptMarshaling -out gen_receipt_json.go + +var ( + receiptStatusFailedRLP = []byte{} + receiptStatusSuccessfulRLP = []byte{0x01} +) + +// This error is returned when a typed receipt is decoded, but the string is empty. +var errEmptyTypedReceipt = errors.New("empty typed receipt bytes") + +const ( + // ReceiptStatusFailed is the status code of a transaction if execution failed. + ReceiptStatusFailed = uint64(0) + + // ReceiptStatusSuccessful is the status code of a transaction if execution succeeded. + ReceiptStatusSuccessful = uint64(1) +) + +// Receipt represents the results of a transaction. 
+type Receipt struct { + // Consensus fields: These fields are defined by the Yellow Paper + Type uint8 `json:"type,omitempty"` + PostState []byte `json:"root"` + Status uint64 `json:"status"` + CumulativeGasUsed uint64 `json:"cumulativeGasUsed" gencodec:"required"` + Bloom Bloom `json:"logsBloom" gencodec:"required"` + Logs []*Log `json:"logs" gencodec:"required"` + + // Implementation fields: These fields are added by geth when processing a transaction. + // They are stored in the chain database. + TxHash common.Hash `json:"transactionHash" gencodec:"required"` + ContractAddress common.Address `json:"contractAddress"` + GasUsed uint64 `json:"gasUsed" gencodec:"required"` + + // Inclusion information: These fields provide information about the inclusion of the + // transaction corresponding to this receipt. + BlockHash common.Hash `json:"blockHash,omitempty"` + BlockNumber *big.Int `json:"blockNumber,omitempty"` + TransactionIndex uint `json:"transactionIndex"` +} + +type receiptMarshaling struct { + Type hexutil.Uint64 + PostState hexutil.Bytes + Status hexutil.Uint64 + CumulativeGasUsed hexutil.Uint64 + GasUsed hexutil.Uint64 + BlockNumber *hexutil.Big + TransactionIndex hexutil.Uint +} + +// receiptRLP is the consensus encoding of a receipt. +type receiptRLP struct { + PostStateOrStatus []byte + CumulativeGasUsed uint64 + Bloom Bloom + Logs []*Log +} + +// storedReceiptRLP is the storage encoding of a receipt. +type storedReceiptRLP struct { + PostStateOrStatus []byte + CumulativeGasUsed uint64 + Logs []*LogForStorage +} + +// v4StoredReceiptRLP is the storage encoding of a receipt used in database version 4. +type v4StoredReceiptRLP struct { + PostStateOrStatus []byte + CumulativeGasUsed uint64 + TxHash common.Hash + ContractAddress common.Address + Logs []*LogForStorage + GasUsed uint64 +} + +// v3StoredReceiptRLP is the original storage encoding of a receipt including some unnecessary fields. 
+type v3StoredReceiptRLP struct { + PostStateOrStatus []byte + CumulativeGasUsed uint64 + Bloom Bloom + TxHash common.Hash + ContractAddress common.Address + Logs []*LogForStorage + GasUsed uint64 +} + +// NewReceipt creates a barebone transaction receipt, copying the init fields. +// Deprecated: create receipts using a struct literal instead. +func NewReceipt(root []byte, failed bool, cumulativeGasUsed uint64) *Receipt { + r := &Receipt{ + Type: LegacyTxType, + PostState: common.CopyBytes(root), + CumulativeGasUsed: cumulativeGasUsed, + } + if failed { + r.Status = ReceiptStatusFailed + } else { + r.Status = ReceiptStatusSuccessful + } + return r +} + +// EncodeRLP implements rlp.Encoder, and flattens the consensus fields of a receipt +// into an RLP stream. If no post state is present, byzantium fork is assumed. +func (r *Receipt) EncodeRLP(w io.Writer) error { + data := &receiptRLP{r.statusEncoding(), r.CumulativeGasUsed, r.Bloom, r.Logs} + if r.Type == LegacyTxType { + return rlp.Encode(w, data) + } + buf := EncodeBufferPool.Get().(*bytes.Buffer) + defer EncodeBufferPool.Put(buf) + buf.Reset() + buf.WriteByte(r.Type) + if err := rlp.Encode(buf, data); err != nil { + return err + } + return rlp.Encode(w, buf.Bytes()) +} + +// DecodeRLP implements rlp.Decoder, and loads the consensus fields of a receipt +// from an RLP stream. +func (r *Receipt) DecodeRLP(s *rlp.Stream) error { + kind, _, err := s.Kind() + switch { + case err != nil: + return err + case kind == rlp.List: + // It's a legacy receipt. + var dec receiptRLP + if err := s.Decode(&dec); err != nil { + return err + } + r.Type = LegacyTxType + return r.setFromRLP(dec) + case kind == rlp.String: + // It's an EIP-2718 typed tx receipt. 
+ b, err := s.Bytes() + if err != nil { + return err + } + if len(b) == 0 { + return errEmptyTypedReceipt + } + r.Type = b[0] + if r.Type == AccessListTxType || r.Type == DynamicFeeTxType { + var dec receiptRLP + if err := rlp.DecodeBytes(b[1:], &dec); err != nil { + return err + } + return r.setFromRLP(dec) + } + return ErrTxTypeNotSupported + default: + return rlp.ErrExpectedList + } +} + +func (r *Receipt) setFromRLP(data receiptRLP) error { + r.CumulativeGasUsed, r.Bloom, r.Logs = data.CumulativeGasUsed, data.Bloom, data.Logs + return r.setStatus(data.PostStateOrStatus) +} + +func (r *Receipt) setStatus(postStateOrStatus []byte) error { + switch { + case bytes.Equal(postStateOrStatus, receiptStatusSuccessfulRLP): + r.Status = ReceiptStatusSuccessful + case bytes.Equal(postStateOrStatus, receiptStatusFailedRLP): + r.Status = ReceiptStatusFailed + case len(postStateOrStatus) == len(common.Hash{}): + r.PostState = postStateOrStatus + default: + return fmt.Errorf("invalid receipt status %x", postStateOrStatus) + } + return nil +} + +func (r *Receipt) statusEncoding() []byte { + if len(r.PostState) == 0 { + if r.Status == ReceiptStatusFailed { + return receiptStatusFailedRLP + } + return receiptStatusSuccessfulRLP + } + return r.PostState +} + +// Size returns the approximate memory used by all internal contents. It is used +// to approximate and limit the memory consumption of various caches. +func (r *Receipt) Size() common.StorageSize { + size := common.StorageSize(unsafe.Sizeof(*r)) + common.StorageSize(len(r.PostState)) + size += common.StorageSize(len(r.Logs)) * common.StorageSize(unsafe.Sizeof(Log{})) + for _, log := range r.Logs { + size += common.StorageSize(len(log.Topics)*common.HashLength + len(log.Data)) + } + return size +} + +// ReceiptForStorage is a wrapper around a Receipt that flattens and parses the +// entire content of a receipt, as opposed to only the consensus fields originally. 
+type ReceiptForStorage Receipt + +// EncodeRLP implements rlp.Encoder, and flattens all content fields of a receipt +// into an RLP stream. +func (r *ReceiptForStorage) EncodeRLP(w io.Writer) error { + enc := &storedReceiptRLP{ + PostStateOrStatus: (*Receipt)(r).statusEncoding(), + CumulativeGasUsed: r.CumulativeGasUsed, + Logs: make([]*LogForStorage, len(r.Logs)), + } + for i, log := range r.Logs { + enc.Logs[i] = (*LogForStorage)(log) + } + return rlp.Encode(w, enc) +} + +// DecodeRLP implements rlp.Decoder, and loads both consensus and implementation +// fields of a receipt from an RLP stream. +func (r *ReceiptForStorage) DecodeRLP(s *rlp.Stream) error { + // Retrieve the entire receipt blob as we need to try multiple decoders + blob, err := s.Raw() + if err != nil { + return err + } + // Try decoding from the newest format for future proofness, then the older one + // for old nodes that just upgraded. V4 was an intermediate unreleased format so + // we do need to decode it, but it's not common (try last). 
+ if err := decodeStoredReceiptRLP(r, blob); err == nil { + return nil + } + if err := decodeV3StoredReceiptRLP(r, blob); err == nil { + return nil + } + return decodeV4StoredReceiptRLP(r, blob) +} + +func decodeStoredReceiptRLP(r *ReceiptForStorage, blob []byte) error { + var stored storedReceiptRLP + if err := rlp.DecodeBytes(blob, &stored); err != nil { + return err + } + if err := (*Receipt)(r).setStatus(stored.PostStateOrStatus); err != nil { + return err + } + r.CumulativeGasUsed = stored.CumulativeGasUsed + r.Logs = make([]*Log, len(stored.Logs)) + for i, log := range stored.Logs { + r.Logs[i] = (*Log)(log) + } + r.Bloom = CreateBloom(Receipts{(*Receipt)(r)}) + + return nil +} + +func decodeV4StoredReceiptRLP(r *ReceiptForStorage, blob []byte) error { + var stored v4StoredReceiptRLP + if err := rlp.DecodeBytes(blob, &stored); err != nil { + return err + } + if err := (*Receipt)(r).setStatus(stored.PostStateOrStatus); err != nil { + return err + } + r.CumulativeGasUsed = stored.CumulativeGasUsed + r.TxHash = stored.TxHash + r.ContractAddress = stored.ContractAddress + r.GasUsed = stored.GasUsed + r.Logs = make([]*Log, len(stored.Logs)) + for i, log := range stored.Logs { + r.Logs[i] = (*Log)(log) + } + r.Bloom = CreateBloom(Receipts{(*Receipt)(r)}) + + return nil +} + +func decodeV3StoredReceiptRLP(r *ReceiptForStorage, blob []byte) error { + var stored v3StoredReceiptRLP + if err := rlp.DecodeBytes(blob, &stored); err != nil { + return err + } + if err := (*Receipt)(r).setStatus(stored.PostStateOrStatus); err != nil { + return err + } + r.CumulativeGasUsed = stored.CumulativeGasUsed + r.Bloom = stored.Bloom + r.TxHash = stored.TxHash + r.ContractAddress = stored.ContractAddress + r.GasUsed = stored.GasUsed + r.Logs = make([]*Log, len(stored.Logs)) + for i, log := range stored.Logs { + r.Logs[i] = (*Log)(log) + } + return nil +} + +// Receipts implements DerivableList for receipts. 
+type Receipts []*Receipt + +// Len returns the number of receipts in this list. +func (rs Receipts) Len() int { return len(rs) } + +// EncodeIndex encodes the i'th receipt to w. +func (rs Receipts) EncodeIndex(i int, w *bytes.Buffer) { + r := rs[i] + data := &receiptRLP{r.statusEncoding(), r.CumulativeGasUsed, r.Bloom, r.Logs} + switch r.Type { + case LegacyTxType: + rlp.Encode(w, data) + case AccessListTxType: + w.WriteByte(AccessListTxType) + rlp.Encode(w, data) + case DynamicFeeTxType: + w.WriteByte(DynamicFeeTxType) + rlp.Encode(w, data) + default: + // For unsupported types, write nothing. Since this is for + // DeriveSha, the error will be caught matching the derived hash + // to the block. + } +} + +// DeriveFields fills the receipts with their computed fields based on consensus +// data and contextual infos like containing block and transactions. +func (r Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, number uint64, txs Transactions) error { + signer := MakeSigner(config, new(big.Int).SetUint64(number)) + + logIndex := uint(0) + if len(txs) != len(r) { + return errors.New("transaction and receipt count mismatch") + } + for i := 0; i < len(r); i++ { + // The transaction type and hash can be retrieved from the transaction itself + r[i].Type = txs[i].Type() + r[i].TxHash = txs[i].Hash() + + // block location fields + r[i].BlockHash = hash + r[i].BlockNumber = new(big.Int).SetUint64(number) + r[i].TransactionIndex = uint(i) + + // The contract address can be derived from the transaction itself + if txs[i].To() == nil { + // Deriving the signer is expensive, only do if it's actually needed + from, _ := Sender(signer, txs[i]) + r[i].ContractAddress = crypto.CreateAddress(from, txs[i].Nonce()) + } + // The used gas can be calculated based on previous r + if i == 0 { + r[i].GasUsed = r[i].CumulativeGasUsed + } else { + r[i].GasUsed = r[i].CumulativeGasUsed - r[i-1].CumulativeGasUsed + } + // The derived log fields can simply be set from the 
block and transaction + for j := 0; j < len(r[i].Logs); j++ { + r[i].Logs[j].BlockNumber = number + r[i].Logs[j].BlockHash = hash + r[i].Logs[j].TxHash = r[i].TxHash + r[i].Logs[j].TxIndex = uint(i) + r[i].Logs[j].Index = logIndex + logIndex++ + } + } + return nil +} diff --git a/mpt-witness-generator/types/transaction.go b/mpt-witness-generator/types/transaction.go new file mode 100644 index 0000000000..9214b95ebd --- /dev/null +++ b/mpt-witness-generator/types/transaction.go @@ -0,0 +1,634 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package types + +import ( + "bytes" + "container/heap" + "errors" + "io" + "math/big" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/rlp" +) + +var ( + ErrInvalidSig = errors.New("invalid transaction v, r, s values") + ErrUnexpectedProtection = errors.New("transaction type does not supported EIP-155 protected signatures") + ErrInvalidTxType = errors.New("transaction type not valid in this context") + ErrTxTypeNotSupported = errors.New("transaction type not supported") + ErrGasFeeCapTooLow = errors.New("fee cap less than base fee") + errEmptyTypedTx = errors.New("empty typed transaction bytes") +) + +// Transaction types. +const ( + LegacyTxType = iota + AccessListTxType + DynamicFeeTxType +) + +// Transaction is an Ethereum transaction. +type Transaction struct { + inner TxData // Consensus contents of a transaction + time time.Time // Time first seen locally (spam avoidance) + + // caches + hash atomic.Value + size atomic.Value + from atomic.Value +} + +// NewTx creates a new transaction. +func NewTx(inner TxData) *Transaction { + tx := new(Transaction) + tx.setDecoded(inner.copy(), 0) + return tx +} + +// TxData is the underlying data of a transaction. +// +// This is implemented by DynamicFeeTx, LegacyTx and AccessListTx. 
+type TxData interface { + txType() byte // returns the type ID + copy() TxData // creates a deep copy and initializes all fields + + chainID() *big.Int + accessList() AccessList + data() []byte + gas() uint64 + gasPrice() *big.Int + gasTipCap() *big.Int + gasFeeCap() *big.Int + value() *big.Int + nonce() uint64 + to() *common.Address + + rawSignatureValues() (v, r, s *big.Int) + setSignatureValues(chainID, v, r, s *big.Int) +} + +// EncodeRLP implements rlp.Encoder +func (tx *Transaction) EncodeRLP(w io.Writer) error { + if tx.Type() == LegacyTxType { + return rlp.Encode(w, tx.inner) + } + // It's an EIP-2718 typed TX envelope. + buf := EncodeBufferPool.Get().(*bytes.Buffer) + defer EncodeBufferPool.Put(buf) + buf.Reset() + if err := tx.encodeTyped(buf); err != nil { + return err + } + return rlp.Encode(w, buf.Bytes()) +} + +// encodeTyped writes the canonical encoding of a typed transaction to w. +func (tx *Transaction) encodeTyped(w *bytes.Buffer) error { + w.WriteByte(tx.Type()) + return rlp.Encode(w, tx.inner) +} + +// MarshalBinary returns the canonical encoding of the transaction. +// For legacy transactions, it returns the RLP encoding. For EIP-2718 typed +// transactions, it returns the type and payload. +func (tx *Transaction) MarshalBinary() ([]byte, error) { + if tx.Type() == LegacyTxType { + return rlp.EncodeToBytes(tx.inner) + } + var buf bytes.Buffer + err := tx.encodeTyped(&buf) + return buf.Bytes(), err +} + +// DecodeRLP implements rlp.Decoder +func (tx *Transaction) DecodeRLP(s *rlp.Stream) error { + kind, size, err := s.Kind() + switch { + case err != nil: + return err + case kind == rlp.List: + // It's a legacy transaction. + var inner LegacyTx + err := s.Decode(&inner) + if err == nil { + tx.setDecoded(&inner, int(rlp.ListSize(size))) + } + return err + case kind == rlp.String: + // It's an EIP-2718 typed TX envelope. 
+ var b []byte + if b, err = s.Bytes(); err != nil { + return err + } + inner, err := tx.decodeTyped(b) + if err == nil { + tx.setDecoded(inner, len(b)) + } + return err + default: + return rlp.ErrExpectedList + } +} + +// UnmarshalBinary decodes the canonical encoding of transactions. +// It supports legacy RLP transactions and EIP2718 typed transactions. +func (tx *Transaction) UnmarshalBinary(b []byte) error { + if len(b) > 0 && b[0] > 0x7f { + // It's a legacy transaction. + var data LegacyTx + err := rlp.DecodeBytes(b, &data) + if err != nil { + return err + } + tx.setDecoded(&data, len(b)) + return nil + } + // It's an EIP2718 typed transaction envelope. + inner, err := tx.decodeTyped(b) + if err != nil { + return err + } + tx.setDecoded(inner, len(b)) + return nil +} + +// decodeTyped decodes a typed transaction from the canonical format. +func (tx *Transaction) decodeTyped(b []byte) (TxData, error) { + if len(b) == 0 { + return nil, errEmptyTypedTx + } + switch b[0] { + case AccessListTxType: + var inner AccessListTx + err := rlp.DecodeBytes(b[1:], &inner) + return &inner, err + case DynamicFeeTxType: + var inner DynamicFeeTx + err := rlp.DecodeBytes(b[1:], &inner) + return &inner, err + default: + return nil, ErrTxTypeNotSupported + } +} + +// setDecoded sets the inner transaction and size after decoding. +func (tx *Transaction) setDecoded(inner TxData, size int) { + tx.inner = inner + tx.time = time.Now() + if size > 0 { + tx.size.Store(common.StorageSize(size)) + } +} + +func sanityCheckSignature(v *big.Int, r *big.Int, s *big.Int, maybeProtected bool) error { + if isProtectedV(v) && !maybeProtected { + return ErrUnexpectedProtection + } + + var plainV byte + if isProtectedV(v) { + chainID := deriveChainId(v).Uint64() + plainV = byte(v.Uint64() - 35 - 2*chainID) + } else if maybeProtected { + // Only EIP-155 signatures can be optionally protected. Since + // we determined this v value is not protected, it must be a + // raw 27 or 28. 
+ plainV = byte(v.Uint64() - 27) + } else { + // If the signature is not optionally protected, we assume it + // must already be equal to the recovery id. + plainV = byte(v.Uint64()) + } + if !crypto.ValidateSignatureValues(plainV, r, s, false) { + return ErrInvalidSig + } + + return nil +} + +func isProtectedV(V *big.Int) bool { + if V.BitLen() <= 8 { + v := V.Uint64() + return v != 27 && v != 28 && v != 1 && v != 0 + } + // anything not 27 or 28 is considered protected + return true +} + +// Protected says whether the transaction is replay-protected. +func (tx *Transaction) Protected() bool { + switch tx := tx.inner.(type) { + case *LegacyTx: + return tx.V != nil && isProtectedV(tx.V) + default: + return true + } +} + +// Type returns the transaction type. +func (tx *Transaction) Type() uint8 { + return tx.inner.txType() +} + +// ChainId returns the EIP155 chain ID of the transaction. The return value will always be +// non-nil. For legacy transactions which are not replay-protected, the return value is +// zero. +func (tx *Transaction) ChainId() *big.Int { + return tx.inner.chainID() +} + +// Data returns the input data of the transaction. +func (tx *Transaction) Data() []byte { return tx.inner.data() } + +// AccessList returns the access list of the transaction. +func (tx *Transaction) AccessList() AccessList { return tx.inner.accessList() } + +// Gas returns the gas limit of the transaction. +func (tx *Transaction) Gas() uint64 { return tx.inner.gas() } + +// GasPrice returns the gas price of the transaction. +func (tx *Transaction) GasPrice() *big.Int { return new(big.Int).Set(tx.inner.gasPrice()) } + +// GasTipCap returns the gasTipCap per gas of the transaction. +func (tx *Transaction) GasTipCap() *big.Int { return new(big.Int).Set(tx.inner.gasTipCap()) } + +// GasFeeCap returns the fee cap per gas of the transaction. 
+func (tx *Transaction) GasFeeCap() *big.Int { return new(big.Int).Set(tx.inner.gasFeeCap()) } + +// Value returns the ether amount of the transaction. +func (tx *Transaction) Value() *big.Int { return new(big.Int).Set(tx.inner.value()) } + +// Nonce returns the sender account nonce of the transaction. +func (tx *Transaction) Nonce() uint64 { return tx.inner.nonce() } + +// To returns the recipient address of the transaction. +// For contract-creation transactions, To returns nil. +func (tx *Transaction) To() *common.Address { + // Copy the pointed-to address. + ito := tx.inner.to() + if ito == nil { + return nil + } + cpy := *ito + return &cpy +} + +// Cost returns gas * gasPrice + value. +func (tx *Transaction) Cost() *big.Int { + total := new(big.Int).Mul(tx.GasPrice(), new(big.Int).SetUint64(tx.Gas())) + total.Add(total, tx.Value()) + return total +} + +// RawSignatureValues returns the V, R, S signature values of the transaction. +// The return values should not be modified by the caller. +func (tx *Transaction) RawSignatureValues() (v, r, s *big.Int) { + return tx.inner.rawSignatureValues() +} + +// GasFeeCapCmp compares the fee cap of two transactions. +func (tx *Transaction) GasFeeCapCmp(other *Transaction) int { + return tx.inner.gasFeeCap().Cmp(other.inner.gasFeeCap()) +} + +// GasFeeCapIntCmp compares the fee cap of the transaction against the given fee cap. +func (tx *Transaction) GasFeeCapIntCmp(other *big.Int) int { + return tx.inner.gasFeeCap().Cmp(other) +} + +// GasTipCapCmp compares the gasTipCap of two transactions. +func (tx *Transaction) GasTipCapCmp(other *Transaction) int { + return tx.inner.gasTipCap().Cmp(other.inner.gasTipCap()) +} + +// GasTipCapIntCmp compares the gasTipCap of the transaction against the given gasTipCap. +func (tx *Transaction) GasTipCapIntCmp(other *big.Int) int { + return tx.inner.gasTipCap().Cmp(other) +} + +// EffectiveGasTip returns the effective miner gasTipCap for the given base fee. 
+// Note: if the effective gasTipCap is negative, this method returns both error +// the actual negative value, _and_ ErrGasFeeCapTooLow +func (tx *Transaction) EffectiveGasTip(baseFee *big.Int) (*big.Int, error) { + if baseFee == nil { + return tx.GasTipCap(), nil + } + var err error + gasFeeCap := tx.GasFeeCap() + if gasFeeCap.Cmp(baseFee) == -1 { + err = ErrGasFeeCapTooLow + } + return math.BigMin(tx.GasTipCap(), gasFeeCap.Sub(gasFeeCap, baseFee)), err +} + +// EffectiveGasTipValue is identical to EffectiveGasTip, but does not return an +// error in case the effective gasTipCap is negative +func (tx *Transaction) EffectiveGasTipValue(baseFee *big.Int) *big.Int { + effectiveTip, _ := tx.EffectiveGasTip(baseFee) + return effectiveTip +} + +// EffectiveGasTipCmp compares the effective gasTipCap of two transactions assuming the given base fee. +func (tx *Transaction) EffectiveGasTipCmp(other *Transaction, baseFee *big.Int) int { + if baseFee == nil { + return tx.GasTipCapCmp(other) + } + return tx.EffectiveGasTipValue(baseFee).Cmp(other.EffectiveGasTipValue(baseFee)) +} + +// EffectiveGasTipIntCmp compares the effective gasTipCap of a transaction to the given gasTipCap. +func (tx *Transaction) EffectiveGasTipIntCmp(other *big.Int, baseFee *big.Int) int { + if baseFee == nil { + return tx.GasTipCapIntCmp(other) + } + return tx.EffectiveGasTipValue(baseFee).Cmp(other) +} + +// Hash returns the transaction hash. +func (tx *Transaction) Hash() common.Hash { + if hash := tx.hash.Load(); hash != nil { + return hash.(common.Hash) + } + + var h common.Hash + if tx.Type() == LegacyTxType { + h = rlpHash(tx.inner) + } else { + h = prefixedRlpHash(tx.Type(), tx.inner) + } + tx.hash.Store(h) + return h +} + +// Size returns the true RLP encoded storage size of the transaction, either by +// encoding and returning it, or returning a previously cached value. 
+func (tx *Transaction) Size() common.StorageSize { + if size := tx.size.Load(); size != nil { + return size.(common.StorageSize) + } + c := writeCounter(0) + rlp.Encode(&c, &tx.inner) + tx.size.Store(common.StorageSize(c)) + return common.StorageSize(c) +} + +// WithSignature returns a new transaction with the given signature. +// This signature needs to be in the [R || S || V] format where V is 0 or 1. +func (tx *Transaction) WithSignature(signer Signer, sig []byte) (*Transaction, error) { + r, s, v, err := signer.SignatureValues(tx, sig) + if err != nil { + return nil, err + } + cpy := tx.inner.copy() + cpy.setSignatureValues(signer.ChainID(), v, r, s) + return &Transaction{inner: cpy, time: tx.time}, nil +} + +// Transactions implements DerivableList for transactions. +type Transactions []*Transaction + +// Len returns the length of s. +func (s Transactions) Len() int { return len(s) } + +// EncodeIndex encodes the i'th transaction to w. Note that this does not check for errors +// because we assume that *Transaction will only ever contain valid txs that were either +// constructed by decoding or via public API in this package. +func (s Transactions) EncodeIndex(i int, w *bytes.Buffer) { + tx := s[i] + if tx.Type() == LegacyTxType { + rlp.Encode(w, tx.inner) + } else { + tx.encodeTyped(w) + } +} + +// TxDifference returns a new set which is the difference between a and b. +func TxDifference(a, b Transactions) Transactions { + keep := make(Transactions, 0, len(a)) + + remove := make(map[common.Hash]struct{}) + for _, tx := range b { + remove[tx.Hash()] = struct{}{} + } + + for _, tx := range a { + if _, ok := remove[tx.Hash()]; !ok { + keep = append(keep, tx) + } + } + + return keep +} + +// TxByNonce implements the sort interface to allow sorting a list of transactions +// by their nonces. This is usually only useful for sorting transactions from a +// single account, otherwise a nonce comparison doesn't make much sense. 
+type TxByNonce Transactions + +func (s TxByNonce) Len() int { return len(s) } +func (s TxByNonce) Less(i, j int) bool { return s[i].Nonce() < s[j].Nonce() } +func (s TxByNonce) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// TxWithMinerFee wraps a transaction with its gas price or effective miner gasTipCap +type TxWithMinerFee struct { + tx *Transaction + minerFee *big.Int +} + +// NewTxWithMinerFee creates a wrapped transaction, calculating the effective +// miner gasTipCap if a base fee is provided. +// Returns error in case of a negative effective miner gasTipCap. +func NewTxWithMinerFee(tx *Transaction, baseFee *big.Int) (*TxWithMinerFee, error) { + minerFee, err := tx.EffectiveGasTip(baseFee) + if err != nil { + return nil, err + } + return &TxWithMinerFee{ + tx: tx, + minerFee: minerFee, + }, nil +} + +// TxByPriceAndTime implements both the sort and the heap interface, making it useful +// for all at once sorting as well as individually adding and removing elements. +type TxByPriceAndTime []*TxWithMinerFee + +func (s TxByPriceAndTime) Len() int { return len(s) } +func (s TxByPriceAndTime) Less(i, j int) bool { + // If the prices are equal, use the time the transaction was first seen for + // deterministic sorting + cmp := s[i].minerFee.Cmp(s[j].minerFee) + if cmp == 0 { + return s[i].tx.time.Before(s[j].tx.time) + } + return cmp > 0 +} +func (s TxByPriceAndTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func (s *TxByPriceAndTime) Push(x interface{}) { + *s = append(*s, x.(*TxWithMinerFee)) +} + +func (s *TxByPriceAndTime) Pop() interface{} { + old := *s + n := len(old) + x := old[n-1] + *s = old[0 : n-1] + return x +} + +// TransactionsByPriceAndNonce represents a set of transactions that can return +// transactions in a profit-maximizing sorted order, while supporting removing +// entire batches of transactions for non-executable accounts. 
+type TransactionsByPriceAndNonce struct { + txs map[common.Address]Transactions // Per account nonce-sorted list of transactions + heads TxByPriceAndTime // Next transaction for each unique account (price heap) + signer Signer // Signer for the set of transactions + baseFee *big.Int // Current base fee +} + +// NewTransactionsByPriceAndNonce creates a transaction set that can retrieve +// price sorted transactions in a nonce-honouring way. +// +// Note, the input map is reowned so the caller should not interact any more with +// if after providing it to the constructor. +func NewTransactionsByPriceAndNonce(signer Signer, txs map[common.Address]Transactions, baseFee *big.Int) *TransactionsByPriceAndNonce { + // Initialize a price and received time based heap with the head transactions + heads := make(TxByPriceAndTime, 0, len(txs)) + for from, accTxs := range txs { + acc, _ := Sender(signer, accTxs[0]) + wrapped, err := NewTxWithMinerFee(accTxs[0], baseFee) + // Remove transaction if sender doesn't match from, or if wrapping fails. + if acc != from || err != nil { + delete(txs, from) + continue + } + heads = append(heads, wrapped) + txs[from] = accTxs[1:] + } + heap.Init(&heads) + + // Assemble and return the transaction set + return &TransactionsByPriceAndNonce{ + txs: txs, + heads: heads, + signer: signer, + baseFee: baseFee, + } +} + +// Peek returns the next transaction by price. +func (t *TransactionsByPriceAndNonce) Peek() *Transaction { + if len(t.heads) == 0 { + return nil + } + return t.heads[0].tx +} + +// Shift replaces the current best head with the next one from the same account. 
+func (t *TransactionsByPriceAndNonce) Shift() { + acc, _ := Sender(t.signer, t.heads[0].tx) + if txs, ok := t.txs[acc]; ok && len(txs) > 0 { + if wrapped, err := NewTxWithMinerFee(txs[0], t.baseFee); err == nil { + t.heads[0], t.txs[acc] = wrapped, txs[1:] + heap.Fix(&t.heads, 0) + return + } + } + heap.Pop(&t.heads) +} + +// Pop removes the best transaction, *not* replacing it with the next one from +// the same account. This should be used when a transaction cannot be executed +// and hence all subsequent ones should be discarded from the same account. +func (t *TransactionsByPriceAndNonce) Pop() { + heap.Pop(&t.heads) +} + +// Message is a fully derived transaction and implements core.Message +// +// NOTE: In a future PR this will be removed. +type Message struct { + to *common.Address + from common.Address + nonce uint64 + amount *big.Int + gasLimit uint64 + gasPrice *big.Int + gasFeeCap *big.Int + gasTipCap *big.Int + data []byte + accessList AccessList + isFake bool +} + +func NewMessage(from common.Address, to *common.Address, nonce uint64, amount *big.Int, gasLimit uint64, gasPrice, gasFeeCap, gasTipCap *big.Int, data []byte, accessList AccessList, isFake bool) Message { + return Message{ + from: from, + to: to, + nonce: nonce, + amount: amount, + gasLimit: gasLimit, + gasPrice: gasPrice, + gasFeeCap: gasFeeCap, + gasTipCap: gasTipCap, + data: data, + accessList: accessList, + isFake: isFake, + } +} + +// AsMessage returns the transaction as a core.Message. +func (tx *Transaction) AsMessage(s Signer, baseFee *big.Int) (Message, error) { + msg := Message{ + nonce: tx.Nonce(), + gasLimit: tx.Gas(), + gasPrice: new(big.Int).Set(tx.GasPrice()), + gasFeeCap: new(big.Int).Set(tx.GasFeeCap()), + gasTipCap: new(big.Int).Set(tx.GasTipCap()), + to: tx.To(), + amount: tx.Value(), + data: tx.Data(), + accessList: tx.AccessList(), + isFake: false, + } + // If baseFee provided, set gasPrice to effectiveGasPrice. 
+ if baseFee != nil { + msg.gasPrice = math.BigMin(msg.gasPrice.Add(msg.gasTipCap, baseFee), msg.gasFeeCap) + } + var err error + msg.from, err = Sender(s, tx) + return msg, err +} + +func (m Message) From() common.Address { return m.from } +func (m Message) To() *common.Address { return m.to } +func (m Message) GasPrice() *big.Int { return m.gasPrice } +func (m Message) GasFeeCap() *big.Int { return m.gasFeeCap } +func (m Message) GasTipCap() *big.Int { return m.gasTipCap } +func (m Message) Value() *big.Int { return m.amount } +func (m Message) Gas() uint64 { return m.gasLimit } +func (m Message) Nonce() uint64 { return m.nonce } +func (m Message) Data() []byte { return m.data } +func (m Message) AccessList() AccessList { return m.accessList } +func (m Message) IsFake() bool { return m.isFake } diff --git a/mpt-witness-generator/types/transaction_signing.go b/mpt-witness-generator/types/transaction_signing.go new file mode 100644 index 0000000000..1d0d2a4c75 --- /dev/null +++ b/mpt-witness-generator/types/transaction_signing.go @@ -0,0 +1,520 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package types + +import ( + "crypto/ecdsa" + "errors" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" +) + +var ErrInvalidChainId = errors.New("invalid chain id for signer") + +// sigCache is used to cache the derived sender and contains +// the signer used to derive it. +type sigCache struct { + signer Signer + from common.Address +} + +// MakeSigner returns a Signer based on the given chain config and block number. +func MakeSigner(config *params.ChainConfig, blockNumber *big.Int) Signer { + var signer Signer + switch { + case config.IsLondon(blockNumber): + signer = NewLondonSigner(config.ChainID) + case config.IsBerlin(blockNumber): + signer = NewEIP2930Signer(config.ChainID) + case config.IsEIP155(blockNumber): + signer = NewEIP155Signer(config.ChainID) + case config.IsHomestead(blockNumber): + signer = HomesteadSigner{} + default: + signer = FrontierSigner{} + } + return signer +} + +// LatestSigner returns the 'most permissive' Signer available for the given chain +// configuration. Specifically, this enables support of EIP-155 replay protection and +// EIP-2930 access list transactions when their respective forks are scheduled to occur at +// any block number in the chain config. +// +// Use this in transaction-handling code where the current block number is unknown. If you +// have the current block number available, use MakeSigner instead. +func LatestSigner(config *params.ChainConfig) Signer { + if config.ChainID != nil { + if config.LondonBlock != nil { + return NewLondonSigner(config.ChainID) + } + if config.BerlinBlock != nil { + return NewEIP2930Signer(config.ChainID) + } + if config.EIP155Block != nil { + return NewEIP155Signer(config.ChainID) + } + } + return HomesteadSigner{} +} + +// LatestSignerForChainID returns the 'most permissive' Signer available. 
Specifically, +// this enables support for EIP-155 replay protection and all implemented EIP-2718 +// transaction types if chainID is non-nil. +// +// Use this in transaction-handling code where the current block number and fork +// configuration are unknown. If you have a ChainConfig, use LatestSigner instead. +// If you have a ChainConfig and know the current block number, use MakeSigner instead. +func LatestSignerForChainID(chainID *big.Int) Signer { + if chainID == nil { + return HomesteadSigner{} + } + return NewLondonSigner(chainID) +} + +// SignTx signs the transaction using the given signer and private key. +func SignTx(tx *Transaction, s Signer, prv *ecdsa.PrivateKey) (*Transaction, error) { + h := s.Hash(tx) + sig, err := crypto.Sign(h[:], prv) + if err != nil { + return nil, err + } + return tx.WithSignature(s, sig) +} + +// SignNewTx creates a transaction and signs it. +func SignNewTx(prv *ecdsa.PrivateKey, s Signer, txdata TxData) (*Transaction, error) { + tx := NewTx(txdata) + h := s.Hash(tx) + sig, err := crypto.Sign(h[:], prv) + if err != nil { + return nil, err + } + return tx.WithSignature(s, sig) +} + +// MustSignNewTx creates a transaction and signs it. +// This panics if the transaction cannot be signed. +func MustSignNewTx(prv *ecdsa.PrivateKey, s Signer, txdata TxData) *Transaction { + tx, err := SignNewTx(prv, s, txdata) + if err != nil { + panic(err) + } + return tx +} + +// Sender returns the address derived from the signature (V, R, S) using secp256k1 +// elliptic curve and an error if it failed deriving or upon an incorrect +// signature. +// +// Sender may cache the address, allowing it to be used regardless of +// signing method. The cache is invalidated if the cached signer does +// not match the signer used in the current call. 
+func Sender(signer Signer, tx *Transaction) (common.Address, error) { + if sc := tx.from.Load(); sc != nil { + sigCache := sc.(sigCache) + // If the signer used to derive from in a previous + // call is not the same as used current, invalidate + // the cache. + if sigCache.signer.Equal(signer) { + return sigCache.from, nil + } + } + + addr, err := signer.Sender(tx) + if err != nil { + return common.Address{}, err + } + tx.from.Store(sigCache{signer: signer, from: addr}) + return addr, nil +} + +// Signer encapsulates transaction signature handling. The name of this type is slightly +// misleading because Signers don't actually sign, they're just for validating and +// processing of signatures. +// +// Note that this interface is not a stable API and may change at any time to accommodate +// new protocol rules. +type Signer interface { + // Sender returns the sender address of the transaction. + Sender(tx *Transaction) (common.Address, error) + + // SignatureValues returns the raw R, S, V values corresponding to the + // given signature. + SignatureValues(tx *Transaction, sig []byte) (r, s, v *big.Int, err error) + ChainID() *big.Int + + // Hash returns 'signature hash', i.e. the transaction hash that is signed by the + // private key. This hash does not uniquely identify the transaction. + Hash(tx *Transaction) common.Hash + + // Equal returns true if the given signer is the same as the receiver. + Equal(Signer) bool +} + +type londonSigner struct{ eip2930Signer } + +// NewLondonSigner returns a signer that accepts +// - EIP-1559 dynamic fee transactions +// - EIP-2930 access list transactions, +// - EIP-155 replay protected transactions, and +// - legacy Homestead transactions. 
+func NewLondonSigner(chainId *big.Int) Signer { + return londonSigner{eip2930Signer{NewEIP155Signer(chainId)}} +} + +func (s londonSigner) Sender(tx *Transaction) (common.Address, error) { + if tx.Type() != DynamicFeeTxType { + return s.eip2930Signer.Sender(tx) + } + V, R, S := tx.RawSignatureValues() + // DynamicFee txs are defined to use 0 and 1 as their recovery + // id, add 27 to become equivalent to unprotected Homestead signatures. + V = new(big.Int).Add(V, big.NewInt(27)) + if tx.ChainId().Cmp(s.chainId) != 0 { + return common.Address{}, ErrInvalidChainId + } + return recoverPlain(s.Hash(tx), R, S, V, true) +} + +func (s londonSigner) Equal(s2 Signer) bool { + x, ok := s2.(londonSigner) + return ok && x.chainId.Cmp(s.chainId) == 0 +} + +func (s londonSigner) SignatureValues(tx *Transaction, sig []byte) (R, S, V *big.Int, err error) { + txdata, ok := tx.inner.(*DynamicFeeTx) + if !ok { + return s.eip2930Signer.SignatureValues(tx, sig) + } + // Check that chain ID of tx matches the signer. We also accept ID zero here, + // because it indicates that the chain ID was not specified in the tx. + if txdata.ChainID.Sign() != 0 && txdata.ChainID.Cmp(s.chainId) != 0 { + return nil, nil, nil, ErrInvalidChainId + } + R, S, _ = decodeSignature(sig) + V = big.NewInt(int64(sig[64])) + return R, S, V, nil +} + +// Hash returns the hash to be signed by the sender. +// It does not uniquely identify the transaction. +func (s londonSigner) Hash(tx *Transaction) common.Hash { + if tx.Type() != DynamicFeeTxType { + return s.eip2930Signer.Hash(tx) + } + return prefixedRlpHash( + tx.Type(), + []interface{}{ + s.chainId, + tx.Nonce(), + tx.GasTipCap(), + tx.GasFeeCap(), + tx.Gas(), + tx.To(), + tx.Value(), + tx.Data(), + tx.AccessList(), + }) +} + +type eip2930Signer struct{ EIP155Signer } + +// NewEIP2930Signer returns a signer that accepts EIP-2930 access list transactions, +// EIP-155 replay protected transactions, and legacy Homestead transactions. 
+func NewEIP2930Signer(chainId *big.Int) Signer { + return eip2930Signer{NewEIP155Signer(chainId)} +} + +func (s eip2930Signer) ChainID() *big.Int { + return s.chainId +} + +func (s eip2930Signer) Equal(s2 Signer) bool { + x, ok := s2.(eip2930Signer) + return ok && x.chainId.Cmp(s.chainId) == 0 +} + +func (s eip2930Signer) Sender(tx *Transaction) (common.Address, error) { + V, R, S := tx.RawSignatureValues() + switch tx.Type() { + case LegacyTxType: + if !tx.Protected() { + return HomesteadSigner{}.Sender(tx) + } + V = new(big.Int).Sub(V, s.chainIdMul) + V.Sub(V, big8) + case AccessListTxType: + // AL txs are defined to use 0 and 1 as their recovery + // id, add 27 to become equivalent to unprotected Homestead signatures. + V = new(big.Int).Add(V, big.NewInt(27)) + default: + return common.Address{}, ErrTxTypeNotSupported + } + if tx.ChainId().Cmp(s.chainId) != 0 { + return common.Address{}, ErrInvalidChainId + } + return recoverPlain(s.Hash(tx), R, S, V, true) +} + +func (s eip2930Signer) SignatureValues(tx *Transaction, sig []byte) (R, S, V *big.Int, err error) { + switch txdata := tx.inner.(type) { + case *LegacyTx: + return s.EIP155Signer.SignatureValues(tx, sig) + case *AccessListTx: + // Check that chain ID of tx matches the signer. We also accept ID zero here, + // because it indicates that the chain ID was not specified in the tx. + if txdata.ChainID.Sign() != 0 && txdata.ChainID.Cmp(s.chainId) != 0 { + return nil, nil, nil, ErrInvalidChainId + } + R, S, _ = decodeSignature(sig) + V = big.NewInt(int64(sig[64])) + default: + return nil, nil, nil, ErrTxTypeNotSupported + } + return R, S, V, nil +} + +// Hash returns the hash to be signed by the sender. +// It does not uniquely identify the transaction. 
+func (s eip2930Signer) Hash(tx *Transaction) common.Hash { + switch tx.Type() { + case LegacyTxType: + return rlpHash([]interface{}{ + tx.Nonce(), + tx.GasPrice(), + tx.Gas(), + tx.To(), + tx.Value(), + tx.Data(), + s.chainId, uint(0), uint(0), + }) + case AccessListTxType: + return prefixedRlpHash( + tx.Type(), + []interface{}{ + s.chainId, + tx.Nonce(), + tx.GasPrice(), + tx.Gas(), + tx.To(), + tx.Value(), + tx.Data(), + tx.AccessList(), + }) + default: + // This _should_ not happen, but in case someone sends in a bad + // json struct via RPC, it's probably more prudent to return an + // empty hash instead of killing the node with a panic + //panic("Unsupported transaction type: %d", tx.typ) + return common.Hash{} + } +} + +// EIP155Signer implements Signer using the EIP-155 rules. This accepts transactions which +// are replay-protected as well as unprotected homestead transactions. +type EIP155Signer struct { + chainId, chainIdMul *big.Int +} + +func NewEIP155Signer(chainId *big.Int) EIP155Signer { + if chainId == nil { + chainId = new(big.Int) + } + return EIP155Signer{ + chainId: chainId, + chainIdMul: new(big.Int).Mul(chainId, big.NewInt(2)), + } +} + +func (s EIP155Signer) ChainID() *big.Int { + return s.chainId +} + +func (s EIP155Signer) Equal(s2 Signer) bool { + eip155, ok := s2.(EIP155Signer) + return ok && eip155.chainId.Cmp(s.chainId) == 0 +} + +var big8 = big.NewInt(8) + +func (s EIP155Signer) Sender(tx *Transaction) (common.Address, error) { + if tx.Type() != LegacyTxType { + return common.Address{}, ErrTxTypeNotSupported + } + if !tx.Protected() { + return HomesteadSigner{}.Sender(tx) + } + if tx.ChainId().Cmp(s.chainId) != 0 { + return common.Address{}, ErrInvalidChainId + } + V, R, S := tx.RawSignatureValues() + V = new(big.Int).Sub(V, s.chainIdMul) + V.Sub(V, big8) + return recoverPlain(s.Hash(tx), R, S, V, true) +} + +// SignatureValues returns signature values. This signature +// needs to be in the [R || S || V] format where V is 0 or 1. 
+func (s EIP155Signer) SignatureValues(tx *Transaction, sig []byte) (R, S, V *big.Int, err error) { + if tx.Type() != LegacyTxType { + return nil, nil, nil, ErrTxTypeNotSupported + } + R, S, V = decodeSignature(sig) + if s.chainId.Sign() != 0 { + V = big.NewInt(int64(sig[64] + 35)) + V.Add(V, s.chainIdMul) + } + return R, S, V, nil +} + +// Hash returns the hash to be signed by the sender. +// It does not uniquely identify the transaction. +func (s EIP155Signer) Hash(tx *Transaction) common.Hash { + return rlpHash([]interface{}{ + tx.Nonce(), + tx.GasPrice(), + tx.Gas(), + tx.To(), + tx.Value(), + tx.Data(), + s.chainId, uint(0), uint(0), + }) +} + +// HomesteadTransaction implements TransactionInterface using the +// homestead rules. +type HomesteadSigner struct{ FrontierSigner } + +func (s HomesteadSigner) ChainID() *big.Int { + return nil +} + +func (s HomesteadSigner) Equal(s2 Signer) bool { + _, ok := s2.(HomesteadSigner) + return ok +} + +// SignatureValues returns signature values. This signature +// needs to be in the [R || S || V] format where V is 0 or 1. +func (hs HomesteadSigner) SignatureValues(tx *Transaction, sig []byte) (r, s, v *big.Int, err error) { + return hs.FrontierSigner.SignatureValues(tx, sig) +} + +func (hs HomesteadSigner) Sender(tx *Transaction) (common.Address, error) { + if tx.Type() != LegacyTxType { + return common.Address{}, ErrTxTypeNotSupported + } + v, r, s := tx.RawSignatureValues() + return recoverPlain(hs.Hash(tx), r, s, v, true) +} + +type FrontierSigner struct{} + +func (s FrontierSigner) ChainID() *big.Int { + return nil +} + +func (s FrontierSigner) Equal(s2 Signer) bool { + _, ok := s2.(FrontierSigner) + return ok +} + +func (fs FrontierSigner) Sender(tx *Transaction) (common.Address, error) { + if tx.Type() != LegacyTxType { + return common.Address{}, ErrTxTypeNotSupported + } + v, r, s := tx.RawSignatureValues() + return recoverPlain(fs.Hash(tx), r, s, v, false) +} + +// SignatureValues returns signature values. 
This signature +// needs to be in the [R || S || V] format where V is 0 or 1. +func (fs FrontierSigner) SignatureValues(tx *Transaction, sig []byte) (r, s, v *big.Int, err error) { + if tx.Type() != LegacyTxType { + return nil, nil, nil, ErrTxTypeNotSupported + } + r, s, v = decodeSignature(sig) + return r, s, v, nil +} + +// Hash returns the hash to be signed by the sender. +// It does not uniquely identify the transaction. +func (fs FrontierSigner) Hash(tx *Transaction) common.Hash { + return rlpHash([]interface{}{ + tx.Nonce(), + tx.GasPrice(), + tx.Gas(), + tx.To(), + tx.Value(), + tx.Data(), + }) +} + +func decodeSignature(sig []byte) (r, s, v *big.Int) { + if len(sig) != crypto.SignatureLength { + panic(fmt.Sprintf("wrong size for signature: got %d, want %d", len(sig), crypto.SignatureLength)) + } + r = new(big.Int).SetBytes(sig[:32]) + s = new(big.Int).SetBytes(sig[32:64]) + v = new(big.Int).SetBytes([]byte{sig[64] + 27}) + return r, s, v +} + +func recoverPlain(sighash common.Hash, R, S, Vb *big.Int, homestead bool) (common.Address, error) { + if Vb.BitLen() > 8 { + return common.Address{}, ErrInvalidSig + } + V := byte(Vb.Uint64() - 27) + if !crypto.ValidateSignatureValues(V, R, S, homestead) { + return common.Address{}, ErrInvalidSig + } + // encode the signature in uncompressed format + r, s := R.Bytes(), S.Bytes() + sig := make([]byte, crypto.SignatureLength) + copy(sig[32-len(r):32], r) + copy(sig[64-len(s):64], s) + sig[64] = V + // recover the public key from the signature + pub, err := crypto.Ecrecover(sighash[:], sig) + if err != nil { + return common.Address{}, err + } + if len(pub) == 0 || pub[0] != 4 { + return common.Address{}, errors.New("invalid public key") + } + var addr common.Address + copy(addr[:], crypto.Keccak256(pub[1:])[12:]) + return addr, nil +} + +// deriveChainId derives the chain id from the given v parameter +func deriveChainId(v *big.Int) *big.Int { + if v.BitLen() <= 64 { + v := v.Uint64() + if v == 27 || v == 28 { + return 
new(big.Int) + } + return new(big.Int).SetUint64((v - 35) / 2) + } + v = new(big.Int).Sub(v, big.NewInt(35)) + return v.Div(v, big.NewInt(2)) +} diff --git a/mpt-witness-generator/witness/branch.go b/mpt-witness-generator/witness/branch.go new file mode 100644 index 0000000000..465cf257f5 --- /dev/null +++ b/mpt-witness-generator/witness/branch.go @@ -0,0 +1,320 @@ +package witness + +import ( + "log" + + "github.com/ethereum/go-ethereum/rlp" +) + +// isBranch takes GetProof element and returns whether the element is a branch. +func isBranch(proofEl []byte) bool { + elems, _, err := rlp.SplitList(proofEl) + check(err) + c, err1 := rlp.CountValues(elems) + check(err1) + if c != 2 && c != 17 { + log.Fatal("Proof element is neither leaf or branch") + } + return c == 17 +} + +// prepareBranchWitness takes the rows that are to be filled with branch data and it takes +// a branch as returned by GetProof. There are 19 rows for a branch and prepareBranchWitness +// fills the rows from index 1 to index 16 (index 0 is init, index 17 and 18 are for extension +// node when it applies). The parameter branchStart depends on whether it is S or C branch - +// S occupies the first 34 columns, C occupies the next 34 columns. +// The branch children are positioned each in its own row. 
+func prepareBranchWitness(rows [][]byte, branch []byte, branchStart int, branchRLPOffset int) { + rowInd := 1 + colInd := branchNodeRLPLen - 1 + + i := 0 + insideInd := -1 + for { + if branchRLPOffset+i == len(branch)-1 { // -1 because of the last 128 (branch value) + break + } + b := branch[branchRLPOffset+i] + if insideInd == -1 && b == 128 { + rows[rowInd][branchStart] = b + rowInd++ + } else if insideInd == -1 && b != 128 { + if b == 160 { + insideInd = 32 + colInd = branchNodeRLPLen - 2 + } else { + // non-hashed node + insideInd = int(b) - 192 + colInd = branchNodeRLPLen - 2 + } + rows[rowInd][branchStart+colInd] = b + } else { + colInd++ + rows[rowInd][branchStart+colInd] = b + if insideInd == 1 { + insideInd = -1 + rowInd++ + colInd = 0 + } else { + insideInd-- + } + } + + i++ + } +} + +func prepareBranchNode(branch1, branch2, extNode1, extNode2, extListRlpBytes []byte, extValues [][]byte, key, driftedInd, + branchC16, branchC1 byte, isBranchSPlaceholder, isBranchCPlaceholder, isExtension, isSModExtension, isCModExtension bool) Node { + extensionNode := ExtensionNode{ + ListRlpBytes: extListRlpBytes, + } + + var listRlpBytes [2][]byte + branch1RLPOffset := 1 + branch2RLPOffset := 1 + listRlpBytes1 := []byte{branch1[0]} + listRlpBytes2 := []byte{branch2[0]} + + if branch1[0] == 248 { // two RLP bytes + branch1RLPOffset = 2 + } else if branch1[0] == 249 { // three RLP bytes + branch1RLPOffset = 3 + } + + if branch2[0] == 248 { // two RLP bytes + branch2RLPOffset = 2 + } else if branch2[0] == 249 { // three RLP bytes + branch2RLPOffset = 3 + } + + if branch1[0] == 248 || branch1[0] == 249 { + listRlpBytes1 = append(listRlpBytes1, branch1[1]) + } + if branch2[0] == 248 || branch2[0] == 249 { + listRlpBytes2 = append(listRlpBytes2, branch2[1]) + } + + if branch1[0] == 249 { + listRlpBytes1 = append(listRlpBytes1, branch1[2]) + } + if branch2[0] == 249 { + listRlpBytes2 = append(listRlpBytes2, branch2[2]) + } + + listRlpBytes[0] = listRlpBytes1 + listRlpBytes[1] 
= listRlpBytes2 + + branchNode := BranchNode{ + ModifiedIndex: int(key), + DriftedIndex: int(driftedInd), + ListRlpBytes: listRlpBytes, + } + + extensionBranch := ExtensionBranchNode{ + IsExtension: isExtension, + IsModExtension: [2]bool{isSModExtension, isCModExtension}, + IsPlaceholder: [2]bool{isBranchSPlaceholder, isBranchCPlaceholder}, + Extension: extensionNode, + Branch: branchNode, + } + + values := make([][]byte, 17) + for i := 0; i < len(values); i++ { + values[i] = make([]byte, valueLen) + } + prepareBranchWitness(values, branch1, 0, branch1RLPOffset) + + // Just to get the modified child: + rows := make([][]byte, 17) + for i := 0; i < len(rows); i++ { + rows[i] = make([]byte, valueLen) + } + prepareBranchWitness(rows, branch2, 0, branch2RLPOffset) + values[0] = rows[1+key] + + values = append(values, extValues...) + + keccakData := [][]byte{branch1, branch2} + if isExtension { + keccakData = append(keccakData, extNode1) + keccakData = append(keccakData, extNode2) + } + node := Node{ + ExtensionBranch: &extensionBranch, + Values: values, + KeccakData: keccakData, + } + + return node +} + +// getDriftedPosition returns the position in branch to which the leaf drifted because another +// leaf has been added to the same slot. This information is stored into a branch init row. 
+func getDriftedPosition(leafKeyRow []byte, numberOfNibbles int) byte { + var nibbles []byte + if leafKeyRow[0] != 248 { + keyLen := int(leafKeyRow[1] - 128) + if (leafKeyRow[2] != 32) && (leafKeyRow[2] != 0) { // second term is for extension node + if leafKeyRow[2] < 32 { // extension node + nibbles = append(nibbles, leafKeyRow[2]-16) + } else { // leaf + nibbles = append(nibbles, leafKeyRow[2]-48) + } + } + for i := 0; i < keyLen-1; i++ { // -1 because the first byte doesn't have any nibbles + b := leafKeyRow[3+i] + n1 := b / 16 + n2 := b - n1*16 + nibbles = append(nibbles, n1) + nibbles = append(nibbles, n2) + } + } else { + keyLen := int(leafKeyRow[2] - 128) + if (leafKeyRow[3] != 32) && (leafKeyRow[3] != 0) { // second term is for extension node + if leafKeyRow[3] < 32 { // extension node + nibbles = append(nibbles, leafKeyRow[3]-16) + } else { // leaf + nibbles = append(nibbles, leafKeyRow[3]-48) + } + } + for i := 0; i < keyLen-1; i++ { // -1 because the first byte doesn't have any nibbles + b := leafKeyRow[4+i] + n1 := b / 16 + n2 := b - n1*16 + nibbles = append(nibbles, n1) + nibbles = append(nibbles, n2) + } + } + + return nibbles[numberOfNibbles] +} + +// addBranchAndPlaceholder adds to the rows a branch and its placeholder counterpart +// (used when one of the proofs have one branch more than the other). 
+func addBranchAndPlaceholder(proof1, proof2,
+	extNibblesS, extNibblesC [][]byte,
+	leafRow0, key, neighbourNode []byte,
+	keyIndex, extensionNodeInd int,
+	additionalBranch, isAccountProof, nonExistingAccountProof,
+	isShorterProofLastLeaf bool, branchC16, branchC1 byte, toBeHashed *[][]byte) (bool, bool, int, byte, Node) {
+	len1 := len(proof1)
+	len2 := len(proof2)
+
+	var node Node
+
+	numberOfNibbles := 0
+	var extListRlpBytes []byte
+	var extValues [][]byte
+	for i := 0; i < 4; i++ {
+		extValues = append(extValues, make([]byte, valueLen))
+	}
+
+	isExtension := (len1 == len2+2) || (len2 == len1+2)
+	if !isExtension {
+		if branchC16 == 1 {
+			branchC16 = 0
+			branchC1 = 1
+		} else {
+			branchC16 = 1
+			branchC1 = 0
+		}
+	} else {
+		var numNibbles byte
+		if len1 > len2 {
+			numNibbles, extListRlpBytes, extValues = prepareExtensions(extNibblesS, extensionNodeInd, proof1[len1-3], proof1[len1-3])
+		} else {
+			numNibbles, extListRlpBytes, extValues = prepareExtensions(extNibblesC, extensionNodeInd, proof2[len2-3], proof2[len2-3])
+		}
+		numberOfNibbles = int(numNibbles)
+
+		if numberOfNibbles%2 == 0 {
+			if branchC16 == 1 {
+				branchC16 = 0
+				branchC1 = 1
+			} else {
+				branchC16 = 1
+				branchC1 = 0
+			}
+		}
+	}
+
+	/*
+		For special cases when a new extension node is inserted.
+
+		Imagine you have an extension node at n1 n2 n3 n4 (where each of these is a nibble).
+		Let's say this extension node has the following nibbles as the extension: n5 n6 n7.
+		So at position n1 n2 n3 n4 n5 n6 n7 there is some branch.
+		Now we want to add a leaf at position n1 n2 n3 n4 n5 m1 where m1 != n6.
+		The adding algorithm walks through the trie, but it bumps into an extension node where
+		it should put this leaf. So a new extension node is added at position n1 n2 n3 n4 which only
+		has one nibble: n5. So at n1 n2 n3 n4 n5 we have a branch now. In this branch, at position m1 we
+		have a leaf, while at position n6 we have another extension node with one extension nibble: n7. 
+		At this position (n7) we have the branch from the original extension node.
+
+		When an extension node is inserted because of the added key, C proof will contain this new
+		extension node and the underlying branch. However, S proof will stop at the old extension node.
+		This old extension node is not part of the C proof, but we need to ensure that it is in the C trie.
+		We need to take into account that in the C trie the old extension node has a shortened extension.
+
+		The problem is where to store the old extension node. Note that in the above code the new
+		extension node and the underlying branch rows are prepared. For example, when len2 > len1 we
+		take extension node from proof2[len2 - 3] and branch from proof2[len2 - 2]. In this case,
+		the old extension node in proof1[len1 - 1] has been ignored. For this reason we store it
+		in the rows before we add a leaf.
+	*/
+	var longExtNode []byte
+	if len1 > len2 {
+		longExtNode = proof2[len2-1]
+	} else {
+		longExtNode = proof1[len1-1]
+	}
+
+	// TODO: fix
+	var extNode []byte
+	if isExtension {
+		if len1 > len2 {
+			extNode = proof1[len1-3]
+		} else {
+			extNode = proof2[len2-3]
+		}
+	}
+
+	// Note that isModifiedExtNode happens also when we have a branch instead of shortExtNode
+	isModifiedExtNode := !isBranch(longExtNode) && !isShorterProofLastLeaf
+	isSModifiedExtNode := false
+	isCModifiedExtNode := false
+	if isModifiedExtNode {
+		if len1 < len2 {
+			isSModifiedExtNode = true
+		} else {
+			isCModifiedExtNode = true
+		}
+	}
+
+	if len1 > len2 {
+		// We now get the first nibble of the leaf that was turned into branch.
+		// This first nibble presents the position of the leaf once it moved
+		// into the new branch. 
+ driftedInd := getDriftedPosition(leafRow0, numberOfNibbles) + + node = prepareBranchNode(proof1[len1-2], proof1[len1-2], extNode, extNode, extListRlpBytes, extValues, + key[keyIndex+numberOfNibbles], driftedInd, + branchC16, branchC1, false, true, isExtension, isSModifiedExtNode, isCModifiedExtNode) + + // We now get the first nibble of the leaf that was turned into branch. + // This first nibble presents the position of the leaf once it moved + // into the new branch. + } else { + // We now get the first nibble of the leaf that was turned into branch. + // This first nibble presents the position of the leaf once it moved + // into the new branch. + driftedInd := getDriftedPosition(leafRow0, numberOfNibbles) + + node = prepareBranchNode(proof2[len2-2], proof2[len2-2], extNode, extNode, extListRlpBytes, extValues, + key[keyIndex+numberOfNibbles], driftedInd, + branchC16, branchC1, true, false, isExtension, isSModifiedExtNode, isCModifiedExtNode) + } + + return isModifiedExtNode, isExtension, numberOfNibbles, branchC16, node +} diff --git a/mpt-witness-generator/witness/extension_node.go b/mpt-witness-generator/witness/extension_node.go new file mode 100644 index 0000000000..252c260ef7 --- /dev/null +++ b/mpt-witness-generator/witness/extension_node.go @@ -0,0 +1,345 @@ +package witness + +// setExtNodeSelectors sets in the branch init row the information about the extension node. 
+func setExtNodeSelectors(row, proofEl []byte, numberOfNibbles int, branchC16 byte) { + row[isExtensionPos] = 1 + if len(proofEl) > 56 { // 56 because there is 1 byte for length + // isCExtLongerThan55 doesn't need to be set here + row[isSExtLongerThan55Pos] = 1 + } + + if len(proofEl) < 32 { + // isExtNodeSNonHashed doesn't need to be set here + row[isExtNodeSNonHashedPos] = 1 + } + + if numberOfNibbles == 1 { + if branchC16 == 1 { + row[isExtShortC16Pos] = 1 + } else { + row[isExtShortC1Pos] = 1 + } + } else { + if numberOfNibbles%2 == 0 { + if branchC16 == 1 { + row[isExtLongEvenC16Pos] = 1 + } else { + row[isExtLongEvenC1Pos] = 1 + } + } else { + if branchC16 == 1 { + row[isExtLongOddC16Pos] = 1 + } else { + row[isExtLongOddC1Pos] = 1 + } + } + } +} + +func prepareEmptyExtensionRows(beforeModification, afterModification bool) [][]byte { + ext_row1 := make([]byte, rowLen) + ext_row2 := make([]byte, rowLen) + if !beforeModification && !afterModification { + ext_row1 = append(ext_row1, 16) + ext_row2 = append(ext_row2, 17) + } else if beforeModification { + ext_row1 = append(ext_row1, 20) + ext_row2 = append(ext_row2, 21) + } else if afterModification { + ext_row1 = append(ext_row1, 22) + ext_row2 = append(ext_row2, 23) + } + + return [][]byte{ext_row1, ext_row2} +} + +// TODO: remove when Nodes are fully implemented +func prepareExtensionRows(extNibbles [][]byte, extensionNodeInd int, proofEl1, proofEl2 []byte, beforeModification, afterModification bool) (byte, []byte, []byte) { + var extensionRowS []byte + var extensionRowC []byte + + extRows := prepareEmptyExtensionRows(beforeModification, afterModification) + extensionRowS = extRows[0] + extensionRowC = extRows[1] + prepareExtensionRow(extensionRowS, proofEl1, true) + prepareExtensionRow(extensionRowC, proofEl2, false) + + evenNumberOfNibbles := proofEl1[2] == 0 + keyLen := getExtensionNodeKeyLen(proofEl1) + numberOfNibbles := getExtensionNumberOfNibbles(proofEl1) + + // We need nibbles as witness to compute 
key RLC, so we set them + // into extensionRowC s_advices (we can do this because both extension + // nodes have the same key, so we can have this info only in one). + // There can be more up to 64 nibbles, but there is only 32 bytes + // in extensionRowC s_advices. So we store every second nibble (having + // the whole byte and one nibble is enough to compute the other nibble). + startNibblePos := 2 // we don't need any nibbles for case keyLen = 1 + if keyLen > 1 { + if evenNumberOfNibbles { + startNibblePos = 1 + } else { + startNibblePos = 2 + } + } + ind := 0 + for j := startNibblePos; j < len(extNibbles[extensionNodeInd]); j += 2 { + extensionRowC[branchNodeRLPLen+ind] = + extNibbles[extensionNodeInd][j] + ind++ + } + + return numberOfNibbles, extensionRowS, extensionRowC +} + +func prepareExtensions(extNibbles [][]byte, extensionNodeInd int, proofEl1, proofEl2 []byte) (byte, []byte, [][]byte) { + var values [][]byte + v1 := make([]byte, valueLen) + v2 := make([]byte, valueLen) + v3 := make([]byte, valueLen) + v4 := make([]byte, valueLen) + + listRlpBytes := prepareExtension(v1, v2, proofEl1, true) + prepareExtension(v3, v4, proofEl2, false) + + evenNumberOfNibbles := proofEl1[2] == 0 + keyLen := getExtensionNodeKeyLen(proofEl1) + numberOfNibbles := getExtensionNumberOfNibbles(proofEl1) + + // We need nibbles as witness to compute key RLC, so we set them + // into extensionRowC s_advices (we can do this because both extension + // nodes have the same key, so we can have this info only in one). + // There can be more up to 64 nibbles, but there is only 32 bytes + // in extensionRowC s_advices. So we store every second nibble (having + // the whole byte and one nibble is enough to compute the other nibble). 
+ + startNibblePos := 2 // we don't need any nibbles for case keyLen = 1 + if keyLen > 1 { + if evenNumberOfNibbles { + startNibblePos = 1 + } else { + startNibblePos = 2 + } + } + ind := 0 + for j := startNibblePos; j < len(extNibbles[extensionNodeInd]); j += 2 { + v3[2+ind] = // TODO: check 2 + ind + extNibbles[extensionNodeInd][j] + ind++ + } + values = append(values, v1) + values = append(values, v2) + values = append(values, v3) + values = append(values, v4) + + return numberOfNibbles, listRlpBytes, values +} + +func getExtensionLenStartKey(proofEl []byte) (int, int) { + lenKey := 0 + startKey := 0 + // proofEl[1] <= 32 means only one nibble: the stored value is `16 + nibble`, note that if there are + // at least two nibbles there will be `128 + number of bytes occupied by nibbles` in proofEl[1] + if proofEl[1] <= 32 { + lenKey = 1 + startKey = 1 + } else if proofEl[0] <= 247 { + lenKey = int(proofEl[1] - 128) + startKey = 2 + } else { + lenKey = int(proofEl[2] - 128) + startKey = 3 + } + + return lenKey, startKey +} + +func getExtensionNodeKeyLen(proofEl []byte) byte { + if proofEl[1] <= 32 { + return 1 + } else if proofEl[0] <= 247 { + return proofEl[1] - 128 + } else { + return proofEl[2] - 128 + } +} + +func getExtensionNumberOfNibbles(proofEl []byte) byte { + evenNumberOfNibbles := proofEl[2] == 0 + numberOfNibbles := byte(0) + keyLen := getExtensionNodeKeyLen(proofEl) + if keyLen == 1 { + numberOfNibbles = 1 + } else if keyLen > 1 && evenNumberOfNibbles { + numberOfNibbles = (keyLen - 1) * 2 + } else if keyLen > 1 && !evenNumberOfNibbles { + numberOfNibbles = (keyLen-1)*2 + 1 + } + + return numberOfNibbles +} + +func getExtensionNodeNibbles(proofEl []byte) []byte { + lenKey, startKey := getExtensionLenStartKey(proofEl) + + var nibbles []byte + if proofEl[startKey] != 0 { + nibbles = append(nibbles, proofEl[startKey]-16) + } + for i := 0; i < lenKey-1; i++ { // -1 because the first byte doesn't have any nibbles + b := proofEl[startKey+1+i] + n1 := b / 16 
+ n2 := b - n1*16 + nibbles = append(nibbles, n1) + nibbles = append(nibbles, n2) + } + + return nibbles +} + +// TODO: remove when Nodes are fully implemented +func prepareExtensionRow(witnessRow, proofEl []byte, setKey bool) { + // storageProof[i]: + // [228,130,0,149,160,114,253,150,133,18,192,156,19,241,162,51,210,24,1,151,16,48,7,177,42,60,49,34,230,254,242,79,132,165,90,75,249] + // Note that the first element (228 in this case) can go much higher - for example, if there + // are 40 nibbles, this would take 20 bytes which would make the first element 248. + + // If only one nibble in key: + // [226,16,160,172,105,12... + // Could also be non-hashed branch: + // [223,16,221,198,132,32,0,0,0,1,198,132,32,0,0,0,1,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128] + + // Extension node with non-hashed branch: + // List contains up to 55 bytes (192 + 55) + // [247,160,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,213,128,194,32,1,128,194,32,1,128,128,128,128,128,128,128,128,128,128,128,128,128] + + // List contains more than 55 bytes + // [248,58,159,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,217,128,196,130,32,0,1,128,196,130,32,0,1,128,128,128,128,128,128,128,128,128,128,128,128,128] + + // Note that the extension node can be much shorter than the one above - in case when + // there are less nibbles, so we cannot say that 226 appears as the first byte only + // when there are hashed nodes in the branch and there is only one nibble. + // Branch with two non-hashed nodes (that's the shortest possible branch): + // [217,128,196,130,32,0,1,128,196,130,32,0,1,128,128,128,128,128,128,128,128,128,128,128,128,128] + // Note: branch contains at least 26 bytes. 192 + 26 = 218 + + /* + If proofEl[0] <= 247 (length at most 55, so proofEl[1] doesn't specify the length of the whole + remaining stream, only of the next substream) + If proofEl[1] <= 128: + There is only 1 byte for nibbles (keyLen = 1) and this is proofEl[1]. 
+ Else: + Nibbles are stored in more than 1 byte, proofEl[1] specifies the length of bytes. + Else: + proofEl[1] contains the length of the remaining stream. + proofEl[2] specifies the length of the bytes (for storing nibbles). + Note that we can't have only one nibble in this case. + */ + + if setKey { + witnessRow[0] = proofEl[0] + witnessRow[1] = proofEl[1] + } + + lenKey, startKey := getExtensionLenStartKey(proofEl) + if startKey == 3 { + witnessRow[2] = proofEl[2] + } + + if setKey { + for j := 0; j < lenKey; j++ { + witnessRow[startKey+j] = proofEl[startKey+j] + } + } + + encodedNodeLen := proofEl[startKey+lenKey] + nodeLen := byte(0) + start := branch2start + if encodedNodeLen > 192 { + // we have a list, that means a non-hashed node + nodeLen = encodedNodeLen - 192 + } else if encodedNodeLen == 160 { + // hashed-node + nodeLen = encodedNodeLen - 128 + } + witnessRow[start] = encodedNodeLen + for j := 0; j < int(nodeLen); j++ { + witnessRow[start+1+j] = proofEl[startKey+lenKey+1+j] + } +} + +func prepareExtension(v1, v2, proofEl []byte, setKey bool) []byte { + // storageProof[i]: + // [228,130,0,149,160,114,253,150,133,18,192,156,19,241,162,51,210,24,1,151,16,48,7,177,42,60,49,34,230,254,242,79,132,165,90,75,249] + // Note that the first element (228 in this case) can go much higher - for example, if there + // are 40 nibbles, this would take 20 bytes which would make the first element 248. + + // If only one nibble in key: + // [226,16,160,172,105,12... 
+ // Could also be non-hashed branch: + // [223,16,221,198,132,32,0,0,0,1,198,132,32,0,0,0,1,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128] + + // Extension node with non-hashed branch: + // List contains up to 55 bytes (192 + 55) + // [247,160,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,213,128,194,32,1,128,194,32,1,128,128,128,128,128,128,128,128,128,128,128,128,128] + + // List contains more than 55 bytes + // [248,58,159,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,217,128,196,130,32,0,1,128,196,130,32,0,1,128,128,128,128,128,128,128,128,128,128,128,128,128] + + // Note that the extension node can be much shorter than the one above - in case when + // there are less nibbles, so we cannot say that 226 appears as the first byte only + // when there are hashed nodes in the branch and there is only one nibble. + // Branch with two non-hashed nodes (that's the shortest possible branch): + // [217,128,196,130,32,0,1,128,196,130,32,0,1,128,128,128,128,128,128,128,128,128,128,128,128,128] + // Note: branch contains at least 26 bytes. 192 + 26 = 218 + + /* + If proofEl[0] <= 247 (length at most 55, so proofEl[1] doesn't specify the length of the whole + remaining stream, only of the next substream) + If proofEl[1] <= 128: + There is only 1 byte for nibbles (keyLen = 1) and this is proofEl[1]. + Else: + Nibbles are stored in more than 1 byte, proofEl[1] specifies the length of bytes. + Else: + proofEl[1] contains the length of the remaining stream. + proofEl[2] specifies the length of the bytes (for storing nibbles). + Note that we can't have only one nibble in this case. 
+ */ + + var listRlpBytes []byte + listRlpBytes = append(listRlpBytes, proofEl[0]) + + lenKey, startKey := getExtensionLenStartKey(proofEl) + if lenKey != 1 { + // The descriptor now contains the key length RLP in value row: + startKey = startKey - 1 + lenKey = lenKey + 1 + } + + // TODO + if startKey == 3 { + listRlpBytes = append(listRlpBytes, proofEl[1]) + } + + if setKey { + for j := 0; j < lenKey; j++ { + v1[j] = proofEl[startKey+j] + } + } + + encodedNodeLen := proofEl[startKey+lenKey] + nodeLen := byte(0) + if encodedNodeLen > 192 { + // we have a list, that means a non-hashed node + nodeLen = encodedNodeLen - 192 + } else if encodedNodeLen == 160 { + // hashed-node + nodeLen = encodedNodeLen - 128 + } + v2[0] = encodedNodeLen + for j := 0; j < int(nodeLen); j++ { + v2[1+j] = proofEl[startKey+lenKey+1+j] + } + + return listRlpBytes +} diff --git a/mpt-witness-generator/witness/gen_witness_from_infura_blockchain_test.go b/mpt-witness-generator/witness/gen_witness_from_infura_blockchain_test.go new file mode 100644 index 0000000000..93e72ff8e3 --- /dev/null +++ b/mpt-witness-generator/witness/gen_witness_from_infura_blockchain_test.go @@ -0,0 +1,2407 @@ +package witness + +import ( + "fmt" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/privacy-scaling-explorations/mpt-witness-generator/oracle" + "github.com/privacy-scaling-explorations/mpt-witness-generator/state" +) + +func TestUpdateOneLevel(t *testing.T) { + ks := [...]common.Hash{common.HexToHash("0x12"), common.HexToHash("0x21")} + // hexed keys: + // [3,1,14,12,12,... + // [11,11,8,10,6,... + // We have a branch with children at position 3 and 11. 
+ + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + + // This key is turned into odd length (see hexToCompact in encoding.go to see + // odd and even length are handled differently) + v := common.BigToHash(big.NewInt(int64(17))) + addr := common.HexToAddress("0xaaaccf12580138bc2bbceeeaa111df4e42ab81ff") + + trieMod := TrieModification{ + Type: StorageChanged, + Key: ks[0], + Value: v, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("UpdateOneLevel", ks[:], values, []common.Address{addr, addr}, trieModifications) +} + +func TestUpdateOneLevel1(t *testing.T) { + addr := common.HexToAddress("0x50efbf12580138bc263c95757826df4e24eb81c9") + // This address is turned into odd length (see hexToCompact in encoding.go to see + // odd and even length are handled differently) + ks := [...]common.Hash{common.HexToHash("0x12"), common.HexToHash("0x21")} + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + + val := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: ks[1], + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("UpdateOneLevel1", ks[:], values, []common.Address{addr, addr}, trieModifications) +} + +func TestUpdateOneLevelBigVal(t *testing.T) { + ks := [...]common.Hash{common.HexToHash("0x12"), common.HexToHash("0x21")} + // hexed keys: + // [3,1,14,12,12,... + // [11,11,8,10,6,... + // We have a branch with children at position 3 and 11. 
+ + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + + // ks[0] key is turned into odd length (see hexToCompact in encoding.go to see + // odd and even length are handled differently) + // big value so that RLP is longer than 55 bytes + v1 := common.FromHex("0xbbefaa12580138bc263c95757826df4e24eb81c9aaaaaaaaaaaaaaaaaaaaaaaa") + v2 := common.BytesToHash(v1) + addr := common.HexToAddress("0xaaaccf12580138bc2bbceeeaa826df4e42ab81ff") + + trieMod := TrieModification{ + Type: StorageChanged, + Key: ks[0], + Value: v2, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("UpdateOneLevelBigVal", ks[:], values, []common.Address{addr, addr}, trieModifications) +} + +func TestUpdateTwoLevels(t *testing.T) { + ks := [...]common.Hash{common.HexToHash("0x11"), common.HexToHash("0x12"), common.HexToHash("0x21")} // this has three levels + // hexed keys: + // [3,1,14,12,12,... + // [11,11,8,10,6,... + // First we have a branch with children at position 3 and 11. + // The third storage change happens at key: + // [3,10,6,3,5,7,... + // That means leaf at position 3 turns into branch with children at position 1 and 10. 
+ // ks := [...]common.Hash{common.HexToHash("0x12"), common.HexToHash("0x21")} + + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + + // This key is turned into even length (see hexToCompact in encoding.go to see + // odd and even length are handled differently) + v := common.BigToHash(big.NewInt(int64(17))) + addr := common.HexToAddress("0xaaaccf12580138bc2bbc957aa826df4e42ab81ff") + + trieMod := TrieModification{ + Type: StorageChanged, + Key: ks[0], + Value: v, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("UpdateTwoLevels", ks[:], values, []common.Address{addr, addr, addr}, trieModifications) +} + +func TestUpdateTwoLevelsBigVal(t *testing.T) { + ks := [...]common.Hash{common.HexToHash("0x11"), common.HexToHash("0x12"), common.HexToHash("0x21")} // this has three levels + // hexed keys: + // [3,1,14,12,12,... + // [11,11,8,10,6,... + // First we have a branch with children at position 3 and 11. + // The third storage change happens at key: + // [3,10,6,3,5,7,... + // That means leaf at position 3 turns into branch with children at position 1 and 10. 
+ // ks := [...]common.Hash{common.HexToHash("0x12"), common.HexToHash("0x21")} + + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + + // This key is turned into even length (see hexToCompact in encoding.go to see + // odd and even length are handled differently) + v1 := common.FromHex("0xbbefaa12580138bc263c95757826df4e24eb81c9aaaaaaaaaaaaaaaaaaaaaaaa") + v2 := common.BytesToHash(v1) + addr := common.HexToAddress("0xaaaccf12580138bc2bbc957aa826df4e42ab81ff") + + trieMod := TrieModification{ + Type: StorageChanged, + Key: ks[0], + Value: v2, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("UpdateTwoLevelsBigVal", ks[:], values, []common.Address{addr, addr, addr}, trieModifications) +} + +func TestUpdateThreeLevels(t *testing.T) { + ks := [...]common.Hash{ + common.HexToHash("0x11"), + common.HexToHash("0x12"), + common.HexToHash("0x21"), + common.HexToHash("0x31"), + common.HexToHash("0x32"), + common.HexToHash("0x33"), + common.HexToHash("0x34"), + common.HexToHash("0x35"), + common.HexToHash("0x36"), + common.HexToHash("0x37"), + common.HexToHash("0x38"), // + common.HexToHash("0x39"), + common.HexToHash("0x40"), + common.HexToHash("0x41"), + common.HexToHash("0x42"), + common.HexToHash("0x43"), + common.HexToHash("0x44"), + common.HexToHash("0x45"), + common.HexToHash("0x46"), + } + // ks[10] = 0x38 is at position 3 in root.Children[3].Children[8] + // nibbles + // [9,5,12,5,13,12,14,10,13,14,9,6,0,3,4,7,9,11,1,7,7,11,6,8,9,5,9,0,4,9,4,8,5,13,15,8,10,10,9,7,11,3,9,15,3,5,3,3,0,3,9,10,15,5,15,4,5,6,1,9,9,16] + // terminator flag 16 (last byte) is removed, then it remains len 61 (these are nibbles): + // 
[9,5,12,5,13,12,14,10,13,14,9,6,0,3,4,7,9,11,1,7,7,11,6,8,9,5,9,0,4,9,4,8,5,13,15,8,10,10,9,7,11,3,9,15,3,5,3,3,0,3,9,10,15,5,15,4,5,6,1,9,9] + + // buf (31 len): + // this is key stored in leaf: + // [57,92,93,206,173,233,96,52,121,177,119,182,137,89,4,148,133,223,138,169,123,57,243,83,48,57,175,95,69,97,153] + + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + addr := common.HexToAddress("0xaaaccf12580138bc263c95757826df4e42ab81ff") + var addresses []common.Address + for i := 0; i < len(ks); i++ { + addresses = append(addresses, addr) + } + + v := common.BigToHash(big.NewInt(int64(17))) + + trieMod := TrieModification{ + Type: StorageChanged, + Key: ks[10], + Value: v, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("UpdateThreeLevels", ks[:], values, addresses, trieModifications) +} + +func TestFromNilToValue(t *testing.T) { + ks := [...]common.Hash{ + common.HexToHash("0x11"), + common.HexToHash("0x12"), + common.HexToHash("0x21"), + common.HexToHash("0x31"), + common.HexToHash("0x32"), + common.HexToHash("0x33"), + common.HexToHash("0x34"), + common.HexToHash("0x35"), + common.HexToHash("0x36"), + common.HexToHash("0x37"), + } + + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + addr := common.HexToAddress("0x50efbf12580138bc263c95757826df4e42ab81ff") + var addresses []common.Address + for i := 0; i < len(ks); i++ { + addresses = append(addresses, addr) + } + + // This test is similar as above, but the key that is being modified has not been used yet. 
+ + toBeModified := common.HexToHash("0x38") + v := common.BigToHash(big.NewInt(int64(17))) + + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: v, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("FromNilToValue", ks[:], values, addresses, trieModifications) +} + +func TestDelete(t *testing.T) { + ks := [...]common.Hash{ + common.HexToHash("0xaaaabbbbabab"), + common.HexToHash("0xbaaabbbbabab"), + common.HexToHash("0xcaaabbbbabab"), + common.HexToHash("0xdaaabbbbabab"), + } + + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + addr := common.HexToAddress("0x50efbf12580138bc263c95757826df4e24eb81ff") + var addresses []common.Address + for i := 0; i < len(ks); i++ { + addresses = append(addresses, addr) + } + + toBeModified := common.HexToHash("0xdaaabbbbabab") + val := common.Hash{} // empty value deletes the key + + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("Delete", ks[:], values, addresses, trieModifications) +} + +func TestUpdateOneLevelEvenAddress(t *testing.T) { + addr := common.HexToAddress("0x25efbf12580138bc263c95757826df4e24eb81c9") + // This address is turned into even length (see hexToCompact in encoding.go to see + // odd and even length are handled differently) + ks := [...]common.Hash{common.HexToHash("0x12"), common.HexToHash("0x21")} + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + var addresses []common.Address + for i := 0; i < 
len(ks); i++ { + addresses = append(addresses, addr) + } + + // This is a storage slot that will be modified (the list will come from bus-mapping): + toBeModified := ks[1] + val := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("UpdateOneLevelEvenAddress", ks[:], values, addresses, trieModifications) +} + +func TestAddBranch(t *testing.T) { + ks := [...]common.Hash{common.HexToHash("0x11"), common.HexToHash("0x12")} + // hexed keys: + // [3,1,14,12,12,... + // [11,11,8,10,6,... + // First we have a branch with children at position 3 and 11. + // ks := [...]common.Hash{common.HexToHash("0x12"), common.HexToHash("0x21")} + + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + addr := common.HexToAddress("0x75acef12a01883c2b3fc57957826df4e24e8baaa") + var addresses []common.Address + for i := 0; i < len(ks); i++ { + addresses = append(addresses, addr) + } + + // This key is not in the trie yet, its nibbles: + // [3,10,6,3,5,7,... + // That means leaf at position 3 turns into branch with children at position 1 and 10. + toBeModified := common.HexToHash("0x21") + v := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: v, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("AddBranch", ks[:], values, addresses, trieModifications) +} + +func TestAddBranchLong(t *testing.T) { + ks := [...]common.Hash{common.HexToHash("0x11"), common.HexToHash("0x12")} + // hexed keys: + // [3,1,14,12,12,... + // [11,11,8,10,6,... + // First we have a branch with children at position 3 and 11. 
+ // ks := [...]common.Hash{common.HexToHash("0x12"), common.HexToHash("0x21")} + + var values []common.Hash + // big value so that RLP will be longer than 55 bytes for the neighbouring node + v1 := common.FromHex("0xbbefaa12580138bc263c95757826df4e24eb81c9aaaaaaaaaaaaaaaaaaaaaaaa") + v2 := common.BytesToHash(v1) + for i := 0; i < len(ks); i++ { + values = append(values, v2) + } + addr := common.HexToAddress("0x75acef12a01883c2b3fc57957826df4e24e8b19c") + var addresses []common.Address + for i := 0; i < len(ks); i++ { + addresses = append(addresses, addr) + } + + // This key is not in the trie yet, its nibbles: + // [3,10,6,3,5,7,... + // That means leaf at position 3 turns into branch with children at position 1 and 10. + toBeModified := common.HexToHash("0x21") + v := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: v, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("AddBranchLong", ks[:], values, addresses, trieModifications) +} + +func TestDeleteBranch(t *testing.T) { + h := common.HexToHash("0x11dd2277aa") + + ks := [...]common.Hash{ + common.HexToHash("0xaa"), + common.HexToHash("0xabcc"), + common.HexToHash("0xffdd"), + common.HexToHash("0x11dd"), + common.HexToHash("0x11dd22"), + common.HexToHash("0x11dd2233"), + common.HexToHash("0x11dd2255"), + common.HexToHash("0x11dd2277"), + h, // this leaf turns into a branch + } + + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + addr := common.HexToAddress("0x75acef12a0188c32b36c57957826df4e24e8b19c") + var addresses []common.Address + for i := 0; i < len(ks); i++ { + addresses = append(addresses, addr) + } + + toBeModified := h + v := common.Hash{} // empty value deletes the key + trieMod := 
TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: v, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("DeleteBranch", ks[:], values, addresses, trieModifications) +} + +func TestDeleteBranchLong(t *testing.T) { + h := common.HexToHash("0x11dd2277aa") + + ks := [...]common.Hash{ + common.HexToHash("0xaa"), + common.HexToHash("0xabcc"), + common.HexToHash("0xffdd"), + common.HexToHash("0x11dd"), + common.HexToHash("0x11dd22"), + common.HexToHash("0x11dd2233"), + common.HexToHash("0x11dd2255"), + common.HexToHash("0x11dd2277"), + h, // this leaf turns into a branch + } + + var values []common.Hash + // big value so that RLP will be longer than 55 bytes for the neighbouring node + v1 := common.FromHex("0xbbefaa12580138bc263c95757826df4e24eb81c9aaaaaaaaaaaaaaaaaaaaaaaa") + v2 := common.BytesToHash(v1) + for i := 0; i < len(ks); i++ { + values = append(values, v2) + } + addr := common.HexToAddress("0x75acef12a0188c32b36c57957826df4e24e8b19c") + var addresses []common.Address + for i := 0; i < len(ks); i++ { + addresses = append(addresses, addr) + } + + toBeModified := h + v := common.Hash{} // empty value deletes the key + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: v, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("DeleteBranchLong", ks[:], values, addresses, trieModifications) +} + +func TestAddBranchTwoLevels(t *testing.T) { + // Test for case when branch is added in the second level. So, instead of having only + // branch1 with some nodes and then one of this nodes is replaced with a branch (that's + // the case of TestAddBranch), we have here branch1 and then inside it another + // branch: branch2. Inside brach2 we have a node which gets replaced by a branch. + // This is to test cases when the key contains odd number of nibbles as well as + // even number of nibbles. 
+ + a := 1 + b := 1 + h := fmt.Sprintf("0xaa%d%d", a, b) + ks := []common.Hash{common.HexToHash(h)} + for i := 0; i < 33; i++ { + // just some values to get the added branch in second level (found out trying different values) + if i%2 == 0 { + a += 1 + } else { + b += 1 + } + if a == 4 && b == 3 { + continue + } + h := fmt.Sprintf("0xaa%d%d", a, b) + ks = append(ks, common.HexToHash(h)) + } + + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + addr := common.HexToAddress("0x75fbef12a0188c32b36c57957826df4e24e8b19c") + var addresses []common.Address + for i := 0; i < len(ks); i++ { + addresses = append(addresses, addr) + } + + toBeModified := common.HexToHash("0xaa43") + v := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: v, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("AddBranchTwoLevels", ks[:], values, addresses, trieModifications) +} + +func TestAddBranchTwoLevelsLong(t *testing.T) { + a := 1 + b := 1 + h := fmt.Sprintf("0xaa%d%d", a, b) + ks := []common.Hash{common.HexToHash(h)} + for i := 0; i < 33; i++ { + // just some values to get the added branch in second level (found out trying different values) + if i%2 == 0 { + a += 1 + } else { + b += 1 + } + if a == 4 && b == 3 { + continue + } + h := fmt.Sprintf("0xaa%d%d", a, b) + ks = append(ks, common.HexToHash(h)) + } + + var values []common.Hash + v1 := common.FromHex("0xbbefaa12580138bc263c95757826df4e24eb81c9aaaaaaaaaaaaaaaaaaaaaaaa") + v2 := common.BytesToHash(v1) + for i := 0; i < len(ks); i++ { + values = append(values, v2) + } + addr := common.HexToAddress("0x75fbef1250188c32b63c57957826df4e24e8b19c") + var addresses []common.Address + for i := 0; i < len(ks); i++ { + addresses = 
append(addresses, addr) + } + + toBeModified := common.HexToHash("0xaa43") + v := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: v, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("AddBranchTwoLevelsLong", ks[:], values, addresses, trieModifications) +} + +func TestDeleteBranchTwoLevels(t *testing.T) { + a := 1 + b := 1 + h := fmt.Sprintf("0xaa%d%d", a, b) + ks := []common.Hash{common.HexToHash(h)} + for i := 0; i < 33; i++ { + // just some values to get the added branch in second level (found out trying different values) + if i%2 == 0 { + a += 1 + } else { + b += 1 + } + h := fmt.Sprintf("0xaa%d%d", a, b) + ks = append(ks, common.HexToHash(h)) + } + + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + addr := common.HexToAddress("0x75fbef1250188c32b63c57957826df4e24eb81c9") + var addresses []common.Address + for i := 0; i < len(ks); i++ { + addresses = append(addresses, addr) + } + + toBeModified := common.HexToHash("0xaa43") + v := common.Hash{} + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: v, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("DeleteBranchTwoLevels", ks[:], values, addresses, trieModifications) +} + +func TestDeleteBranchTwoLevelsLong(t *testing.T) { + a := 1 + b := 1 + h := fmt.Sprintf("0xaa%d%d", a, b) + ks := []common.Hash{common.HexToHash(h)} + for i := 0; i < 33; i++ { + // just some values to get the added branch in second level (found out trying different values) + if i%2 == 0 { + a += 1 + } else { + b += 1 + } + h := fmt.Sprintf("0xaa%d%d", a, b) + ks = append(ks, common.HexToHash(h)) + } + + var values []common.Hash + v1 := 
common.FromHex("0xbbefaa12580138bc263c95757826df4e24eb81c9aaaaaaaaaaaaaaaaaaaaaaaa") + v2 := common.BytesToHash(v1) + for i := 0; i < len(ks); i++ { + values = append(values, v2) + } + addr := common.HexToAddress("0x75fbef21508183c2b63c57957826df4e24eb81c9") + var addresses []common.Address + for i := 0; i < len(ks); i++ { + addresses = append(addresses, addr) + } + + toBeModified := common.HexToHash("0xaa43") + v := common.Hash{} + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: v, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("DeleteBranchTwoLevelsLong", ks[:], values, addresses, trieModifications) +} + +func TestExtensionOneKeyByteSel1(t *testing.T) { + ks := [...]common.Hash{ + common.HexToHash("0x11"), + common.HexToHash("0x12"), + common.HexToHash("0x21"), + common.HexToHash("0x31"), + common.HexToHash("0x32"), + common.HexToHash("0x33"), + common.HexToHash("0x34"), + common.HexToHash("0x35"), + common.HexToHash("0x36"), + common.HexToHash("0x37"), + common.HexToHash("0x38"), // + common.HexToHash("0x39"), + common.HexToHash("0x40"), + common.HexToHash("0x42"), + common.HexToHash("0x43"), + common.HexToHash("0x44"), + common.HexToHash("0x45"), + common.HexToHash("0x46"), + common.HexToHash("0x47"), + common.HexToHash("0x48"), + common.HexToHash("0x50"), + common.HexToHash("0x51"), + common.HexToHash("0x52"), + common.HexToHash("0x53"), + common.HexToHash("0x54"), + common.HexToHash("0x55"), + common.HexToHash("0x56"), + + common.HexToHash("0x61"), // extension + } + + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + addr := common.HexToAddress("0x75fbef21508183c2b63c57957826df4e24eb81c9") + var addresses []common.Address + for i := 0; i < len(ks); i++ { + addresses = 
append(addresses, addr) + } + + toBeModified := ks[len(ks)-1] + val := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("ExtensionOneKeyByteSel1", ks[:], values, addresses, trieModifications) +} + +func TestExtensionAddedOneKeyByteSel1(t *testing.T) { + a := 1 + b := 1 + h := fmt.Sprintf("0x%d%d", a, b) + ks := []common.Hash{common.HexToHash(h)} + for i := 0; i < 33; i++ { + // just some values to get the added branch in second level (found out trying different values) + if i%2 == 0 { + a += 1 + } else { + b += 1 + } + h := fmt.Sprintf("0x%d%d", a, b) + ks = append(ks, common.HexToHash(h)) + } + + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + addr := common.HexToAddress("0x50efbf12580138bc263c95757826df4e24eb81c9") + var addresses []common.Address + for i := 0; i < len(ks); i++ { + addresses = append(addresses, addr) + } + + toBeModified := common.HexToHash("0x1818") + val := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("ExtensionAddedOneKeyByteSel1", ks[:], values, addresses, trieModifications) +} + +func TestExtensionDeletedOneKeyByteSel1(t *testing.T) { + a := 1 + b := 1 + h := fmt.Sprintf("0x%d%d", a, b) + ks := []common.Hash{common.HexToHash(h)} + for i := 0; i < 33; i++ { + // just some values to get the added branch in second level (found out trying different values) + if i%2 == 0 { + a += 1 + } else { + b += 1 + } + h := fmt.Sprintf("0x%d%d", a, b) + ks = append(ks, common.HexToHash(h)) + } + 
toBeModified := common.HexToHash("0x1818") + ks = append(ks, toBeModified) + + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + addr := common.HexToAddress("0x50efbf12580138bc263c95757826df4e24eb81c9") + var addresses []common.Address + for i := 0; i < len(ks); i++ { + addresses = append(addresses, addr) + } + + val := common.Hash{} // empty value deletes the key + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("ExtensionDeletedOneKeyByteSel1", ks[:], values, addresses, trieModifications) +} + +func TestExtensionOneKeyByteSel2(t *testing.T) { + a := 0 + h := fmt.Sprintf("0xca%d", a) + ks := []common.Hash{common.HexToHash(h)} + for i := 0; i < 876; i++ { + a += 1 + h := fmt.Sprintf("0xca%d", a) + ks = append(ks, common.HexToHash(h)) + } + + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + addr := common.HexToAddress("0x75fbef2150818c32b36c57957226df4e24eb81c9") + var addresses []common.Address + for i := 0; i < len(ks); i++ { + addresses = append(addresses, addr) + } + + toBeModified := common.HexToHash("0xca644") + val := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("ExtensionOneKeyByteSel2", ks[:], values, addresses, trieModifications) +} + +func TestExtensionAddedOneKeyByteSel2(t *testing.T) { + a := 0 + h := fmt.Sprintf("0xca%d", a) + ks := 
[]common.Hash{common.HexToHash(h)} + toBeModifiedStr := "0xca644" + toBeModified := common.HexToHash(toBeModifiedStr) + for i := 0; i < 876; i++ { + a += 1 + h := fmt.Sprintf("0xca%d", a) + if h == toBeModifiedStr { + continue + } + ks = append(ks, common.HexToHash(h)) + } + + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + addr := common.HexToAddress("0x75fbef2150818c32b36c57957226df4e24eb81c9") + var addresses []common.Address + for i := 0; i < len(ks); i++ { + addresses = append(addresses, addr) + } + + val := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("ExtensionAddedOneKeyByteSel2", ks[:], values, addresses, trieModifications) +} + +func TestExtensionDeletedOneKeyByteSel2(t *testing.T) { + a := 0 + h := fmt.Sprintf("0xca%d", a) + ks := []common.Hash{common.HexToHash(h)} + toBeModifiedStr := "0xca644" + toBeModified := common.HexToHash(toBeModifiedStr) + for i := 0; i < 876; i++ { + a += 1 + h := fmt.Sprintf("0xca%d", a) + ks = append(ks, common.HexToHash(h)) + } + + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + addr := common.HexToAddress("0x75fbef2150818c32b36c57957226df4e24eb81c9") + var addresses []common.Address + for i := 0; i < len(ks); i++ { + addresses = append(addresses, addr) + } + + val := common.Hash{} // empty value deletes the key + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: val, + Address: addr, + } + trieModifications := 
[]TrieModification{trieMod} + + updateStateAndPrepareWitness("ExtensionDeletedOneKeyByteSel2", ks[:], values, addresses, trieModifications) +} + +func TestExtensionTwoKeyBytesSel1(t *testing.T) { + // Extension node which has key longer than 1 (2 in this test). This is needed because RLP takes + // different positions. + // Key length > 1 (130 means there are two bytes for key; 160 means there are 32 hash values after it): + // [228 130 0 149 160 ... + // Key length = 1 (no byte specifying the length of key): + // [226 16 160 ... + a := 0 + h := fmt.Sprintf("0x%d", a) + ks := []common.Hash{common.HexToHash(h)} + for i := 0; i < 176; i++ { + // just some values to get the extension with key length > 1 (found out trying different values) + a += 1 + h := fmt.Sprintf("0x%d", a) + ks = append(ks, common.HexToHash(h)) + } + + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + addr := common.HexToAddress("0x75fbef21508183c2b63c59757826df4e24eb81c9") + var addresses []common.Address + for i := 0; i < len(ks); i++ { + addresses = append(addresses, addr) + } + + toBeModified := common.HexToHash("0x172") + val := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("ExtensionTwoKeyBytesSel1", ks[:], values, addresses, trieModifications) +} + +func TestExtensionAddedTwoKeyBytesSel1(t *testing.T) { + a := 0 + h := fmt.Sprintf("0x%d", a) + ks := []common.Hash{common.HexToHash(h)} + toBeModifiedStr := "0x172" + toBeModified := common.HexToHash(toBeModifiedStr) + for i := 0; i < 176; i++ { + // just some values to get the extension with key length > 1 (found out trying different values) + a += 1 + h := 
fmt.Sprintf("0x%d", a) + if h == toBeModifiedStr { + continue + } + ks = append(ks, common.HexToHash(h)) + } + + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + addr := common.HexToAddress("0x75fbef21508183c2b63c59757826df4e24eb81c9") + var addresses []common.Address + for i := 0; i < len(ks); i++ { + addresses = append(addresses, addr) + } + + val := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("ExtensionAddedTwoKeyBytesSel1", ks[:], values, addresses, trieModifications) +} + +func TestExtensionDeletedTwoKeyBytesSel1(t *testing.T) { + a := 0 + h := fmt.Sprintf("0x%d", a) + ks := []common.Hash{common.HexToHash(h)} + toBeModifiedStr := "0x172" + toBeModified := common.HexToHash(toBeModifiedStr) + for i := 0; i < 176; i++ { + // just some values to get the extension with key length > 1 (found out trying different values) + a += 1 + h := fmt.Sprintf("0x%d", a) + ks = append(ks, common.HexToHash(h)) + } + + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + addr := common.HexToAddress("0x75fbef21508183c2b63c59757826df4e24eb81c9") + var addresses []common.Address + for i := 0; i < len(ks); i++ { + addresses = append(addresses, addr) + } + + val := common.Hash{} // empty value deletes the key + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + 
updateStateAndPrepareWitness("ExtensionDeletedTwoKeyBytesSel1", ks[:], values, addresses, trieModifications) +} + +func TestExtensionTwoKeyBytesSel2(t *testing.T) { + a := 0 + h := fmt.Sprintf("0x2ea%d", a) + ks := []common.Hash{common.HexToHash(h)} + for i := 0; i < 876; i++ { + a += 1 + h := fmt.Sprintf("0x2ea%d", a) + ks = append(ks, common.HexToHash(h)) + } + + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + addr := common.HexToAddress("0x75fbef2150818c32b36c57957226df4e24eb81c9") + var addresses []common.Address + for i := 0; i < len(ks); i++ { + addresses = append(addresses, addr) + } + + toBeModified := common.HexToHash("0x2ea772") + val := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("ExtensionTwoKeyBytesSel2", ks[:], values, addresses, trieModifications) +} + +func TestExtensionAddedTwoKeyBytesSel2(t *testing.T) { + a := 0 + h := fmt.Sprintf("0x2ea%d", a) + ks := []common.Hash{common.HexToHash(h)} + toBeModifiedStr := "0x2ea772" + toBeModified := common.HexToHash(toBeModifiedStr) + for i := 0; i < 876; i++ { + a += 1 + h := fmt.Sprintf("0x2ea%d", a) + if h == toBeModifiedStr { + continue + } + ks = append(ks, common.HexToHash(h)) + } + + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + addr := common.HexToAddress("0x75fbef2150818c32b36c57957226df4e24eb81c9") + var addresses []common.Address + for i := 0; i < len(ks); i++ { + addresses = append(addresses, addr) + } + + val := 
common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("ExtensionAddedTwoKeyBytesSel2", ks[:], values, addresses, trieModifications) +} + +func TestExtensionDeletedTwoKeyBytesSel2(t *testing.T) { + a := 0 + h := fmt.Sprintf("0x2ea%d", a) + ks := []common.Hash{common.HexToHash(h)} + toBeModifiedStr := "0x2ea772" + toBeModified := common.HexToHash(toBeModifiedStr) + for i := 0; i < 876; i++ { + a += 1 + h := fmt.Sprintf("0x2ea%d", a) + ks = append(ks, common.HexToHash(h)) + } + + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + addr := common.HexToAddress("0x75fbef2150818c32b36c57957226df4e24eb81c9") + var addresses []common.Address + for i := 0; i < len(ks); i++ { + addresses = append(addresses, addr) + } + + val := common.Hash{} // empty value deletes the key + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("ExtensionDeletedTwoKeyBytesSel2", ks[:], values, addresses, trieModifications) +} + +func TestExtensionInFirstStorageLevel(t *testing.T) { + ks := []common.Hash{common.HexToHash("0x12")} + + for i := 0; i < 10; i++ { + h := fmt.Sprintf("0x%d", i) + ks = append(ks, common.HexToHash(h)) + } + + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + addr := common.HexToAddress("0x75fbef2150818c32b36c57957226df4e24eb81c9") + var addresses []common.Address + for i := 0; i < 
len(ks); i++ { + addresses = append(addresses, addr) + } + + toBeModified := common.HexToHash("0x1") + val := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("ExtensionInFirstStorageLevel", ks[:], values, addresses, trieModifications) +} + +func TestExtensionInFirstStorageLevelOneKeyByte(t *testing.T) { + blockNum := 13284469 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + addr := common.HexToAddress("0x50efbf12580138bc623c95757286df4e24eb81c9") + + statedb.DisableLoadingRemoteAccounts() + + key1 := common.HexToHash("0x12") + val1 := common.BigToHash(big.NewInt(int64(1))) + + statedb.SetState(addr, key1, val1) + + h := fmt.Sprintf("0x%d", 1) + key2 := common.HexToHash(h) + statedb.SetState(addr, key2, val1) + statedb.IntermediateRoot(false) + + toBeModified := common.HexToHash("0x1") + val := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("ExtensionInFirstStorageLevelOneKeyByte", trieModifications, statedb) +} + +func TestExtensionAddedInFirstStorageLevelOneKeyByte(t *testing.T) { + blockNum := 13284469 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + addr := common.HexToAddress("0x50efbf12580138bc623c95757286df4e24eb81c9") + + statedb.DisableLoadingRemoteAccounts() + + key1 := common.HexToHash("0x12") + val1 := 
common.BigToHash(big.NewInt(int64(1))) + + statedb.SetState(addr, key1, val1) + + toBeModified := common.HexToHash("0x1") + // statedb.SetState(addr, toBeModified, val1) + statedb.IntermediateRoot(false) + val := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("ExtensionAddedInFirstStorageLevelOneKeyByte", trieModifications, statedb) +} + +func TestExtensionInFirstStorageLevelTwoKeyBytes(t *testing.T) { + blockNum := 13284469 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + addr := common.HexToAddress("0x50efbf12580138bc623c95757286df4e24eb81c9") + + statedb.DisableLoadingRemoteAccounts() + + key1 := common.HexToHash("0x12") + val1 := common.BigToHash(big.NewInt(int64(1))) + + statedb.SetState(addr, key1, val1) + + toBeModified := common.HexToHash("0xa617") + statedb.SetState(addr, toBeModified, val1) + statedb.IntermediateRoot(false) + + val := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("ExtensionInFirstStorageLevelTwoKeyBytes", trieModifications, statedb) +} + +func TestExtensionAddedInFirstStorageLevelTwoKeyBytes(t *testing.T) { + blockNum := 13284469 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + addr := common.HexToAddress("0x50efbf12580138bc623c95757286df4e24eb81c9") + + statedb.DisableLoadingRemoteAccounts() + + key1 := 
common.HexToHash("0x12") + val1 := common.BigToHash(big.NewInt(int64(1))) + + statedb.SetState(addr, key1, val1) + + toBeModified := common.HexToHash("0xa617") + // statedb.SetState(addr, toBeModified, val1) + statedb.IntermediateRoot(false) + + val := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("ExtensionAddedInFirstStorageLevelTwoKeyBytes", trieModifications, statedb) +} + +func TestExtensionThreeKeyBytesSel2(t *testing.T) { + blockNum := 13284469 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + addr := common.HexToAddress("0x50feb1f2580138bc623c97557286df4e24eb81c9") + + statedb.DisableLoadingRemoteAccounts() + + for i := 0; i < 14; i++ { + h := fmt.Sprintf("0x%d", i) + key2 := common.HexToHash(h) + val1 := common.BigToHash(big.NewInt(int64(1))) + statedb.SetState(addr, key2, val1) + } + + toBeModified := common.HexToHash("0x13234") + val1 := common.BigToHash(big.NewInt(int64(1))) + statedb.SetState(addr, toBeModified, val1) + + val := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("ExtensionThreeKeyBytesSel2", trieModifications, statedb) +} + +func TestExtensionAddedThreeKeyBytesSel2(t *testing.T) { + blockNum := 13284469 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + addr := 
common.HexToAddress("0x50feb1f2580138bc623c97557286df4e24eb81c9") + + statedb.DisableLoadingRemoteAccounts() + + for i := 0; i < 14; i++ { + h := fmt.Sprintf("0x%d", i) + key2 := common.HexToHash(h) + val1 := common.BigToHash(big.NewInt(int64(1))) + statedb.SetState(addr, key2, val1) + } + + toBeModified := common.HexToHash("0x13234") + + val := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("ExtensionAddedThreeKeyBytesSel2", trieModifications, statedb) +} + +func TestExtensionDeletedThreeKeyBytesSel2(t *testing.T) { + blockNum := 13284469 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + addr := common.HexToAddress("0x50feb1f2580138bc623c97557286df4e24eb81c9") + + statedb.DisableLoadingRemoteAccounts() + + for i := 0; i < 14; i++ { + h := fmt.Sprintf("0x%d", i) + key2 := common.HexToHash(h) + val1 := common.BigToHash(big.NewInt(int64(1))) + statedb.SetState(addr, key2, val1) + } + + toBeModified := common.HexToHash("0x13234") + val1 := common.BigToHash(big.NewInt(int64(1))) + statedb.SetState(addr, toBeModified, val1) + + val := common.Hash{} // empty value deletes the key + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("ExtensionDeletedThreeKeyBytesSel2", trieModifications, statedb) +} + +func TestExtensionThreeKeyBytes(t *testing.T) { + blockNum := 13284469 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := 
state.New(blockHeaderParent.Root, database, nil) + addr := common.HexToAddress("0x50fbe1f25aa0843b623c97557286df4e24eb81c9") + + statedb.DisableLoadingRemoteAccounts() + + for i := 0; i < 140; i++ { + h := fmt.Sprintf("0x%d", i) + key2 := common.HexToHash(h) + val1 := common.BigToHash(big.NewInt(int64(1))) + statedb.SetState(addr, key2, val1) + } + + // Let's get a key which makes extension node at the first level. + // (set the breakpoint in trie.go, line 313) + for i := 0; i < 1000; i++ { + h := fmt.Sprintf("0x2111d%d", i) + key2 := common.HexToHash(h) + val1 := common.BigToHash(big.NewInt(int64(1))) + statedb.SetState(addr, key2, val1) + statedb.IntermediateRoot(false) + + // v := common.Hash{} // empty value deletes the key + // statedb.SetState(addr, key2, v) + } + + toBeModified := common.HexToHash("0x333") + val := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: toBeModified, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("ExtensionThreeKeyBytes", trieModifications, statedb) +} + +func TestOnlyLeafInStorageProof(t *testing.T) { + blockNum := 14209217 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + statedb.DisableLoadingRemoteAccounts() + + h := fmt.Sprintf("0x%d", 0) + addr := common.HexToAddress(h) + // statedb.IntermediateRoot(false) + statedb.CreateAccount(addr) + + accountProof, _, _, _, err := statedb.GetProof(addr) + fmt.Println(len(accountProof)) + check(err) + + h = fmt.Sprintf("0x2111d%d", 0) + key2 := common.HexToHash(h) + val1 := common.BigToHash(big.NewInt(int64(1))) + statedb.SetState(addr, key2, val1) + statedb.IntermediateRoot(false) + + // storageProof, _, _, _, err := statedb.GetStorageProof(addr, key2) + // check(err) + + val := 
common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: key2, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("OnlyLeafInStorageProof", trieModifications, statedb) +} + +func TestStorageLeafInFirstLevelAfterPlaceholder(t *testing.T) { + blockNum := 14209217 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + statedb.DisableLoadingRemoteAccounts() + + h := fmt.Sprintf("0x%d", 0) + addr := common.HexToAddress(h) + // statedb.IntermediateRoot(false) + statedb.CreateAccount(addr) + + accountProof, _, _, _, err := statedb.GetProof(addr) + fmt.Println(len(accountProof)) + check(err) + + h1 := fmt.Sprintf("0x2111d%d", 0) + key1 := common.HexToHash(h1) + val1 := common.BigToHash(big.NewInt(int64(1))) + statedb.SetState(addr, key1, val1) + statedb.IntermediateRoot(false) + + // storageProof, _, _, _, err := statedb.GetStorageProof(addr, key2) + // check(err) + + h2 := fmt.Sprintf("0x2111%d", 0) + key2 := common.HexToHash(h2) + + val := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: key2, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("StorageLeafInFirstLevelAfterPlaceholder", trieModifications, statedb) +} + +func TestLeafAddedToEmptyTrie(t *testing.T) { + blockNum := 14209217 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + statedb.DisableLoadingRemoteAccounts() + + h := fmt.Sprintf("0x%d", 0) + addr := common.HexToAddress(h) + // statedb.IntermediateRoot(false) + 
statedb.CreateAccount(addr) + + accountProof, _, _, _, err := statedb.GetProof(addr) + fmt.Println(len(accountProof)) + check(err) + + // emptyTrieHash := statedb.StorageTrie(addr).Hash() + // fmt.Println(emptyTrieHash.Bytes()) + + h = fmt.Sprintf("0x2111d%d", 0) + key2 := common.HexToHash(h) + // val1 := common.BigToHash(big.NewInt(int64(1))) + // statedb.SetState(addr, key2, val1) + statedb.IntermediateRoot(false) + + // storageProof, _, _, _, err := statedb.GetStorageProof(addr, key2) + // check(err) + + val := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: key2, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("LeafAddedToEmptyTrie", trieModifications, statedb) +} + +func TestDeleteToEmptyTrie(t *testing.T) { + blockNum := 14209217 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + statedb.DisableLoadingRemoteAccounts() + + h := fmt.Sprintf("0x%d", 0) + addr := common.HexToAddress(h) + // statedb.IntermediateRoot(false) + statedb.CreateAccount(addr) + + accountProof, _, _, _, err := statedb.GetProof(addr) + fmt.Println(len(accountProof)) + check(err) + + h = fmt.Sprintf("0x2111d%d", 0) + key2 := common.HexToHash(h) + val1 := common.BigToHash(big.NewInt(int64(1))) + statedb.SetState(addr, key2, val1) + statedb.IntermediateRoot(false) + + // storageProof, _, _, _, err := statedb.GetStorageProof(addr, key2) + // check(err) + + val := common.Hash{} // empty value deletes the key + trieMod := TrieModification{ + Type: StorageChanged, + Key: key2, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("DeleteToEmptyTrie", trieModifications, statedb) +} + +func TestUpdateTwoModifications(t *testing.T) { + ks := 
[...]common.Hash{common.HexToHash("0x12"), common.HexToHash("0x21")} + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + addr := common.HexToAddress("0xaaaccf12580138bc2bbceeeaa111df4e42ab81ff") + var addresses []common.Address + for i := 0; i < len(ks); i++ { + addresses = append(addresses, addr) + } + + v1 := common.BigToHash(big.NewInt(int64(17))) + v2 := common.BigToHash(big.NewInt(int64(17))) + + trieMod1 := TrieModification{ + Type: StorageChanged, + Key: ks[0], + Value: v1, + Address: addr, + } + trieMod2 := TrieModification{ + Type: StorageChanged, + Key: ks[1], + Value: v2, + Address: addr, + } + + trieModifications := []TrieModification{trieMod1, trieMod2} + + updateStateAndPrepareWitness("UpdateTwoModifications", ks[:], values, addresses, trieModifications) +} + +func TestNonceModCShort(t *testing.T) { + blockNum := 14766377 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + addr := common.HexToAddress("0x68D5a6E78BD8734B7d190cbD98549B72bFa0800B") + + trieMod := TrieModification{ + Type: NonceChanged, + Nonce: 33, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("NonceModCShort", trieModifications, statedb) +} + +func TestNonceModCLong(t *testing.T) { + blockNum := 14766377 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + addr := common.HexToAddress("0x68D5a6E78BD8734B7d190cbD98549B72bFa0800B") + + trieMod := TrieModification{ + Type: 
NonceChanged, + Nonce: 142, // for long needs to be >= 128 + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("NonceModCLong", trieModifications, statedb) +} + +func TestBalanceModCShort(t *testing.T) { + blockNum := 14766377 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + addr := common.HexToAddress("0x68D5a6E78BD8734B7d190cbD98549B72bFa0800B") + + trieMod := TrieModification{ + Type: BalanceChanged, + Balance: big.NewInt(98), + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("BalanceModCShort", trieModifications, statedb) +} + +func TestBalanceModCLong(t *testing.T) { + blockNum := 14766377 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + addr := common.HexToAddress("0x68D5a6E78BD8734B7d190cbD98549B72bFa0800B") + + trieMod := TrieModification{ + Type: BalanceChanged, + Balance: big.NewInt(439), + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("BalanceModCLong", trieModifications, statedb) +} + +func TestAddAccount(t *testing.T) { + blockNum := 1 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + addr := common.HexToAddress("0xaaaccf12580138bc2bbceeeaa111df4e42ab81ab") + statedb.IntermediateRoot(false) + + trieMod := TrieModification{ + Address: addr, + Type: AccountCreate, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("AddAccount", 
trieModifications, statedb) +} + +func TestDeleteAccount(t *testing.T) { + blockNum := 1 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + addr := common.HexToAddress("0xaaaccf12580138bc2bbceeeaa111df4e42ab81ab") + statedb.CreateAccount(addr) + statedb.IntermediateRoot(false) + + trieMod := TrieModification{ + Address: addr, + Type: AccountDestructed, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("DeleteAccount", trieModifications, statedb) +} + +func TestImplicitlyCreateAccountWithNonce(t *testing.T) { + // When there is a change in an account that does not exist, a placeholder account leaf is added + // as a witness. The last branch contains information about the leaf at `modified_node` being just + // a placeholder and the circuit ensures that when the leaf is a placeholder, the branch (last branch) + // children at `modified_node` is nil. 
+ blockNum := 1 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + addr := common.HexToAddress("0xaabccf12580138bc2bbceeeaa111df4e42ab81ab") + + trieMod := TrieModification{ + Type: NonceChanged, + Nonce: 142, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("ImplicitlyCreateAccountWithNonce", trieModifications, statedb) +} + +func TestImplicitlyCreateAccountWithBalance(t *testing.T) { + blockNum := 1 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + addr := common.HexToAddress("0xaabccf12580138bc2bbceeeaa111df4e42ab81ab") + + trieMod := TrieModification{ + Type: BalanceChanged, + Balance: big.NewInt(7), + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("ImplicitlyCreateAccountWithBalance", trieModifications, statedb) +} + +func TestImplicitlyCreateAccountWithCodeHash(t *testing.T) { + blockNum := 1 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + addr := common.HexToAddress("0xaabccf12580138bc2bbceeeaa111df4e42ab81ab") + codeHash := []byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} + + trieMod := TrieModification{ + Type: BalanceChanged, + CodeHash: codeHash, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("ImplicitlyCreateAccountWithCodeHash", trieModifications, statedb) +} + +func 
TestAccountAddPlaceholderBranch(t *testing.T) { + blockNum := 13284469 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + // We need an account that doesn't exist yet. + i := 21 + h := fmt.Sprintf("0x%d", i) + addr := common.HexToAddress(h) + + trieMod := TrieModification{ + Type: BalanceChanged, // implicitly creating account + Balance: big.NewInt(23), + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("AccountAddPlaceholderBranch", trieModifications, statedb) +} + +func TestAccountDeletePlaceholderBranch(t *testing.T) { + blockNum := 13284469 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + i := 21 + h := fmt.Sprintf("0x%d", i) + addr := common.HexToAddress(h) + + statedb.CreateAccount(addr) + + trieMod := TrieModification{ + Type: AccountDestructed, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("AccountDeletePlaceholderBranch", trieModifications, statedb) +} + +func TestAccountAddPlaceholderExtension(t *testing.T) { + blockNum := 13284469 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + // We need an account that doesn't exist yet. 
+ i := 40 + h := fmt.Sprintf("0x%d", i) + addr := common.HexToAddress(h) + + trieMod := TrieModification{ + Type: BalanceChanged, // implicitly creating account + Balance: big.NewInt(23), + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("AccountAddPlaceholderExtension", trieModifications, statedb) +} + +func TestAccountDeletePlaceholderExtension(t *testing.T) { + blockNum := 13284469 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + i := 40 + h := fmt.Sprintf("0x%d", i) + addr := common.HexToAddress(h) + + statedb.CreateAccount(addr) + + trieMod := TrieModification{ + Type: AccountDestructed, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("AccountDeletePlaceholderExtension", trieModifications, statedb) +} + +// Branch has nil at the specified address. +func TestNonExistingAccountNilObject(t *testing.T) { + // At the account address, there is a nil object. + blockNum := 1 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + addr := common.HexToAddress("0xaaaccf12580138bc2bbceeeaa111df4e42ab81ab") + statedb.IntermediateRoot(false) + + trieMod := TrieModification{ + Address: addr, + Type: AccountDoesNotExist, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("NonExistingAccountNilObject", trieModifications, statedb) +} + +// Branch has a leaf at the specified address (not really at the specified address, but at the one +// that partially overlaps with the specified one). 
+func TestNonExistingAccount(t *testing.T) { + // The leaf is returned that doesn't have the required address - but the two addresses overlaps in all nibbles up to + // to the position in branch. + blockNum := 13284469 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + i := 21 + h := fmt.Sprintf("0x%d", i) + addr := common.HexToAddress(h) + + trieMod := TrieModification{ + Address: addr, + Type: AccountDoesNotExist, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("NonExistingAccount", trieModifications, statedb) +} + +// Account proof after placeholder branch deeper in the trie (branch placeholder not in the +// first or second level). +func TestAccountBranchPlaceholderDeeper(t *testing.T) { + blockNum := 13284469 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + h := fmt.Sprintf("0xa21%d", 0) + addr := common.HexToAddress(h) + // Implicitly create account such that the account from the first level will be + // replaced by a branch. + trieMod := TrieModification{ + Type: BalanceChanged, + Balance: big.NewInt(23), + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitnessSpecial("AccountBranchPlaceholderDeeper", trieModifications, statedb, 0) +} + +func TestLeafInLastLevel(t *testing.T) { + /* + We have an extension node as a root. This extension node key in compact form + is an array of length 32 (160 - 128): 16, 0, 0, ..., 0. + That means 63 nibbles that are all zero: 0 (16 - 16), 0, ..., 0. + The last nibble of key1 (1) and key2 (3) presents the position in branch. + In this case, in a leaf, there is only one key byte: 32. 
+ + storageProof[0] + []uint8 len: 56, cap: 56, [247,160,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,213,128,194,32,1,128,194,32,1,128,128,128,128,128,128,128,128,128,128,128,128,128] + storageProof[1] + []uint8 len: 22, cap: 22, [213,128,194,32,1,128,194,32,1,128,128,128,128,128,128,128,128,128,128,128,128,128] + storageProof[2] + []uint8 len: 3, cap: 3, [194,32,1] + + Other examples: + last level, long value + []uint8 len: 36, cap: 36, [227,32,161,160,187,239,170,18,88,1,56,188,38,60,149,117,120,38,223,78,36,235,129,201,170,170,170,170,170,170,170,170,170,170,170,170] + not last level, short value + []uint8 len: 5, cap: 5, [196,130,32,0,1] + + Note: the "normal" leaf looks like: + short: + [226,160,59,138,106,70,105,186,37,13,38,205,122,69,158,202,157,33,95,131,7,227,58,235,229,3,121,188,90,54,23,236,52,68,1] + + long: + [248,67,160,59,138,106,70,105,186,37,13,38,205,122,69,158,202,157,33,95,131,7,227,58,235,229,3,121,188,90,54,23,236,52,68,161,160,... + */ + blockNum := 0 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + addr := common.HexToAddress("0x50efbf12580138bc623c95757286df4e24eb81c9") + + statedb.DisableLoadingRemoteAccounts() + + statedb.CreateAccount(addr) + + oracle.PreventHashingInSecureTrie = true // to store the unchanged key + + key1 := common.HexToHash("0x1") + val1 := common.BigToHash(big.NewInt(int64(1))) + + statedb.SetState(addr, key1, val1) + + key2 := common.HexToHash("0x3") + statedb.SetState(addr, key2, val1) + statedb.IntermediateRoot(false) + /* + The two keys are the same except in the last nibble: + key1 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1] + key2 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3] + */ + + storageProof, _, _, _, err := statedb.GetStorageProof(addr, key1) + 
check(err) + + fmt.Println(storageProof[0]) + + val := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: key1, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("LeafInLastLevel", trieModifications, statedb) + + oracle.PreventHashingInSecureTrie = false +} + +func TestLeafWithOneNibble(t *testing.T) { + blockNum := 0 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + addr := common.HexToAddress("0x50efbf12580138bc623c95757286df4e24eb81c9") + + statedb.DisableLoadingRemoteAccounts() + + statedb.CreateAccount(addr) + + oracle.PreventHashingInSecureTrie = true // to store the unchanged key + + key1 := common.HexToHash("0x10") + val1 := common.BigToHash(big.NewInt(int64(1))) + + statedb.SetState(addr, key1, val1) + + key2 := common.HexToHash("0x30") + statedb.SetState(addr, key2, val1) + statedb.IntermediateRoot(false) + + storageProof, _, _, _, err := statedb.GetStorageProof(addr, key1) + check(err) + + fmt.Println(storageProof[0]) + + val := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: key1, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("LeafWithOneNibble", trieModifications, statedb) + + oracle.PreventHashingInSecureTrie = false +} + +/* +storageProof[0] +[]uint8 len: 56, cap: 56, [247,149,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,160,131,32,100,192,26,10,249,123,103,55,126,227,156,51,43,248,141,13,184,86,199,239,167,52,34,242,212,138,29,106,251,72] +storageProof[1] +[]uint8 len: 46, cap: 46, [237,128,206,140,48,0,0,0,0,0,0,0,0,0,0,0,17,128,206,140,48,0,0,0,0,0,0,0,0,0,0,0,17,128,128,128,128,128,128,128,128,128,128,128,128,128] +storageProof[2] +[]uint8 len: 
15, cap: 15, [206,140,48,0,0,0,0,0,0,0,0,0,0,0,17] +*/ +func TestLeafWithMoreNibbles(t *testing.T) { + // non-hashed leaf, leaf not in last level + + blockNum := 0 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + addr := common.HexToAddress("0x50efbf12580138bc623c95757286df4e24eb81c9") + + statedb.DisableLoadingRemoteAccounts() + + statedb.CreateAccount(addr) + + oracle.PreventHashingInSecureTrie = true + + // Let us make the extension node shorter than 55 (although this than causes branch to be hashed): + key1 := common.HexToHash("0x100000000000000000000000") + + val1 := common.BigToHash(big.NewInt(int64(17))) + + statedb.SetState(addr, key1, val1) + + key2 := common.HexToHash("0x300000000000000000000000") + statedb.SetState(addr, key2, val1) + statedb.IntermediateRoot(false) + + storageProof, _, _, _, err := statedb.GetStorageProof(addr, key1) + check(err) + + fmt.Println(storageProof[0]) + + val := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: key1, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("LeafWithMoreNibbles", trieModifications, statedb) + + oracle.PreventHashingInSecureTrie = false +} + +// Note: this requires MockProver with config param 11 +/* +func TestNonHashedBranchInBranch(t *testing.T) { + blockNum := 0 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + addr := common.HexToAddress("0x50efbf12580138bc623c95757286df4e24eb81c9") + + statedb.DisableLoadingRemoteAccounts() + + statedb.CreateAccount(addr) + + oracle.PreventHashingInSecureTrie = true // to store 
the unchanged key + + val1 := common.BigToHash(big.NewInt(int64(1))) + + key1Hex := "0x1000000000000000000000000000000000000000000000000000000000000000" + key2Hex := "0x2000000000000000000000000000000000000000000000000000000000000000" + key1 := common.HexToHash(key1Hex) + key2 := common.HexToHash(key2Hex) + fmt.Println(key2) + + statedb.SetState(addr, key2, val1) + + iters := 57 // making the last branch shorter than 32 bytes + for i := 0; i < iters; i++ { + fmt.Println("====") + fmt.Println(key1) + + statedb.SetState(addr, key1, val1) + + if i == iters - 1 { + break + } + + key1Hex = replaceAtIndex(key1Hex, 49, i + 3) // 49 is 1, 50 is 2, ... + key1 = common.HexToHash(key1Hex) + } + + statedb.IntermediateRoot(false) + + val := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: key1, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + GenerateProof("NonHashedBranchInBranch", trieModifications, statedb) + + oracle.PreventHashingInSecureTrie = false +} + +func replaceAtIndex(in string, r rune, i int) string { + out := []rune(in) + out[i] = r + return string(out) +} + +// Note: this requires MockProver with config param 11 +func TestNonHashedExtensionNodeInBranch(t *testing.T) { + blockNum := 0 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + addr := common.HexToAddress("0x50efbf12580138bc623c95757286df4e24eb81c9") + + statedb.DisableLoadingRemoteAccounts() + + statedb.CreateAccount(addr) + + oracle.PreventHashingInSecureTrie = true // to store the unchanged key + + val1 := common.BigToHash(big.NewInt(int64(1))) + + key1Hex := "0x1000000000000000000000000000000000000000000000000000000000000000" + key2Hex := "0x2000000000000000000000000000000000000000000000000000000000000000" + key1 := 
common.HexToHash(key1Hex) + key2 := common.HexToHash(key2Hex) + fmt.Println(key2) + + statedb.SetState(addr, key2, val1) + + iters := 58 // make the extension node shorter than 32 + for i := 0; i < iters; i++ { + statedb.SetState(addr, key1, val1) + + if i == iters - 1 { + break + } + + makeExtension := false + if i == iters - 2 { + makeExtension = true + } + + if !makeExtension { + key1Hex = replaceAtIndex(key1Hex, 49, i + 3) + } else { + key1Hex = replaceAtIndex(key1Hex, 49, i + 1 + 3) + } + + key1 = common.HexToHash(key1Hex) + } + + statedb.IntermediateRoot(false) + + val := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: key1, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + GenerateProof("NonHashedExtensionNodeInBranch", trieModifications, statedb) + + oracle.PreventHashingInSecureTrie = false +} + +// Note: this requires MockProver with config param 11 +func TestNonHashedExtensionNodeInBranchTwoNibbles(t *testing.T) { + blockNum := 0 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + addr := common.HexToAddress("0x50efbf12580138bc623c95757286df4e24eb81c9") + + statedb.DisableLoadingRemoteAccounts() + + statedb.CreateAccount(addr) + + oracle.PreventHashingInSecureTrie = true // to store the unchanged key + + val1 := common.BigToHash(big.NewInt(int64(1))) + + key1Hex := "0x1000000000000000000000000000000000000000000000000000000000000000" + key2Hex := "0x2000000000000000000000000000000000000000000000000000000000000000" + key1 := common.HexToHash(key1Hex) + key2 := common.HexToHash(key2Hex) + fmt.Println(key2) + + statedb.SetState(addr, key2, val1) + + iters := 58 // make the extension node shorter than 32 + for i := 0; i < iters; i++ { + statedb.SetState(addr, key1, val1) + + if i 
== iters - 1 { + break + } + + makeExtension := false + if i == iters - 2 { + makeExtension = true + } + + if !makeExtension { + key1Hex = replaceAtIndex(key1Hex, 49, i + 3) + } else { + key1Hex = replaceAtIndex(key1Hex, 49, i + 2 + 3) // +2 to have two nibbles + } + + key1 = common.HexToHash(key1Hex) + } + + statedb.IntermediateRoot(false) + + val := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: key1, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + GenerateProof("NonHashedExtensionNodeInBranchTwoNibbles", trieModifications, statedb) + + oracle.PreventHashingInSecureTrie = false +} +*/ + +func TestBranchAfterExtNode(t *testing.T) { + blockNum := 0 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + addr := common.HexToAddress("0x40efbf12580138bc623c95757286df4e24eb81c9") + + statedb.DisableLoadingRemoteAccounts() + + statedb.CreateAccount(addr) + + oracle.PreventHashingInSecureTrie = true // to store the unchanged key + + key1Hex := "0x1000000000000000000000000" + key2Hex := "0x2000000000000000000000000" + key1 := common.HexToHash(key1Hex) + key2 := common.HexToHash(key2Hex) + + val1 := common.BigToHash(big.NewInt(int64(111))) + val2 := common.BigToHash(big.NewInt(int64(222))) + + statedb.SetState(addr, key1, val1) + fmt.Println(key1) + + statedb.SetState(addr, key2, val2) + fmt.Println(key2) + + statedb.IntermediateRoot(false) + + key1Hex += "1" + key3 := common.HexToHash(key1Hex) + statedb.SetState(addr, key3, val2) + fmt.Println(key3) + + statedb.IntermediateRoot(false) + + val := common.BigToHash(big.NewInt(int64(17))) + trieMod := TrieModification{ + Type: StorageChanged, + Key: key1, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + 
prepareWitness("BranchAfterExtNode", trieModifications, statedb) + + oracle.PreventHashingInSecureTrie = false +} + +func TestNonExistingStorage(t *testing.T) { + ks := [...]common.Hash{common.HexToHash("0x11"), common.HexToHash("0x12")} + // hexed keys: + // [3,1,14,12,12,... + // [11,11,8,10,6,... + // First we have a branch with children at position 3 and 11. + // ks := [...]common.Hash{common.HexToHash("0x12"), common.HexToHash("0x21")} + + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + addr := common.HexToAddress("0x75acef12a01883c2b3fc57957826df4e24e8baaa") + var addresses []common.Address + for i := 0; i < len(ks); i++ { + addresses = append(addresses, addr) + } + + // This key is not in the trie yet, its nibbles: + // [3,10,6,3,5,7,... + trieMod := TrieModification{ + Type: StorageDoesNotExist, + Key: common.HexToHash("0x21"), + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("NonExistingStorage", ks[:], values, addresses, trieModifications) +} + +func TestNonExistingStorageLong(t *testing.T) { + ks := [...]common.Hash{common.HexToHash("0x11"), common.HexToHash("0x12")} + + var values []common.Hash + v1 := common.FromHex("0xbbefaa12580138bc263c95757826df4e24eb81c9aaaaaaaaaaaaaaaaaaaaaaaa") + v2 := common.BytesToHash(v1) + for i := 0; i < len(ks); i++ { + values = append(values, v2) + } + addr := common.HexToAddress("0x75acef12a01883c2b3fc57957826df4e24e8b19c") + var addresses []common.Address + for i := 0; i < len(ks); i++ { + addresses = append(addresses, addr) + } + + trieMod := TrieModification{ + Type: StorageDoesNotExist, + Key: common.HexToHash("0x21"), + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("NonExistingStorageLong", ks[:], values, 
addresses, trieModifications) +} + +func TestNonExistingStorageNil(t *testing.T) { + // Nil in branch. + ks := [...]common.Hash{common.HexToHash("0x11"), common.HexToHash("0x12")} + // hexed keys: + // [3,1,14,12,12,... + // [11,11,8,10,6,... + // First we have a branch with children at position 3 and 11. + // ks := [...]common.Hash{common.HexToHash("0x12"), common.HexToHash("0x21")} + + var values []common.Hash + for i := 0; i < len(ks); i++ { + values = append(values, common.BigToHash(big.NewInt(int64(i+1)))) // don't put 0 value because otherwise nothing will be set (if 0 is prev value), see state_object.go line 279 + } + addr := common.HexToAddress("0x75acef12a01883c2b3fc57957826df4e24e8baaa") + var addresses []common.Address + for i := 0; i < len(ks); i++ { + addresses = append(addresses, addr) + } + + // This key is not in the trie yet, its nibbles: + // [3,10,6,3,5,7,... + trieMod := TrieModification{ + Type: StorageDoesNotExist, + Key: common.HexToHash("0x22"), + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + updateStateAndPrepareWitness("NonExistingStorageNil", ks[:], values, addresses, trieModifications) +} diff --git a/mpt-witness-generator/witness/gen_witness_from_local_blockchain_test.go b/mpt-witness-generator/witness/gen_witness_from_local_blockchain_test.go new file mode 100644 index 0000000000..aab3b97734 --- /dev/null +++ b/mpt-witness-generator/witness/gen_witness_from_local_blockchain_test.go @@ -0,0 +1,815 @@ +package witness + +import ( + "fmt" + "math/big" + "os" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/privacy-scaling-explorations/mpt-witness-generator/oracle" + "github.com/privacy-scaling-explorations/mpt-witness-generator/state" +) + +func SkipIfNoGeth(t *testing.T) { + if os.Getenv("NO_GETH") != "" { + t.Skip("Skipping test that requires geth") + } +} + +func TestNonExistingAccountNilObjectInFirstLevel(t *testing.T) { + SkipIfNoGeth(t) + // geth --dev --http --ipcpath 
~/Library/Ethereum/geth.ipc + oracle.NodeUrl = oracle.LocalUrl + blockNum := 0 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + i := 21 + h := fmt.Sprintf("0x%d", i) + // There is one branch in the state trie, but at this address there is only a nil object: + addr := common.HexToAddress(h) + + trieMod := TrieModification{ + Address: addr, + Type: AccountDoesNotExist, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("NonExistingAccountNilObjectInFirstLevel", trieModifications, statedb) + + oracle.NodeUrl = oracle.RemoteUrl +} + +func TestNonExistingAccountInFirstLevel(t *testing.T) { + SkipIfNoGeth(t) + // Only one element in the trie - the account with "wrong" address. + // geth --dev --http --ipcpath ~/Library/Ethereum/geth.ipc + oracle.NodeUrl = oracle.LocalUrl + blockNum := 0 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + i := 10 + h := fmt.Sprintf("0x%d", i) + addr := common.HexToAddress(h) + + trieMod := TrieModification{ + Type: AccountDoesNotExist, + Balance: big.NewInt(23), + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitnessSpecial("NonExistingAccountInFirstLevel", trieModifications, statedb, 4) + + oracle.NodeUrl = oracle.RemoteUrl +} + +func TestNonExistingAccountAfterFirstLevel(t *testing.T) { + SkipIfNoGeth(t) + // geth --dev --http --ipcpath ~/Library/Ethereum/geth.ipc + oracle.NodeUrl = oracle.LocalUrl + blockNum := 0 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, 
_ := state.New(blockHeaderParent.Root, database, nil) + + i := 22 + h := fmt.Sprintf("0x%d", i) + addr := common.HexToAddress(h) + + trieMod := TrieModification{ + Address: addr, + Type: AccountDoesNotExist, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("NonExistingAccountAfterFirstLevel", trieModifications, statedb) + + oracle.NodeUrl = oracle.RemoteUrl +} + +// Account leaf after one branch. No storage proof. +func TestAccountAfterFirstLevel(t *testing.T) { + SkipIfNoGeth(t) + // geth --dev --http --ipcpath ~/Library/Ethereum/geth.ipc + oracle.NodeUrl = oracle.LocalUrl + blockNum := 0 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + i := 21 + h := fmt.Sprintf("0x%d", i) + addr := common.HexToAddress(h) + + trieMod := TrieModification{ + Type: BalanceChanged, + Balance: big.NewInt(23), + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("AccountAfterFirstLevel", trieModifications, statedb) + + oracle.NodeUrl = oracle.RemoteUrl +} + +// Account leaf in first level in C proof, placeholder leaf in S proof. No storage proof. 
+func TestAccountInFirstLevel(t *testing.T) { + SkipIfNoGeth(t) + // geth --dev --http --ipcpath ~/Library/Ethereum/geth.ipc + oracle.NodeUrl = oracle.LocalUrl + blockNum := 0 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + i := 21 + h := fmt.Sprintf("0x%d", i) + addr := common.HexToAddress(h) + + trieMod := TrieModification{ + Type: NonceChanged, + Balance: big.NewInt(23), + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitnessSpecial("AccountInFirstLevel", trieModifications, statedb, 1) + + oracle.NodeUrl = oracle.RemoteUrl +} + +func TestAccountExtensionInFirstLevel(t *testing.T) { + SkipIfNoGeth(t) + // geth --dev --http --ipcpath ~/Library/Ethereum/geth.ipc + oracle.NodeUrl = oracle.LocalUrl + blockNum := 0 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + h := fmt.Sprintf("0xa21%d", 0) + addr := common.HexToAddress(h) + found := false + for i := 0; i < 100000; i++ { + h := fmt.Sprintf("0xa21%d", i) + addr = common.HexToAddress(h) + + statedb.CreateAccount(addr) + statedb.IntermediateRoot(false) + + oracle.PrefetchAccount(statedb.Db.BlockNumber, addr, nil) + proof1, _, _, _, err := statedb.GetProof(addr) + check(err) + + for j := 0; j < len(proof1)-1; j++ { + if proof1[j][0] < 248 { // searching extension node + found = true + } + } + + if found { + break + } + } + + trieMod := TrieModification{ + Type: NonceChanged, + Balance: big.NewInt(23), + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitnessSpecial("AccountExtensionInFirstLevel", trieModifications, statedb, 5) + + oracle.NodeUrl = oracle.RemoteUrl +} 
+ +func TestAccountBranchPlaceholder(t *testing.T) { + SkipIfNoGeth(t) + // geth --dev --http --ipcpath ~/Library/Ethereum/geth.ipc + oracle.NodeUrl = oracle.LocalUrl + blockNum := 0 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + h := fmt.Sprintf("0xab%d", 0) + addr := common.HexToAddress(h) + // Implicitly create account such that the account from the first level will be + // replaced by a branch. + trieMod := TrieModification{ + Type: NonceChanged, + Balance: big.NewInt(23), + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("AccountBranchPlaceholder", trieModifications, statedb) + + oracle.NodeUrl = oracle.RemoteUrl +} + +func TestAccountBranchPlaceholderInFirstLevel(t *testing.T) { + SkipIfNoGeth(t) + // geth --dev --http --ipcpath ~/Library/Ethereum/geth.ipc + oracle.NodeUrl = oracle.LocalUrl + blockNum := 0 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + /* + for i := 0; i < 100000; i++ { + h := fmt.Sprintf("0xa21%d", i) + addr := common.HexToAddress(h) + + oracle.PrefetchAccount(statedb.Db.BlockNumber, addr, nil) + proof1, _, _, _, err := statedb.GetProof(addr) + check(err) + + statedb.CreateAccount(addr) + statedb.IntermediateRoot(false) + + // addrHash1 := crypto.Keccak256Hash(addr.Bytes()) + // addrHash1[31] = (addrHash1[31] + 1) % 255 // just some change + + // oracle.PrefetchAccount(statedb.Db.BlockNumber, addr1, nil) + // proof11, _, _, err := statedb.GetProofByHash(common.BytesToHash(addrHash1.Bytes())) + + statedb.CreateAccount(addr) + statedb.IntermediateRoot(false) + + proof2, _, _, _, err := statedb.GetProof(addr) + 
check(err) + if len(proof1) + 1 == len(proof2) && len(proof1) == 1 { + elems, _, err := rlp.SplitList(proof1[len(proof1)-1]) + if err != nil { + fmt.Println("decode error", err) + } + switch c, _ := rlp.CountValues(elems); c { + case 2: + fmt.Println("2") + case 17: + default: + fmt.Println("invalid number of list elements") + } + } + } + */ + + h := fmt.Sprintf("0xab%d", 0) + addr := common.HexToAddress(h) + // Implicitly create account such that the account from the first level will be + // replaced by a branch. + trieMod := TrieModification{ + Type: BalanceChanged, + Balance: big.NewInt(23), + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitnessSpecial("AccountBranchPlaceholderInFirstLevel", trieModifications, statedb, 3) // don't use the same number as in the test above + + oracle.NodeUrl = oracle.RemoteUrl +} + +func TestStorageInFirstAccountInFirstLevel(t *testing.T) { + SkipIfNoGeth(t) + // geth --dev --http --ipcpath ~/Library/Ethereum/geth.ipc + oracle.NodeUrl = oracle.LocalUrl + blockNum := 0 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + i := 21 + h := fmt.Sprintf("0x%d", i) + addr := common.HexToAddress(h) + + trieMod := TrieModification{ + Type: StorageChanged, + Key: common.HexToHash("0x12"), + Value: common.BigToHash(big.NewInt(int64(17))), + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitnessSpecial("StorageInFirstAccountInFirstLevel", trieModifications, statedb, 1) + + oracle.NodeUrl = oracle.RemoteUrl +} + +func TestExtensionTwoNibblesInEvenLevel(t *testing.T) { + SkipIfNoGeth(t) + // geth --dev --http --ipcpath ~/Library/Ethereum/geth.ipc + oracle.NodeUrl = oracle.LocalUrl + blockNum := 0 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := 
oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + h := fmt.Sprintf("0xa21%d", 0) + addr := common.HexToAddress(h) + found := false + for i := 0; i < 100000; i++ { + h := fmt.Sprintf("0xa21%d", i) + addr = common.HexToAddress(h) + + statedb.CreateAccount(addr) + statedb.IntermediateRoot(false) + + oracle.PrefetchAccount(statedb.Db.BlockNumber, addr, nil) + proof1, _, _, _, err := statedb.GetProof(addr) + check(err) + + for j := 0; j < len(proof1)-1; j++ { + if proof1[j][0] == 228 && proof1[j][1] == 130 && j%2 == 0 { + fmt.Println(proof1[j]) + found = true + } + } + + if found { + break + } + } + + trieMod := TrieModification{ + Type: NonceChanged, + Nonce: 33, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("AccountExtensionTwoNibblesInEvenLevel", trieModifications, statedb) + + oracle.NodeUrl = oracle.RemoteUrl +} + +func TestExtensionThreeNibblesInEvenLevel(t *testing.T) { + SkipIfNoGeth(t) + // geth --dev --http --ipcpath ~/Library/Ethereum/geth.ipc + oracle.NodeUrl = oracle.LocalUrl + blockNum := 0 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + h := fmt.Sprintf("0xa21%d", 0) + addr := common.HexToAddress(h) + found := false + for i := 0; i < 100000; i++ { + h := fmt.Sprintf("0xa21%d", i) + addr = common.HexToAddress(h) + + statedb.CreateAccount(addr) + statedb.IntermediateRoot(false) + + oracle.PrefetchAccount(statedb.Db.BlockNumber, addr, nil) + proof1, _, _, _, err := statedb.GetProof(addr) + check(err) + + for j := 0; j < len(proof1)-1; j++ { + if proof1[j][0] == 228 && proof1[j][1] == 130 && j%2 == 1 { + fmt.Println(proof1[j]) + found = true + } + } + + if found { + break + } + } + + trieMod := 
TrieModification{ + Type: NonceChanged, + Nonce: 33, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("AccountExtensionThreeNibblesInEvenLevel", trieModifications, statedb) + + oracle.NodeUrl = oracle.RemoteUrl +} + +func TestExtensionThreeNibblesInOddLevel(t *testing.T) { + SkipIfNoGeth(t) + // geth --dev --http --ipcpath ~/Library/Ethereum/geth.ipc + oracle.NodeUrl = oracle.LocalUrl + blockNum := 0 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + h := fmt.Sprintf("0xa21%d", 0) + addr := common.HexToAddress(h) + found := false + for i := 0; i < 100000; i++ { + h := fmt.Sprintf("0xa21%d", i) + addr = common.HexToAddress(h) + + statedb.CreateAccount(addr) + statedb.IntermediateRoot(false) + + oracle.PrefetchAccount(statedb.Db.BlockNumber, addr, nil) + proof1, _, _, _, err := statedb.GetProof(addr) + check(err) + + for j := 0; j < len(proof1)-1; j++ { + if proof1[j][0] == 228 && proof1[j][1] == 130 && proof1[j][2] != 0 && j%2 == 0 { + fmt.Println(proof1[j]) + found = true + } + } + + if found { + break + } + } + + trieMod := TrieModification{ + Type: NonceChanged, + Nonce: 33, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("AccountExtensionThreeNibblesInOddLevel", trieModifications, statedb) + + oracle.NodeUrl = oracle.RemoteUrl +} + +func TestStorageInFirstLevelNonExisting(t *testing.T) { + SkipIfNoGeth(t) + // geth --dev --http --ipcpath ~/Library/Ethereum/geth.ipc + oracle.NodeUrl = oracle.LocalUrl + blockNum := 0 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + i := 21 + h := 
fmt.Sprintf("0x%d", i) + addr := common.HexToAddress(h) + + val1 := common.BigToHash(big.NewInt(int64(1))) + statedb.SetState(addr, common.HexToHash("0x11"), val1) + statedb.IntermediateRoot(false) + + trieMod := TrieModification{ + Type: StorageDoesNotExist, + Key: common.HexToHash("0x12"), + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("StorageInFirstLevelNonExisting", trieModifications, statedb) + + oracle.NodeUrl = oracle.RemoteUrl +} + +func TestStorageInFirstLevelNonExistingLong(t *testing.T) { + SkipIfNoGeth(t) + // geth --dev --http --ipcpath ~/Library/Ethereum/geth.ipc + oracle.NodeUrl = oracle.LocalUrl + blockNum := 0 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + + i := 21 + h := fmt.Sprintf("0x%d", i) + addr := common.HexToAddress(h) + + v1 := common.FromHex("0xbbefaa12580138bc263c95757826df4e24eb81c9aaaaaaaaaaaaaaaaaaaaaaaa") + val1 := common.BytesToHash(v1) + statedb.SetState(addr, common.HexToHash("0x11"), val1) + statedb.IntermediateRoot(false) + + trieMod := TrieModification{ + Type: StorageDoesNotExist, + Key: common.HexToHash("0x12"), + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("StorageInFirstLevelNonExistingLong", trieModifications, statedb) + + oracle.NodeUrl = oracle.RemoteUrl +} + +func ExtNodeInserted(key1, key2, key3 common.Hash, testName string) { + oracle.NodeUrl = oracle.LocalUrl + + blockNum := 0 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + addr := common.HexToAddress("0x50efbf12580138bc623c95757286df4e24eb81c9") + + statedb.DisableLoadingRemoteAccounts() + 
statedb.CreateAccount(addr) + oracle.PreventHashingInSecureTrie = true // to store the unchanged key + + // make the value long to have a hashed branch + v1 := common.FromHex("0xbbefaa12580138bc263c95757826df4e24eb81c9aaaaaaaaaaaaaaaaaaaaaaaa") + val1 := common.BytesToHash(v1) + statedb.SetState(addr, key1, val1) + + statedb.SetState(addr, key2, val1) + statedb.IntermediateRoot(false) + + v1 = common.FromHex("0xbb") + val := common.BytesToHash(v1) + trieMod := TrieModification{ + Type: StorageChanged, + Key: key3, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness(testName, trieModifications, statedb) + + oracle.PreventHashingInSecureTrie = false +} + +func ExtNodeDeleted(key1, key2, key3 common.Hash, testName string) { + oracle.NodeUrl = oracle.LocalUrl + + blockNum := 0 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + addr := common.HexToAddress("0x50efbf12580138bc623c95757286df4e24eb81c9") + + statedb.DisableLoadingRemoteAccounts() + statedb.CreateAccount(addr) + oracle.PreventHashingInSecureTrie = true // to store the unchanged key + + // make the value long to have a hashed branch + v1 := common.FromHex("0xbbefaa12580138bc263c95757826df4e24eb81c9aaaaaaaaaaaaaaaaaaaaaaaa") + val1 := common.BytesToHash(v1) + statedb.SetState(addr, key1, val1) + statedb.SetState(addr, key2, val1) + statedb.SetState(addr, key3, val1) + + statedb.IntermediateRoot(false) + + val := common.Hash{} // empty value deletes the key + trieMod := TrieModification{ + Type: StorageChanged, + Key: key3, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness(testName, trieModifications, statedb) + + oracle.PreventHashingInSecureTrie = false +} + +func TestExtNodeInsertedBefore6After1FirstLevel(t 
*testing.T) { + t.Skip("To be fixed") + key1 := common.HexToHash("0x1234561000000000000000000000000000000000000000000000000000000000") + // key1 bytes: [1 * 16 + 2, 3 * 16 + 4, 5 * 16 + 6, 1 * 16, 0, ..., 0] + + key2 := common.HexToHash("0x1234563000000000000000000000000000000000000000000000000000000000") + // key2 bytes: [1 * 16 + 2, 3 * 16 + 4, 5 * 16 + 6, 3 * 16, 0, ..., 0] + // We now have an extension node with nibbles: [1, 2, 3, 4, 5, 6]. + + // The branch will be inserted at 0x1234, it will have nodes at 4 and 5: + key3 := common.HexToHash("0x1234400000000000000000000000000000000000000000000000000000000000") + + ExtNodeInserted(key1, key2, key3, "ExtNodeInsertedBefore6After1FirstLevel") +} + +/* +func TestExtNodeDeletedBefore6After1FirstLevel(t *testing.T) { + key1 := common.HexToHash("0x1234561000000000000000000000000000000000000000000000000000000000") + // key1 bytes: [1 * 16 + 2, 3 * 16 + 4, 5 * 16 + 6, 1 * 16, 0, ..., 0] + + key2 := common.HexToHash("0x1234563000000000000000000000000000000000000000000000000000000000") + // key2 bytes: [1 * 16 + 2, 3 * 16 + 4, 5 * 16 + 6, 3 * 16, 0, ..., 0] + // We now have an extension node with nibbles: [1, 2, 3, 4, 5, 6]. 
+ + key3 := common.HexToHash("0x1234400000000000000000000000000000000000000000000000000000000000") + + ExtNodeDeleted(key1, key2, key3, "ExtNodeDeletedBefore6After1FirstLevel") +} + +func TestExtNodeInsertedBefore6After2FirstLevel(t *testing.T) { + key1 := common.HexToHash("0x1234561000000000000000000000000000000000000000000000000000000000") + // key1 bytes: [1 * 16 + 2, 3 * 16 + 4, 5 * 16 + 6, 1 * 16, 0, ..., 0] + + key2 := common.HexToHash("0x1234563000000000000000000000000000000000000000000000000000000000") + // key2 bytes: [1 * 16 + 2, 3 * 16 + 4, 5 * 16 + 6, 3 * 16, 0, ..., 0] + + key3 := common.HexToHash("0x1235400000000000000000000000000000000000000000000000000000000000") + // key3 bytes: [1 * 16 + 2, 3 * 16 + 5, 4 * 16 + 0, 0, ..., 0] + + ExtNodeInserted(key1, key2, key3, "ExtNodeInsertedBefore6After2FirstLevel") +} + +func TestExtNodeInsertedBefore6After4FirstLevel(t *testing.T) { + key1 := common.HexToHash("0x1234561000000000000000000000000000000000000000000000000000000000") + // key1 bytes: [1 * 16 + 2, 3 * 16 + 4, 5 * 16 + 6, 1 * 16, 0, ..., 0] + + key2 := common.HexToHash("0x1234563000000000000000000000000000000000000000000000000000000000") + // key2 bytes: [1 * 16 + 2, 3 * 16 + 4, 5 * 16 + 6, 3 * 16, 0, ..., 0] + + key3 := common.HexToHash("0x1635400000000000000000000000000000000000000000000000000000000000") + + ExtNodeInserted(key1, key2, key3, "ExtNodeInsertedBefore6After4FirstLevel") +} + +func TestExtNodeInsertedBefore5After3FirstLevel(t *testing.T) { + key1 := common.HexToHash("0x2345610000000000000000000000000000000000000000000000000000000000") + key2 := common.HexToHash("0x2345630000000000000000000000000000000000000000000000000000000000") + key3 := common.HexToHash("0x2635400000000000000000000000000000000000000000000000000000000000") + + ExtNodeInserted(key1, key2, key3, "ExtNodeInsertedBefore5After3FirstLevel") +} + +func TestExtNodeInsertedBefore5After2FirstLevel(t *testing.T) { + key1 := 
common.HexToHash("0x2345610000000000000000000000000000000000000000000000000000000000") + key2 := common.HexToHash("0x2345630000000000000000000000000000000000000000000000000000000000") + key3 := common.HexToHash("0x2335400000000000000000000000000000000000000000000000000000000000") + + ExtNodeInserted(key1, key2, key3, "ExtNodeInsertedBefore5After2FirstLevel") +} + +func TestExtNodeInsertedBefore5After1FirstLevel(t *testing.T) { + key1 := common.HexToHash("0x2345610000000000000000000000000000000000000000000000000000000000") + key2 := common.HexToHash("0x2345630000000000000000000000000000000000000000000000000000000000") + key3 := common.HexToHash("0x2343540000000000000000000000000000000000000000000000000000000000") + + ExtNodeInserted(key1, key2, key3, "ExtNodeInsertedBefore5After1FirstLevel") +} + +func TestExtNodeInsertedBefore4After1(t *testing.T) { + oracle.NodeUrl = oracle.LocalUrl + + blockNum := 0 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + addr := common.HexToAddress("0x50efbf12580138bc623c95757286df4e24eb81c9") + + statedb.DisableLoadingRemoteAccounts() + + statedb.CreateAccount(addr) + + oracle.PreventHashingInSecureTrie = true // to store the unchanged key + + val0 := common.BigToHash(big.NewInt(int64(1))) + key0 := common.HexToHash("0x1000000000000000000000000000000000000000000000000000000000000000") + statedb.SetState(addr, key0, val0) + + key00 := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000") + statedb.SetState(addr, key00, val0) + + key1 := common.HexToHash("0x1234561000000000000000000000000000000000000000000000000000000000") + // key1 bytes: [1 * 16 + 2, 3 * 16 + 4, 5 * 16 + 6, 1 * 16, 0, ..., 0] + + // make the value long to have a hashed branch + v1 := 
common.FromHex("0xbbefaa12580138bc263c95757826df4e24eb81c9aaaaaaaaaaaaaaaaaaaaaaaa") + val1 := common.BytesToHash(v1) + // val1 := common.BigToHash(big.NewInt(int64(1))) + statedb.SetState(addr, key1, val1) + + key2 := common.HexToHash("0x1234563000000000000000000000000000000000000000000000000000000000") + // key2 bytes: [1 * 16 + 2, 3 * 16 + 4, 5 * 16 + 6, 3 * 16, 0, ..., 0] + + // We now have an extension node with nibbles: [3, 4, 5, 6]. + + statedb.SetState(addr, key2, val1) + statedb.IntermediateRoot(false) + + key3 := common.HexToHash("0x1234400000000000000000000000000000000000000000000000000000000000") + // After adding key3 we will have an extension node with nibbles [3, 4] + // and another one with nibbles [5, 6]. + + v1 = common.FromHex("0xbb") + val := common.BytesToHash(v1) + trieMod := TrieModification{ + Type: StorageMod, + Key: key3, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("ExtNodeInsertedBefore4After1", trieModifications, statedb) + + oracle.PreventHashingInSecureTrie = false +} + +func TestExtNodeDeletedBefore4After1(t *testing.T) { + oracle.NodeUrl = oracle.LocalUrl + + blockNum := 0 + blockNumberParent := big.NewInt(int64(blockNum)) + blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil) + database := state.NewDatabase(blockHeaderParent) + statedb, _ := state.New(blockHeaderParent.Root, database, nil) + addr := common.HexToAddress("0x50efbf12580138bc623c95757286df4e24eb81c9") + + statedb.DisableLoadingRemoteAccounts() + + statedb.CreateAccount(addr) + + oracle.PreventHashingInSecureTrie = true // to store the unchanged key + + val0 := common.BigToHash(big.NewInt(int64(1))) + key0 := common.HexToHash("0x1000000000000000000000000000000000000000000000000000000000000000") + statedb.SetState(addr, key0, val0) + + key00 := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000") + statedb.SetState(addr, key00, val0) + + key1 := 
common.HexToHash("0x1234561000000000000000000000000000000000000000000000000000000000") + + // make the value long to have a hashed branch + v1 := common.FromHex("0xbbefaa12580138bc263c95757826df4e24eb81c9aaaaaaaaaaaaaaaaaaaaaaaa") + val1 := common.BytesToHash(v1) + statedb.SetState(addr, key1, val1) + + key2 := common.HexToHash("0x1234563000000000000000000000000000000000000000000000000000000000") + statedb.SetState(addr, key2, val1) + + key3 := common.HexToHash("0x1234400000000000000000000000000000000000000000000000000000000000") + statedb.SetState(addr, key3, val1) + statedb.IntermediateRoot(false) + + val := common.Hash{} // empty value deletes the key + trieMod := TrieModification{ + Type: StorageMod, + Key: key3, + Value: val, + Address: addr, + } + trieModifications := []TrieModification{trieMod} + + prepareWitness("ExtNodeDeletedBefore4After1", trieModifications, statedb) + + oracle.PreventHashingInSecureTrie = false +} + +func TestExtNodeInNewBranchFirstLevel(t *testing.T) { + key1 := common.HexToHash("0x2345610000000000000000000000000000000000000000000000000000000000") + key2 := common.HexToHash("0x2345630000000000000000000000000000000000000000000000000000000000") + key3 := common.HexToHash("0x6354000000000000000000000000000000000000000000000000000000000000") + + ExtNodeInserted(key1, key2, key3, "ExtNodeInsertedInNewBranchFirstLevel") +} + +func TestExtNodeDeletedBranchDeletedFirstLevel(t *testing.T) { + key1 := common.HexToHash("0x2345610000000000000000000000000000000000000000000000000000000000") + key2 := common.HexToHash("0x2345630000000000000000000000000000000000000000000000000000000000") + key3 := common.HexToHash("0x6354000000000000000000000000000000000000000000000000000000000000") + + ExtNodeDeleted(key1, key2, key3, "ExtNodeDeletedBranchDeletedFirstLevel") +} + +func TestExtNodeInsertedExtShortIsBranchFirstLevel(t *testing.T) { + key1 := common.HexToHash("0x2345610000000000000000000000000000000000000000000000000000000000") + key2 := 
common.HexToHash("0x2345630000000000000000000000000000000000000000000000000000000000") + key3 := common.HexToHash("0x2345100000000000000000000000000000000000000000000000000000000000") + + ExtNodeInserted(key1, key2, key3, "ExtNodeInsertedExtShortIsBranchFirstLevel") +} + +func TestExtNodeDeletedExtShortIsBranchFirstLevel(t *testing.T) { + key1 := common.HexToHash("0x2345610000000000000000000000000000000000000000000000000000000000") + key2 := common.HexToHash("0x2345630000000000000000000000000000000000000000000000000000000000") + key3 := common.HexToHash("0x2345100000000000000000000000000000000000000000000000000000000000") + + ExtNodeDeleted(key1, key2, key3, "ExtNodeInsertedExtShortIsBranchFirstLevel") +} +*/ diff --git a/mpt-witness-generator/witness/gen_witness_transactions_test.go b/mpt-witness-generator/witness/gen_witness_transactions_test.go new file mode 100644 index 0000000000..442885246c --- /dev/null +++ b/mpt-witness-generator/witness/gen_witness_transactions_test.go @@ -0,0 +1,101 @@ +package witness + +import ( + "fmt" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" + "github.com/privacy-scaling-explorations/mpt-witness-generator/trie" + "github.com/privacy-scaling-explorations/mpt-witness-generator/types" +) + +func createTransaction(ind int) *types.Transaction { + key, _ := crypto.GenerateKey() + signer := types.LatestSigner(params.TestChainConfig) + + amount := math.BigPow(2, int64(ind)) + price := big.NewInt(300000) + data := make([]byte, 100) + tx := types.NewTransaction(uint64(ind), common.Address{}, amount, 123457, price, data) + signedTx, err := types.SignTx(tx, signer, key) + if err != nil { + panic(err) + } + + return signedTx +} + +func TestTransactions(t *testing.T) { + t.Skip("failing test") + txs := 
make([]*types.Transaction, 70) + key, _ := crypto.GenerateKey() + signer := types.LatestSigner(params.TestChainConfig) + + for i := range txs { + amount := math.BigPow(2, int64(i)) + price := big.NewInt(300000) + data := make([]byte, 100) + tx := types.NewTransaction(uint64(i), common.Address{}, amount, 123457, price, data) + signedTx, err := types.SignTx(tx, signer, key) + if err != nil { + panic(err) + } + txs[i] = signedTx + } + + db := rawdb.NewMemoryDatabase() + stackTrie := trie.NewStackTrie(db) + + stackTrie.UpdateAndGetProofs(db, types.Transactions(txs)) + + /* + rowsTransactions, toBeHashedAcc, _ := + convertProofToWitness(statedb, addr, accountProof, accountProof1, aExtNibbles1, aExtNibbles2, accountAddr, aNode, true, tMod.Type == NonExistingAccount, false, isShorterProofLastLeaf) + */ + + fmt.Println("===") +} + +// No update for each step, just final proof. +func TestGetProof(t *testing.T) { + txs := make([]*types.Transaction, 70) + key, _ := crypto.GenerateKey() + signer := types.LatestSigner(params.TestChainConfig) + + for i := range txs { + amount := math.BigPow(2, int64(i)) + price := big.NewInt(300000) + data := make([]byte, 100) + tx := types.NewTransaction(uint64(i), common.Address{}, amount, 123457, price, data) + signedTx, err := types.SignTx(tx, signer, key) + if err != nil { + panic(err) + } + txs[i] = signedTx + } + + db := rawdb.NewMemoryDatabase() + stackTrie := trie.NewStackTrie(db) + + // Update the trie with transactions: + types.DeriveSha(types.Transactions(txs), stackTrie) + + var indexBuf []byte + indexBuf = rlp.AppendUint64(indexBuf[:0], uint64(1)) + + proofS, err := stackTrie.GetProof(db, indexBuf) + if err != nil { + fmt.Println(err) + return + } + + fmt.Println(proofS) + + fmt.Println("===") +} diff --git a/mpt-witness-generator/witness/leaf.go b/mpt-witness-generator/witness/leaf.go new file mode 100644 index 0000000000..39f36be95e --- /dev/null +++ b/mpt-witness-generator/witness/leaf.go @@ -0,0 +1,561 @@ +package witness + 
+import ( + "math" + + "github.com/ethereum/go-ethereum/common" + "github.com/privacy-scaling-explorations/mpt-witness-generator/trie" +) + +func prepareEmptyNonExistingStorageRow() []byte { + // nonExistingStorageRow is used only for proof that nothing is stored at a particular storage key + nonExistingStorageRow := make([]byte, valueLen) + + return nonExistingStorageRow +} + +func prepareNonExistingStorageRow(leafC, keyNibbles []byte, noLeaf bool) ([]byte, []byte) { + // nonExistingStorageRow is used only for proof that nothing is stored at a particular storage key + nonExistingStorageRow := prepareEmptyNonExistingStorageRow() + + var wrongRlpBytes []byte + wrongRlpBytes = append(wrongRlpBytes, leafC[0]) + start := 2 + if leafC[0] == 248 { + start = 3 + wrongRlpBytes = append(wrongRlpBytes, leafC[1]) + } + keyLenC := int(leafC[start-1]) - 128 + keyRowC := make([]byte, valueLen) + for i := 0; i < keyLenC; i++ { + keyRowC[i] = leafC[start-1+i] + } + + offset := 0 + nibblesNum := (keyLenC - 1) * 2 + + nonExistingStorageRow[0] = leafC[start-1] + if keyRowC[1] != 32 { // odd number of nibbles + nibblesNum = nibblesNum + 1 + nonExistingStorageRow[1] = keyNibbles[64-nibblesNum] + 48 + offset = 1 + } else { + nonExistingStorageRow[1] = 32 + } + // Get the last nibblesNum of address: + remainingNibbles := keyNibbles[64-nibblesNum : 64] // exclude the last one as it is not a nibble + for i := 0; i < keyLenC-1; i++ { + nonExistingStorageRow[2+i] = remainingNibbles[2*i+offset]*16 + remainingNibbles[2*i+1+offset] + } + + return wrongRlpBytes, nonExistingStorageRow +} + +func getNonceBalanceValue(leaf []byte, keyLen int) ([]byte, []byte, int) { + nonceStart := 3 + keyLen + 1 + 1 + 1 + 1 + + var nonceRlpLen byte + var balanceStart int + var nonce []byte + + // If the first nonce byte is > 128, it means it presents (nonce_len - 128), + // if the first nonce byte is <= 128, the actual nonce value is < 128 and is exactly this first byte + // (however, when nonce = 0, the actual 
value that is stored is 128) + if leaf[nonceStart] <= 128 { + // only one nonce byte + nonceRlpLen = 1 + nonce = leaf[nonceStart : nonceStart+int(nonceRlpLen)] + balanceStart = nonceStart + int(nonceRlpLen) + } else { + nonceRlpLen = leaf[nonceStart] - 128 + nonce = leaf[nonceStart : nonceStart+int(nonceRlpLen)+1] + balanceStart = nonceStart + int(nonceRlpLen) + 1 + } + + var balanceRlpLen byte + var storageStart int + if leaf[balanceStart] <= 128 { + // only one balance byte + balanceRlpLen = 1 + storageStart = balanceStart + int(balanceRlpLen) + } else { + balanceRlpLen = leaf[balanceStart] - 128 + storageStart = balanceStart + int(balanceRlpLen) + 1 + } + + nonceVal := make([]byte, valueLen) + balanceVal := make([]byte, valueLen) + copy(nonceVal, nonce) + var balance []byte + if balanceRlpLen == 1 { + balance = leaf[balanceStart : balanceStart+int(balanceRlpLen)] + } else { + balance = leaf[balanceStart : balanceStart+int(balanceRlpLen)+1] + } + copy(balanceVal, balance) + + return nonceVal, balanceVal, storageStart +} + +func getStorageRootCodeHashValue(leaf []byte, storageStart int) ([]byte, []byte) { + storageRootValue := make([]byte, valueLen) + codeHashValue := make([]byte, valueLen) + storageRlpLen := leaf[storageStart] - 128 + if storageRlpLen != 32 { + panic("Account leaf RLP 3") + } + storage := leaf[storageStart : storageStart+32+1] + for i := 0; i < 33; i++ { + storageRootValue[i] = storage[i] + } + codeHashStart := storageStart + int(storageRlpLen) + 1 + codeHashRlpLen := leaf[codeHashStart] - 128 + if codeHashRlpLen != 32 { + panic("Account leaf RLP 4") + } + codeHash := leaf[codeHashStart : codeHashStart+32+1] + for i := 0; i < 33; i++ { + codeHashValue[i] = codeHash[i] + } + + return storageRootValue, codeHashValue +} + +func prepareAccountLeafNode(addr common.Address, addrh []byte, leafS, leafC, neighbourNode, addressNibbles []byte, isPlaceholder, isSModExtension, isCModExtension bool) Node { + // For non existing account proof there are two 
cases: + // 1. A leaf is returned that is not at the required address (wrong leaf). + // 2. A branch is returned as the last element of getProof and + // there is nil object at address position. Placeholder account leaf is added in this case. + values := make([][]byte, 12) + + keyLenS := int(leafS[2]) - 128 + keyLenC := int(leafC[2]) - 128 + keyRowS := make([]byte, valueLen) + keyRowC := make([]byte, valueLen) + + for i := 2; i < 3+keyLenS; i++ { + keyRowS[i-2] = leafS[i] + } + for i := 2; i < 3+keyLenC; i++ { + keyRowC[i-2] = leafC[i] + } + + var listRlpBytes [2][]byte + listRlpBytes[0] = make([]byte, 2) + listRlpBytes[1] = make([]byte, 2) + for i := 0; i < 2; i++ { + listRlpBytes[0][i] = leafS[i] + } + for i := 0; i < 2; i++ { + listRlpBytes[1][i] = leafC[i] + } + + var valueRlpBytes [2][]byte + valueRlpBytes[0] = make([]byte, 2) + valueRlpBytes[1] = make([]byte, 2) + + var valueListRlpBytes [2][]byte + valueListRlpBytes[0] = make([]byte, 2) + valueListRlpBytes[1] = make([]byte, 2) + + driftedRlpBytes := []byte{0} + keyDrifted := make([]byte, valueLen) + if neighbourNode != nil { + keyDrifted, _, driftedRlpBytes, _ = prepareStorageLeafInfo(neighbourNode, false, false) + } + + wrongValue := make([]byte, valueLen) + wrongRlpBytes := make([]byte, 2) + + // For non existing account proof, keyRowS (=keyRowC in this case) stores the key of + // the wrong leaf. We store the key of the required leaf (which doesn't exist) + // in nonExistingAccountRow. 
+ + // wrongValue is used only for proof that account doesn't exist + + offset := 0 + nibblesNum := (keyLenC - 1) * 2 + wrongRlpBytes[0] = leafC[0] + wrongRlpBytes[1] = leafC[1] + wrongValue[0] = leafC[2] // length + if leafC[3] != 32 { // odd number of nibbles + nibblesNum = nibblesNum + 1 + wrongValue[1] = addressNibbles[64-nibblesNum] + 48 + offset = 1 + } else { + wrongValue[1] = 32 + } + // Get the last nibblesNum of address: + remainingNibbles := addressNibbles[64-nibblesNum : 64] // exclude the last one as it is not a nibble + for i := 0; i < keyLenC-1; i++ { + wrongValue[2+i] = remainingNibbles[2*i+offset]*16 + remainingNibbles[2*i+1+offset] + } + + rlpStringSecondPartLenS := leafS[3+keyLenS] - 183 + if rlpStringSecondPartLenS != 1 { + panic("Account leaf RLP at this position should be 1 (S)") + } + rlpStringSecondPartLenC := leafC[3+keyLenC] - 183 + if rlpStringSecondPartLenC != 1 { + panic("Account leaf RLP at this position should be 1 (C)") + } + rlpStringLenS := leafS[3+keyLenS+1] + rlpStringLenC := leafC[3+keyLenC+1] + + // [248,112,157,59,158,160,175,159,65,212,107,23,98,208,38,205,150,63,244,2,185,236,246,95,240,224,191,229,27,102,202,231,184,80,248,78 + // In this example RLP, there are first 36 bytes of a leaf. + // 157 means there are 29 bytes for key (157 - 128). + // Positions 32-35: 184, 80, 248, 78. + // 184 - 183 = 1 means length of the second part of a string. + // 80 means length of a string. + // 248 - 247 = 1 means length of the second part of a list. + // 78 means length of a list. 
+ + rlpListSecondPartLenS := leafS[3+keyLenS+1+1] - 247 + if rlpListSecondPartLenS != 1 { + panic("Account leaf RLP 1 (S)") + } + rlpListSecondPartLenC := leafC[3+keyLenC+1+1] - 247 + if rlpListSecondPartLenC != 1 { + panic("Account leaf RLP 1 (C)") + } + + rlpListLenS := leafS[3+keyLenS+1+1+1] + if rlpStringLenS != rlpListLenS+2 { + panic("Account leaf RLP 2 (S)") + } + + rlpListLenC := leafC[3+keyLenC+1+1+1] + if rlpStringLenC != rlpListLenC+2 { + panic("Account leaf RLP 2 (C)") + } + + storageStartS := 0 + storageStartC := 0 + nonceValueS := make([]byte, valueLen) + nonceValueC := make([]byte, valueLen) + balanceValueS := make([]byte, valueLen) + balanceValueC := make([]byte, valueLen) + if !isPlaceholder { + nonceValueS, balanceValueS, storageStartS = getNonceBalanceValue(leafS, keyLenS) + nonceValueC, balanceValueC, storageStartC = getNonceBalanceValue(leafC, keyLenC) + } + + valueRlpBytes[0][0] = leafS[3+keyLenS] + valueRlpBytes[0][1] = leafS[3+keyLenS+1] + + valueRlpBytes[1][0] = leafC[3+keyLenC] + valueRlpBytes[1][1] = leafC[3+keyLenC+1] + + valueListRlpBytes[0][0] = leafS[3+keyLenS+1+1] + valueListRlpBytes[0][1] = leafS[3+keyLenS+1+1+1] + + valueListRlpBytes[1][0] = leafC[3+keyLenC+1+1] + valueListRlpBytes[1][1] = leafC[3+keyLenC+1+1+1] + + storageRootValueS := make([]byte, valueLen) + storageRootValueC := make([]byte, valueLen) + codeHashValueS := make([]byte, valueLen) + codeHashValueC := make([]byte, valueLen) + if !isPlaceholder { + storageRootValueS, codeHashValueS = getStorageRootCodeHashValue(leafS, storageStartS) + storageRootValueC, codeHashValueC = getStorageRootCodeHashValue(leafC, storageStartC) + } + + values[AccountKeyS] = keyRowS + values[AccountKeyC] = keyRowC + values[AccountNonceS] = nonceValueS + values[AccountBalanceS] = balanceValueS + values[AccountStorageS] = storageRootValueS + values[AccountCodehashS] = codeHashValueS + values[AccountNonceC] = nonceValueC + values[AccountBalanceC] = balanceValueC + values[AccountStorageC] = 
storageRootValueC + values[AccountCodehashC] = codeHashValueC + values[AccountDrifted] = keyDrifted + values[AccountWrong] = wrongValue + + leaf := AccountNode{ + Address: addr, + Key: addrh, + ListRlpBytes: listRlpBytes, + ValueRlpBytes: valueRlpBytes, + ValueListRlpBytes: valueListRlpBytes, + DriftedRlpBytes: driftedRlpBytes, + WrongRlpBytes: wrongRlpBytes, + IsModExtension: [2]bool{isSModExtension, isCModExtension}, + } + keccakData := [][]byte{leafS, leafC, addr.Bytes()} + if neighbourNode != nil { + keccakData = append(keccakData, neighbourNode) + } + node := Node{ + Account: &leaf, + Values: values, + KeccakData: keccakData, + } + + return node +} + +// prepareLeafAndPlaceholderNode prepares a leaf node and its placeholder counterpart +// (used when one of the proofs does not have a leaf). +func prepareLeafAndPlaceholderNode(addr common.Address, addrh []byte, proof1, proof2 [][]byte, storage_key common.Hash, key []byte, nonExistingAccountProof, isAccountProof, isSModExtension, isCModExtension bool) Node { + len1 := len(proof1) + len2 := len(proof2) + + // We don't have a leaf in the shorter proof, but we will add it there + // as a placeholder. + if isAccountProof { + var leafS []byte + var leafC []byte + if len1 > len2 { + leafS = proof1[len1-1] + leafC = proof1[len1-1] // placeholder + } else { + leafC = proof2[len2-1] + leafS = proof2[len2-1] // placeholder + } + + // When generating a proof that account doesn't exist, the length of both proofs is the same (doesn't reach + // this code). 
+ return prepareAccountLeafNode(addr, addrh, leafS, leafC, nil, key, false, isSModExtension, isCModExtension) + } else { + var leaf []byte + isSPlaceholder := false + isCPlaceholder := false + + if len1 > len2 { + leaf = proof1[len1-1] + isCPlaceholder = true + } else { + leaf = proof2[len2-1] + isSPlaceholder = true + } + + return prepareStorageLeafNode(leaf, leaf, nil, storage_key, key, false, isSPlaceholder, isCPlaceholder, isSModExtension, isCModExtension) + } +} + +// getLeafKeyLen returns the leaf key length given the key index (how many key nibbles have +// been used in the branches / extension nodes above the leaf). +func getLeafKeyLen(keyIndex int) int { + return int(math.Floor(float64(64-keyIndex)/float64(2))) + 1 +} + +// setStorageLeafKeyRLP sets the RLP byte that encodes key length of the storage leaf +// to correspond to the number of keys used in the branches / extension nodes above the placeholder leaf. +func setStorageLeafKeyRLP(leaf *[]byte, key []byte, keyIndex int) { + isEven := keyIndex%2 == 0 + remainingNibbles := key[keyIndex:] + keyLen := getLeafKeyLen(keyIndex) + (*leaf)[1] = byte(keyLen) + 128 + if isEven { + (*leaf)[2] = 32 + } else { + (*leaf)[2] = remainingNibbles[0] + 48 + } +} + +func prepareAccountLeafPlaceholderNode(addr common.Address, addrh, key []byte, keyIndex int) Node { + isEven := keyIndex%2 == 0 + keyLen := int(math.Floor(float64(64-keyIndex)/float64(2))) + 1 + remainingNibbles := key[keyIndex:] + offset := 0 + leaf := make([]byte, rowLen) + leaf[0] = 248 + leaf[1] = byte(keyLen) + 73 + leaf[2] = byte(keyLen) + 128 + leaf[3+keyLen] = 184 + leaf[3+keyLen+1+1] = 248 + leaf[3+keyLen+1+1+1] = leaf[3+keyLen+1] - 2 + if isEven { + leaf[3] = 32 + } else { + leaf[3] = remainingNibbles[0] + 48 + offset = 1 + } + for i := 0; i < keyLen-1; i++ { + leaf[4+i] = remainingNibbles[2*i+offset]*16 + remainingNibbles[2*i+1+offset] + } + + node := prepareAccountLeafNode(addr, addrh, leaf, leaf, nil, key, true, false, false) + + 
node.Account.ValueRlpBytes[0][0] = 184 + node.Account.ValueRlpBytes[0][1] = 70 + node.Account.ValueRlpBytes[1][0] = 184 + node.Account.ValueRlpBytes[1][1] = 70 + + node.Account.ValueListRlpBytes[0][0] = 248 + node.Account.ValueListRlpBytes[0][1] = 68 + node.Account.ValueListRlpBytes[1][0] = 248 + node.Account.ValueListRlpBytes[1][1] = 68 + + node.Values[AccountStorageS][0] = 160 + node.Values[AccountStorageC][0] = 160 + node.Values[AccountCodehashS][0] = 160 + node.Values[AccountCodehashC][0] = 160 + + return node +} + +func prepareStorageLeafPlaceholderNode(storage_key common.Hash, key []byte, keyIndex int) Node { + leaf := make([]byte, rowLen) + setStorageLeafKeyRLP(&leaf, key, keyIndex) + keyLen := getLeafKeyLen(keyIndex) + leaf[0] = 192 + 1 + byte(keyLen) + 1 + + return prepareStorageLeafNode(leaf, leaf, nil, storage_key, key, false, true, true, false, false) +} + +func prepareStorageLeafInfo(row []byte, valueIsZero, isPlaceholder bool) ([]byte, []byte, []byte, []byte) { + var keyRlp []byte + var valueRlp []byte + + var keyRlpLen byte + var valueRlpLen byte + + key := make([]byte, valueLen) + value := make([]byte, valueLen) + + var setValue = func(keyLen, offset byte) { + if !isPlaceholder { + valueRlp = row[keyLen+offset : keyLen+offset+valueRlpLen] + if !valueIsZero { + copy(value, row[keyLen+offset+valueRlpLen:]) + } + } else { + valueRlp = []byte{0} + } + } + + if len(row) < 32 { // the node doesn't get hashed in this case + // 192 + 32 = 224 + if row[1] < 128 { + // last level: [194,32,1] + // or + // only one nibble in a leaf (as soon as the leaf has two nibbles, row[1] will have 128 + length) + // [194,48,1] - this one contains nibble 0 = 48 - 48 + keyRlpLen = 1 + keyLen := byte(1) + keyRlp = row[:keyRlpLen] + copy(key, row[keyRlpLen:keyLen+1]) + valueRlpLen = 1 + offset := byte(1) + // If placeholder, we leave the value to be 0. 
+ setValue(keyLen, offset) + } else { + // [196,130,32,0,1] + /* + keyLen := row[1] - 128 + copy(key, row[:keyLen+2]) + copy(value, row[keyLen+2:]) + */ + keyRlpLen = 1 + keyLen := row[1] - 128 + keyRlp = row[:keyRlpLen] + copy(key, row[keyRlpLen:keyLen+2]) + valueRlpLen = 1 + offset := byte(2) + // If placeholder, we leave the value to be 0. + setValue(keyLen, offset) + } + } else if row[0] == 248 { + // [248,67,160,59,138,106,70,105,186,37,13,38,205,122,69,158,202,157,33,95,131,7,227,58,235,229,3,121,188,90,54,23,236,52,68,161,160,... + keyRlpLen = 2 + keyLen := row[2] - 128 + keyRlp = row[:keyRlpLen] + copy(key, row[keyRlpLen:keyLen+3]) + valueRlpLen = 1 + offset := byte(3) + // there are two RLP meta data bytes which are put in s_rlp1 and s_rlp2, + // value starts in s_advices[0] + setValue(keyLen, offset) + } else { + if row[1] < 128 { + // last level: + // [227,32,161,160,187,239,170,18,88,1,56,188,38,60,149,117,120,38,223,78,36,235,129,201,170,170,170,170,170,170,170,170,170,170,170,170] + // one nibble: + // [227,48,161,160,187,239,170,18,88,1,56,188,38,60,149,117,120,38,223,78,36,235,129,201,170,170,170,170,170,170,170,170,170,170,170,170] + key[0] = row[0] + key[1] = row[1] + keyLen := byte(2) + offset := byte(0) + valueRlpLen = 1 + // If placeholder, we leave the value to be 0. + setValue(keyLen, offset) + } else { + // [226,160,59,138,106,70,105,186,37,13,38[227,32,161,160,187,239,170,18,88,1,56,188,38,60,149,117,120,38,223,78,36,235,129,201,170,170,170,170,170,170,170,170,170,170,170,170] + keyRlpLen = 1 + keyLen := row[1] - 128 + keyRlp = row[:keyRlpLen] + copy(key, row[keyRlpLen:keyLen+2]) + valueRlpLen = 1 + offset := byte(2) + // If placeholder, we leave the value to be 0. 
+ setValue(keyLen, offset) + } + } + + return key, value, keyRlp, valueRlp +} + +func prepareStorageLeafNode(leafS, leafC, neighbourNode []byte, storage_key common.Hash, key []byte, nonExistingStorageProof, isSPlaceholder, isCPlaceholder, isSModExtension, isCModExtension bool) Node { + var rows [][]byte + + keyS, valueS, listRlpBytes1, valueRlpBytes1 := prepareStorageLeafInfo(leafS, false, isSPlaceholder) + + rows = append(rows, keyS) + rows = append(rows, valueS) + + keyC, valueC, listRlpBytes2, valueRlpBytes2 := prepareStorageLeafInfo(leafC, false, isCPlaceholder) + + rows = append(rows, keyC) + rows = append(rows, valueC) + + var listRlpBytes [2][]byte + listRlpBytes[0] = listRlpBytes1 + listRlpBytes[1] = listRlpBytes2 + + var valueRlpBytes [2][]byte + valueRlpBytes[0] = valueRlpBytes1 + valueRlpBytes[1] = valueRlpBytes2 + + driftedRlpBytes := []byte{0} + keyDrifted := make([]byte, valueLen) + if neighbourNode != nil { + keyDrifted, _, driftedRlpBytes, _ = prepareStorageLeafInfo(neighbourNode, false, false) + } + rows = append(rows, keyDrifted) + + var nonExistingStorageRow []byte + var wrongRlpBytes []byte + if nonExistingStorageProof { + noLeaf := false + wrongRlpBytes, nonExistingStorageRow = prepareNonExistingStorageRow(leafC, key, noLeaf) + } else { + nonExistingStorageRow = prepareEmptyNonExistingStorageRow() + } + rows = append(rows, nonExistingStorageRow) + + leaf := StorageNode{ + Address: storage_key, + Key: trie.HexToKeybytes(key), + ListRlpBytes: listRlpBytes, + DriftedRlpBytes: driftedRlpBytes, + WrongRlpBytes: wrongRlpBytes, + ValueRlpBytes: valueRlpBytes, + IsModExtension: [2]bool{isSModExtension, isCModExtension}, + } + keccakData := [][]byte{leafS, leafC, storage_key.Bytes()} + if neighbourNode != nil { + keccakData = append(keccakData, neighbourNode) + } + node := Node{ + Values: rows, + Storage: &leaf, + KeccakData: keccakData, + } + + return node +} diff --git a/mpt-witness-generator/witness/modified_extension_node.go 
b/mpt-witness-generator/witness/modified_extension_node.go new file mode 100644 index 0000000000..ad4d0073de --- /dev/null +++ b/mpt-witness-generator/witness/modified_extension_node.go @@ -0,0 +1,197 @@
+package witness
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/privacy-scaling-explorations/mpt-witness-generator/state"
+ "github.com/privacy-scaling-explorations/mpt-witness-generator/trie"
+)
+
+// prepareModExtensionNode adds rows for a modified extension node before and after modification.
+// These rows are added only when an existing extension node gets shortened or elongated (in terms
+// of the extension node nibbles) because of another extension node being added or deleted.
+// The rows added are somewhat exceptional as otherwise they do not appear.
+// The "long" extension node is the original one (more nibbles); the "short" one is what remains
++// of it after the insertion/deletion. The short node is either fetched from the trie (insert
+// case) or reconstructed here from the long node's RLP (delete case).
+// NOTE(review): several parameters (extensionNodeInd, additionalBranch, nonExistingAccountProof,
+// isShorterProofLastLeaf, branchC1, toBeHashed) are currently unused or only used via
+// commented-out code — presumably kept for the in-progress integration; confirm before removing.
+func prepareModExtensionNode(statedb *state.StateDB, addr common.Address, rows *[][]byte, proof1, proof2,
+ extNibblesS, extNibblesC [][]byte,
+ key, neighbourNode []byte,
+ keyIndex, extensionNodeInd, numberOfNibbles int,
+ additionalBranch, isAccountProof, nonExistingAccountProof,
+ isShorterProofLastLeaf bool, branchC16, branchC1 byte, toBeHashed *[][]byte) Node {
+ len1 := len(proof1)
+ len2 := len(proof2)
+
+ // The long (original) extension node is the last element of the shorter proof.
+ var longExtNode []byte
+ if len1 > len2 {
+ longExtNode = proof2[len2-1]
+ } else {
+ longExtNode = proof1[len1-1]
+ }
+
+ // Use the nibbles of the side whose proof is shorter (the side still holding the long node).
+ var extNibbles [][]byte
+ if len1 > len2 {
+ extNibbles = extNibblesC
+ } else {
+ extNibbles = extNibblesS
+ }
+
+ numberOfNibbles0, extensionRowS, extensionRowC :=
+ prepareExtensionRows(extNibbles, extensionNodeInd, longExtNode, longExtNode, true, false)
+
+ extNodeSelectors := make([]byte, rowLen)
+ setExtNodeSelectors(extNodeSelectors, longExtNode, int(numberOfNibbles0), branchC16)
+ extNodeSelectors = append(extNodeSelectors, 24) // 24 marks the row type; see row-type constants
+
+ _, extListRlpBytesS, extValuesS := prepareExtensions(extNibbles, extensionNodeInd, longExtNode, longExtNode)
+ /*
+ b := []byte{249, 1, 49, 128} // We don't really need a branch info (only extension node).
+ longNode := prepareBranchNode(b, b, longExtNode, longExtNode, extListRlpBytes, extValues,
+ key[keyIndex], key[keyIndex], branchC16, branchC1, false, false, true, false, false)
+ */
+
+ var extRows [][]byte
+ // We need to prove the old extension node is in S proof (when ext. node inserted).
+ extRows = append(extRows, extNodeSelectors)
+ extRows = append(extRows, extensionRowS)
+ extRows = append(extRows, extensionRowC)
+
+ *rows = append(*rows, extRows...)
+ // addForHashing(longExtNode, toBeHashed)
+
+ // Get nibbles of the extension node that gets shortened because of the newly inserted
+ // extension node:
+ longNibbles := getExtensionNodeNibbles(longExtNode)
+
+ ind := byte(keyIndex) + byte(numberOfNibbles) // where the old and new extension nodes start to be different
+ // diffNibble := oldNibbles[ind]
+ longExtNodeKey := make([]byte, len(key))
+ copy(longExtNodeKey, key)
+ // We would like to retrieve the shortened extension node from the trie via GetProof or
+ // GetStorageProof (depending whether it is an account proof or storage proof),
+ // the key where we find its underlying branch is `oldExtNodeKey`.
+ for j := ind; int(j) < keyIndex+len(longNibbles); j++ {
+ // keyIndex is where the nibbles of the old and new extension node start
+ longExtNodeKey[j] = longNibbles[j-byte(keyIndex)]
+ }
+
+ k := trie.HexToKeybytes(longExtNodeKey)
+ ky := common.BytesToHash(k)
+ var proof [][]byte
+ var err error
+ if isAccountProof {
+ proof, _, _, _, err = statedb.GetProof(addr)
+ } else {
+ proof, _, _, _, err = statedb.GetStorageProof(addr, ky)
+ }
+ check(err)
+
+ // There is no short extension node when `len(longNibbles) - numberOfNibbles = 1`, in this case there
+ // is simply a branch instead.
+ shortExtNodeIsBranch := len(longNibbles)-numberOfNibbles == 1
+ if shortExtNodeIsBranch {
+ // Flag the previously-added branch row; -branchRows-9 reaches back past the rows
+ // appended after that branch. NOTE(review): offset is coupled to row layout — verify.
+ (*rows)[len(*rows)-branchRows-9][isShortExtNodeBranch] = 1
+ }
+
+ var shortExtNode []byte
+ /*
+ extNodeSelectors1 := make([]byte, rowLen)
+ emptyExtRows := prepareEmptyExtensionRows(false, true)
+ extensionRowS1 := emptyExtRows[0]
+ extensionRowC1 := emptyExtRows[1]
+ */
+
+ var extListRlpBytesC []byte
+ var extValuesC [][]byte
+
+ if !shortExtNodeIsBranch {
+ if len2 > len1 {
+ // Insert case: the short extension node is fetched from the trie proof.
+ isItBranch := isBranch(proof[len(proof)-1])
+
+ // Note that `oldExtNodeKey` has nibbles properly set only up to the end of nibbles,
+ // this is enough to get the old extension node by `GetProof` or `GetStorageProof` -
+ // we will get its underlying branch, but sometimes also the leaf in a branch if
+ // the nibble will correspond to the leaf (we left the nibbles from
+ // `keyIndex + len(oldNibbles)` the same as the nibbles in the new extension node).
+
+ if isItBranch { // last element in a proof is a branch
+ shortExtNode = proof[len(proof)-2]
+ } else { // last element in a proof is a leaf
+ shortExtNode = proof[len(proof)-3]
+ }
+ } else {
+ // Delete case: reconstruct the short extension node from the long node's RLP.
+ // Needed only for len1 > len2
+ (*rows)[len(*rows)-branchRows-9][driftedPos] = longNibbles[numberOfNibbles]
+
+ shortNibbles := longNibbles[numberOfNibbles+1:]
+ compact := trie.HexToCompact(shortNibbles)
+ longStartBranch := 2 + (longExtNode[1] - 128) // cannot be "short" in terms of having the length at position 0; TODO: extension with length at position 2 not supported (the probability very small)
+
+ if len(shortNibbles) > 1 {
+ // add RLP2:
+ compact = append([]byte{128 + byte(len(compact))}, compact...)
+ }
+
+ shortExtNode = append(compact, longExtNode[longStartBranch:]...)
+
+ // add RLP1:
+ shortExtNode = append([]byte{192 + byte(len(shortExtNode))}, shortExtNode...)
+ }
+
+ // Get the nibbles of the shortened extension node:
+ nibbles := getExtensionNodeNibbles(shortExtNode)
+
+ // Enable `prepareExtensionRows` call:
+ extNibbles = append(extNibbles, nibbles)
+
+ /*
+ var numberOfNibbles1 byte
+ numberOfNibbles1, extensionRowS1, extensionRowC1 =
+ prepareExtensionRows(extNibbles, extensionNodeInd + 1, shortExtNode, shortExtNode, false, true)
+ */
+
+ _, extListRlpBytesC, extValuesC = prepareExtensions(extNibbles, extensionNodeInd+1, shortExtNode, shortExtNode)
+ /*
+ shortNode = prepareBranchNode(b, b, shortExtNode, shortExtNode, extListRlpBytes, extValues,
+ key[keyIndex], key[keyIndex], branchC16, branchC1, false, false, true, false, false)
+
+ setExtNodeSelectors(extNodeSelectors1, shortExtNode, int(numberOfNibbles1), branchC16)
+ */
+ // extNodeSelectors1 = append(extNodeSelectors1, 25)
+ } /* else {
+ if len1 > len2 {
+ // Needed only for len1 > len2
+ (*rows)[len(*rows)-branchRows-9][driftedPos] = longNibbles[numberOfNibbles]
+ }
+
+ extNodeSelectors1 = append(extNodeSelectors1, 25)
+ }
+ */
+
+ // The shortened extension node is needed as a witness to be able to check in a circuit
+ // that the shortened extension node and newly added leaf (that causes newly inserted
+ // extension node) are the only nodes in the newly inserted extension node.
+ /*
+ *rows = append(*rows, extNodeSelectors1)
+ *rows = append(*rows, extensionRowS1)
+ *rows = append(*rows, extensionRowC1)
+ */
+
+ listRlpBytes := [2][]byte{extListRlpBytesS, extListRlpBytesC}
+ modExtensionNode := ModExtensionNode{
+ ListRlpBytes: listRlpBytes,
+ }
+
+ // Drop the second of the four prepared value rows on each side (index 1), keeping 1+2 rows.
+ var values [][]byte
+ extValuesS = append(extValuesS[:1], extValuesS[2:]...)
+ extValuesC = append(extValuesC[:1], extValuesC[2:]...)
+ values = append(values, extValuesS...)
+ values = append(values, extValuesC...)
+
+ // NOTE(review): when shortExtNodeIsBranch is true, shortExtNode stays nil and a nil entry
+ // is appended to KeccakData — confirm downstream consumers tolerate this.
+ keccakData := [][]byte{}
+ keccakData = append(keccakData, longExtNode)
+ keccakData = append(keccakData, shortExtNode)
+
+ return Node{
+ ModExtension: &modExtensionNode,
+ Values: values,
+ KeccakData: keccakData,
+ }
+}
diff --git a/mpt-witness-generator/witness/nodes.go b/mpt-witness-generator/witness/nodes.go new file mode 100644 index 0000000000..9377f99649 --- /dev/null +++ b/mpt-witness-generator/witness/nodes.go @@ -0,0 +1,199 @@
+package witness
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/privacy-scaling-explorations/mpt-witness-generator/oracle"
+)
+
+// BranchNode carries the witness data of a branch: which child was modified, where the
+// drifted leaf moved to, and the raw RLP list bytes for the S and C proofs.
+type BranchNode struct {
+ ModifiedIndex int
+ DriftedIndex int
+ ListRlpBytes [2][]byte
+}
+
+// MarshalJSON renders the branch with its two RLP byte lists as decimal arrays.
+func (n *BranchNode) MarshalJSON() ([]byte, error) {
+ listRlpBytes1 := base64ToString(n.ListRlpBytes[0])
+ listRlpBytes2 := base64ToString(n.ListRlpBytes[1])
+ jsonResult := fmt.Sprintf(`{"modified_index": %d, "drifted_index": %d, "list_rlp_bytes":[%s,%s]}`,
+ n.ModifiedIndex, n.DriftedIndex, listRlpBytes1, listRlpBytes2)
+ return []byte(jsonResult), nil
+}
+
+// ExtensionNode carries the RLP list bytes of an extension node.
+type ExtensionNode struct {
+ ListRlpBytes []byte
+}
+
+// MarshalJSON renders the extension node's RLP bytes as a decimal array.
+func (n *ExtensionNode) MarshalJSON() ([]byte, error) {
+ listRlpBytes := base64ToString(n.ListRlpBytes)
+ jsonResult := fmt.Sprintf(`{"list_rlp_bytes":%s}`, listRlpBytes)
+ return []byte(jsonResult), nil
+}
+
+// base64ToString serializes a byte slice for embedding in JSON.
+// NOTE(review): despite the name, this does NOT base64-encode — it renders the bytes as a
+// JSON array of decimal integers (e.g. [1,2,3]); a nil slice becomes a valueLen-long array
+// of zeros so the circuit always sees fixed-width rows.
+func base64ToString(bs []byte) string {
+ var s string
+ if bs == nil {
+ // Emit "[0, 0, ..., 0]" with valueLen entries.
+ f := make([]string, valueLen)
+ s = "["
+ for i := 0; i < len(f); i++ {
+ if i != len(f)-1 {
+ s += "0, "
+ } else {
+ s += "0]"
+ }
+ }
+ } else {
+ // fmt "%d" on a []byte prints "[1 2 3]"; Fields+Join turns it into "[1,2,3]".
+ s = strings.Join(strings.Fields(fmt.Sprintf("%d", bs)), ",")
+ }
+
+ return s
+}
+
+// StartNode marks the beginning of a proof in the witness and records the proof type.
+type StartNode struct {
+ DisablePreimageCheck bool `json:"disable_preimage_check"`
+ ProofType string `json:"proof_type"`
+}
+
+// ExtensionBranchNode couples a branch with its (optional) parent extension node.
+type ExtensionBranchNode struct {
+ IsExtension bool `json:"is_extension"`
+ // IsModExtension = true for the extension node that gets replaced by a shorter (in terms of nibbles)
+ // extension node. IsModExtension is not set to true for the newly appeared extension node (nibbles
+ // of the extension node that caused replacement + nibbles of the newly appeared extension node =
+ // nibbles of the original extension node).
+ IsModExtension [2]bool `json:"is_mod_extension"`
+ IsPlaceholder [2]bool `json:"is_placeholder"`
+ Extension ExtensionNode `json:"extension"`
+ Branch BranchNode `json:"branch"`
+}
+
+// ModExtensionNode holds the S and C RLP list bytes of a modified extension node.
+type ModExtensionNode struct {
+ ListRlpBytes [2][]byte
+}
+
+// MarshalJSON renders both RLP byte lists as decimal arrays.
+func (n *ModExtensionNode) MarshalJSON() ([]byte, error) {
+ listRlpBytes1 := base64ToString(n.ListRlpBytes[0])
+ listRlpBytes2 := base64ToString(n.ListRlpBytes[1])
+ jsonResult := fmt.Sprintf(`{"list_rlp_bytes":[%s,%s]}`, listRlpBytes1, listRlpBytes2)
+ return []byte(jsonResult), nil
+}
+
+// AccountNode is the witness form of an account leaf (S and C sides plus drifted/wrong leaves).
+type AccountNode struct {
+ Address common.Address
+ Key []byte
+ ListRlpBytes [2][]byte
+ ValueRlpBytes [2][]byte
+ ValueListRlpBytes [2][]byte
+ DriftedRlpBytes []byte
+ WrongRlpBytes []byte
+ IsModExtension [2]bool `json:"is_mod_extension"`
+}
+
+// MarshalJSON renders all account-leaf byte fields as decimal arrays in a fixed key order.
+func (n *AccountNode) MarshalJSON() ([]byte, error) {
+ address := base64ToString(n.Address.Bytes())
+ key := base64ToString(n.Key)
+ listRlpBytes1 := base64ToString(n.ListRlpBytes[0])
+ listRlpBytes2 := base64ToString(n.ListRlpBytes[1])
+ valueRlpBytes1 := base64ToString(n.ValueRlpBytes[0])
+ valueRlpBytes2 := base64ToString(n.ValueRlpBytes[1])
+ valueListRlpBytes1 := base64ToString(n.ValueListRlpBytes[0])
+ valueListRlpBytes2 := base64ToString(n.ValueListRlpBytes[1])
+ driftedRlpBytes := base64ToString(n.DriftedRlpBytes)
+ wrongRlpBytes := base64ToString(n.WrongRlpBytes)
+ jsonResult := fmt.Sprintf(`{"address":%s, "key":%s, "list_rlp_bytes":[%s,%s], "value_rlp_bytes":[%s,%s], "value_list_rlp_bytes":[%s,%s], "drifted_rlp_bytes":%s, "wrong_rlp_bytes":%s, "is_mod_extension": [%t, %t]}`,
+ address, key, listRlpBytes1, listRlpBytes2, valueRlpBytes1, valueRlpBytes2, valueListRlpBytes1, valueListRlpBytes2,
+ driftedRlpBytes, wrongRlpBytes, n.IsModExtension[0], n.IsModExtension[1])
+ return []byte(jsonResult), nil
+}
+
+// StorageNode is the witness form of a storage leaf (S and C sides plus drifted/wrong leaves).
+type StorageNode struct {
+ Address common.Hash `json:"address"`
+ Key []byte `json:"key"`
+ ListRlpBytes [2][]byte `json:"list_rlp_bytes"`
+ ValueRlpBytes [2][]byte `json:"value_rlp_bytes"`
+ DriftedRlpBytes []byte `json:"drifted_rlp_bytes"`
+ WrongRlpBytes []byte `json:"wrong_rlp_bytes"`
+ IsModExtension [2]bool `json:"is_mod_extension"`
+}
+
+// MarshalJSON renders all storage-leaf byte fields as decimal arrays in a fixed key order.
+func (n *StorageNode) MarshalJSON() ([]byte, error) {
+ address := base64ToString(n.Address.Bytes())
+ key := base64ToString(n.Key)
+ listRlpBytes1 := base64ToString(n.ListRlpBytes[0])
+ listRlpBytes2 := base64ToString(n.ListRlpBytes[1])
+ valueRlpBytes1 := base64ToString(n.ValueRlpBytes[0])
+ valueRlpBytes2 := base64ToString(n.ValueRlpBytes[1])
+ driftedRlpBytes := base64ToString(n.DriftedRlpBytes)
+ wrongRlpBytes := base64ToString(n.WrongRlpBytes)
+ jsonResult := fmt.Sprintf(`{"address":%s, "key":%s, "list_rlp_bytes":[%s,%s], "value_rlp_bytes":[%s,%s], "drifted_rlp_bytes":%s, "wrong_rlp_bytes":%s, "is_mod_extension": [%t, %t]}`,
+ address, key, listRlpBytes1, listRlpBytes2, valueRlpBytes1, valueRlpBytes2, driftedRlpBytes, wrongRlpBytes, n.IsModExtension[0], n.IsModExtension[1])
+ return []byte(jsonResult), nil
+}
+
+// JSONableValues marshals [][]byte as nested decimal arrays (nil becomes []).
+type JSONableValues [][]byte
+
+func (u JSONableValues) MarshalJSON() ([]byte, error) {
+ var result string
+ if u == nil {
+ result = "[]"
+ } else {
+ result = strings.Join(strings.Fields(fmt.Sprintf("%d", u)), ",")
+ }
+ return []byte(result), nil
+}
+
+/*
+Note: using pointers for fields to be null when not set (otherwise the field is set to default value
+when marshalling).
+*/
+type Node struct {
+ Start *StartNode `json:"start"`
+ ExtensionBranch *ExtensionBranchNode `json:"extension_branch"`
+ Account *AccountNode `json:"account"`
+ Storage *StorageNode `json:"storage"`
+ ModExtension *ModExtensionNode `json:"mod_extension"`
+ Values JSONableValues `json:"values"`
+ KeccakData JSONableValues `json:"keccak_data"`
+}
+
+// GetStartNode builds the witness start node holding the S and C trie roots, each laid out
+// as 160 (RLP string-of-32 prefix) + 32 root bytes + a trailing 0.
+func GetStartNode(proofType string, sRoot, cRoot common.Hash, specialTest byte) Node {
+ s := StartNode{
+ DisablePreimageCheck: oracle.PreventHashingInSecureTrie || specialTest == 5,
+ ProofType: proofType,
+ }
+ var values [][]byte
+ var values1 []byte
+ var values2 []byte
+ values1 = append(values1, 160)
+ values1 = append(values1, sRoot.Bytes()...)
+ values1 = append(values1, 0)
+ values2 = append(values2, 160)
+ values2 = append(values2, cRoot.Bytes()...)
+ values2 = append(values2, 0)
+
+ values = append(values, values1)
+ values = append(values, values2)
+
+ return Node{
+ Start: &s,
+ Values: values,
+ }
+}
+
+// GetEndNode builds the terminating witness node (a disabled StartNode with zeroed value rows,
+// each row starting with 160 to mimic the root layout).
+func GetEndNode() Node {
+ e := StartNode{
+ DisablePreimageCheck: false,
+ ProofType: "Disabled",
+ }
+
+ endValues1, endValues2 := make([]byte, valueLen), make([]byte, valueLen)
+ endValues1[0], endValues2[0] = 160, 160
+ endValues := [][]byte{endValues1, endValues2}
+
+ return Node{
+ Start: &e,
+ Values: endValues,
+ }
+}
diff --git a/mpt-witness-generator/witness/prepare_witness.go b/mpt-witness-generator/witness/prepare_witness.go new file mode 100644 index 0000000000..f51c566412 --- /dev/null +++ b/mpt-witness-generator/witness/prepare_witness.go @@ -0,0 +1,513 @@
+package witness
+
+import (
+ "fmt"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/privacy-scaling-explorations/mpt-witness-generator/oracle"
+ "github.com/privacy-scaling-explorations/mpt-witness-generator/state"
+ "github.com/privacy-scaling-explorations/mpt-witness-generator/trie"
+)
+
+const branchNodeRLPLen = 2 // we have two positions for RLP meta data
+const branch2start = branchNodeRLPLen + 32
+const branchRows = 19 // 1 (init) + 16 (children) + 2 (extension S and C)
+
+// rowLen - each branch node has 2 positions for RLP meta data and 32 positions for hash
+const rowLen = branch2start + 2 + 32 + 1 // +1 is for info about what type of row is it
+const valueLen = 34
+const driftedPos = 13
+const isExtensionPos = 14
+
+// extension key even or odd is about nibbles - that determines whether the first byte (not
+// considering RLP bytes) is 0 or 1 (see encoding.go hexToCompact)
+const isExtShortC16Pos = 21
+const isExtShortC1Pos = 22
+const isExtLongEvenC16Pos = 23
+const isExtLongEvenC1Pos = 24
+const isExtLongOddC16Pos = 25
+const isExtLongOddC1Pos = 26
+
+// short/long means having one or more than one nibbles
+const isSExtLongerThan55Pos = 27
+const isExtNodeSNonHashedPos = 31
+
+// nibbles_counter_pos = 33, set in the assign function.
+const isShortExtNodeBranch = 36
+
+// AccountRowType enumerates the rows of an account-leaf witness node, in layout order.
+type AccountRowType int64
+
+const (
+ AccountKeyS AccountRowType = iota
+ AccountKeyC
+ AccountNonceS
+ AccountBalanceS
+ AccountStorageS
+ AccountCodehashS
+ AccountNonceC
+ AccountBalanceC
+ AccountStorageC
+ AccountCodehashC
+ AccountDrifted
+ AccountWrong
+)
+
+// ProofType identifies which kind of trie modification a witness proves.
+type ProofType int64
+
+const (
+ Disabled ProofType = iota
+ NonceChanged
+ BalanceChanged
+ CodeHashChanged
+ AccountDestructed
+ AccountDoesNotExist
+ StorageChanged
+ StorageDoesNotExist
+ AccountCreate
+)
+
+// TrieModification describes one state change to generate a witness for; only the fields
+// relevant to Type are consulted (e.g. Nonce for NonceChanged, Key/Value for StorageChanged).
+type TrieModification struct {
+ Type ProofType
+ Key common.Hash
+ Value common.Hash
+ Address common.Address
+ Nonce uint64
+ Balance *big.Int
+ CodeHash []byte
+}
+
+// GetWitness is to be used by external programs to generate the witness.
+// It prefetches the parent block state from nodeUrl and converts each modification's
+// before/after proofs into witness nodes.
+func GetWitness(nodeUrl string, blockNum int, trieModifications []TrieModification) []Node {
+ blockNumberParent := big.NewInt(int64(blockNum))
+ oracle.NodeUrl = nodeUrl
+ blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil)
+ database := state.NewDatabase(blockHeaderParent)
+ statedb, _ := state.New(blockHeaderParent.Root, database, nil)
+
+ for i := 0; i < len(trieModifications); i++ {
+ // TODO: remove SetState (using it now just because this particular key might
+ // not be set and we will obtain empty storageProof)
+ v := common.BigToHash(big.NewInt(int64(17)))
+ statedb.SetState(trieModifications[i].Address, trieModifications[i].Key, v)
+ // TODO: enable GetState to get the preimages -
+ // GetState calls GetCommittedState which calls PrefetchStorage to get the preimages
+ // statedb.GetState(addr, keys[i])
+ }
+
+ return obtainTwoProofsAndConvertToWitness(trieModifications, statedb, 0)
+}
+
+// obtainAccountProofAndConvertToWitness produces the witness nodes for a single account-level
+// modification: it takes the account proof before the change, applies the change to statedb,
+// takes the proof again, and converts the pair into witness nodes (start + account + end).
+// NOTE(review): parameters i and tModsLen are currently unused — confirm before removing.
+func obtainAccountProofAndConvertToWitness(i int, tMod TrieModification, tModsLen int, statedb *state.StateDB, specialTest byte) []Node {
+ statedb.IntermediateRoot(false)
+
+ addr := tMod.Address
+ addrh := crypto.Keccak256(addr.Bytes())
+ accountAddr := trie.KeybytesToHex(addrh)
+
+ // This needs to called before oracle.PrefetchAccount, otherwise oracle.PrefetchAccount
+ // will cache the proof and won't return it.
+ // Calling oracle.PrefetchAccount after statedb.SetStateObjectIfExists is needed only
+ // for cases when statedb.loadRemoteAccountsIntoStateObjects = false.
+ statedb.SetStateObjectIfExists(tMod.Address)
+
+ oracle.PrefetchAccount(statedb.Db.BlockNumber, tMod.Address, nil)
+ accountProof, aNeighbourNode1, aExtNibbles1, isLastLeaf1, err := statedb.GetProof(addr)
+ check(err)
+
+ var nodes []Node
+
+ sRoot := statedb.GetTrie().Hash()
+
+ // Apply the requested modification to obtain the C (after) state.
+ if tMod.Type == NonceChanged {
+ statedb.SetNonce(addr, tMod.Nonce)
+ } else if tMod.Type == BalanceChanged {
+ statedb.SetBalance(addr, tMod.Balance)
+ } else if tMod.Type == CodeHashChanged {
+ statedb.SetCode(addr, tMod.CodeHash)
+ } else if tMod.Type == AccountCreate {
+ statedb.CreateAccount(tMod.Address)
+ } else if tMod.Type == AccountDestructed {
+ statedb.DeleteAccount(tMod.Address)
+ }
+ // No statedb change in case of AccountDoesNotExist.
+
+ statedb.IntermediateRoot(false)
+
+ cRoot := statedb.GetTrie().Hash()
+
+ accountProof1, aNeighbourNode2, aExtNibbles2, isLastLeaf2, err := statedb.GetProof(addr)
+ check(err)
+
+ if tMod.Type == AccountDoesNotExist && len(accountProof) == 0 {
+ // If there is only one account in the state trie and we want to prove for some
+ // other account that it doesn't exist.
+ // We get the root node (the only account) and put it as the only element of the proof,
+ // it will act as a "wrong" leaf.
+ account, err := statedb.GetTrieRootElement()
+ check(err)
+ accountProof = make([][]byte, 1)
+ accountProof[0] = account
+ accountProof1 = make([][]byte, 1)
+ accountProof1[0] = account
+ }
+
+ addrh, accountAddr, accountProof, accountProof1, sRoot, cRoot = modifyAccountProofSpecialTests(addrh, accountAddr, sRoot, cRoot, accountProof, accountProof1, aNeighbourNode2, specialTest)
+ aNode := aNeighbourNode2
+ isShorterProofLastLeaf := isLastLeaf1
+ if len(accountProof) > len(accountProof1) {
+ // delete operation
+ aNode = aNeighbourNode1
+ isShorterProofLastLeaf = isLastLeaf2
+ }
+
+ proofType := "NonceChanged"
+ if tMod.Type == BalanceChanged {
+ proofType = "BalanceChanged"
+ } else if tMod.Type == AccountDestructed {
+ proofType = "AccountDestructed"
+ } else if tMod.Type == AccountDoesNotExist {
+ proofType = "AccountDoesNotExist"
+ } else if tMod.Type == CodeHashChanged {
+ proofType = "CodeHashExists" // TODO: change when it changes in the circuit
+ }
+
+ nodes = append(nodes, GetStartNode(proofType, sRoot, cRoot, specialTest))
+
+ nodesAccount :=
+ convertProofToWitness(statedb, addr, addrh, accountProof, accountProof1, aExtNibbles1, aExtNibbles2, tMod.Key, accountAddr, aNode, true, tMod.Type == AccountDoesNotExist, false, isShorterProofLastLeaf)
+ nodes = append(nodes, nodesAccount...)
+ nodes = append(nodes, GetEndNode())
+
+ return nodes
+}
+
+// obtainTwoProofsAndConvertToWitness obtains the GetProof proof before and after the modification for each
+// of the modification. It then converts the two proofs into an MPT circuit witness. Witness is thus
+// prepared for each of the modifications and the witnesses are chained together - the final root of
+// the previous witness is the same as the start root of the current witness.
+func obtainTwoProofsAndConvertToWitness(trieModifications []TrieModification, statedb *state.StateDB, specialTest byte) []Node {
+ statedb.IntermediateRoot(false)
+ var nodes []Node
+
+ for i := 0; i < len(trieModifications); i++ {
+ tMod := trieModifications[i]
+ if tMod.Type == StorageChanged || tMod.Type == StorageDoesNotExist {
+ // Storage modification: needs both an account proof (for the account holding the
+ // storage trie) and a storage proof, before and after the change.
+ kh := crypto.Keccak256(tMod.Key.Bytes())
+ if oracle.PreventHashingInSecureTrie {
+ kh = tMod.Key.Bytes()
+ }
+ keyHashed := trie.KeybytesToHex(kh)
+
+ addr := tMod.Address
+ addrh := crypto.Keccak256(addr.Bytes())
+ accountAddr := trie.KeybytesToHex(addrh)
+
+ oracle.PrefetchAccount(statedb.Db.BlockNumber, tMod.Address, nil)
+ // oracle.PrefetchStorage(statedb.Db.BlockNumber, addr, tMod.Key, nil)
+
+ if specialTest == 1 {
+ statedb.CreateAccount(addr)
+ }
+
+ // S (before) proofs:
+ accountProof, aNeighbourNode1, aExtNibbles1, aIsLastLeaf1, err := statedb.GetProof(addr)
+ check(err)
+ storageProof, neighbourNode1, extNibbles1, isLastLeaf1, err := statedb.GetStorageProof(addr, tMod.Key)
+ check(err)
+
+ sRoot := statedb.GetTrie().Hash()
+
+ if tMod.Type == StorageChanged {
+ statedb.SetState(addr, tMod.Key, tMod.Value)
+ statedb.IntermediateRoot(false)
+ }
+
+ cRoot := statedb.GetTrie().Hash()
+
+ proofType := "StorageChanged"
+ if tMod.Type == StorageDoesNotExist {
+ proofType = "StorageDoesNotExist"
+ }
+
+ // C (after) proofs:
+ accountProof1, aNeighbourNode2, aExtNibbles2, aIsLastLeaf2, err := statedb.GetProof(addr)
+ check(err)
+
+ storageProof1, neighbourNode2, extNibbles2, isLastLeaf2, err := statedb.GetStorageProof(addr, tMod.Key)
+ check(err)
+
+ // For a delete (S proof longer than C proof) the neighbour node and last-leaf flag
+ // come from the other side.
+ aNode := aNeighbourNode2
+ aIsLastLeaf := aIsLastLeaf1
+ if len(accountProof) > len(accountProof1) {
+ // delete operation
+ aNode = aNeighbourNode1
+ aIsLastLeaf = aIsLastLeaf2
+ }
+
+ node := neighbourNode2
+ isLastLeaf := isLastLeaf1
+ if len(storageProof) > len(storageProof1) {
+ // delete operation
+ node = neighbourNode1
+ isLastLeaf = isLastLeaf2
+ }
+
+ if specialTest == 1 {
+ if len(accountProof1) != 2 {
+ panic("account should be in the second level (one branch above it)")
+ }
+ accountProof, accountProof1, sRoot, cRoot = modifyAccountSpecialEmptyTrie(addrh, accountProof1[len(accountProof1)-1])
+ }
+
+ // Needs to be after `specialTest == 1` preparation:
+ nodes = append(nodes, GetStartNode(proofType, sRoot, cRoot, specialTest))
+
+ // In convertProofToWitness, we can't use account address in its original form (non-hashed), because
+ // of the "special" test for which we manually manipulate the "hashed" address and we don't have a preimage.
+ // TODO: addr is used for calling GetProof for modified extension node only, might be done in a different way
+ nodesAccount :=
+ convertProofToWitness(statedb, addr, addrh, accountProof, accountProof1, aExtNibbles1, aExtNibbles2, tMod.Key, accountAddr, aNode, true, tMod.Type == AccountDoesNotExist, false, aIsLastLeaf)
+ nodes = append(nodes, nodesAccount...)
+ nodesStorage :=
+ convertProofToWitness(statedb, addr, addrh, storageProof, storageProof1, extNibbles1, extNibbles2, tMod.Key, keyHashed, node, false, false, tMod.Type == StorageDoesNotExist, isLastLeaf)
+ nodes = append(nodes, nodesStorage...)
+ nodes = append(nodes, GetEndNode())
+ } else {
+ nodes = obtainAccountProofAndConvertToWitness(i, tMod, len(trieModifications), statedb, specialTest)
+ }
+ }
+
+ return nodes
+}
+
+// prepareWitness obtains the GetProof proof before and after the modification for each
+// of the modification. It then converts the two proofs into an MPT circuit witness for each of
+// the modifications and stores it into a file.
+func prepareWitness(testName string, trieModifications []TrieModification, statedb *state.StateDB) {
+ nodes := obtainTwoProofsAndConvertToWitness(trieModifications, statedb, 0)
+ StoreNodes(testName, nodes)
+}
+
+// prepareWitnessSpecial obtains the GetProof proof before and after the modification for each
+// of the modification. It then converts the two proofs into an MPT circuit witness for each of
+// the modifications and stores it into a file. It is named special as the flag specialTest
+// instructs the function obtainTwoProofsAndConvertToWitness to prepare special trie states, like moving
+// the account leaf in the first trie level.
+func prepareWitnessSpecial(testName string, trieModifications []TrieModification, statedb *state.StateDB, specialTest byte) {
+ nodes := obtainTwoProofsAndConvertToWitness(trieModifications, statedb, specialTest)
+ StoreNodes(testName, nodes)
+}
+
+// updateStateAndPrepareWitness updates the state according to the specified keys and values and then
+// prepares a witness for the proof before given modifications and after.
+// This function is used when some specific trie state needs to be prepared before the actual modifications
+// take place and for which the witness is needed.
+func updateStateAndPrepareWitness(testName string, keys, values []common.Hash, addresses []common.Address,
+ trieModifications []TrieModification) {
+ blockNum := 13284469 // arbitrary mainnet block used as the base state for tests
+ blockNumberParent := big.NewInt(int64(blockNum))
+ blockHeaderParent := oracle.PrefetchBlock(blockNumberParent, true, nil)
+ database := state.NewDatabase(blockHeaderParent)
+ statedb, _ := state.New(blockHeaderParent.Root, database, nil)
+
+ statedb.DisableLoadingRemoteAccounts()
+
+ // Set the state needed for the test:
+ for i := 0; i < len(keys); i++ {
+ statedb.SetState(addresses[i], keys[i], values[i])
+ }
+
+ prepareWitness(testName, trieModifications, statedb)
+}
+
+// convertProofToWitness takes two GetProof proofs (before and after a single modification) and prepares
+// a witness for the MPT circuit. Alongside, it prepares the byte streams that need to be hashed
+// and inserted into the Keccak lookup table.
+func convertProofToWitness(statedb *state.StateDB, addr common.Address, addrh []byte, proof1, proof2, extNibblesS, extNibblesC [][]byte, storage_key common.Hash, key []byte, neighbourNode []byte,
+ isAccountProof, nonExistingAccountProof, nonExistingStorageProof, isShorterProofLastLeaf bool) []Node {
+ rows := make([][]byte, 0)
+ toBeHashed := make([][]byte, 0)
+
+ minLen := len(proof1)
+ if len(proof2) < minLen {
+ minLen = len(proof2)
+ }
+
+ keyIndex := 0
+ len1 := len(proof1)
+ len2 := len(proof2)
+
+ // When a value in the trie is updated, both proofs are of the same length.
+ // Otherwise, when a value is added (not updated) and there is no node which needs to be changed
+ // into a branch, one proof has a leaf and one does not have it.
+ // The third option is when a value is added and the existing leaf is turned into a branch,
+ // in this case we have an additional branch in C proof (when deleting a value causes
+ // that a branch with two leaves turns into a leaf, we have an additional branch in S proof).
+
+ additionalBranch := false
+ if len1 < len2 && len1 > 0 { // len = 0 when the trie is empty
+ // Check if the last proof element in the shorter proof is a leaf -
+ // if it is, then there is an additional branch.
+ additionalBranch = !isBranch(proof1[len1-1])
+ } else if len2 < len1 && len2 > 0 {
+ additionalBranch = !isBranch(proof2[len2-1])
+ }
+
+ upTo := minLen
+ if (len1 != len2) && additionalBranch {
+ upTo = minLen - 1
+ }
+
+ var isExtension bool
+ extensionNodeInd := 0
+
+ var extListRlpBytes []byte
+ var extValues [][]byte
+ for i := 0; i < 4; i++ {
+ extValues = append(extValues, make([]byte, valueLen))
+ }
+
+ var nodes []Node
+
+ // branchC16/branchC1 track whether the current nibble multiplies by 16 or 1; they are
+ // toggled at every branch except after an even-length extension node.
+ branchC16 := byte(0)
+ branchC1 := byte(1)
+ for i := 0; i < upTo; i++ {
+ if !isBranch(proof1[i]) {
+ if i != len1-1 { // extension node
+ var numberOfNibbles byte
+ isExtension = true
+ numberOfNibbles, extListRlpBytes, extValues = prepareExtensions(extNibblesS, extensionNodeInd, proof1[i], proof2[i])
+
+ keyIndex += int(numberOfNibbles)
+ extensionNodeInd++
+ continue
+ }
+
+ // Last element of both proofs is a leaf: the simple update case.
+ l := len(proof1)
+ var node Node
+ if isAccountProof {
+ node = prepareAccountLeafNode(addr, addrh, proof1[l-1], proof2[l-1], nil, key, false, false, false)
+ } else {
+ node = prepareStorageLeafNode(proof1[l-1], proof2[l-1], nil, storage_key, key, nonExistingStorageProof, false, false, false, false)
+ }
+
+ nodes = append(nodes, node)
+ } else {
+ switchC16 := true // If not extension node, switchC16 = true.
+ if isExtension {
+ keyLen := getExtensionNodeKeyLen(proof1[i-1])
+ if keyLen == 1 {
+ switchC16 = false
+ } else {
+ if proof1[i-1][2] != 0 { // If even, switch16 = true.
+ switchC16 = false
+ }
+ }
+ }
+ if switchC16 {
+ if branchC16 == 1 {
+ branchC16 = 0
+ branchC1 = 1
+ } else {
+ branchC16 = 1
+ branchC1 = 0
+ }
+ }
+
+ // When the branch is preceded by an extension node, pass it along so the node
+ // carries the extension data too.
+ var extNode1 []byte = nil
+ var extNode2 []byte = nil
+ if isExtension {
+ extNode1 = proof1[i-1]
+ extNode2 = proof2[i-1]
+ }
+
+ bNode := prepareBranchNode(proof1[i], proof2[i], extNode1, extNode2, extListRlpBytes, extValues,
+ key[keyIndex], key[keyIndex], branchC16, branchC1, false, false, isExtension, false, false)
+ nodes = append(nodes, bNode)
+
+ keyIndex += 1
+
+ isExtension = false
+ }
+ }
+
+ if len1 != len2 {
+ if additionalBranch {
+ leafRow0 := proof1[len1-1] // To compute the drifted position.
+ if len1 > len2 {
+ leafRow0 = proof2[len2-1]
+ }
+
+ isModifiedExtNode, _, numberOfNibbles, branchC16, bNode := addBranchAndPlaceholder(proof1, proof2, extNibblesS, extNibblesC,
+ leafRow0, key, neighbourNode,
+ keyIndex, extensionNodeInd, additionalBranch,
+ isAccountProof, nonExistingAccountProof, isShorterProofLastLeaf, branchC16, branchC1, &toBeHashed)
+
+ nodes = append(nodes, bNode)
+
+ if isAccountProof {
+ // Add account leaf after branch placeholder:
+ var node Node
+ if !isModifiedExtNode {
+ node = prepareAccountLeafNode(addr, addrh, proof1[len1-1], proof2[len2-1], neighbourNode, key, false, false, false)
+ } else {
+ isSModExtension := false
+ isCModExtension := false
+ if len2 > len1 {
+ isSModExtension = true
+ } else {
+ isCModExtension = true
+ }
+ node = prepareLeafAndPlaceholderNode(addr, addrh, proof1, proof2, storage_key, key, nonExistingAccountProof, isAccountProof, isSModExtension, isCModExtension)
+ }
+ nodes = append(nodes, node)
+ } else {
+ // Add storage leaf after branch placeholder
+ var node Node
+ if !isModifiedExtNode {
+ node = prepareStorageLeafNode(proof1[len1-1], proof2[len2-1], neighbourNode, storage_key, key, nonExistingStorageProof, false, false, false, false)
+ } else {
+ isSModExtension := false
+ isCModExtension := false
+ if len2 > len1 {
+ isSModExtension = true
+ } else {
+ isCModExtension = true
+ }
+ node = prepareLeafAndPlaceholderNode(addr, addrh, proof1, proof2, storage_key, key, nonExistingAccountProof, isAccountProof, isSModExtension, isCModExtension)
+ }
+ nodes = append(nodes, node)
+ }
+
+ // When a proof element is a modified extension node (new extension node appears at the position
+ // of the existing extension node), additional rows are added (extension node before and after
+ // modification).
+ if isModifiedExtNode {
+ // TODO
+ // NOTE(review): the produced node is printed but not appended to `nodes` — the
+ // modified-extension-node path looks unfinished (see commented append below).
+ modExtensionNode := prepareModExtensionNode(statedb, addr, &rows, proof1, proof2, extNibblesS, extNibblesC, key, neighbourNode,
+ keyIndex, extensionNodeInd, numberOfNibbles, additionalBranch,
+ isAccountProof, nonExistingAccountProof, isShorterProofLastLeaf, branchC16, branchC1, &toBeHashed)
+ // node = append(nodes, modExtensionNode)
+ fmt.Println(modExtensionNode)
+ }
+ } else {
+ node := prepareLeafAndPlaceholderNode(addr, addrh, proof1, proof2, storage_key, key, nonExistingAccountProof, isAccountProof, false, false)
+ nodes = append(nodes, node)
+ }
+ } else if isBranch(proof2[len(proof2)-1]) {
+ // Account proof has drifted leaf as the last row, storage proof has non-existing-storage row
+ // as the last row.
+ // When non existing proof and only the branches are returned, we add a placeholder leaf.
+ // This is to enable the lookup (in account leaf row), most constraints are disabled for these rows.
+
+ if isAccountProof {
+ node := prepareAccountLeafPlaceholderNode(addr, addrh, key, keyIndex)
+ nodes = append(nodes, node)
+ } else {
+ node := prepareStorageLeafPlaceholderNode(storage_key, key, keyIndex)
+ nodes = append(nodes, node)
+ }
+ }
+
+ return nodes
+}
diff --git a/mpt-witness-generator/witness/test_tools.go b/mpt-witness-generator/witness/test_tools.go new file mode 100644 index 0000000000..dbb65041cc --- /dev/null +++ b/mpt-witness-generator/witness/test_tools.go @@ -0,0 +1,225 @@
+package witness
+
+import (
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/privacy-scaling-explorations/mpt-witness-generator/trie"
+)
+
+// moveAccountFromSecondToFirstLevel moves an account from the second level to the first level (key stored in a leaf
+// gets longer). The function is used to enable tests with an account being in the first trie level.
+// Byte layout notes: index 1 is the RLP payload length (+1 for the extra nibble byte added),
+// 161/32 are the RLP string prefix and compact-key prefix, and the `-48` strips the 0x30
+// offset of the original compact key byte.
+func moveAccountFromSecondToFirstLevel(firstNibble byte, account []byte) []byte {
+ newAccount := make([]byte, len(account)+1)
+ newAccount[0] = account[0]
+ newAccount[1] = account[1] + 1
+ newAccount[2] = 161
+ newAccount[3] = 32
+ // The following code relies on the account being in the second level (and not being
+ // after an extension node).
+ newAccount[4] = firstNibble*16 + account[3] - 48
+ for i := 0; i < 31; i++ {
+ newAccount[5+i] = account[4+i]
+ }
+ for i := 0; i < int(account[1]-33); i++ {
+ newAccount[4+32+i] = account[35+i]
+ }
+
+ return newAccount
+}
+
+// moveAccountFromThirdToSecondLevel moves the account from the third level to the second level (key stored in a leaf
+// gets longer).
+func moveAccountFromThirdToSecondLevel(addrh []byte, account []byte) []byte {
+ // account = [248, 105, 160, 32, 77, 78,...]
+ newAccount := make([]byte, len(account))
+ copy(newAccount, account)
+ // The following code relies on the account being in the third level (and not being
+ // after an extension node).
+ // Prepend the branch position nibble to the leaf key (48 = 0x30 compact-key offset).
+ posInBranch := addrh[0] % 16
+ newAccount[3] = 48 + posInBranch
+
+ return newAccount
+}
+
+// modifyAccountSpecialEmptyTrie prepares an account leaf in the first trie level for C proof and
+// a placeholder leaf in S proof.
+// Returns the one-element S and C proofs and the corresponding S and C roots.
+func modifyAccountSpecialEmptyTrie(addrh []byte, accountProof1Last []byte) ([][]byte, [][]byte, common.Hash, common.Hash) {
+ firstNibble := addrh[0] / 16
+ newAccount := moveAccountFromSecondToFirstLevel(firstNibble, accountProof1Last)
+
+ newAccount1 := make([]byte, len(accountProof1Last)+1)
+ copy(newAccount1, newAccount)
+
+ accountProof := make([][]byte, 1)
+ accountProof[0] = newAccount
+ accountProof1 := make([][]byte, 1)
+ accountProof1[0] = newAccount1
+
+ // leaf in S proof is a placeholder, thus newAccount needs to have an empty trie hash
+ // for the root:
+ emptyTrieHash := []byte{86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33}
+ rootStart := len(newAccount) - 64 - 1
+
+ for i := 0; i < 32; i++ {
+ newAccount[rootStart+i] = emptyTrieHash[i]
+ }
+
+ hasher := trie.NewHasher(false)
+ sRoot := common.BytesToHash(hasher.HashData(newAccount))
+ cRoot := common.BytesToHash(hasher.HashData(newAccount1))
+
+ return accountProof, accountProof1, sRoot, cRoot
+}
+
+// modifyAccountProofSpecialTests modifies S and C account proofs to serve for special tests - like moving
+// the account leaf in the first trie level.
+func modifyAccountProofSpecialTests(addrh, accountAddr []byte, sRoot, cRoot common.Hash, accountProof, accountProof1 [][]byte, aNeighbourNode2 []byte, specialTest byte) ([]byte, []byte, [][]byte, [][]byte, common.Hash, common.Hash) {
+	if specialTest == 1 {
+		account := accountProof1[len(accountProof1)-1]
+		if len(accountProof1) != 2 {
+			panic("account should be in the second level (one branch above it)")
+		}
+		firstNibble := addrh[0] / 16
+		newAccount := moveAccountFromSecondToFirstLevel(firstNibble, account)
+
+		newAccount1 := make([]byte, len(account)+1)
+		copy(newAccount1, newAccount)
+
+		// change nonce:
+		newAccount1[3+33+4] = 1
+
+		accountProof = make([][]byte, 1)
+		accountProof[0] = newAccount
+		accountProof1 = make([][]byte, 1)
+		accountProof1[0] = newAccount1
+
+		hasher := trie.NewHasher(false)
+		sRoot = common.BytesToHash(hasher.HashData(newAccount))
+		cRoot = common.BytesToHash(hasher.HashData(newAccount1))
+	} else if specialTest == 3 {
+		if len(accountProof) != 2 && len(accountProof1) != 3 {
+			panic("account should be in the second level (one branch above it)")
+		}
+		accountS := accountProof[len(accountProof)-1]
+		account1Pos := addrh[0] / 16
+		// driftedPos := ((addrh[0] / 16) + 1) % 16 // something else than the first nibble of addrh
+		driftedPos := byte(0) // TODO: remove hardcoding
+		// addresses of both account now differ only in the first nibble (this is not needed,
+		// it is just in this construction)
+		newAccount := moveAccountFromSecondToFirstLevel(driftedPos, accountS)
+
+		hasher := trie.NewHasher(false)
+
+		firstNibble := accountProof[1][3] - 48
+		// [248, 81, 128, 128, ...
+		branch := accountProof1[len(accountProof1)-2]
+		branch1 := make([]byte, len(branch))
+		for i := 0; i < len(branch1); i++ {
+			branch1[i] = 128
+		}
+		branch1[0] = branch[0]
+		branch1[1] = branch[1]
+
+		// drifted leaf (aNeighbourNode2) has one nibble more after moved one level up, we need to recompute the hash
+		fmt.Println(driftedPos)
+		aNeighbourNode2[3] = 48 + firstNibble
+		driftedLeafHash := common.BytesToHash(hasher.HashData(aNeighbourNode2))
+		// branch is now one level higher, both leaves are at different positions now
+		// (one nibble to the left)
+
+		branch1[2+int(driftedPos)] = 160
+		for i := 0; i < 32; i++ {
+			branch1[2+int(driftedPos)+1+i] = driftedLeafHash[i]
+		}
+
+		accountC3 := accountProof1[len(accountProof1)-1]
+		newAccountC2 := moveAccountFromThirdToSecondLevel(addrh, accountC3)
+
+		driftedLeafHash2 := common.BytesToHash(hasher.HashData(newAccountC2))
+		branch1[2+32+int(account1Pos)] = 160
+		for i := 0; i < 32; i++ {
+			branch1[2+32+int(account1Pos)+1+i] = driftedLeafHash2[i]
+		}
+
+		// Let us have placeholder branch in the first level
+		accountProof = make([][]byte, 1)
+		accountProof[0] = newAccount
+		accountProof1 = make([][]byte, 2)
+		accountProof1[0] = branch1
+		accountProof1[1] = newAccountC2
+
+		sRoot = common.BytesToHash(hasher.HashData(accountProof[0]))
+		cRoot = common.BytesToHash(hasher.HashData(accountProof1[0]))
+	} else if specialTest == 4 {
+		// This test simulates having only one account in the state trie:
+		account := []byte{248, 106, 161, 32, 252, 237, 52, 8, 133, 130, 180, 167, 143, 97, 28, 115, 102, 25, 94, 62, 148, 249, 8, 6, 55, 244, 16, 75, 187, 208, 208, 127, 251, 120, 61, 73, 184, 70, 248, 68, 128, 128, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112}
+
+		// Note: the requested address (for which the account doesn't exist) should have
+		// a different address as the only one in the trie.
+
+		accountProof = make([][]byte, 1)
+		accountProof[0] = account
+		accountProof1 = make([][]byte, 1)
+		accountProof1[0] = account
+
+		hasher := trie.NewHasher(false)
+		sRoot = common.BytesToHash(hasher.HashData(accountProof[0]))
+		cRoot = common.BytesToHash(hasher.HashData(accountProof1[0]))
+	} else if specialTest == 5 {
+		ext := []byte{226, 24, 160, 194, 200, 39, 82, 205, 97, 69, 91, 92, 98, 218, 180, 101, 42, 171, 150, 75, 251, 147, 154, 59, 215, 26, 164, 201, 90, 199, 185, 190, 205, 167, 64}
+		branch := []byte{248, 81, 128, 128, 128, 160, 53, 8, 52, 235, 77, 44, 138, 235, 20, 250, 15, 188, 176, 83, 178, 108, 212, 224, 40, 146, 117, 31, 154, 215, 103, 179, 234, 32, 168, 86, 167, 44, 128, 128, 128, 128, 128, 160, 174, 121, 120, 114, 157, 43, 164, 140, 103, 235, 28, 242, 186, 33, 76, 152, 157, 197, 109, 149, 229, 229, 22, 189, 233, 207, 92, 195, 82, 121, 240, 3, 128, 128, 128, 128, 128, 128, 128}
+		// The original proof returns `ext` and `branch` in 2. and 3. level. We move them to 1. and 2. level.
+
+		fmt.Println(ext)
+		fmt.Println(branch)
+
+		newAddrBytes := make([]byte, 32)
+		newAddrNibbles := make([]byte, 65)
+		newAddrNibbles[64] = accountAddr[16]
+		for i := 0; i < 63; i++ {
+			newAddrNibbles[i] = accountAddr[i+1]
+		}
+		newAddrNibbles[63] = accountAddr[0]
+
+		for i := 0; i < 32; i++ {
+			newAddrBytes[i] = newAddrNibbles[2*i]*16 + newAddrNibbles[2*i+1]
+		}
+
+		// We need to fix leaf key (adding last nibble):
+		// Original leaf:
+		// leaf := []byte{248, 104, 159, 59, 114, 3, 66, 104, 61, 61, 61, 175, 101, 56, 194, 213, 150, 208, 62, 118, 28, 175, 138, 112, 119, 76, 88, 109, 21, 102, 195, 8, 18, 185, 184, 70, 248, 68, 128, 128, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112}
+		leaf := []byte{248, 105, 160, 32, 59, 114, 3, 66, 104, 61, 61, 61, 175, 101, 56, 194, 213, 150, 208, 62, 118, 28, 175, 138, 112, 119, 76, 88, 109, 21, 102, 195, 8, 18, 185, 184, 70, 248, 68, 128, 128, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112}
+		for i := 0; i < 31; i++ {
+			leaf[4+i] = newAddrBytes[i+1]
+		}
+
+		hasher := trie.NewHasher(false)
+		// Update leaf hash in branch
+		newLeafHash := common.BytesToHash(hasher.HashData(leaf))
+		branch[2+int(newAddrNibbles[1])] = 160
+		for i := 0; i < 32; i++ {
+			branch[2+int(newAddrNibbles[1])+1+i] = newLeafHash[i]
+		}
+
+		// Update branch hash in extension node
+		newBranchHash := common.BytesToHash(hasher.HashData(branch))
+		for i := 0; i < 32; i++ {
+			ext[3+i] = newBranchHash[i]
+		}
+
+		accountAddr = newAddrNibbles
+		addrh = newAddrBytes
+
+		accountProof = make([][]byte, 3)
+		accountProof[0] = ext
+		accountProof[1] = branch
+		accountProof[2] = leaf
+		accountProof1 = accountProof
+
+		sRoot = common.BytesToHash(hasher.HashData(accountProof[0]))
+		cRoot = common.BytesToHash(hasher.HashData(accountProof1[0]))
+	}
+
+	return addrh, accountAddr, accountProof, accountProof1, sRoot, cRoot
+}
diff --git a/mpt-witness-generator/witness/util.go b/mpt-witness-generator/witness/util.go
new file mode 100644
index 0000000000..171a223dd7
--- /dev/null
+++ b/mpt-witness-generator/witness/util.go
@@ -0,0 +1,77 @@
+package witness
+
+import (
+	"encoding/json"
+	"log"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+)
+
+// check aborts the process on a non-nil error; witness
+// generation has no sensible way to continue after one.
+func check(err error) {
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+
+// MatrixToJson serializes rows as a JSON array of arrays
+// of numbers. Had some problems with json.Marshal, so the
+// JSON is prepared manually.
+func MatrixToJson(rows [][]byte) string {
+	// strings.Builder avoids the quadratic cost of
+	// repeated string concatenation.
+	var sb strings.Builder
+	sb.WriteString("[")
+	for i := 0; i < len(rows); i++ {
+		sb.WriteString(listToJson(rows[i]))
+		if i != len(rows)-1 {
+			sb.WriteString(",")
+		}
+	}
+	sb.WriteString("]")
+
+	return sb.String()
+}
+
+// listToJson serializes a single row as a JSON array of
+// byte values.
+func listToJson(row []byte) string {
+	var sb strings.Builder
+	sb.WriteString("[")
+	for j := 0; j < len(row); j++ {
+		sb.WriteString(strconv.Itoa(int(row[j])))
+		if j != len(row)-1 {
+			sb.WriteString(",")
+		}
+	}
+	sb.WriteString("]")
+
+	return sb.String()
+}
+
+// StoreNodes writes nodes as JSON to
+// ../generated_witnesses/<testName>.json, creating the
+// directory when needed.
+func StoreNodes(testName string, nodes []Node) {
+	name := testName + ".json"
+	path := "../generated_witnesses/" + name
+
+	// Create the directories if they do not exist yet
+	err := os.MkdirAll(filepath.Dir(path), 0755)
+	check(err)
+
+	f, err := os.Create(path)
+	check(err)
+	defer f.Close()
+
+	// Abort on marshalling errors instead of only printing
+	// them and then writing an empty/invalid file.
+	b, err := json.Marshal(nodes)
+	check(err)
+
+	_, err = f.WriteString(string(b))
+	check(err)
+}
diff --git a/mpt-witness-generator/witness_gen_wrapper.go b/mpt-witness-generator/witness_gen_wrapper.go
new file mode 100644
index 0000000000..60b6455fe9
--- /dev/null
+++ b/mpt-witness-generator/witness_gen_wrapper.go
@@ -0,0 +1,50 @@
+package main
+
+import "C"
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/privacy-scaling-explorations/mpt-witness-generator/witness"
+)
+
+type Config struct {
+	NodeUrl  string   `json:"NodeUrl"`
+	BlockNum int      `json:"BlockNum"`
+	Addr     string   `json:"Addr"`
+	Keys     []string `json:"Keys"`
+	Values   []string `json:"Values"`
+}
+
+//export GetWitness
+func GetWitness(proofConf *C.char) *C.char {
+	var config Config
+
+	err := json.Unmarshal([]byte(C.GoString(proofConf)), &config)
+	fmt.Println(err)
+	fmt.Println(config)
+
+	trieModifications := []witness.TrieModification{}
+
+	addr := common.HexToAddress(config.Addr)
+	for i := 0; i < len(config.Keys); i++ {
+		trieMod := witness.TrieModification{
+			Type:    witness.StorageChanged,
+			Key:     common.HexToHash(config.Keys[i]),
+			Value:   common.HexToHash(config.Values[i]),
+			Address: addr,
+		}
+		trieModifications = append(trieModifications, trieMod)
+	}
+
+	proof := witness.GetWitness(config.NodeUrl, config.BlockNum, trieModifications)
+	b, err := json.Marshal(proof)
+	if err != nil {
+		fmt.Println(err)
+	}
+
+	return C.CString(string(b))
+}
+
+func main() {}