From fc9dbd359ba8c49a2882c0b558c97171dfcf14e5 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Tue, 23 Mar 2021 11:44:18 +0800 Subject: [PATCH 01/10] eth/protocols/snap: fix snap sync --- eth/protocols/snap/sync.go | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go index 1cfdef15bd74..122ef167750d 100644 --- a/eth/protocols/snap/sync.go +++ b/eth/protocols/snap/sync.go @@ -1553,6 +1553,12 @@ func (s *Syncer) processAccountResponse(res *accountResponse) { // Ensure that the response doesn't overflow into the subsequent task last := res.task.Last.Big() for i, hash := range res.hashes { + // Mark the range complete if the last is already included. + // Keep iteration to delete the extra states if exists. + if hash.Big().Cmp(last) == 0 { + res.cont = false + continue + } if hash.Big().Cmp(last) > 0 { // Chunk overflown, cut off excess, but also update the boundary nodes for j := i; j < len(res.hashes); j++ { @@ -1761,6 +1767,12 @@ func (s *Syncer) processStorageResponse(res *storageResponse) { // Ensure the response doesn't overflow into the subsequent task last := res.subTask.Last.Big() for k, hash := range res.hashes[i] { + // Mark the range complete if the last is already included. + // Keep iteration to delete the extra states if exists. + if hash.Big().Cmp(last) == 0 { + res.cont = false + continue + } if hash.Big().Cmp(last) > 0 { // Chunk overflown, cut off excess, but also update the boundary for l := k; l < len(res.hashes[i]); l++ { @@ -1793,6 +1805,10 @@ func (s *Syncer) processStorageResponse(res *storageResponse) { skipped++ continue } + if _, err := res.overflow.Get(it.Key()); err == nil { + skipped++ + continue + } } // Node is not a boundary, persist to disk batch.Put(it.Key(), it.Value()) From f50b88ac2c9de8d890225d6558387393fbb9ec42 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Tue, 23 Mar 2021 16:05:08 +0800 Subject: [PATCH 02/10] eth/protocols/snap: fix tests --- eth/protocols/snap/sync_test.go | 309 ++++++++++++++++++++++++-------- 1 file changed, 232 insertions(+), 77 deletions(-) diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go index 0b048786e8a1..abdaf2681d52 100644 --- a/eth/protocols/snap/sync_test.go +++ b/eth/protocols/snap/sync_test.go @@ -111,10 +111,12 @@ func BenchmarkHashing(b *testing.B) { }) } -type storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error -type accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error -type trieHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error -type codeHandlerFunc func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error +type ( + accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error + storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error + trieHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error + codeHandlerFunc func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error +) type testPeer struct { id string @@ -147,7 +149,6 @@ func newTestPeer(id string, t *testing.T, cancelCh chan struct{}) *testPeer { //stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true)) 
//peer.logger.SetHandler(stderrHandler) return peer - } func (t *testPeer) ID() string { return t.id } @@ -155,7 +156,7 @@ func (t *testPeer) Log() log.Logger { return t.logger } func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error { t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes)) - go t.accountRequestHandler(t, id, root, origin, bytes) + go t.accountRequestHandler(t, id, root, origin, limit, bytes) return nil } @@ -211,8 +212,8 @@ func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, } // defaultAccountRequestHandler is a well-behaving handler for AccountRangeRequests -func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, cap uint64) error { - keys, vals, proofs := createAccountRequestResponse(t, root, origin, cap) +func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { + keys, vals, proofs := createAccountRequestResponse(t, root, origin, limit, cap) if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil { t.logger.Error("remote error on delivery", "error", err) t.test.Errorf("Remote side rejected our delivery: %v", err) @@ -223,8 +224,11 @@ func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, orig return nil } -func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) { +func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) { var size uint64 + if limit == (common.Hash{}) { + limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } for _, entry := range t.accountValues { if size > cap { break @@ -234,20 +238,22 @@ func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.H vals = append(vals, entry.v) size += uint64(32 + len(entry.v)) } + // If we've exceeded the request threshold, abort + if limit != (common.Hash{}) && bytes.Compare(entry.k, limit[:]) >= 0 { + break + } } // Unless we send the entire trie, we need to supply proofs - // Actually, we need to supply proofs either way! This seems tob be an implementation + // Actually, we need to supply proofs either way! 
This seems to be an implementation // quirk in go-ethereum proof := light.NewNodeSet() if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil { - t.logger.Error("Could not prove inexistence of origin", "origin", origin, - "error", err) + t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err) } if len(keys) > 0 { lastK := (keys[len(keys)-1])[:] if err := t.accountTrie.Prove(lastK, 0, proof); err != nil { - t.logger.Error("Could not prove last item", - "error", err) + t.logger.Error("Could not prove last item", "error", err) } } for _, blob := range proof.NodeList() { @@ -280,48 +286,40 @@ func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max return nil } -func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) { - var ( - size uint64 - limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") - ) - if len(bLimit) > 0 { - limit = common.BytesToHash(bLimit) - } - var origin common.Hash - if len(bOrigin) > 0 { - origin = common.BytesToHash(bOrigin) - } - - var limitExceeded bool - var incomplete bool +func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) { + var size uint64 for _, account := range accounts { - - var keys []common.Hash - var vals [][]byte + // The first account might start from a different origin and end sooner + var originHash common.Hash + if len(origin) > 0 { + originHash = common.BytesToHash(origin) + } + var limitHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + if len(limit) > 0 { + limitHash = common.BytesToHash(limit) + } + var ( + keys []common.Hash + vals [][]byte + ) for _, entry := range t.storageValues[account] { - if limitExceeded { - incomplete = true - break - } - if bytes.Compare(entry.k, origin[:]) < 0 { - incomplete = true + if bytes.Compare(entry.k, originHash[:]) < 0 { continue } keys = append(keys, common.BytesToHash(entry.k)) vals = append(vals, entry.v) size += uint64(32 + len(entry.v)) - if bytes.Compare(entry.k, limit[:]) >= 0 { - limitExceeded = true - } - if size > max { - limitExceeded = true + if bytes.Compare(entry.k, limitHash[:]) >= 0 || size > max { + break } } hashes = append(hashes, keys) slots = append(slots, vals) - if incomplete { + // Generate the Merkle proofs for the first and last storage slot, but + // only if the response was capped. If the entire storage trie included + // in the response, no need for any proofs. + if originHash != (common.Hash{}) || size >= max { // If we're aborting, we need to prove the first and last item // This terminates the response (and thus the loop) proof := light.NewNodeSet() @@ -330,9 +328,8 @@ func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []comm // Here's a potential gotcha: when constructing the proof, we cannot // use the 'origin' slice directly, but must use the full 32-byte // hash form. 
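			// (trie.Prove treats its argument as the literal key bytes, so a
			// shorter-than-32-byte slice would prove a different, shorter key)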
- if err := stTrie.Prove(origin[:], 0, proof); err != nil { - t.logger.Error("Could not prove inexistence of origin", "origin", origin, - "error", err) + if err := stTrie.Prove(originHash[:], 0, proof); err != nil { + t.logger.Error("Could not prove inexistence of origin", "origin", originHash, "error", err) } if len(keys) > 0 { lastK := (keys[len(keys)-1])[:] @@ -350,21 +347,17 @@ func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []comm } // emptyRequestAccountRangeFn is a rejects AccountRangeRequests -func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error { - var proofs [][]byte - var keys []common.Hash - var vals [][]byte - t.remote.OnAccounts(t, requestId, keys, vals, proofs) +func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { + t.remote.OnAccounts(t, requestId, nil, nil, nil) return nil } -func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error { +func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { return nil } func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error { - var nodes [][]byte - t.remote.OnTrieNodes(t, requestId, nodes) + t.remote.OnTrieNodes(t, requestId, nil) return nil } @@ -373,10 +366,7 @@ func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common. } func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error { - var hashes [][]common.Hash - var slots [][][]byte - var proofs [][]byte - t.remote.OnStorage(t, requestId, hashes, slots, proofs) + t.remote.OnStorage(t, requestId, nil, nil, nil) return nil } @@ -422,16 +412,16 @@ func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Ha return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500) } -func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error { - return defaultAccountRequestHandler(t, requestId, root, origin, 500) +func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { + return defaultAccountRequestHandler(t, requestId, root, origin, limit, 500) } //func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error { // return defaultAccountRequestHandler(t, requestId-1, root, origin, 500) //} -func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error { - hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, cap) +func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { + hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, limit, cap) if len(proofs) > 0 { proofs = proofs[1:] } @@ -479,7 +469,7 @@ func TestSyncBloatedProof(t *testing.T) { source.accountTrie = sourceAccountTrie source.accountValues = elems - source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error { + source.accountRequestHandler = 
func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { var proofs [][]byte var keys []common.Hash var vals [][]byte @@ -604,7 +594,7 @@ func TestSyncWithStorage(t *testing.T) { t.Parallel() cancel := make(chan struct{}) - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false) mkSource := func(name string) *testPeer { source := newTestPeer(name, t, cancel) @@ -626,7 +616,7 @@ func TestMultiSyncManyUseless(t *testing.T) { cancel := make(chan struct{}) - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) mkSource := func(name string, a, b, c bool) *testPeer { source := newTestPeer(name, t, cancel) @@ -668,7 +658,7 @@ func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) { cancel := make(chan struct{}) - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) mkSource := func(name string, a, b, c bool) *testPeer { source := newTestPeer(name, t, cancel) @@ -708,7 +698,7 @@ func TestMultiSyncManyUnresponsive(t *testing.T) { cancel := make(chan struct{}) - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) mkSource := func(name string, a, b, c bool) *testPeer { source := newTestPeer(name, t, cancel) @@ -754,6 +744,31 @@ func checkStall(t *testing.T, cancel chan struct{}) chan struct{} { return testDone } +// TestSyncBoundaryAccountTrie tests sync against a few normal peers, but the +// account trie has a few boundary elements. +func TestSyncBoundaryAccountTrie(t *testing.T) { + t.Parallel() + + cancel := make(chan struct{}) + sourceAccountTrie, elems := makeBoundaryAccountTrie(3000) + + mkSource := func(name string) *testPeer { + source := newTestPeer(name, t, cancel) + source.accountTrie = sourceAccountTrie + source.accountValues = elems + return source + } + syncer := setupSyncer( + mkSource("peer-a"), + mkSource("peer-b"), + ) + done := checkStall(t, cancel) + if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { + t.Fatalf("sync failed: %v", err) + } + close(done) +} + // TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is // consistently returning very small results func TestSyncNoStorageAndOneCappedPeer(t *testing.T) { @@ -887,6 +902,33 @@ func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) { } } +// TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the +// storage trie has a few boundary elements. 
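+// The boundary slots sit exactly on the hash-range ends that the snap syncer
+// splits large storage tries on, exercising the new hash == last handling
+// added to processStorageResponse in this series.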
+func TestSyncBoundaryStorageTrie(t *testing.T) { + t.Parallel() + + cancel := make(chan struct{}) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true) + + mkSource := func(name string) *testPeer { + source := newTestPeer(name, t, cancel) + source.accountTrie = sourceAccountTrie + source.accountValues = elems + source.storageTries = storageTries + source.storageValues = storageElems + return source + } + syncer := setupSyncer( + mkSource("peer-a"), + mkSource("peer-b"), + ) + done := checkStall(t, cancel) + if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { + t.Fatalf("sync failed: %v", err) + } + close(done) +} + // TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is // consistently returning very small results func TestSyncWithStorageAndOneCappedPeer(t *testing.T) { @@ -894,7 +936,7 @@ func TestSyncWithStorageAndOneCappedPeer(t *testing.T) { cancel := make(chan struct{}) - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false) mkSource := func(name string, slow bool) *testPeer { source := newTestPeer(name, t, cancel) @@ -927,7 +969,7 @@ func TestSyncWithStorageAndCorruptPeer(t *testing.T) { cancel := make(chan struct{}) - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) mkSource := func(name string, handler storageHandlerFunc) *testPeer { source := newTestPeer(name, t, cancel) @@ -957,7 +999,7 @@ func TestSyncWithStorageAndNonProvingPeer(t *testing.T) { cancel := make(chan struct{}) - sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) mkSource := func(name string, handler storageHandlerFunc) *testPeer { source := newTestPeer(name, t, cancel) @@ -1050,15 +1092,68 @@ func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) { entries = append(entries, elem) } sort.Sort(entries) - // Push to disk layer accTrie.Commit(nil) return accTrie, entries } -// makeAccountTrieWithStorage spits out a trie, along with the leafs -func makeAccountTrieWithStorage(accounts, slots int, code bool) (*trie.Trie, entrySlice, - map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) { +// makeBoundaryAccountTrie constructs an account trie. Instead of filling +// accounts normally, this function will fill a few accounts which have +// boundary hash. 
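+// The boundaries match the task splits the syncer itself derives: with
+// step = 2^256/accountConcurrency - 1, boundary i lands on (i+1)*step + i,
+// and the final one is capped at 0xff..ff, so every boundary account sits
+// exactly on a range end.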
+func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) { + var ( + entries entrySlice + boundaries []common.Hash + db = trie.NewDatabase(rawdb.NewMemoryDatabase()) + trie, _ = trie.New(common.Hash{}, db) + ) + // Initialize boundaries + var next common.Hash + step := new(big.Int).Sub( + new(big.Int).Div( + new(big.Int).Exp(common.Big2, common.Big256, nil), + big.NewInt(accountConcurrency), + ), common.Big1, + ) + for i := 0; i < accountConcurrency; i++ { + last := common.BigToHash(new(big.Int).Add(next.Big(), step)) + if i == accountConcurrency-1 { + last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + boundaries = append(boundaries, last) + next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1)) + } + // Fill boundary accounts + for i := 0; i < len(boundaries); i++ { + value, _ := rlp.EncodeToBytes(state.Account{ + Nonce: uint64(0), + Balance: big.NewInt(int64(i)), + Root: emptyRoot, + CodeHash: getACodeHash(uint64(i)), + }) + elem := &kv{boundaries[i].Bytes(), value, false} + trie.Update(elem.k, elem.v) + entries = append(entries, elem) + } + // Fill other accounts if required + for i := uint64(1); i <= uint64(n); i++ { + value, _ := rlp.EncodeToBytes(state.Account{ + Nonce: i, + Balance: big.NewInt(int64(i)), + Root: emptyRoot, + CodeHash: getACodeHash(i), + }) + elem := &kv{key32(i), value, false} + trie.Update(elem.k, elem.v) + entries = append(entries, elem) + } + sort.Sort(entries) + trie.Commit(nil) + return trie, entries +} + +// makeAccountTrieWithStorage spits out a trie, along with the leafs +func makeAccountTrieWithStorage(accounts, slots int, code bool, boundary bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) { var ( db = trie.NewDatabase(rawdb.NewMemoryDatabase()) accTrie, _ = trie.New(common.Hash{}, db) @@ -1066,10 +1161,18 @@ func makeAccountTrieWithStorage(accounts, slots int, code bool) (*trie.Trie, ent storageTries = make(map[common.Hash]*trie.Trie) storageEntries = make(map[common.Hash]entrySlice) ) - // Make a storage trie which we reuse for the whole lot - stTrie, stEntries := makeStorageTrie(slots, db) + var ( + stTrie *trie.Trie + stEntries entrySlice + ) + if boundary { + stTrie, stEntries = makeBoundaryStorageTrie(slots, db) + } else { + stTrie, stEntries = makeStorageTrie(slots, db) + } stRoot := stTrie.Hash() + // Create n accounts in the trie for i := uint64(1); i <= uint64(accounts); i++ { key := key32(i) @@ -1114,5 +1217,57 @@ func makeStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice) { entries = append(entries, elem) } sort.Sort(entries) + trie.Commit(nil) + return trie, entries +} + +// makeBoundaryStorageTrie constructs a storage trie. Instead of filling +// storage slots normally, this function will fill a few slots which have +// boundary hash. 
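+// The boundary derivation mirrors makeBoundaryAccountTrie above, reusing the
+// same accountConcurrency-based range split for the storage slots.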
+func makeBoundaryStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice) { + var ( + entries entrySlice + boundaries []common.Hash + trie, _ = trie.New(common.Hash{}, db) + ) + // Initialize boundaries + var next common.Hash + step := new(big.Int).Sub( + new(big.Int).Div( + new(big.Int).Exp(common.Big2, common.Big256, nil), + big.NewInt(accountConcurrency), + ), common.Big1, + ) + for i := 0; i < accountConcurrency; i++ { + last := common.BigToHash(new(big.Int).Add(next.Big(), step)) + if i == accountConcurrency-1 { + last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + boundaries = append(boundaries, last) + next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1)) + } + // Fill boundary slots + for i := 0; i < len(boundaries); i++ { + key := boundaries[i] + val := []byte{0xde, 0xad, 0xbe, 0xef} + + elem := &kv{key[:], val, false} + trie.Update(elem.k, elem.v) + entries = append(entries, elem) + } + // Fill other slots if required + for i := uint64(1); i <= uint64(n); i++ { + slotKey := key32(i) + key := crypto.Keccak256Hash(slotKey[:]) + + slotValue := key32(i) + rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:])) + + elem := &kv{key[:], rlpSlotValue, false} + trie.Update(elem.k, elem.v) + entries = append(entries, elem) + } + sort.Sort(entries) + trie.Commit(nil) return trie, entries } From 60d68ac816354bfd3f0ff9d2df90542b878d0a95 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Tue, 23 Mar 2021 16:09:27 +0800 Subject: [PATCH 03/10] eth: fix tiny --- eth/protocols/snap/sync_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go index abdaf2681d52..4d5aa05918d1 100644 --- a/eth/protocols/snap/sync_test.go +++ b/eth/protocols/snap/sync_test.go @@ -239,7 +239,7 @@ func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.H size += uint64(32 + len(entry.v)) } // If we've exceeded the request threshold, abort - if limit != (common.Hash{}) && bytes.Compare(entry.k, limit[:]) >= 0 { + if bytes.Compare(entry.k, limit[:]) >= 0 { break } } From e849f9dca1d9931c482f4d736ed6fa224c5f3f95 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Tue, 23 Mar 2021 20:35:11 +0800 Subject: [PATCH 04/10] eth: update tests --- eth/protocols/snap/sync_test.go | 48 ++++++++++++++++++++++++++++++--- 1 file changed, 44 insertions(+), 4 deletions(-) diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go index 4d5aa05918d1..2892b7f8821a 100644 --- a/eth/protocols/snap/sync_test.go +++ b/eth/protocols/snap/sync_test.go @@ -30,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" @@ -309,7 +310,7 @@ func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []comm keys = append(keys, common.BytesToHash(entry.k)) vals = append(vals, entry.v) size += uint64(32 + len(entry.v)) - if bytes.Compare(entry.k, limitHash[:]) >= 0 || size > max { + if bytes.Compare(entry.k, limitHash[:]) >= 0 || size >= max { break } } @@ -399,10 +400,10 @@ func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max for _, h := range hashes[:1] { bytecodes = append(bytecodes, getCode(h)) } + // Missing bytecode can be retrieved again, no error expected 
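+	// (only the first requested bytecode is delivered; the syncer should
+	// accept the partial response and re-request the remaining hashes)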
if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil { - t.logger.Error("remote error on delivery", "error", err) - // Mimic the real-life handler, which drops a peer on errors - t.remote.Unregister(t.id) + t.test.Errorf("Remote side rejected our delivery: %v", err) + close(t.cancelCh) } return nil } @@ -1271,3 +1272,42 @@ func makeBoundaryStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice) trie.Commit(nil) return trie, entries } + +func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) { + triedb := trie.NewDatabase(db) + accTrie, err := trie.New(root, triedb) + if err != nil { + t.Fatal(err) + } + accounts, slots := 0, 0 + accIt := trie.NewIterator(accTrie.NodeIterator(nil)) + for accIt.Next() { + var acc struct { + Nonce uint64 + Balance *big.Int + Root common.Hash + CodeHash []byte + } + if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil { + log.Crit("Invalid account encountered during snapshot creation", "err", err) + } + accounts++ + if acc.Root != emptyRoot { + storeTrie, err := trie.NewSecure(acc.Root, triedb) + if err != nil { + t.Fatal(err) + } + storeIt := trie.NewIterator(storeTrie.NodeIterator(nil)) + for storeIt.Next() { + slots++ + } + if err := storeIt.Err; err != nil { + t.Fatal(err) + } + } + } + if err := accIt.Err; err != nil { + t.Fatal(err) + } + t.Logf("accounts: %d, slots: %d", accounts, slots) +} From 9abfdcc2eee944b4a535d90f5243749ee578c785 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Tue, 23 Mar 2021 21:19:25 +0800 Subject: [PATCH 05/10] eth: update tests --- eth/protocols/snap/sync_test.go | 378 ++++++++++++++++++++++---------- 1 file changed, 262 insertions(+), 116 deletions(-) diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go index 2892b7f8821a..18ad2bd6aebc 100644 --- a/eth/protocols/snap/sync_test.go +++ b/eth/protocols/snap/sync_test.go @@ -23,6 +23,7 @@ import ( "fmt" "math/big" "sort" + "sync" "testing" "time" @@ -133,10 +134,10 @@ type testPeer struct { storageRequestHandler storageHandlerFunc trieRequestHandler trieHandlerFunc codeRequestHandler codeHandlerFunc - cancelCh chan struct{} + term func() } -func newTestPeer(id string, t *testing.T, cancelCh chan struct{}) *testPeer { +func newTestPeer(id string, t *testing.T, term func()) *testPeer { peer := &testPeer{ id: id, test: t, @@ -145,7 +146,7 @@ func newTestPeer(id string, t *testing.T, cancelCh chan struct{}) *testPeer { trieRequestHandler: defaultTrieRequestHandler, storageRequestHandler: defaultStorageRequestHandler, codeRequestHandler: defaultCodeRequestHandler, - cancelCh: cancelCh, + term: term, } //stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true)) //peer.logger.SetHandler(stderrHandler) @@ -216,10 +217,8 @@ func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { keys, vals, proofs := createAccountRequestResponse(t, root, origin, limit, cap) if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil { - t.logger.Error("remote error on delivery", "error", err) t.test.Errorf("Remote side rejected our delivery: %v", err) - t.remote.Unregister(t.id) - close(t.cancelCh) + t.term() return err } return nil @@ -267,9 +266,8 @@ func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.H func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max 
uint64) error { hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max) if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil { - t.logger.Error("remote error on delivery", "error", err) t.test.Errorf("Remote side rejected our delivery: %v", err) - close(t.cancelCh) + t.term() } return nil } @@ -277,12 +275,11 @@ func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Has func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error { var bytecodes [][]byte for _, h := range hashes { - bytecodes = append(bytecodes, getCode(h)) + bytecodes = append(bytecodes, getCodeByHash(h)) } if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil { - t.logger.Error("remote error on delivery", "error", err) t.test.Errorf("Remote side rejected our delivery: %v", err) - close(t.cancelCh) + t.term() } return nil } @@ -388,7 +385,7 @@ func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max bytecodes = append(bytecodes, h[:]) } if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil { - t.logger.Error("remote error on delivery", "error", err) + t.logger.Info("remote error on delivery (as expected)", "error", err) // Mimic the real-life handler, which drops a peer on errors t.remote.Unregister(t.id) } @@ -398,12 +395,12 @@ func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error { var bytecodes [][]byte for _, h := range hashes[:1] { - bytecodes = append(bytecodes, getCode(h)) + bytecodes = append(bytecodes, getCodeByHash(h)) } // Missing bytecode can be retrieved again, no error expected if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil { t.test.Errorf("Remote side rejected our delivery: %v", err) - close(t.cancelCh) + t.term() } return nil } @@ -464,23 +461,36 @@ func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Has func TestSyncBloatedProof(t *testing.T) { t.Parallel() + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems := makeAccountTrieNoStorage(100) - cancel := make(chan struct{}) - source := newTestPeer("source", t, cancel) + source := newTestPeer("source", t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { - var proofs [][]byte - var keys []common.Hash - var vals [][]byte - + var ( + proofs [][]byte + keys []common.Hash + vals [][]byte + ) // The values for _, entry := range t.accountValues { - if bytes.Compare(origin[:], entry.k) <= 0 { - keys = append(keys, common.BytesToHash(entry.k)) - vals = append(vals, entry.v) + if bytes.Compare(entry.k, origin[:]) < 0 { + continue + } + if bytes.Compare(entry.k, limit[:]) > 0 { + continue } + keys = append(keys, common.BytesToHash(entry.k)) + vals = append(vals, entry.v) } // The proofs proof := light.NewNodeSet() @@ -502,9 +512,9 @@ func TestSyncBloatedProof(t *testing.T) { proofs = append(proofs, blob) } if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil { - t.logger.Info("remote error on delivery", "error", err) + t.logger.Info("remote error on delivery (as expected)", "error", err) + t.term() // This is actually correct, signal to exit the test successfully - 
close(t.cancelCh) } return nil } @@ -528,20 +538,28 @@ func setupSyncer(peers ...*testPeer) *Syncer { func TestSync(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems := makeAccountTrieNoStorage(100) mkSource := func(name string) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems return source } - - syncer := setupSyncer(mkSource("sourceA")) + syncer := setupSyncer(mkSource("source")) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. This caused a @@ -549,56 +567,79 @@ func TestSync(t *testing.T) { func TestSyncTinyTriePanic(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems := makeAccountTrieNoStorage(1) mkSource := func(name string) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems return source } - - syncer := setupSyncer( - mkSource("nice-a"), - ) - done := checkStall(t, cancel) + syncer := setupSyncer(mkSource("source")) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestMultiSync tests a basic sync with multiple peers func TestMultiSync(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems := makeAccountTrieNoStorage(100) mkSource := func(name string) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems return source } - syncer := setupSyncer(mkSource("sourceA"), mkSource("sourceB")) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } + close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncWithStorage tests basic sync using accounts + storage + code func TestSyncWithStorage(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false) mkSource := func(name string) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.storageTries = storageTries @@ -606,33 +647,43 @@ func TestSyncWithStorage(t *testing.T) { return source } syncer := setupSyncer(mkSource("sourceA")) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } + close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestMultiSyncManyUseless contains one good peer, and many 
which doesn't return anything valuable at all func TestMultiSyncManyUseless(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) - mkSource := func(name string, a, b, c bool) *testPeer { - source := newTestPeer(name, t, cancel) + mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer { + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.storageTries = storageTries source.storageValues = storageElems - if !a { + if !noAccount { source.accountRequestHandler = emptyRequestAccountRangeFn } - if !b { + if !noStorage { source.storageRequestHandler = emptyStorageRequestHandler } - if !c { + if !noTrieNode { source.trieRequestHandler = emptyTrieRequestHandler } return source @@ -644,9 +695,12 @@ func TestMultiSyncManyUseless(t *testing.T) { mkSource("noStorage", true, false, true), mkSource("noTrie", true, true, false), ) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } + close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestMultiSyncManyUseless contains one good peer, and many which doesn't return anything valuable at all @@ -657,24 +711,31 @@ func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) { defer func(old time.Duration) { requestTimeout = old }(requestTimeout) requestTimeout = time.Millisecond - cancel := make(chan struct{}) - + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) - mkSource := func(name string, a, b, c bool) *testPeer { - source := newTestPeer(name, t, cancel) + mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer { + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.storageTries = storageTries source.storageValues = storageElems - if !a { + if !noAccount { source.accountRequestHandler = emptyRequestAccountRangeFn } - if !b { + if !noStorage { source.storageRequestHandler = emptyStorageRequestHandler } - if !c { + if !noTrieNode { source.trieRequestHandler = emptyTrieRequestHandler } return source @@ -686,9 +747,12 @@ func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) { mkSource("noStorage", true, false, true), mkSource("noTrie", true, true, false), ) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } + close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestMultiSyncManyUnresponsive contains one good peer, and many which doesn't respond at all @@ -697,24 +761,31 @@ func TestMultiSyncManyUnresponsive(t *testing.T) { defer func(old time.Duration) { requestTimeout = old }(requestTimeout) requestTimeout = time.Millisecond - cancel := make(chan struct{}) - + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) - mkSource := func(name string, a, b, c bool) *testPeer { - source := newTestPeer(name, t, cancel) + 
mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer { + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.storageTries = storageTries source.storageValues = storageElems - if !a { + if !noAccount { source.accountRequestHandler = nonResponsiveRequestAccountRangeFn } - if !b { + if !noStorage { source.storageRequestHandler = nonResponsiveStorageRequestHandler } - if !c { + if !noTrieNode { source.trieRequestHandler = nonResponsiveTrieRequestHandler } return source @@ -726,18 +797,21 @@ func TestMultiSyncManyUnresponsive(t *testing.T) { mkSource("noStorage", true, false, true), mkSource("noTrie", true, true, false), ) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } + close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } -func checkStall(t *testing.T, cancel chan struct{}) chan struct{} { +func checkStall(t *testing.T, term func()) chan struct{} { testDone := make(chan struct{}) go func() { select { case <-time.After(time.Minute): // TODO(karalabe): Make tests smaller, this is too much t.Log("Sync stalled") - close(cancel) + term() case <-testDone: return } @@ -750,11 +824,19 @@ func checkStall(t *testing.T, cancel chan struct{}) chan struct{} { func TestSyncBoundaryAccountTrie(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems := makeBoundaryAccountTrie(3000) mkSource := func(name string) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems return source @@ -763,11 +845,12 @@ func TestSyncBoundaryAccountTrie(t *testing.T) { mkSource("peer-a"), mkSource("peer-b"), ) - done := checkStall(t, cancel) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is @@ -775,12 +858,19 @@ func TestSyncBoundaryAccountTrie(t *testing.T) { func TestSyncNoStorageAndOneCappedPeer(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) mkSource := func(name string, slow bool) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems @@ -796,11 +886,12 @@ func TestSyncNoStorageAndOneCappedPeer(t *testing.T) { mkSource("nice-c", false), mkSource("capped", true), ) - done := checkStall(t, cancel) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver @@ -808,12 +899,19 @@ func TestSyncNoStorageAndOneCappedPeer(t *testing.T) { func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + 
close(cancel) + }) + } + ) sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) mkSource := func(name string, codeFn codeHandlerFunc) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.codeRequestHandler = codeFn @@ -827,22 +925,30 @@ func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) { mkSource("capped", cappedCodeRequestHandler), mkSource("corrupt", corruptCodeRequestHandler), ) - done := checkStall(t, cancel) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) mkSource := func(name string, accFn accountHandlerFunc) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.accountRequestHandler = accFn @@ -856,11 +962,12 @@ func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) { mkSource("capped", defaultAccountRequestHandler), mkSource("corrupt", corruptAccountRequestHandler), ) - done := checkStall(t, cancel) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers code hashes @@ -868,12 +975,19 @@ func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) { func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) mkSource := func(name string, codeFn codeHandlerFunc) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.codeRequestHandler = codeFn @@ -888,7 +1002,7 @@ func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) { return cappedCodeRequestHandler(t, id, hashes, max) }), ) - done := checkStall(t, cancel) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } @@ -901,6 +1015,7 @@ func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) { if threshold := 100; counter > threshold { t.Fatalf("Error, expected < %d invocations, got %d", threshold, counter) } + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the @@ -908,11 +1023,19 @@ func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) { func TestSyncBoundaryStorageTrie(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true) mkSource := func(name string) *testPeer { - source := newTestPeer(name, t, cancel) + source := 
newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.storageTries = storageTries @@ -923,11 +1046,12 @@ func TestSyncBoundaryStorageTrie(t *testing.T) { mkSource("peer-a"), mkSource("peer-b"), ) - done := checkStall(t, cancel) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is @@ -935,12 +1059,19 @@ func TestSyncBoundaryStorageTrie(t *testing.T) { func TestSyncWithStorageAndOneCappedPeer(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false) mkSource := func(name string, slow bool) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.storageTries = storageTries @@ -956,11 +1087,12 @@ func TestSyncWithStorageAndOneCappedPeer(t *testing.T) { mkSource("nice-a", false), mkSource("slow", true), ) - done := checkStall(t, cancel) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is @@ -968,12 +1100,19 @@ func TestSyncWithStorageAndOneCappedPeer(t *testing.T) { func TestSyncWithStorageAndCorruptPeer(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) mkSource := func(name string, handler storageHandlerFunc) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.storageTries = storageTries @@ -988,22 +1127,30 @@ func TestSyncWithStorageAndCorruptPeer(t *testing.T) { mkSource("nice-c", defaultStorageRequestHandler), mkSource("corrupt", corruptStorageRequestHandler), ) - done := checkStall(t, cancel) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } func TestSyncWithStorageAndNonProvingPeer(t *testing.T) { t.Parallel() - cancel := make(chan struct{}) - + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) mkSource := func(name string, handler storageHandlerFunc) *testPeer { - source := newTestPeer(name, t, cancel) + source := newTestPeer(name, t, term) source.accountTrie = sourceAccountTrie source.accountValues = elems source.storageTries = storageTries @@ -1011,23 +1158,22 @@ func TestSyncWithStorageAndNonProvingPeer(t *testing.T) { source.storageRequestHandler = handler return source } - syncer := setupSyncer( mkSource("nice-a", defaultStorageRequestHandler), 
mkSource("nice-b", defaultStorageRequestHandler), mkSource("nice-c", defaultStorageRequestHandler), mkSource("corrupt", noProofStorageRequestHandler), ) - done := checkStall(t, cancel) + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } close(done) + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } type kv struct { k, v []byte - t bool } // Some helpers for sorting @@ -1056,14 +1202,14 @@ var ( } ) -// getACodeHash returns a pseudo-random code hash -func getACodeHash(i uint64) []byte { +// getCodeHash returns a pseudo-random code hash +func getCodeHash(i uint64) []byte { h := codehashes[int(i)%len(codehashes)] return common.CopyBytes(h[:]) } -// convenience function to lookup the code from the code hash -func getCode(hash common.Hash) []byte { +// getCodeByHash convenience function to lookup the code from the code hash +func getCodeByHash(hash common.Hash) []byte { if hash == emptyCode { return nil } @@ -1085,10 +1231,10 @@ func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) { Nonce: i, Balance: big.NewInt(int64(i)), Root: emptyRoot, - CodeHash: getACodeHash(i), + CodeHash: getCodeHash(i), }) key := key32(i) - elem := &kv{key, value, false} + elem := &kv{key, value} accTrie.Update(elem.k, elem.v) entries = append(entries, elem) } @@ -1130,9 +1276,9 @@ func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) { Nonce: uint64(0), Balance: big.NewInt(int64(i)), Root: emptyRoot, - CodeHash: getACodeHash(uint64(i)), + CodeHash: getCodeHash(uint64(i)), }) - elem := &kv{boundaries[i].Bytes(), value, false} + elem := &kv{boundaries[i].Bytes(), value} trie.Update(elem.k, elem.v) entries = append(entries, elem) } @@ -1142,9 +1288,9 @@ func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) { Nonce: i, Balance: big.NewInt(int64(i)), Root: emptyRoot, - CodeHash: getACodeHash(i), + CodeHash: getCodeHash(i), }) - elem := &kv{key32(i), value, false} + elem := &kv{key32(i), value} trie.Update(elem.k, elem.v) entries = append(entries, elem) } @@ -1179,7 +1325,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code bool, boundary bool) ( key := key32(i) codehash := emptyCode[:] if code { - codehash = getACodeHash(i) + codehash = getCodeHash(i) } value, _ := rlp.EncodeToBytes(state.Account{ Nonce: i, @@ -1187,7 +1333,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code bool, boundary bool) ( Root: stRoot, CodeHash: codehash, }) - elem := &kv{key, value, false} + elem := &kv{key, value} accTrie.Update(elem.k, elem.v) entries = append(entries, elem) // we reuse the same one for all accounts @@ -1213,7 +1359,7 @@ func makeStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice) { slotKey := key32(i) key := crypto.Keccak256Hash(slotKey[:]) - elem := &kv{key[:], rlpSlotValue, false} + elem := &kv{key[:], rlpSlotValue} trie.Update(elem.k, elem.v) entries = append(entries, elem) } @@ -1252,7 +1398,7 @@ func makeBoundaryStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice) key := boundaries[i] val := []byte{0xde, 0xad, 0xbe, 0xef} - elem := &kv{key[:], val, false} + elem := &kv{key[:], val} trie.Update(elem.k, elem.v) entries = append(entries, elem) } @@ -1264,7 +1410,7 @@ func makeBoundaryStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice) slotValue := key32(i) rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:])) - elem := &kv{key[:], rlpSlotValue, false} + elem := &kv{key[:], rlpSlotValue} trie.Update(elem.k, elem.v) entries = append(entries, elem) 
} From aeb0f802d14ecb5876d3b501fa757c88106f06c0 Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Wed, 24 Mar 2021 11:25:04 +0100 Subject: [PATCH 06/10] core/state/snapshot: testcase for #22534 --- eth/protocols/snap/sync_test.go | 157 ++++++++++++++++++++++++++++++-- 1 file changed, 150 insertions(+), 7 deletions(-) diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go index 18ad2bd6aebc..da94589ebb01 100644 --- a/eth/protocols/snap/sync_test.go +++ b/eth/protocols/snap/sync_test.go @@ -344,6 +344,65 @@ func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []comm return hashes, slots, proofs } +// the createStorageRequestResponseAlwaysProve tests a cornercase, where it always +// supplies the proof for the last account, even if it is 'complete'.h +func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) { + var size uint64 + max = max * 3 / 4 + + var origin common.Hash + if len(bOrigin) > 0 { + origin = common.BytesToHash(bOrigin) + } + var exit bool + for i, account := range accounts { + var keys []common.Hash + var vals [][]byte + for _, entry := range t.storageValues[account] { + if bytes.Compare(entry.k, origin[:]) < 0 { + exit = true + } + keys = append(keys, common.BytesToHash(entry.k)) + vals = append(vals, entry.v) + size += uint64(32 + len(entry.v)) + if size > max { + exit = true + } + } + if i == len(accounts)-1 { + exit = true + } + hashes = append(hashes, keys) + slots = append(slots, vals) + + if exit { + // If we're aborting, we need to prove the first and last item + // This terminates the response (and thus the loop) + proof := light.NewNodeSet() + stTrie := t.storageTries[account] + + // Here's a potential gotcha: when constructing the proof, we cannot + // use the 'origin' slice directly, but must use the full 32-byte + // hash form. 
+ if err := stTrie.Prove(origin[:], 0, proof); err != nil { + t.logger.Error("Could not prove inexistence of origin", "origin", origin, + "error", err) + } + if len(keys) > 0 { + lastK := (keys[len(keys)-1])[:] + if err := stTrie.Prove(lastK, 0, proof); err != nil { + t.logger.Error("Could not prove last item", "error", err) + } + } + for _, blob := range proof.NodeList() { + proofs = append(proofs, blob) + } + break + } + } + return hashes, slots, proofs +} + // emptyRequestAccountRangeFn is a rejects AccountRangeRequests func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { t.remote.OnAccounts(t, requestId, nil, nil, nil) @@ -372,6 +431,15 @@ func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root comm return nil } +func proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error { + hashes, slots, proofs := createStorageRequestResponseAlwaysProve(t, root, accounts, origin, limit, max) + if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil { + t.test.Errorf("Remote side rejected our delivery: %v", err) + t.term() + } + return nil +} + //func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error { // var bytecodes [][]byte // t.remote.OnByteCodes(t, id, bytecodes) @@ -1172,6 +1240,39 @@ func TestSyncWithStorageAndNonProvingPeer(t *testing.T) { verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) } +// TestSyncWithStorage tests basic sync using accounts + storage + code, against +// a peer who insists on delivering full storage sets _and_ proofs. This triggered +// an error, where the recipient erroneously clipped the boundary nodes, but +// did not mark the account for healing. +func TestSyncWithStorageMisbehavingProve(t *testing.T) { + t.Parallel() + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false) + + mkSource := func(name string) *testPeer { + source := newTestPeer(name, t, term) + source.accountTrie = sourceAccountTrie + source.accountValues = elems + source.storageTries = storageTries + source.storageValues = storageElems + source.storageRequestHandler = proofHappyStorageRequestHandler + return source + } + syncer := setupSyncer(mkSource("sourceA")) + if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { + t.Fatalf("sync failed: %v", err) + } + verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) +} + type kv struct { k, v []byte } @@ -1299,8 +1400,48 @@ func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) { return trie, entries } +// makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each accounts +// has a unique storage set. 
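+// Each account's storage trie is seeded with the account index (see
+// makeStorageTrieWithSeed), so every account ends up with a distinct storage
+// root instead of the single shared trie used by makeAccountTrieWithStorage.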
+func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) { + var ( + db = trie.NewDatabase(rawdb.NewMemoryDatabase()) + accTrie, _ = trie.New(common.Hash{}, db) + entries entrySlice + storageTries = make(map[common.Hash]*trie.Trie) + storageEntries = make(map[common.Hash]entrySlice) + ) + // Create n accounts in the trie + for i := uint64(1); i <= uint64(accounts); i++ { + key := key32(i) + codehash := emptyCode[:] + if code { + codehash = getCodeHash(i) + } + // Create a storage trie + stTrie, stEntries := makeStorageTrieWithSeed(uint64(slots), i, db) + stRoot := stTrie.Hash() + stTrie.Commit(nil) + value, _ := rlp.EncodeToBytes(state.Account{ + Nonce: i, + Balance: big.NewInt(int64(i)), + Root: stRoot, + CodeHash: codehash, + }) + elem := &kv{key, value} + accTrie.Update(elem.k, elem.v) + entries = append(entries, elem) + + storageTries[common.BytesToHash(key)] = stTrie + storageEntries[common.BytesToHash(key)] = stEntries + } + sort.Sort(entries) + + accTrie.Commit(nil) + return accTrie, entries, storageTries, storageEntries +} + // makeAccountTrieWithStorage spits out a trie, along with the leafs -func makeAccountTrieWithStorage(accounts, slots int, code bool, boundary bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) { +func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) { var ( db = trie.NewDatabase(rawdb.NewMemoryDatabase()) accTrie, _ = trie.New(common.Hash{}, db) @@ -1316,7 +1457,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code bool, boundary bool) ( if boundary { stTrie, stEntries = makeBoundaryStorageTrie(slots, db) } else { - stTrie, stEntries = makeStorageTrie(slots, db) + stTrie, stEntries = makeStorageTrieWithSeed(uint64(slots), 0, db) } stRoot := stTrie.Hash() @@ -1346,14 +1487,15 @@ func makeAccountTrieWithStorage(accounts, slots int, code bool, boundary bool) ( return accTrie, entries, storageTries, storageEntries } -// makeStorageTrie fills a storage trie with n items, returning the -// not-yet-committed trie and the sorted entries -func makeStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice) { +// makeStorageTrieWithSeed fills a storage trie with n items, returning the +// not-yet-committed trie and the sorted entries. The seeds can be used to ensure +// that tries are unique. 
+func makeStorageTrieWithSeed(n, seed uint64, db *trie.Database) (*trie.Trie, entrySlice) { trie, _ := trie.New(common.Hash{}, db) var entries entrySlice for i := uint64(1); i <= uint64(n); i++ { - // store 'i' at slot 'i' - slotValue := key32(i) + // store 'i'+'seed' at slot 'i' + slotValue := key32(i + seed) rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:])) slotKey := key32(i) @@ -1420,6 +1562,7 @@ func makeBoundaryStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice) } func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) { + t.Helper() triedb := trie.NewDatabase(db) accTrie, err := trie.New(root, triedb) if err != nil { From 2f13d476fcb64b8bb2c5d0a7a95bd5c71d9c95ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 24 Mar 2021 12:37:53 +0200 Subject: [PATCH 07/10] eth/protocols/snap: fix boundary loss on full-but-proven range --- eth/protocols/snap/sync.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go index 122ef167750d..2efb4a932794 100644 --- a/eth/protocols/snap/sync.go +++ b/eth/protocols/snap/sync.go @@ -1800,7 +1800,7 @@ func (s *Syncer) processStorageResponse(res *storageResponse) { it := res.nodes[i].NewIterator(nil, nil) for it.Next() { // Boundary nodes are not written for the last result, since they are incomplete - if i == len(res.hashes)-1 { + if i == len(res.hashes)-1 && res.subTask != nil { if _, ok := res.bounds[common.BytesToHash(it.Key())]; ok { skipped++ continue From b1ed10e5af22cbd41925c379438e15e8de877bea Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Wed, 24 Mar 2021 11:56:18 +0100 Subject: [PATCH 08/10] core/state/snapshot: lintfix --- eth/protocols/snap/sync_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go index da94589ebb01..5d491d1f21c1 100644 --- a/eth/protocols/snap/sync_test.go +++ b/eth/protocols/snap/sync_test.go @@ -1493,7 +1493,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie func makeStorageTrieWithSeed(n, seed uint64, db *trie.Database) (*trie.Trie, entrySlice) { trie, _ := trie.New(common.Hash{}, db) var entries entrySlice - for i := uint64(1); i <= uint64(n); i++ { + for i := uint64(1); i <= n; i++ { // store 'i'+'seed' at slot 'i' slotValue := key32(i + seed) rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:])) From 41795c07e1a51fa7fb17bc3ec78a75660583c0db Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Wed, 24 Mar 2021 20:52:55 +0800 Subject: [PATCH 09/10] eth: address comment --- eth/protocols/snap/sync.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go index 2efb4a932794..57619ed1e387 100644 --- a/eth/protocols/snap/sync.go +++ b/eth/protocols/snap/sync.go @@ -1555,11 +1555,12 @@ func (s *Syncer) processAccountResponse(res *accountResponse) { for i, hash := range res.hashes { // Mark the range complete if the last is already included. // Keep iteration to delete the extra states if exists. 
- if hash.Big().Cmp(last) == 0 { + cmp := hash.Big().Cmp(last) + if cmp == 0 { res.cont = false continue } - if hash.Big().Cmp(last) > 0 { + if cmp > 0 { // Chunk overflown, cut off excess, but also update the boundary nodes for j := i; j < len(res.hashes); j++ { if err := res.trie.Prove(res.hashes[j][:], 0, res.overflow); err != nil { @@ -1769,11 +1770,12 @@ func (s *Syncer) processStorageResponse(res *storageResponse) { for k, hash := range res.hashes[i] { // Mark the range complete if the last is already included. // Keep iteration to delete the extra states if exists. - if hash.Big().Cmp(last) == 0 { + cmp := hash.Big().Cmp(last) + if cmp == 0 { res.cont = false continue } - if hash.Big().Cmp(last) > 0 { + if cmp > 0 { // Chunk overflown, cut off excess, but also update the boundary for l := k; l < len(res.hashes[i]); l++ { if err := res.tries[i].Prove(res.hashes[i][l][:], 0, res.overflow); err != nil { From 2511b25c53c95e50392531d651ec414b1414924d Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Wed, 24 Mar 2021 21:02:23 +0800 Subject: [PATCH 10/10] eth: fix handler --- eth/protocols/snap/handler.go | 9 +++++++-- eth/protocols/snap/sync_test.go | 13 +++++++++---- 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/eth/protocols/snap/handler.go b/eth/protocols/snap/handler.go index b9515b8a398a..37e84839aba6 100644 --- a/eth/protocols/snap/handler.go +++ b/eth/protocols/snap/handler.go @@ -256,8 +256,13 @@ func handleMessage(backend Backend, peer *Peer) error { var ( storage []*StorageData last common.Hash + abort bool ) - for it.Next() && size < hardLimit { + for it.Next() { + if size >= hardLimit { + abort = true + break + } hash, slot := it.Hash(), common.CopyBytes(it.Slot()) // Track the returned interval for the Merkle proofs @@ -280,7 +285,7 @@ func handleMessage(backend Backend, peer *Peer) error { // Generate the Merkle proofs for the first and last storage slot, but // only if the response was capped. If the entire storage trie included // in the response, no need for any proofs. - if origin != (common.Hash{}) || size >= hardLimit { + if origin != (common.Hash{}) || abort { // Request started at a non-zero hash or was capped prematurely, add // the endpoint Merkle proofs accTrie, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB()) diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go index 5d491d1f21c1..c54d36c40282 100644 --- a/eth/protocols/snap/sync_test.go +++ b/eth/protocols/snap/sync_test.go @@ -297,17 +297,22 @@ func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []comm limitHash = common.BytesToHash(limit) } var ( - keys []common.Hash - vals [][]byte + keys []common.Hash + vals [][]byte + abort bool ) for _, entry := range t.storageValues[account] { + if size >= max { + abort = true + break + } if bytes.Compare(entry.k, originHash[:]) < 0 { continue } keys = append(keys, common.BytesToHash(entry.k)) vals = append(vals, entry.v) size += uint64(32 + len(entry.v)) - if bytes.Compare(entry.k, limitHash[:]) >= 0 || size >= max { + if bytes.Compare(entry.k, limitHash[:]) >= 0 { break } } @@ -317,7 +322,7 @@ func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []comm // Generate the Merkle proofs for the first and last storage slot, but // only if the response was capped. If the entire storage trie included // in the response, no need for any proofs. 
- if originHash != (common.Hash{}) || size >= max { + if originHash != (common.Hash{}) || abort { // If we're aborting, we need to prove the first and last item // This terminates the response (and thus the loop) proof := light.NewNodeSet()
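To make the invariant behind these patches concrete, here is a minimal, self-contained sketch of the two halves of the fix in plain Go. It is illustrative only: the types and function names (entry, serveRange, processRange) do not exist in eth/protocols/snap, and the real code operates on tries, iterators and Merkle proofs rather than plain slices. The serving side records *why* slot iteration stopped (an explicit abort flag, as in PATCH 10) instead of re-testing the size counter afterwards, and the processing side treats an element that lands exactly on the task boundary as completing the range while still trimming anything beyond it (PATCH 01/09).

package main

import (
	"fmt"
	"math/big"
)

// entry models one (hash, value) pair served out of a flat snapshot range.
type entry struct {
	key *big.Int
	val string
}

// serveRange mirrors the handler-side shape of PATCH 10: record *why*
// iteration stopped. A reply gets proofs only if it started at a non-zero
// origin or was genuinely cut off by the size cap, never merely because
// the size counter crossed the cap on the final element of a complete range.
func serveRange(entries []entry, origin *big.Int, hardLimit int) (out []entry, proved bool) {
	var (
		size  int
		abort bool
	)
	for _, e := range entries {
		if size >= hardLimit {
			abort = true // stopped by the cap, not by exhausting the range
			break
		}
		if e.key.Cmp(origin) < 0 {
			continue // before the requested origin
		}
		out = append(out, e)
		size += 32 + len(e.val)
	}
	proved = origin.Sign() != 0 || abort
	return out, proved
}

// processRange mirrors the syncer-side shape of PATCH 01/09: one comparison
// decides both questions. An element exactly on the task boundary marks the
// range complete (cont == false) but is kept; anything beyond the boundary
// is overflow belonging to the next task and is dropped.
func processRange(resp []entry, last *big.Int) (kept []entry, cont bool) {
	cont = true
	for _, e := range resp {
		cmp := e.key.Cmp(last)
		if cmp == 0 {
			cont = false
		}
		if cmp > 0 {
			continue
		}
		kept = append(kept, e)
	}
	return kept, cont
}

func main() {
	all := []entry{{big.NewInt(1), "a"}, {big.NewInt(2), "b"}, {big.NewInt(3), "c"}}

	// The entire range fits: no proofs should be attached, even though a
	// misbehaving peer may add them anyway, which is the case that
	// TestSyncWithStorageMisbehavingProve exercises.
	out, proved := serveRange(all, big.NewInt(0), 1<<20)
	fmt.Println("served", len(out), "proved:", proved) // served 3 proved: false

	// The last element sits exactly on the boundary: the range is complete
	// and nothing is trimmed.
	kept, cont := processRange(out, big.NewInt(3))
	fmt.Println("kept", len(kept), "cont:", cont) // kept 3 cont: false
}

The design point is that a post-loop size check cannot distinguish "iterator exhausted exactly at the cap" from "iterator cut off by the cap"; only the latter warrants proofs, and attaching proofs to an already-complete range is precisely what made the recipient clip boundary nodes without marking the account for healing.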