use maxSize to control the collected leaves total size
BeniaminDrasovean committed Jan 15, 2025
1 parent 65d9e3d commit 65d088e
Showing 5 changed files with 30 additions and 99 deletions.
testscommon/state/testTrie.go (16 changes: 0 additions & 16 deletions)
@@ -53,19 +53,3 @@ func AddDataToTrie(tr common.Trie, numLeaves int) {
}
_ = tr.Commit()
}

// GetTrieWithData returns a trie with some data.
// The added data builds a rootNode that is a branch with 2 leaves and 1 extension node which will have 4 leaves when traversed;
// this way the size of the iterator will be highest when the extension node is reached but 2 leaves will
// have already been retrieved
func GetTrieWithData() common.Trie {
tr := GetNewTrie()
_ = tr.Update([]byte("key1"), []byte("value1"))
_ = tr.Update([]byte("key2"), []byte("value2"))
_ = tr.Update([]byte("key13"), []byte("value3"))
_ = tr.Update([]byte("key23"), []byte("value4"))
_ = tr.Update([]byte("key33"), []byte("value4"))
_ = tr.Update([]byte("key43"), []byte("value4"))
_ = tr.Commit()
return tr
}
trie/leavesRetriever/dfsTrieIterator/dfsTrieIterator.go (26 changes: 7 additions & 19 deletions)
@@ -18,7 +18,6 @@ type dfsIterator struct {
db common.TrieStorageInteractor
marshaller marshal.Marshalizer
hasher hashing.Hasher
size uint64
}

// NewIterator creates a new DFS iterator for the trie.
@@ -41,17 +40,11 @@ func NewIterator(initialState [][]byte, db common.TrieStorageInteractor, marshal
return nil, err
}

size := uint64(0)
for _, node := range nextNodes {
size += node.Size()
}

return &dfsIterator{
nextNodes: nextNodes,
db: db,
marshaller: marshaller,
hasher: hasher,
size: size,
}, nil
}

@@ -92,13 +85,14 @@ func getIteratorStateFromNextNodes(nextNodes []common.TrieNodeData) [][]byte {
// GetLeaves retrieves leaves from the trie. It stops either when the number of leaves is reached or the context is done.
func (it *dfsIterator) GetLeaves(numLeaves int, maxSize uint64, ctx context.Context) (map[string]string, error) {
retrievedLeaves := make(map[string]string)
leavesSize := uint64(0)
for {
nextNodes := make([]common.TrieNodeData, 0)
if it.size >= maxSize {
if leavesSize >= maxSize {
return retrievedLeaves, nil
}

if len(retrievedLeaves) >= numLeaves {
if len(retrievedLeaves) >= numLeaves && numLeaves != 0 {
return retrievedLeaves, nil
}

@@ -117,24 +111,23 @@ func (it *dfsIterator) GetLeaves(numLeaves int, maxSize uint64, ctx context.Cont
return nil, err
}

childrenSize := uint64(0)
for _, childNode := range childrenNodes {
if childNode.IsLeaf() {
key, err := childNode.GetKeyBuilder().GetKey()
if err != nil {
return nil, err
}

retrievedLeaves[hex.EncodeToString(key)] = hex.EncodeToString(childNode.GetData())
hexKey := hex.EncodeToString(key)
hexData := hex.EncodeToString(childNode.GetData())
retrievedLeaves[hexKey] = hexData
leavesSize += uint64(len(hexKey) + len(hexData))
continue
}

nextNodes = append(nextNodes, childNode)
childrenSize += childNode.Size()
}

it.size += childrenSize
it.size -= it.nextNodes[0].Size()
it.nextNodes = append(nextNodes, it.nextNodes[1:]...)
}
}
@@ -153,11 +146,6 @@ func (it *dfsIterator) FinishedIteration() bool {
return len(it.nextNodes) == 0
}

// Size returns the size of the iterator.
func (it *dfsIterator) Size() uint64 {
return it.size
}

// IsInterfaceNil returns true if there is no value under the interface
func (it *dfsIterator) IsInterfaceNil() bool {
return it == nil
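The core of this commit is the GetLeaves change above: the iterator no longer carries a size field for the queued nodes; instead, each call accumulates the hex-encoded size of the leaves it has already collected and stops once that total reaches maxSize, with numLeaves == 0 now meaning no limit on the leaf count. A minimal, self-contained sketch of that stopping logic is shown below; the leaf type and collectLeaves helper are simplified stand-ins for illustration, not the actual common.TrieNodeData interface or package code.

```go
package main

import (
	"context"
	"encoding/hex"
	"fmt"
)

// leaf is a simplified stand-in for a trie leaf node; the real iterator walks
// common.TrieNodeData nodes loaded from the trie storage.
type leaf struct {
	key   []byte
	value []byte
}

// collectLeaves mirrors the new stopping rules of GetLeaves: stop once the
// hex-encoded size of the collected leaves reaches maxSize, once numLeaves
// leaves were collected (numLeaves == 0 means no count limit), or once the
// context is done.
func collectLeaves(ctx context.Context, leaves []leaf, numLeaves int, maxSize uint64) map[string]string {
	retrieved := make(map[string]string)
	leavesSize := uint64(0)

	for _, l := range leaves {
		if leavesSize >= maxSize {
			return retrieved
		}
		if numLeaves != 0 && len(retrieved) >= numLeaves {
			return retrieved
		}
		select {
		case <-ctx.Done():
			return retrieved
		default:
		}

		hexKey := hex.EncodeToString(l.key)
		hexData := hex.EncodeToString(l.value)
		retrieved[hexKey] = hexData
		leavesSize += uint64(len(hexKey) + len(hexData))
	}

	return retrieved
}

func main() {
	leaves := []leaf{
		{key: []byte("key1"), value: []byte("value1")},
		{key: []byte("key2"), value: []byte("value2")},
		{key: []byte("key3"), value: []byte("value3")},
	}

	// numLeaves == 0: collect until the hex-encoded size reaches maxSize.
	collected := collectLeaves(context.Background(), leaves, 0, 40)
	fmt.Printf("collected %d leaves\n", len(collected))
}
```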
trie/leavesRetriever/dfsTrieIterator/dfsTrieIterator_test.go (68 changes: 23 additions & 45 deletions)
@@ -68,7 +68,6 @@ func TestNewIterator(t *testing.T) {
iterator, err := NewIterator(initialState, db, marshaller, hasher)
assert.Nil(t, err)

assert.Equal(t, uint64(80), iterator.size)
assert.Equal(t, 2, len(iterator.nextNodes))
})
}
@@ -139,20 +138,38 @@ func TestDfsIterator_GetLeaves(t *testing.T) {
assert.Nil(t, err)
assert.Equal(t, expectedNumRetrievedLeaves, len(trieData))
})
t.Run("num leaves 0 iterates until maxSize reached", func(t *testing.T) {
t.Parallel()

tr := trieTest.GetNewTrie()
numLeaves := 25
trieTest.AddDataToTrie(tr, numLeaves)
rootHash, _ := tr.RootHash()

_, marshaller, hasher := trieTest.GetDefaultTrieParameters()
iterator, _ := NewIterator([][]byte{rootHash}, tr.GetStorageManager(), marshaller, hasher)

trieData, err := iterator.GetLeaves(0, 200, context.Background())
assert.Nil(t, err)
assert.Equal(t, 8, len(trieData))
assert.Equal(t, 8, len(iterator.nextNodes))
})
t.Run("max size reached returns retrieved leaves and saves iterator context", func(t *testing.T) {
t.Parallel()

tr := trieTest.GetTrieWithData()
expectedNumRetrievedLeaves := 2
tr := trieTest.GetNewTrie()
numLeaves := 25
trieTest.AddDataToTrie(tr, numLeaves)
rootHash, _ := tr.RootHash()

_, marshaller, hasher := trieTest.GetDefaultTrieParameters()
iterator, _ := NewIterator([][]byte{rootHash}, tr.GetStorageManager(), marshaller, hasher)

iteratorMaxSize := uint64(100)
trieData, err := iterator.GetLeaves(5, iteratorMaxSize, context.Background())
iteratorMaxSize := uint64(200)
trieData, err := iterator.GetLeaves(numLeaves, iteratorMaxSize, context.Background())
assert.Nil(t, err)
assert.Equal(t, expectedNumRetrievedLeaves, len(trieData))
assert.Equal(t, 8, len(trieData))
assert.Equal(t, 8, len(iterator.nextNodes))
})
t.Run("retrieve all leaves in multiple calls", func(t *testing.T) {
t.Parallel()
@@ -249,42 +266,3 @@ func TestDfsIterator_FinishedIteration(t *testing.T) {
assert.Equal(t, numLeaves, numRetrievedLeaves)
assert.True(t, iterator.FinishedIteration())
}

func TestDfsIterator_Size(t *testing.T) {
t.Parallel()

tr := trieTest.GetNewTrie()
numLeaves := 25
trieTest.AddDataToTrie(tr, numLeaves)
rootHash, _ := tr.RootHash()
_, marshaller, hasher := trieTest.GetDefaultTrieParameters()

iterator, _ := NewIterator([][]byte{rootHash}, tr.GetStorageManager(), marshaller, hasher)
assert.Equal(t, uint64(32), iterator.Size()) // root hash
assert.False(t, iterator.FinishedIteration())

_, err := iterator.GetLeaves(5, maxSize, context.Background())
assert.Nil(t, err)
assert.Equal(t, uint64(299), iterator.Size()) // 9 hashes + leaf key(3) + 8 x intermediary nodes key(8 * 1)
assert.False(t, iterator.FinishedIteration())

_, err = iterator.GetLeaves(5, maxSize, context.Background())
assert.Nil(t, err)
assert.Equal(t, uint64(268), iterator.Size()) // 8 hashes + 2 x leaf keys(2 * 3) + 6 x intermediary nodes key(6*1)
assert.False(t, iterator.FinishedIteration())

_, err = iterator.GetLeaves(5, maxSize, context.Background())
assert.Nil(t, err)
assert.Equal(t, uint64(165), iterator.Size()) // 5 hashes + 5 x intermediary nodes key(5*1)
assert.False(t, iterator.FinishedIteration())

_, err = iterator.GetLeaves(5, maxSize, context.Background())
assert.Nil(t, err)
assert.Equal(t, uint64(101), iterator.Size()) // 3 hashes + leaf key(3) + 2 x intermediary nodes key(2*1)
assert.False(t, iterator.FinishedIteration())

_, err = iterator.GetLeaves(5, maxSize, context.Background())
assert.Nil(t, err)
assert.Equal(t, uint64(0), iterator.Size())
assert.True(t, iterator.FinishedIteration())
}
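A detail worth keeping in mind when reading the updated tests: leavesSize is accumulated over the hex-encoded key and value strings, so each collected leaf counts as twice its raw key-plus-value length towards the maxSize budget. A standalone illustration of that accounting (not part of the package) is sketched below.

```go
package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	key := []byte("key1")     // 4 raw bytes
	value := []byte("value1") // 6 raw bytes

	hexKey := hex.EncodeToString(key)
	hexData := hex.EncodeToString(value)

	// Hex encoding doubles the length, so this 10-byte leaf counts as 20
	// towards the maxSize budget used by GetLeaves.
	fmt.Println(len(hexKey) + len(hexData)) // prints 20
}
```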
trie/leavesRetriever/export_test.go (16 changes: 0 additions & 16 deletions)

This file was deleted.

trie/leavesRetriever/leavesRetriever.go (3 changes: 0 additions & 3 deletions)
@@ -8,11 +8,8 @@ import (
"github.com/multiversx/mx-chain-core-go/marshal"
"github.com/multiversx/mx-chain-go/common"
"github.com/multiversx/mx-chain-go/trie/leavesRetriever/dfsTrieIterator"
logger "github.com/multiversx/mx-chain-logger-go"
)

var log = logger.GetOrCreate("trie/leavesRetriever")

type leavesRetriever struct {
db common.TrieStorageInteractor
marshaller marshal.Marshalizer
