Skip to content

Commit

Permalink
wip
Browse files Browse the repository at this point in the history
  • Loading branch information
qdm12 committed Jan 5, 2023
1 parent 5e161fa commit 1611647
Show file tree
Hide file tree
Showing 5 changed files with 297 additions and 176 deletions.
21 changes: 12 additions & 9 deletions internal/pruner/full.go
Original file line number Diff line number Diff line change
Expand Up @@ -65,17 +65,23 @@ type journalRecord struct {
func NewFullNode(journalDB JournalDatabase, storageDB ChainDBNewBatcher, retainBlocks uint32,
blockState BlockState, logger Logger) (pruner *FullNode, err error) {
highestBlockNumber, err := getBlockNumberFromKey(journalDB, []byte(highestBlockNumberKey))
if err != nil {
if err != nil && !errors.Is(err, chaindb.ErrKeyNotFound) {
return nil, fmt.Errorf("getting highest block number: %w", err)
}
logger.Debugf("highest block number stored in journal: %d", highestBlockNumber)

var nextBlockNumberToPrune uint32
lastPrunedBlockNumber, err := getBlockNumberFromKey(journalDB, []byte(lastPrunedKey))
if err != nil {
if errors.Is(err, chaindb.ErrKeyNotFound) {
nextBlockNumberToPrune = 0
} else if err != nil {
return nil, fmt.Errorf("getting last pruned block number: %w", err)
} else {
// if the error is database.ErrKeyNotFound it means we have not pruned
// any block number yet, so leave the next block number to prune as 0.
nextBlockNumberToPrune = lastPrunedBlockNumber + 1
}
logger.Debugf("last pruned block number: %d", lastPrunedBlockNumber)
nextBlockNumberToPrune := lastPrunedBlockNumber + 1
logger.Debugf("next block number to prune: %d", nextBlockNumberToPrune)

pruner = &FullNode{
storageDatabase: storageDB,
Expand Down Expand Up @@ -181,7 +187,7 @@ func (p *FullNode) StoreJournalRecord(deletedNodeHashes, insertedNodeHashes map[
return fmt.Errorf("flushing journal database batch: %w", err)
}

p.logger.Debugf("journal record stored for block number %d", blockNumber)
p.logger.Debugf("journal record stored for block number %d and block hash %s", blockNumber, blockHash.Short())
return nil
}

Expand All @@ -204,7 +210,7 @@ func (p *FullNode) handleInsertedKey(insertedNodeHash common.Hash, blockNumber u
// since we no longer want to prune it, as it was re-inserted.
deletedNodeHashKey := makeDeletedKey(insertedNodeHash)
journalKeyDeletedAt, err := p.journalDatabase.Get(deletedNodeHashKey)
nodeHashDeletedInAnotherBlock := errors.Is(err, chaindb.ErrKeyNotFound)
nodeHashDeletedInAnotherBlock := !errors.Is(err, chaindb.ErrKeyNotFound)
if !nodeHashDeletedInAnotherBlock {
return nil
} else if err != nil {
Expand Down Expand Up @@ -438,9 +444,6 @@ func storeBlockNumberAtKey(batch Putter, key []byte, blockNumber uint32) error {
func getBlockNumberFromKey(database Getter, key []byte) (blockNumber uint32, err error) {
encodedBlockNumber, err := database.Get(key)
if err != nil {
if errors.Is(err, chaindb.ErrKeyNotFound) {
return 0, nil
}
return 0, fmt.Errorf("getting block number from database: %w", err)
}

Expand Down
137 changes: 117 additions & 20 deletions internal/pruner/full_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,101 @@ func scaleMarshal(t *testing.T, x any) (b []byte) {
return b
}

// scaleEncodeJournalKey returns the SCALE encoding of the journal key
// built from the given block number and block hash.
func scaleEncodeJournalKey(blockNumber uint32, blockHash common.Hash) (b []byte) {
	return scale.MustMarshal(journalKey{
		BlockNumber: blockNumber,
		BlockHash:   blockHash,
	})
}

// Test_FullNode exercises the full node pruner against an in-memory
// Badger database: a journal record is stored for each block of a small
// chain, including two competing blocks at block number 1, and the
// expected mock logger calls are asserted for every stored record.
func Test_FullNode(t *testing.T) {
	t.Parallel()
	ctrl := gomock.NewController(t)

	database, err := chaindb.NewBadgerDB(&chaindb.Config{InMemory: true})
	require.NoError(t, err)
	journalDB := chaindb.NewTable(database, "journal")
	storageDB := chaindb.NewTable(database, "storage")
	retainBlocks := uint32(2)
	blockState := NewMockBlockState(ctrl)
	logger := NewMockLogger(ctrl)
	// expectedPairs tracks the key value pairs expected in the database;
	// renamed so the local no longer shadows the keyValuePairs type.
	var expectedPairs keyValuePairs

	logger.EXPECT().Debugf("highest block number stored in journal: %d", uint32(0))
	logger.EXPECT().Debugf("next block number to prune: %d", uint32(0))
	pruner, err := NewFullNode(journalDB, storageDB, retainBlocks,
		blockState, logger)
	require.NoError(t, err)

	assertMemoryDatabase(t, database, expectedPairs)

	// Block 0 hash 100
	setNodeHashInStorageDB(t, storageDB, []common.Hash{{1}, {2}})
	logger.EXPECT().Debugf("journal record stored for block number %d and block hash %s",
		uint32(0), "0x64000000...00000000")
	err = pruner.StoreJournalRecord(
		map[common.Hash]struct{}{}, // first block has no deleted node hashes
		map[common.Hash]struct{}{{1}: {}, {2}: {}}, // inserted node hashes
		common.Hash{100}, // block hash
		0,                // block number
	)
	require.NoError(t, err)

	// Block 1 hash 101
	setNodeHashInStorageDB(t, storageDB, []common.Hash{{3}, {4}})
	logger.EXPECT().Debugf("journal record stored for block number %d and block hash %s",
		uint32(1), "0x65000000...00000000")
	err = pruner.StoreJournalRecord(
		map[common.Hash]struct{}{{1}: {}}, // deleted node hashes
		map[common.Hash]struct{}{{3}: {}, {4}: {}}, // inserted node hashes
		common.Hash{101}, // block hash
		1,                // block number
	)
	require.NoError(t, err)

	// Block 1 hash 102 (competing block at the same block number)
	setNodeHashInStorageDB(t, storageDB, []common.Hash{{5}, {6}})
	logger.EXPECT().Debugf("journal record stored for block number %d and block hash %s",
		uint32(1), "0x66000000...00000000")
	err = pruner.StoreJournalRecord(
		map[common.Hash]struct{}{{3}: {}}, // deleted node hashes
		map[common.Hash]struct{}{{5}: {}, {6}: {}}, // inserted node hashes
		common.Hash{102}, // block hash
		1,                // block number
	)
	require.NoError(t, err)

	// Block 2 hash 103
	setNodeHashInStorageDB(t, storageDB, []common.Hash{{7}, {8}})
	logger.EXPECT().Debugf("journal record stored for block number %d and block hash %s",
		uint32(2), "0x67000000...00000000")
	err = pruner.StoreJournalRecord(
		map[common.Hash]struct{}{{5}: {}}, // deleted node hashes
		map[common.Hash]struct{}{{7}: {}, {8}: {}}, // inserted node hashes
		common.Hash{103}, // block hash
		2,                // block number
	)
	require.NoError(t, err)

	// Note: the stray `logger.EXPECT().Debugf("")` that used to sit here
	// was removed; it had no matching Debugf("") call, so the gomock
	// controller would report an unmet expectation at test cleanup.

	// Block 3 hash 104
	setNodeHashInStorageDB(t, storageDB, []common.Hash{{9}, {10}})
	logger.EXPECT().Debugf("journal record stored for block number %d and block hash %s",
		uint32(3), "0x68000000...00000000")
	err = pruner.StoreJournalRecord(
		map[common.Hash]struct{}{{7}: {}}, // deleted node hashes
		map[common.Hash]struct{}{{9}: {}, {10}: {}}, // inserted node hashes
		common.Hash{104}, // block hash
		3,                // block number
	)
	require.NoError(t, err)

	// TODO(review): database content assertions are still to be written
	// (WIP commit). Enable once the expected pairs are worked out:
	// expectedPairs.add(scaleEncodeJournalKey(0, common.Hash{100}), []byte{1, 2})
	// assertMemoryDatabase(t, database, expectedPairs)
}

func Test_FullNode_pruneAll(t *testing.T) {
t.Parallel()

Expand Down Expand Up @@ -336,12 +431,12 @@ func Test_prune(t *testing.T) {
return database
},
journalBatchBuilder: func(ctrl *gomock.Controller) Deleter {
batch := NewMockDeleter(ctrl)
batch := NewMockPutDeleter(ctrl)
batch.EXPECT().Del([]byte("block_number_to_hash_1")).Return(errTest)
return batch
},
storageBatchBuilder: func(ctrl *gomock.Controller) Deleter {
batch := NewMockDeleter(ctrl)
batch := NewMockPutDeleter(ctrl)
batch.EXPECT().Del(common.Hash{3}.ToBytes()).Return(nil)
return batch
},
Expand All @@ -366,15 +461,15 @@ func Test_prune(t *testing.T) {
return database
},
journalBatchBuilder: func(ctrl *gomock.Controller) Deleter {
batch := NewMockDeleter(ctrl)
batch := NewMockPutDeleter(ctrl)
batch.EXPECT().Del([]byte("block_number_to_hash_1")).Return(nil)
key := journalKey{BlockNumber: 1, BlockHash: common.Hash{2}}
encodedKey := scaleMarshal(t, key)
batch.EXPECT().Del(encodedKey).Return(nil)
return batch
},
storageBatchBuilder: func(ctrl *gomock.Controller) Deleter {
batch := NewMockDeleter(ctrl)
batch := NewMockPutDeleter(ctrl)
batch.EXPECT().Del(common.Hash{3}.ToBytes()).Return(nil)
return batch
},
Expand Down Expand Up @@ -442,7 +537,7 @@ func Test_pruneStorage(t *testing.T) {
return database
},
batchBuilder: func(ctrl *gomock.Controller) Deleter {
batch := NewMockDeleter(ctrl)
batch := NewMockPutDeleter(ctrl)
batch.EXPECT().Del(common.Hash{3}.ToBytes()).Return(errTest)
return batch
},
Expand Down Expand Up @@ -470,7 +565,7 @@ func Test_pruneStorage(t *testing.T) {
return database
},
batchBuilder: func(ctrl *gomock.Controller) Deleter {
batch := NewMockDeleter(ctrl)
batch := NewMockPutDeleter(ctrl)
batch.EXPECT().Del(common.Hash{11}.ToBytes()).Return(nil)
batch.EXPECT().Del(common.Hash{12}.ToBytes()).Return(nil)
batch.EXPECT().Del(common.Hash{13}.ToBytes()).Return(nil)
Expand Down Expand Up @@ -512,7 +607,7 @@ func Test_pruneJournal(t *testing.T) {
}{
"prune block hashes error": {
batchBuilder: func(ctrl *gomock.Controller) Deleter {
batch := NewMockDeleter(ctrl)
batch := NewMockPutDeleter(ctrl)
batch.EXPECT().Del([]byte("block_number_to_hash_10")).Return(errTest)
return batch
},
Expand All @@ -524,7 +619,7 @@ func Test_pruneJournal(t *testing.T) {
},
"delete journal key error": {
batchBuilder: func(ctrl *gomock.Controller) Deleter {
batch := NewMockDeleter(ctrl)
batch := NewMockPutDeleter(ctrl)
batch.EXPECT().Del([]byte("block_number_to_hash_10")).Return(nil)
encodedKey := scaleMarshal(t, journalKey{BlockNumber: 10, BlockHash: common.Hash{1}})
batch.EXPECT().Del(encodedKey).Return(errTest)
Expand All @@ -538,7 +633,7 @@ func Test_pruneJournal(t *testing.T) {
},
"success": {
batchBuilder: func(ctrl *gomock.Controller) Deleter {
batch := NewMockDeleter(ctrl)
batch := NewMockPutDeleter(ctrl)
batch.EXPECT().Del([]byte("block_number_to_hash_10")).Return(nil)
encodedKeyA := scaleMarshal(t, journalKey{BlockNumber: 10, BlockHash: common.Hash{1}})
batch.EXPECT().Del(encodedKeyA).Return(nil)
Expand Down Expand Up @@ -583,7 +678,7 @@ func Test_storeJournalRecord(t *testing.T) {
}{
"deleted node hash put error": {
batchBuilder: func(ctrl *gomock.Controller) Putter {
database := NewMockPutter(ctrl)
database := NewMockPutDeleter(ctrl)
databaseKey := makeDeletedKey(common.Hash{3})
encodedKey := scaleMarshal(t, journalKey{BlockNumber: 1, BlockHash: common.Hash{2}})
database.EXPECT().Put(databaseKey, encodedKey).Return(errTest)
Expand All @@ -597,7 +692,7 @@ func Test_storeJournalRecord(t *testing.T) {
},
"encoded record put error": {
batchBuilder: func(ctrl *gomock.Controller) Putter {
database := NewMockPutter(ctrl)
database := NewMockPutDeleter(ctrl)
databaseKey := makeDeletedKey(common.Hash{3})
encodedKey := scaleMarshal(t, journalKey{BlockNumber: 1, BlockHash: common.Hash{2}})
database.EXPECT().Put(databaseKey, encodedKey).Return(nil)
Expand All @@ -619,7 +714,7 @@ func Test_storeJournalRecord(t *testing.T) {
},
"success": {
batchBuilder: func(ctrl *gomock.Controller) Putter {
database := NewMockPutter(ctrl)
database := NewMockPutDeleter(ctrl)
databaseKey := makeDeletedKey(common.Hash{3})
encodedKey := scaleMarshal(t, journalKey{BlockNumber: 1, BlockHash: common.Hash{2}})
database.EXPECT().Put(databaseKey, encodedKey).Return(nil)
Expand Down Expand Up @@ -747,7 +842,7 @@ func Test_storeBlockNumberAtKey(t *testing.T) {
}{
"put error": {
batchBuilder: func(ctrl *gomock.Controller) Putter {
database := NewMockPutter(ctrl)
database := NewMockPutDeleter(ctrl)
expectedKey := []byte("key")
expectedValue := scaleMarshal(t, uint32(1))
database.EXPECT().Put(expectedKey, expectedValue).Return(errTest)
Expand All @@ -760,7 +855,7 @@ func Test_storeBlockNumberAtKey(t *testing.T) {
},
"success": {
batchBuilder: func(ctrl *gomock.Controller) Putter {
database := NewMockPutter(ctrl)
database := NewMockPutDeleter(ctrl)
expectedKey := []byte("key")
expectedValue := scaleMarshal(t, uint32(1))
database.EXPECT().Put(expectedKey, expectedValue).Return(nil)
Expand Down Expand Up @@ -818,7 +913,9 @@ func Test_getBlockNumberFromKey(t *testing.T) {
database.EXPECT().Get(expectedKey).Return(nil, chaindb.ErrKeyNotFound)
return database
},
key: []byte("key"),
key: []byte("key"),
errWrapped: chaindb.ErrKeyNotFound,
errMessage: "getting block number from database: Key not found",
},
"decoding error": {
databaseBuilder: func(ctrl *gomock.Controller) Getter {
Expand Down Expand Up @@ -966,7 +1063,7 @@ func Test_appendBlockHashes(t *testing.T) {
return database
},
batchBuilder: func(ctrl *gomock.Controller) Putter {
batch := NewMockPutter(ctrl)
batch := NewMockPutDeleter(ctrl)
databaseKey := []byte("block_number_to_hash_10")
databaseValue := common.Hash{2}.ToBytes()
batch.EXPECT().Put(databaseKey, databaseValue).Return(nil)
Expand All @@ -983,7 +1080,7 @@ func Test_appendBlockHashes(t *testing.T) {
return database
},
batchBuilder: func(ctrl *gomock.Controller) Putter {
batch := NewMockPutter(ctrl)
batch := NewMockPutDeleter(ctrl)
databaseKey := []byte("block_number_to_hash_10")
databaseValue := common.Hash{2}.ToBytes()
batch.EXPECT().Put(databaseKey, databaseValue).Return(errTest)
Expand All @@ -1005,7 +1102,7 @@ func Test_appendBlockHashes(t *testing.T) {
return database
},
batchBuilder: func(ctrl *gomock.Controller) Putter {
batch := NewMockPutter(ctrl)
batch := NewMockPutDeleter(ctrl)
databaseKey := []byte("block_number_to_hash_10")
databaseValue := bytes.Join([][]byte{
common.Hash{1}.ToBytes(), common.Hash{3}.ToBytes(), common.Hash{2}.ToBytes(),
Expand Down Expand Up @@ -1048,7 +1145,7 @@ func Test_pruneBlockHashes(t *testing.T) {
"delete from batch error": {
blockNumber: 10,
batchBuilder: func(ctrl *gomock.Controller) Deleter {
batch := NewMockDeleter(ctrl)
batch := NewMockPutDeleter(ctrl)
databaseKey := []byte("block_number_to_hash_10")
batch.EXPECT().Del(databaseKey).Return(errTest)
return batch
Expand All @@ -1059,7 +1156,7 @@ func Test_pruneBlockHashes(t *testing.T) {
"success": {
blockNumber: 10,
batchBuilder: func(ctrl *gomock.Controller) Deleter {
batch := NewMockDeleter(ctrl)
batch := NewMockPutDeleter(ctrl)
databaseKey := []byte("block_number_to_hash_10")
batch.EXPECT().Del(databaseKey).Return(nil)
return batch
Expand Down
Loading

0 comments on commit 1611647

Please sign in to comment.