diff --git a/api/tbcapi/tbcapi.go b/api/tbcapi/tbcapi.go index 995942f9e..c8a7cf3e5 100644 --- a/api/tbcapi/tbcapi.go +++ b/api/tbcapi/tbcapi.go @@ -1,4 +1,4 @@ -// Copyright (c) 2024 Hemi Labs, Inc. +// Copyright (c) 2024-2025 Hemi Labs, Inc. // Use of this source code is governed by the MIT License, // which can be found in the LICENSE file. @@ -15,6 +15,7 @@ import ( "github.com/hemilabs/heminetwork/api" "github.com/hemilabs/heminetwork/api/protocol" + "github.com/hemilabs/heminetwork/hemi" ) // XXX we should kill the wrapping types that are basically identical to wire. @@ -76,6 +77,9 @@ const ( CmdBlockDownloadAsyncRawRequest = "tbcapi-block-download-async-raw-request" CmdBlockDownloadAsyncRawResponse = "tbcapi-block-download-async-raw-response" + + CmdBlockKeystoneByL2KeystoneAbrevHashRequest = "tbcapi-l2-keystone-abrev-by-abrev-hash-request" + CmdBlockKeystoneByL2KeystoneAbrevHashResponse = "tbcapi-l2-keystone-abrev-by-abrev-hash-response" ) var ( @@ -284,6 +288,16 @@ type BlockInsertRawRequest struct { Block api.ByteSlice `json:"block"` } +type BlockKeystoneByL2KeystoneAbrevHashRequest struct { + L2KeystoneAbrevHash *chainhash.Hash `json:"l2_keystones_abrev_hash"` +} + +type BlockKeystoneByL2KeystoneAbrevHashResponse struct { + L2KeystoneAbrev *hemi.L2KeystoneAbrev `json:"l2_keystone_abrev"` + BtcBlockHash *chainhash.Hash `json:"btc_block_hash"` + Error *protocol.Error `json:"error,omitempty"` +} + type BlockInsertRawResponse struct { BlockHash *chainhash.Hash `json:"block_hash"` Error *protocol.Error `json:"error,omitempty"` @@ -315,42 +329,44 @@ type BlockDownloadAsyncRawResponse struct { } var commands = map[protocol.Command]reflect.Type{ - CmdPingRequest: reflect.TypeOf(PingRequest{}), - CmdPingResponse: reflect.TypeOf(PingResponse{}), - CmdBlockByHashRequest: reflect.TypeOf(BlockByHashRequest{}), - CmdBlockByHashResponse: reflect.TypeOf(BlockByHashResponse{}), - CmdBlockByHashRawRequest: reflect.TypeOf(BlockByHashRawRequest{}), - 
CmdBlockByHashRawResponse: reflect.TypeOf(BlockByHashRawResponse{}), - CmdBlockHeadersByHeightRawRequest: reflect.TypeOf(BlockHeadersByHeightRawRequest{}), - CmdBlockHeadersByHeightRawResponse: reflect.TypeOf(BlockHeadersByHeightRawResponse{}), - CmdBlockHeadersByHeightRequest: reflect.TypeOf(BlockHeadersByHeightRequest{}), - CmdBlockHeadersByHeightResponse: reflect.TypeOf(BlockHeadersByHeightResponse{}), - CmdBlockHeaderBestRawRequest: reflect.TypeOf(BlockHeaderBestRawRequest{}), - CmdBlockHeaderBestRawResponse: reflect.TypeOf(BlockHeaderBestRawResponse{}), - CmdBlockHeaderBestRequest: reflect.TypeOf(BlockHeaderBestRequest{}), - CmdBlockHeaderBestResponse: reflect.TypeOf(BlockHeaderBestResponse{}), - CmdBalanceByAddressRequest: reflect.TypeOf(BalanceByAddressRequest{}), - CmdBalanceByAddressResponse: reflect.TypeOf(BalanceByAddressResponse{}), - CmdUTXOsByAddressRawRequest: reflect.TypeOf(UTXOsByAddressRawRequest{}), - CmdUTXOsByAddressRawResponse: reflect.TypeOf(UTXOsByAddressRawResponse{}), - CmdUTXOsByAddressRequest: reflect.TypeOf(UTXOsByAddressRequest{}), - CmdUTXOsByAddressResponse: reflect.TypeOf(UTXOsByAddressResponse{}), - CmdTxByIdRawRequest: reflect.TypeOf(TxByIdRawRequest{}), - CmdTxByIdRawResponse: reflect.TypeOf(TxByIdRawResponse{}), - CmdTxByIdRequest: reflect.TypeOf(TxByIdRequest{}), - CmdTxByIdResponse: reflect.TypeOf(TxByIdResponse{}), - CmdTxBroadcastRequest: reflect.TypeOf(TxBroadcastRequest{}), - CmdTxBroadcastResponse: reflect.TypeOf(TxBroadcastResponse{}), - CmdTxBroadcastRawRequest: reflect.TypeOf(TxBroadcastRawRequest{}), - CmdTxBroadcastRawResponse: reflect.TypeOf(TxBroadcastRawResponse{}), - CmdBlockInsertRequest: reflect.TypeOf(BlockInsertRequest{}), - CmdBlockInsertResponse: reflect.TypeOf(BlockInsertResponse{}), - CmdBlockInsertRawRequest: reflect.TypeOf(BlockInsertRawRequest{}), - CmdBlockInsertRawResponse: reflect.TypeOf(BlockInsertRawResponse{}), - CmdBlockDownloadAsyncRequest: reflect.TypeOf(BlockDownloadAsyncRequest{}), - 
CmdBlockDownloadAsyncResponse: reflect.TypeOf(BlockDownloadAsyncResponse{}), - CmdBlockDownloadAsyncRawRequest: reflect.TypeOf(BlockDownloadAsyncRawRequest{}), - CmdBlockDownloadAsyncRawResponse: reflect.TypeOf(BlockDownloadAsyncRawResponse{}), + CmdPingRequest: reflect.TypeOf(PingRequest{}), + CmdPingResponse: reflect.TypeOf(PingResponse{}), + CmdBlockByHashRequest: reflect.TypeOf(BlockByHashRequest{}), + CmdBlockByHashResponse: reflect.TypeOf(BlockByHashResponse{}), + CmdBlockByHashRawRequest: reflect.TypeOf(BlockByHashRawRequest{}), + CmdBlockByHashRawResponse: reflect.TypeOf(BlockByHashRawResponse{}), + CmdBlockHeadersByHeightRawRequest: reflect.TypeOf(BlockHeadersByHeightRawRequest{}), + CmdBlockHeadersByHeightRawResponse: reflect.TypeOf(BlockHeadersByHeightRawResponse{}), + CmdBlockHeadersByHeightRequest: reflect.TypeOf(BlockHeadersByHeightRequest{}), + CmdBlockHeadersByHeightResponse: reflect.TypeOf(BlockHeadersByHeightResponse{}), + CmdBlockHeaderBestRawRequest: reflect.TypeOf(BlockHeaderBestRawRequest{}), + CmdBlockHeaderBestRawResponse: reflect.TypeOf(BlockHeaderBestRawResponse{}), + CmdBlockHeaderBestRequest: reflect.TypeOf(BlockHeaderBestRequest{}), + CmdBlockHeaderBestResponse: reflect.TypeOf(BlockHeaderBestResponse{}), + CmdBalanceByAddressRequest: reflect.TypeOf(BalanceByAddressRequest{}), + CmdBalanceByAddressResponse: reflect.TypeOf(BalanceByAddressResponse{}), + CmdUTXOsByAddressRawRequest: reflect.TypeOf(UTXOsByAddressRawRequest{}), + CmdUTXOsByAddressRawResponse: reflect.TypeOf(UTXOsByAddressRawResponse{}), + CmdUTXOsByAddressRequest: reflect.TypeOf(UTXOsByAddressRequest{}), + CmdUTXOsByAddressResponse: reflect.TypeOf(UTXOsByAddressResponse{}), + CmdTxByIdRawRequest: reflect.TypeOf(TxByIdRawRequest{}), + CmdTxByIdRawResponse: reflect.TypeOf(TxByIdRawResponse{}), + CmdTxByIdRequest: reflect.TypeOf(TxByIdRequest{}), + CmdTxByIdResponse: reflect.TypeOf(TxByIdResponse{}), + CmdTxBroadcastRequest: reflect.TypeOf(TxBroadcastRequest{}), + 
CmdTxBroadcastResponse: reflect.TypeOf(TxBroadcastResponse{}), + CmdTxBroadcastRawRequest: reflect.TypeOf(TxBroadcastRawRequest{}), + CmdTxBroadcastRawResponse: reflect.TypeOf(TxBroadcastRawResponse{}), + CmdBlockInsertRequest: reflect.TypeOf(BlockInsertRequest{}), + CmdBlockInsertResponse: reflect.TypeOf(BlockInsertResponse{}), + CmdBlockInsertRawRequest: reflect.TypeOf(BlockInsertRawRequest{}), + CmdBlockInsertRawResponse: reflect.TypeOf(BlockInsertRawResponse{}), + CmdBlockDownloadAsyncRequest: reflect.TypeOf(BlockDownloadAsyncRequest{}), + CmdBlockDownloadAsyncResponse: reflect.TypeOf(BlockDownloadAsyncResponse{}), + CmdBlockDownloadAsyncRawRequest: reflect.TypeOf(BlockDownloadAsyncRawRequest{}), + CmdBlockDownloadAsyncRawResponse: reflect.TypeOf(BlockDownloadAsyncRawResponse{}), + CmdBlockKeystoneByL2KeystoneAbrevHashRequest: reflect.TypeOf(BlockKeystoneByL2KeystoneAbrevHashRequest{}), + CmdBlockKeystoneByL2KeystoneAbrevHashResponse: reflect.TypeOf(BlockKeystoneByL2KeystoneAbrevHashResponse{}), } type tbcAPI struct{} diff --git a/cmd/tbcd/tbcd.go b/cmd/tbcd/tbcd.go index bf45b2f88..967279689 100644 --- a/cmd/tbcd/tbcd.go +++ b/cmd/tbcd/tbcd.go @@ -61,10 +61,16 @@ var ( }, "TBC_BLOCK_SANITY": config.Config{ Value: &cfg.BlockSanity, - DefaultValue: false, + DefaultValue: true, Help: "enable/disable block sanity checks before inserting", Print: config.PrintAll, }, + "TBC_HEMI_INDEX": config.Config{ + Value: &cfg.HemiIndex, + DefaultValue: false, + Help: "enable/disable various hemi related indexes", + Print: config.PrintAll, + }, "TBC_LEVELDB_HOME": config.Config{ Value: &cfg.LevelDBHome, DefaultValue: defaultHome, @@ -77,6 +83,12 @@ var ( Help: "loglevel for various packages; INFO, DEBUG and TRACE", Print: config.PrintAll, }, + "TBC_MAX_CACHED_KEYSTONES": config.Config{ + Value: &cfg.MaxCachedKeystones, + DefaultValue: int(1e5), + Help: "maximum cached keystones during indexing", + Print: config.PrintAll, + }, "TBC_MAX_CACHED_TXS": config.Config{ Value: 
&cfg.MaxCachedTxs, DefaultValue: int(1e6), diff --git a/database/level/level.go b/database/level/level.go index e380aca48..f03fe326b 100644 --- a/database/level/level.go +++ b/database/level/level.go @@ -1,4 +1,4 @@ -// Copyright (c) 2024 Hemi Labs, Inc. +// Copyright (c) 2024-2025 Hemi Labs, Inc. // Use of this source code is governed by the MIT License, // which can be found in the LICENSE file. @@ -29,6 +29,7 @@ const ( BlockHeadersDB = "blockheaders" BlocksMissingDB = "blocksmissing" MetadataDB = "metadata" + KeystonesDB = "keystones" HeightHashDB = "heighthash" PeersDB = "peers" OutputsDB = "outputs" @@ -221,6 +222,10 @@ func New(ctx context.Context, home string, version int) (*Database, error) { if err != nil { return nil, fmt.Errorf("leveldb %v: %w", TransactionsDB, err) } + err = l.openDB(KeystonesDB, nil) + if err != nil { + return nil, fmt.Errorf("leveldb %v: %w", KeystonesDB, err) + } // Blocks database is special err = l.openRawDB(BlocksDB, rawdb.DefaultMaxFileSize) diff --git a/database/tbcd/database.go b/database/tbcd/database.go index afbef9b6e..978afcf0e 100644 --- a/database/tbcd/database.go +++ b/database/tbcd/database.go @@ -21,6 +21,7 @@ import ( "github.com/syndtr/goleveldb/leveldb" "github.com/hemilabs/heminetwork/database" + "github.com/hemilabs/heminetwork/hemi" ) type InsertType int @@ -118,6 +119,15 @@ type Database interface { ScriptHashByOutpoint(ctx context.Context, op Outpoint) (*ScriptHash, error) UtxosByScriptHash(ctx context.Context, sh ScriptHash, start uint64, count uint64) ([]Utxo, error) UtxosByScriptHashCount(ctx context.Context, sh ScriptHash) (uint64, error) + + // Hemi + BlockKeystoneUpdate(ctx context.Context, direction int, keystones map[chainhash.Hash]Keystone) error + BlockKeystoneByL2KeystoneAbrevHash(ctx context.Context, abrevhash chainhash.Hash) (*Keystone, error) +} + +type Keystone struct { + BlockHash chainhash.Hash // Block that contains abbreviated keystone + AbbreviatedKeystone [hemi.L2KeystoneAbrevSize]byte // 
Abbreviated keystone } // XXX there exist various types in this file that need to be reevaluated. @@ -225,13 +235,13 @@ func NewOutpoint(txid [32]byte, index uint32) (op Outpoint) { return } -// CacheOutput is a densely packed representation of a bitcoin UTXo. The fields are -// script_hash + value + out_index. It is packed for -// memory conservation reasons. +// CacheOutput is a densely packed representation of a bitcoin UTXo. The fields +// are script_hash + value + out_index. It is packed for memory conservation +// reasons. type CacheOutput [32 + 8 + 4]byte // script_hash + value + out_idx -// String returns pretty printable CacheOutput. Hash is not reversed since it is an -// opaque pointer. It prints satoshis@script_hash:output_index +// String returns pretty printable CacheOutput. Hash is not reversed since it +// is an opaque pointer. It prints satoshis@script_hash:output_index func (c CacheOutput) String() string { return fmt.Sprintf("%d @ %x:%d", binary.BigEndian.Uint64(c[32:40]), c[0:32], binary.BigEndian.Uint32(c[40:])) @@ -292,8 +302,8 @@ func NewDeleteCacheOutput(hash [32]byte, outIndex uint32) (co CacheOutput) { // Utxo packs a transaction id, the value and the out index. type Utxo [32 + 8 + 4]byte // tx_id + value + out_idx -// String returns pretty printable CacheOutput. Hash is not reversed since it is an -// opaque pointer. It prints satoshis@script_hash:output_index +// String returns pretty printable CacheOutput. Hash is not reversed since it +// is an opaque pointer. 
It prints satoshis@script_hash:output_index func (u Utxo) String() string { ch, _ := chainhash.NewHash(u[0:32]) return fmt.Sprintf("%d @ %v:%d", binary.BigEndian.Uint64(u[32:40]), diff --git a/database/tbcd/level/encodedecode_test.go b/database/tbcd/level/encodedecode_test.go new file mode 100644 index 000000000..042804116 --- /dev/null +++ b/database/tbcd/level/encodedecode_test.go @@ -0,0 +1,33 @@ +package level + +import ( + "reflect" + "testing" + + btcchainhash "github.com/btcsuite/btcd/chaincfg/chainhash" + + "github.com/hemilabs/heminetwork/database/tbcd" + "github.com/hemilabs/heminetwork/hemi" +) + +func TestKeystoneEncodeDecode(t *testing.T) { + hks := hemi.L2Keystone{ + Version: 1, + L1BlockNumber: 5, + L2BlockNumber: 44, + ParentEPHash: fillOutBytes("v1parentephash", 32), + PrevKeystoneEPHash: fillOutBytes("v1prevkeystoneephash", 32), + StateRoot: fillOutBytes("v1stateroot", 32), + EPHash: fillOutBytes("v1ephash", 32), + } + abrvKs := hemi.L2KeystoneAbbreviate(hks).Serialize() + ks := tbcd.Keystone{ + BlockHash: btcchainhash.Hash(fillOutBytes("blockhash", 32)), + AbbreviatedKeystone: abrvKs, + } + eks := encodeKeystone(ks) + nks := decodeKeystone(eks[:]) + if !reflect.DeepEqual(nks, ks) { + t.Fatal("decoded keystone not equal") + } +} diff --git a/database/tbcd/level/level.go b/database/tbcd/level/level.go index 4e27253c7..f2abc4728 100644 --- a/database/tbcd/level/level.go +++ b/database/tbcd/level/level.go @@ -26,6 +26,7 @@ import ( "github.com/hemilabs/heminetwork/database" "github.com/hemilabs/heminetwork/database/level" "github.com/hemilabs/heminetwork/database/tbcd" + "github.com/hemilabs/heminetwork/hemi" ) // Locking order: @@ -282,6 +283,23 @@ func (l *ldb) MetadataBatchGet(ctx context.Context, allOrNone bool, keys [][]byt return l.transactionBatchGet(ctx, mdDB, allOrNone, keys) } +func (l *ldb) BlockKeystoneByL2KeystoneAbrevHash(ctx context.Context, abrevhash chainhash.Hash) (*tbcd.Keystone, error) { + 
log.Tracef("BlockKeystoneByL2KeystoneAbrevHash") + defer log.Tracef("BlockKeystoneByL2KeystoneAbrevHash exit") + + kssDB := l.pool[level.KeystonesDB] + eks, err := kssDB.Get(abrevhash.CloneBytes(), nil) + if err != nil { + if errors.Is(err, leveldb.ErrNotFound) { + return nil, database.NotFoundError(fmt.Sprintf("l2 keystone not found: %v", abrevhash)) + } + return nil, fmt.Errorf("l2 keystone get: %w", err) + } + + ks := decodeKeystone(eks) + return &ks, nil +} + // BatchAppend appends rows to batch b. func BatchAppend(ctx context.Context, b *leveldb.Batch, rows []tbcd.Row) { log.Tracef("BatchAppend") @@ -1707,6 +1725,86 @@ func (l *ldb) BlockTxUpdate(ctx context.Context, direction int, txs map[tbcd.TxK return nil } +// encodeKeystone encodes a database keystone as +// [blockhash,abbreviated keystone] or [32+76] bytes. The abbreviated keystone +// hash is the leveldb table key. +func encodeKeystone(ks tbcd.Keystone) (eks [chainhash.HashSize + hemi.L2KeystoneAbrevSize]byte) { + copy(eks[0:32], ks.BlockHash[:]) + copy(eks[32:], ks.AbbreviatedKeystone[:]) + return +} + +func encodeKeystoneToSlice(ks tbcd.Keystone) []byte { + eks := encodeKeystone(ks) + return eks[:] +} + +// decodeKeystone reverse the process of encodeKeystone. +func decodeKeystone(eks []byte) (ks tbcd.Keystone) { + bh, err := chainhash.NewHash(eks[0:32]) + if err != nil { + panic(err) // Can't happen + } + ks.BlockHash = *bh + // copy the values to prevent slicing reentrancy problems. 
+ copy(ks.AbbreviatedKeystone[:], eks[32:]) + return ks +} + +func (l *ldb) BlockKeystoneUpdate(ctx context.Context, direction int, keystones map[chainhash.Hash]tbcd.Keystone) error { + log.Tracef("BlockKeystoneUpdate") + defer log.Tracef("BlockKeystoneUpdate exit") + + if !(direction == 1 || direction == -1) { + return fmt.Errorf("invalid direction: %v", direction) + } + + // keystones + kssTx, kssCommit, kssDiscard, err := l.startTransaction(level.KeystonesDB) + if err != nil { + return fmt.Errorf("keystones open db transaction: %w", err) + } + defer kssDiscard() + + kssBatch := new(leveldb.Batch) + for k, v := range keystones { + switch direction { + case -1: + eks, err := kssTx.Get(k[:], nil) + if err != nil { + continue + } + ks := decodeKeystone(eks) + // Only delete keystone if it is in the previously found block. + if ks.BlockHash.IsEqual(&v.BlockHash) { + kssBatch.Delete(k[:]) + } + case 1: + has, err := kssTx.Has(k[:], nil) + if err != nil { + return fmt.Errorf("keystone update has: %w", err) + } + if has { + // Only store unknown keystones + continue + } + kssBatch.Put(k[:], encodeKeystoneToSlice(v)) + } + } + + // Write keystones batch + if err = kssTx.Write(kssBatch, nil); err != nil { + return fmt.Errorf("keystones insert: %w", err) + } + + // keystones commit + if err = kssCommit(); err != nil { + return fmt.Errorf("keystones commit: %w", err) + } + + return nil +} + func (l *ldb) BlockHeaderCacheStats() tbcd.CacheStats { if l.cfg.blockheaderCacheSize == 0 { return noStats diff --git a/database/tbcd/level/level_test.go b/database/tbcd/level/level_test.go index 3327359cc..139dd07b9 100644 --- a/database/tbcd/level/level_test.go +++ b/database/tbcd/level/level_test.go @@ -1,17 +1,22 @@ -package level_test +package level import ( "bytes" "context" "errors" + "fmt" "reflect" "testing" + "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/davecgh/go-spew/spew" + "github.com/go-test/deep" + + btcchainhash "github.com/btcsuite/btcd/chaincfg/chainhash" 
"github.com/hemilabs/heminetwork/database" "github.com/hemilabs/heminetwork/database/tbcd" - "github.com/hemilabs/heminetwork/database/tbcd/level" + "github.com/hemilabs/heminetwork/hemi" ) func TestMD(t *testing.T) { @@ -21,8 +26,8 @@ func TestMD(t *testing.T) { home := t.TempDir() t.Logf("temp: %v", home) - cfg := level.NewConfig(home, "128kb", "1m") - db, err := level.New(ctx, cfg) + cfg := NewConfig(home, "128kb", "1m") + db, err := New(ctx, cfg) if err != nil { t.Fatal(err) } @@ -112,3 +117,338 @@ func TestMD(t *testing.T) { t.Fatal("expected no return value") } } + +func fillOutBytes(prefix string, size int) []byte { + result := []byte(prefix) + for len(result) < size { + result = append(result, '_') + } + return result +} + +func makeKssMap(kssList []hemi.L2Keystone, blockHashSeed string) map[chainhash.Hash]tbcd.Keystone { + kssMap := make(map[chainhash.Hash]tbcd.Keystone) + for _, l2Keystone := range kssList { + abrvKs := hemi.L2KeystoneAbbreviate(l2Keystone).Serialize() + kssMap[*hemi.L2KeystoneAbbreviate(l2Keystone).Hash()] = tbcd.Keystone{ + BlockHash: btcchainhash.Hash(fillOutBytes(blockHashSeed, 32)), + AbbreviatedKeystone: abrvKs, + } + } + return kssMap +} + +func TestKssEncoding(t *testing.T) { + keystones := []hemi.L2Keystone{ + { + Version: 1, + L1BlockNumber: 5, + L2BlockNumber: 44, + ParentEPHash: fillOutBytes("parentephash", 32), + PrevKeystoneEPHash: fillOutBytes("prevkeystoneephash", 32), + StateRoot: fillOutBytes("stateroot", 32), + EPHash: fillOutBytes("ephash", 32), + }, { + Version: 1, + L1BlockNumber: 5, + L2BlockNumber: 44, + ParentEPHash: fillOutBytes("altparentephash", 32), + PrevKeystoneEPHash: fillOutBytes("altprevkeystoneephash", 32), + StateRoot: fillOutBytes("altstateroot", 32), + EPHash: fillOutBytes("altephash", 32), + }, + } + + altKeystone := hemi.L2Keystone{ + Version: 2, + L1BlockNumber: 6, + L2BlockNumber: 64, + ParentEPHash: fillOutBytes("fakeparentephash", 32), + PrevKeystoneEPHash: fillOutBytes("fakeprevkeystoneephash", 
32), + StateRoot: fillOutBytes("fakestateroot", 32), + EPHash: fillOutBytes("fakeephash", 32), + } + altAbrevKeystone := hemi.L2KeystoneAbbreviate(altKeystone).Serialize() + + kssMap := makeKssMap(keystones, "blockhash") + for _, ks := range kssMap { + encodedKs := encodeKeystoneToSlice(ks) + decodedKs := decodeKeystone(encodedKs) + + if !decodedKs.BlockHash.IsEqual(&ks.BlockHash) { + t.Fatalf("blockhash diff: got %v, expected %v", decodedKs.BlockHash, ks.BlockHash) + } + if diff := deep.Equal(decodedKs.AbbreviatedKeystone, ks.AbbreviatedKeystone); len(diff) > 0 { + t.Fatalf("abrv Ks diff: %s", diff) + } + } + diffKssMap := makeKssMap(keystones, "diffblockhash") + for key, ks := range diffKssMap { + dks := kssMap[key] + encodedKs := encodeKeystoneToSlice(dks) + decodedKs := decodeKeystone(encodedKs) + + if decodedKs.BlockHash.IsEqual(&ks.BlockHash) { + t.Fatalf("blockhash not diff: got %v, expected %v", decodedKs.BlockHash, ks.BlockHash) + } + + if diff := deep.Equal(decodedKs.AbbreviatedKeystone, altAbrevKeystone); len(diff) == 0 { + t.Fatalf("abrv Ks diff: %v", diff) + } + } +} + +func TestKeystoneUpdate(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + kssList := []hemi.L2Keystone{ + { + Version: 1, + L1BlockNumber: 5, + L2BlockNumber: 44, + ParentEPHash: fillOutBytes("v1parentephash", 32), + PrevKeystoneEPHash: fillOutBytes("v1prevkeystoneephash", 32), + StateRoot: fillOutBytes("v1stateroot", 32), + EPHash: fillOutBytes("v1ephash", 32), + }, + { + Version: 1, + L1BlockNumber: 6, + L2BlockNumber: 44, + ParentEPHash: fillOutBytes("v2parentephash", 32), + PrevKeystoneEPHash: fillOutBytes("v2prevkeystoneephash", 32), + StateRoot: fillOutBytes("v2stateroot", 32), + EPHash: fillOutBytes("v2ephash", 32), + }, + { + Version: 1, + L1BlockNumber: 5, + L2BlockNumber: 44, + ParentEPHash: fillOutBytes("i1parentephash", 32), + PrevKeystoneEPHash: fillOutBytes("i1prevkeystoneephash", 32), + StateRoot: fillOutBytes("i1stateroot", 32), + 
EPHash: fillOutBytes("i1ephash", 32), + }, + { + Version: 1, + L1BlockNumber: 6, + L2BlockNumber: 44, + ParentEPHash: fillOutBytes("i2parentephash", 32), + PrevKeystoneEPHash: fillOutBytes("i2prevkeystoneephash", 32), + StateRoot: fillOutBytes("i2stateroot", 32), + EPHash: fillOutBytes("i2ephash", 32), + }, + } + + type testTableItem struct { + name string + direction []int + preInsertValid bool + kssMap map[chainhash.Hash]tbcd.Keystone + expectedInDB map[chainhash.Hash]tbcd.Keystone + expectedOutDB map[chainhash.Hash]tbcd.Keystone + expectedError error + } + + testTable := []testTableItem{ + { + name: "invalidDirection", + direction: []int{0}, + expectedOutDB: makeKssMap(kssList[:], "blockhash"), + expectedError: fmt.Errorf("invalid direction: %v", 0), + }, + { + name: "nilMap", + direction: []int{-1, 1}, + expectedOutDB: makeKssMap(kssList, "blockhash"), + }, + { + name: "emptyMap", + direction: []int{-1, 1}, + kssMap: makeKssMap(nil, "blockhash"), + expectedOutDB: makeKssMap(kssList, "blockhash"), + }, + + { + name: "duplicateInsert", + direction: []int{1}, + expectedError: nil, + preInsertValid: true, + kssMap: makeKssMap(kssList[:2], "blockhash"), + expectedInDB: makeKssMap(kssList[:2], "blockhash"), + expectedOutDB: makeKssMap(kssList[2:], "blockhash"), + }, + + { + name: "invalidRemove", + direction: []int{-1}, + kssMap: makeKssMap(kssList[2:], "blockhash"), + expectedOutDB: makeKssMap(kssList, "blockhash"), + }, + { + name: "validInsert", + direction: []int{1}, + kssMap: makeKssMap(kssList[:2], "blockhash"), + expectedInDB: makeKssMap(kssList[:2], "blockhash"), + expectedOutDB: makeKssMap(kssList[2:], "blockhash"), + }, + { + name: "validRemove", + direction: []int{1, -1}, + kssMap: makeKssMap(kssList[2:], "blockhash"), + expectedOutDB: makeKssMap(kssList, "blockhash"), + }, + { + name: "mixedRemove", + direction: []int{-1}, + preInsertValid: true, + kssMap: makeKssMap(kssList, "blockhash"), + expectedOutDB: makeKssMap(kssList, "blockhash"), + }, + { + 
name: "mixedInsert", + direction: []int{1}, + preInsertValid: true, + kssMap: makeKssMap(kssList, "blockhash"), + expectedInDB: makeKssMap(kssList, "blockhash"), + }, + { + name: "invalidBlockhashRemove", + direction: []int{-1}, + preInsertValid: true, + kssMap: makeKssMap(kssList, "fakeblockhash"), + expectedInDB: makeKssMap(kssList[:2], "blockhash"), + }, + } + + for _, tti := range testTable { + t.Run(tti.name, func(t *testing.T) { + home := t.TempDir() + t.Logf("temp: %v", home) + + cfg := NewConfig(home, "", "") + db, err := New(ctx, cfg) + if err != nil { + t.Fatal(err) + } + defer func() { + err := db.Close() + if err != nil { + t.Fatal(err) + } + }() + + if tti.preInsertValid { + if err := db.BlockKeystoneUpdate(ctx, 1, makeKssMap(kssList[:2], "blockhash")); err != nil { + t.Fatal(err) + } + } + + for _, dir := range tti.direction { + err := db.BlockKeystoneUpdate(ctx, dir, tti.kssMap) + if diff := deep.Equal(err, tti.expectedError); len(diff) > 0 { + t.Fatalf("(direction %v) unexpected error diff: %s", dir, diff) + } + } + + for v := range tti.expectedInDB { + _, err := db.BlockKeystoneByL2KeystoneAbrevHash(ctx, v) + if err != nil { + t.Fatalf("keystone not in db: %v", err) + } + } + + for k, v := range tti.expectedOutDB { + _, err := db.BlockKeystoneByL2KeystoneAbrevHash(ctx, k) + if err == nil { + t.Fatalf("keystone in db: %v", spew.Sdump(v)) + } else { + if !errors.Is(err, database.ErrNotFound) { + t.Fatalf("expected '%v', got '%v'", database.ErrNotFound, err) + } + } + } + }) + } +} + +func newKeystone(blockhash *chainhash.Hash, l1, l2 uint32) (*chainhash.Hash, tbcd.Keystone) { + hks := hemi.L2Keystone{ + Version: 1, + L1BlockNumber: l1, + L2BlockNumber: l2, + ParentEPHash: fillOutBytes("v1parentephash", 32), + PrevKeystoneEPHash: fillOutBytes("v1prevkeystoneephash", 32), + StateRoot: fillOutBytes("v1stateroot", 32), + EPHash: fillOutBytes("v1ephash", 32), + } + abrvKs := hemi.L2KeystoneAbbreviate(hks) + return abrvKs.Hash(), tbcd.Keystone{ + 
BlockHash: *blockhash, + AbbreviatedKeystone: abrvKs.Serialize(), + } +} + +func TestKeystoneDBWindUnwind(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + home := t.TempDir() + t.Logf("temp: %v", home) + + cfg := NewConfig(home, "", "") + db, err := New(ctx, cfg) + if err != nil { + t.Fatal(err) + } + defer func() { + err := db.Close() + if err != nil { + t.Fatal(err) + } + }() + + blk1Hash := chainhash.Hash{1} + k1hash, k1 := newKeystone(&blk1Hash, 1, 2) + blk2Hash := chainhash.Hash{1} + k2hash, k2 := newKeystone(&blk2Hash, 2, 3) + ksm := map[chainhash.Hash]tbcd.Keystone{ + *k1hash: k1, + *k2hash: k2, + } + err = db.BlockKeystoneUpdate(ctx, 1, ksm) + if err != nil { + t.Fatal(err) + } + + // Get keystones back out + ks1, err := db.BlockKeystoneByL2KeystoneAbrevHash(ctx, *k1hash) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(k1, *ks1) { + t.Fatalf("%v%v", spew.Sdump(k1), spew.Sdump(*ks1)) + } + ks2, err := db.BlockKeystoneByL2KeystoneAbrevHash(ctx, *k2hash) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(k2, *ks2) { + t.Fatalf("%v%v", spew.Sdump(k2), spew.Sdump(*ks2)) + } + + // Unwind + err = db.BlockKeystoneUpdate(ctx, -1, ksm) + if err != nil { + t.Fatal(err) + } + _, err = db.BlockKeystoneByL2KeystoneAbrevHash(ctx, *k1hash) + if err == nil { + t.Fatal("k1 found") + } + _, err = db.BlockKeystoneByL2KeystoneAbrevHash(ctx, *k2hash) + if err == nil { + t.Fatal("k2 found") + } +} diff --git a/e2e/e2e_ext_test.go b/e2e/e2e_ext_test.go index 7d72e393b..23f3a20c9 100644 --- a/e2e/e2e_ext_test.go +++ b/e2e/e2e_ext_test.go @@ -774,7 +774,7 @@ func TestNewL2Keystone(t *testing.T) { } } - l2KeystoneAbrevHash := hemi.L2KeystoneAbbreviate(l2KeystoneRequest.L2Keystone).Hash() + l2KeystoneAbrevHash := hemi.L2KeystoneAbbreviate(l2KeystoneRequest.L2Keystone).HashB() time.Sleep(2 * time.Second) @@ -1475,7 +1475,7 @@ func TestBitcoinBroadcast(t *testing.T) { // go away in the future once we add "missing keystone" 
logic and // functionality if diff := deep.Equal(l2k, []bfgd.L2Keystone{ - bfgd.L2Keystone{ + { Version: 1, L1BlockNumber: 5, L2BlockNumber: 44, @@ -1483,7 +1483,7 @@ func TestBitcoinBroadcast(t *testing.T) { PrevKeystoneEPHash: fillOutBytesWith0s("prevkeystone", 32), StateRoot: fillOutBytes("stateroot", 32), EPHash: fillOutBytesWith0s("ephash______", 32), - Hash: hemi.L2KeystoneAbbreviate(l2Keystone).Hash(), + Hash: hemi.L2KeystoneAbbreviate(l2Keystone).HashB(), }, }); len(diff) > 0 { t.Fatalf("unexpected diff: %s", diff) @@ -1586,7 +1586,7 @@ func TestBitcoinBroadcastDuplicate(t *testing.T) { publicKeyUncompressed := publicKey.SerializeUncompressed() // 3 - popBases, err := db.PopBasisByL2KeystoneAbrevHash(ctx, [32]byte(hemi.L2KeystoneAbbreviate(l2Keystone).Hash()), false, 0) + popBases, err := db.PopBasisByL2KeystoneAbrevHash(ctx, [32]byte(hemi.L2KeystoneAbbreviate(l2Keystone).HashB()), false, 0) if err != nil { t.Fatal(err) } @@ -1600,7 +1600,7 @@ func TestBitcoinBroadcastDuplicate(t *testing.T) { diff := deep.Equal(popBases, []bfgd.PopBasis{ { - L2KeystoneAbrevHash: hemi.L2KeystoneAbbreviate(l2Keystone).Hash(), + L2KeystoneAbrevHash: hemi.L2KeystoneAbbreviate(l2Keystone).HashB(), PopMinerPublicKey: publicKeyUncompressed, BtcRawTx: btx, BtcTxId: btcTxId[:], @@ -1760,7 +1760,7 @@ loop: // go away in the future once we add "missing keystone" logic and // functionality if diff := deep.Equal(l2k, []bfgd.L2Keystone{ - bfgd.L2Keystone{ + { Version: 1, L1BlockNumber: 5, L2BlockNumber: 44, @@ -1768,7 +1768,7 @@ loop: PrevKeystoneEPHash: fillOutBytesWith0s("prevkeystone", 32), StateRoot: fillOutBytes("stateroot", 32), EPHash: fillOutBytesWith0s("ephash______", 32), - Hash: hemi.L2KeystoneAbbreviate(l2Keystone).Hash(), + Hash: hemi.L2KeystoneAbbreviate(l2Keystone).HashB(), }, }); len(diff) > 0 { t.Fatalf("unexpected diff: %s", diff) @@ -1829,7 +1829,7 @@ loop: case <-lctx.Done(): break loop case <-time.After(1 * time.Second): - popBases, err = 
db.PopBasisByL2KeystoneAbrevHash(ctx, [32]byte(hemi.L2KeystoneAbbreviate(l2Keystone).Hash()), false, 0) + popBases, err = db.PopBasisByL2KeystoneAbrevHash(ctx, [32]byte(hemi.L2KeystoneAbbreviate(l2Keystone).HashB()), false, 0) if len(popBases) > 0 { break loop } @@ -1876,7 +1876,7 @@ loop: BtcHeaderHash: btcHeaderHash, BtcTxIndex: &txIndex, PopTxId: popTxId, - L2KeystoneAbrevHash: hemi.L2KeystoneAbbreviate(l2Keystone).Hash(), + L2KeystoneAbrevHash: hemi.L2KeystoneAbbreviate(l2Keystone).HashB(), BtcRawTx: btx, PopMinerPublicKey: publicKeyUncompressed, BtcMerklePath: mockMerkleHashes, @@ -1899,7 +1899,7 @@ loop: // go away in the future once we add "missing keystone" logic and // functionality if diff := deep.Equal(l2k, []bfgd.L2Keystone{ - bfgd.L2Keystone{ + { Version: 1, L1BlockNumber: 5, L2BlockNumber: 44, @@ -1907,7 +1907,7 @@ loop: PrevKeystoneEPHash: fillOutBytesWith0s("prevkeystone", 32), StateRoot: fillOutBytes("stateroot", 32), EPHash: fillOutBytesWith0s("ephash______", 32), - Hash: hemi.L2KeystoneAbbreviate(l2Keystone).Hash(), + Hash: hemi.L2KeystoneAbbreviate(l2Keystone).HashB(), }, }); len(diff) > 0 { t.Fatalf("unexpected diff: %s", diff) @@ -2025,7 +2025,7 @@ loop: case <-lctx.Done(): break loop case <-time.After(1 * time.Second): - popBases, err = db.PopBasisByL2KeystoneAbrevHash(ctx, [32]byte(hemi.L2KeystoneAbbreviate(l2Keystone).Hash()), true, 0) + popBases, err = db.PopBasisByL2KeystoneAbrevHash(ctx, [32]byte(hemi.L2KeystoneAbbreviate(l2Keystone).HashB()), true, 0) if len(popBases) > 0 { break loop } @@ -2061,7 +2061,7 @@ loop: BtcHeaderHash: btcHeaderHash, BtcTxIndex: &txIndex, PopTxId: popTxId, - L2KeystoneAbrevHash: hemi.L2KeystoneAbbreviate(l2Keystone).Hash(), + L2KeystoneAbrevHash: hemi.L2KeystoneAbbreviate(l2Keystone).HashB(), BtcRawTx: btx, PopMinerPublicKey: publicKeyUncompressed, BtcMerklePath: mockMerkleHashes, @@ -2150,7 +2150,7 @@ func TestPopPayouts(t *testing.T) { BtcTxId: fillOutBytes("btctxid1", 32), BtcRawTx: []byte("btcrawtx1"), 
PopTxId: fillOutBytes("poptxid1", 32), - L2KeystoneAbrevHash: hemi.L2KeystoneAbbreviate(includedL2Keystone).Hash(), + L2KeystoneAbrevHash: hemi.L2KeystoneAbbreviate(includedL2Keystone).HashB(), PopMinerPublicKey: publicKeyUncompressed, BtcHeaderHash: btcHeaderHash, BtcTxIndex: &txIndex, @@ -2167,7 +2167,7 @@ func TestPopPayouts(t *testing.T) { BtcTxId: fillOutBytes("btctxid2", 32), BtcRawTx: []byte("btcrawtx2"), PopTxId: fillOutBytes("poptxid2", 32), - L2KeystoneAbrevHash: hemi.L2KeystoneAbbreviate(includedL2Keystone).Hash(), + L2KeystoneAbrevHash: hemi.L2KeystoneAbbreviate(includedL2Keystone).HashB(), PopMinerPublicKey: otherPublicKeyUncompressed, BtcHeaderHash: btcHeaderHash, BtcTxIndex: &txIndex, @@ -2184,7 +2184,7 @@ func TestPopPayouts(t *testing.T) { BtcTxId: fillOutBytes("btctxid3", 32), BtcRawTx: []byte("btcrawtx3"), PopTxId: fillOutBytes("poptxid3", 32), - L2KeystoneAbrevHash: hemi.L2KeystoneAbbreviate(includedL2Keystone).Hash(), + L2KeystoneAbrevHash: hemi.L2KeystoneAbbreviate(includedL2Keystone).HashB(), PopMinerPublicKey: publicKeyUncompressed, BtcHeaderHash: btcHeaderHash, BtcTxIndex: &txIndex, @@ -2201,7 +2201,7 @@ func TestPopPayouts(t *testing.T) { BtcTxId: fillOutBytes("btctxid4", 32), BtcRawTx: []byte("btcrawtx4"), PopTxId: fillOutBytes("poptxid4", 32), - L2KeystoneAbrevHash: hemi.L2KeystoneAbbreviate(differentL2Keystone).Hash(), + L2KeystoneAbrevHash: hemi.L2KeystoneAbbreviate(differentL2Keystone).HashB(), PopMinerPublicKey: publicKeyUncompressed, BtcHeaderHash: btcHeaderHash, BtcTxIndex: &txIndex, @@ -2354,7 +2354,7 @@ func TestPopPayoutsMultiplePages(t *testing.T) { BtcTxId: fillOutBytes("btctxid1", 32), BtcRawTx: []byte("btcrawtx1"), PopTxId: fillOutBytes("poptxid1", 32), - L2KeystoneAbrevHash: hemi.L2KeystoneAbbreviate(includedL2Keystone).Hash(), + L2KeystoneAbrevHash: hemi.L2KeystoneAbbreviate(includedL2Keystone).HashB(), PopMinerPublicKey: publicKeyUncompressed, BtcHeaderHash: btcHeaderHash, BtcTxIndex: &txIndex, @@ -3692,7 +3692,7 @@ func 
createBtcBlock(ctx context.Context, t *testing.T, db bfgd.Database, count i L2BlockNumber: l2BlockNumber, } - l2KeystoneAbrevHash := hemi.L2KeystoneAbbreviate(hemiL2Keystone).Hash() + l2KeystoneAbrevHash := hemi.L2KeystoneAbbreviate(hemiL2Keystone).HashB() l2Keystone := bfgd.L2Keystone{ Hash: l2KeystoneAbrevHash, ParentEPHash: parentEpHash, diff --git a/hemi/hemi.go b/hemi/hemi.go index 35684a947..e2eb34266 100644 --- a/hemi/hemi.go +++ b/hemi/hemi.go @@ -200,15 +200,26 @@ func L2KeystoneAbrevDeserialize(r RawAbreviatedL2Keystone) *L2KeystoneAbrev { return &a } -func (a *L2KeystoneAbrev) Hash() []byte { +func (a *L2KeystoneAbrev) HashB() []byte { b := a.Serialize() return chainhash.DoubleHashB(b[:]) } -func HashSerializedL2KeystoneAbrev(s []byte) []byte { +func HashSerializedL2KeystoneAbrevB(s []byte) []byte { return chainhash.DoubleHashB(s) } +func (a *L2KeystoneAbrev) Hash() *chainhash.Hash { + b := a.Serialize() + h := chainhash.DoubleHashH(b[:]) + return &h +} + +func HashSerializedL2KeystoneAbrev(s []byte) *chainhash.Hash { + h := chainhash.DoubleHashH(s) + return &h +} + func L2KeystoneAbbreviate(l2ks L2Keystone) *L2KeystoneAbrev { a := &L2KeystoneAbrev{ Version: l2ks.Version, @@ -233,7 +244,7 @@ func NewL2KeystoneAbrevFromBytes(b []byte) (*L2KeystoneAbrev, error) { switch ka.Version { case L2KeystoneAbrevVersion: if len(b) != L2KeystoneAbrevSize { - return nil, fmt.Errorf("invalid keystone sbrev length (%d)", + return nil, fmt.Errorf("invalid keystone abrev length (%d)", len(b)) } ka.L1BlockNumber = binary.BigEndian.Uint32(b[1:5]) diff --git a/hemi/pop/pop.go b/hemi/pop/pop.go index ef2dfb8d8..5a9b48e4c 100644 --- a/hemi/pop/pop.go +++ b/hemi/pop/pop.go @@ -1,4 +1,4 @@ -// Copyright (c) 2024 Hemi Labs, Inc. +// Copyright (c) 2024-2025 Hemi Labs, Inc. // Use of this source code is governed by the MIT License, // which can be found in the LICENSE file. 
@@ -39,7 +39,7 @@ func MinerAddressFromString(address string) (*MinerAddress, error) { // TransactionL2 rename to Transaction and fixup this code type TransactionL2 struct { - L2Keystone *hemi.L2KeystoneAbrev + L2Keystone *hemi.L2KeystoneAbrev // XXX wtf is this a pointer? } // Serialize serializes a PoP transaction to its byte representation. diff --git a/service/bfg/bfg.go b/service/bfg/bfg.go index fcbda2827..5dc8b8680 100644 --- a/service/bfg/bfg.go +++ b/service/bfg/bfg.go @@ -441,7 +441,7 @@ func (s *Server) handleOneBroadcastRequest(pctx context.Context, highPriority bo return } - log.Infof("successfully broadcast tx %s, for l2 keystone %s", mb.TxID(), hex.EncodeToString(tl2.L2Keystone.Hash())) + log.Infof("successfully broadcast tx %s, for l2 keystone %s", mb.TxID(), tl2.L2Keystone.Hash()) } func (s *Server) bitcoinBroadcastWorker(ctx context.Context, highPriority bool) { @@ -696,7 +696,7 @@ func (s *Server) processBitcoinBlock(ctx context.Context, height uint64) error { BtcHeaderHash: btcHeaderHash, BtcTxIndex: &btcTxIndex, PopTxId: popTxId, - L2KeystoneAbrevHash: tl2.L2Keystone.Hash(), + L2KeystoneAbrevHash: tl2.L2Keystone.HashB(), BtcRawTx: rtx, PopMinerPublicKey: publicKeyUncompressed, BtcMerklePath: merkleHashes, @@ -1195,7 +1195,7 @@ func (s *Server) handlePopTxsForL2Block(ctx context.Context, ptl2 *bfgapi.PopTxs hash := hemi.HashSerializedL2KeystoneAbrev(ptl2.L2Block) var h [32]byte - copy(h[:], hash) + copy(h[:], hash[:]) response := &bfgapi.PopTxsForL2BlockResponse{} @@ -1264,7 +1264,7 @@ func (s *Server) handleBtcFinalityByKeystonesRequest(ctx context.Context, bfkr * l2KeystoneAbrevHashes := make([]database.ByteArray, 0, len(bfkr.L2Keystones)) for _, l := range bfkr.L2Keystones { a := hemi.L2KeystoneAbbreviate(l) - l2KeystoneAbrevHashes = append(l2KeystoneAbrevHashes, a.Hash()) + l2KeystoneAbrevHashes = append(l2KeystoneAbrevHashes, a.HashB()) } finalities, err := s.db.L2BTCFinalityByL2KeystoneAbrevHash( @@ -1417,7 +1417,7 @@ func 
hemiL2KeystoneAbrevToDb(l2ks hemi.L2KeystoneAbrev) bfgd.L2Keystone { } return bfgd.L2Keystone{ - Hash: l2ks.Hash(), + Hash: l2ks.HashB(), Version: uint32(l2ks.Version), L1BlockNumber: l2ks.L1BlockNumber, L2BlockNumber: l2ks.L2BlockNumber, @@ -1430,7 +1430,7 @@ func hemiL2KeystoneAbrevToDb(l2ks hemi.L2KeystoneAbrev) bfgd.L2Keystone { func hemiL2KeystoneToDb(l2ks hemi.L2Keystone) bfgd.L2Keystone { return bfgd.L2Keystone{ - Hash: hemi.L2KeystoneAbbreviate(l2ks).Hash(), + Hash: hemi.L2KeystoneAbbreviate(l2ks).HashB(), Version: uint32(l2ks.Version), L1BlockNumber: l2ks.L1BlockNumber, L2BlockNumber: l2ks.L2BlockNumber, diff --git a/service/tbc/crawler.go b/service/tbc/crawler.go index c953ddf9c..837343427 100644 --- a/service/tbc/crawler.go +++ b/service/tbc/crawler.go @@ -22,6 +22,7 @@ import ( "github.com/hemilabs/heminetwork/database" "github.com/hemilabs/heminetwork/database/tbcd" + "github.com/hemilabs/heminetwork/hemi/pop" ) func s2h(s string) chainhash.Hash { @@ -33,12 +34,14 @@ func s2h(s string) chainhash.Hash { } var ( - UtxoIndexHashKey = []byte("utxoindexhash") // last indexed utxo hash - TxIndexHashKey = []byte("txindexhash") // last indexed tx hash + UtxoIndexHashKey = []byte("utxoindexhash") // last indexed utxo block hash + TxIndexHashKey = []byte("txindexhash") // last indexed tx block hash + KeystoneIndexHashKey = []byte("keystoneindexhash") // last indexed keystone block hash ErrAlreadyIndexing = errors.New("already indexing") testnet3Checkpoints = map[chainhash.Hash]uint64{ + s2h("0000000000002b9408d001dd42f830e16a9c28ed8daa828523e67e09ea9e0411"): 3600000, s2h("000000000000098faa89ab34c3ec0e6e037698e3e54c8d1bbb9dcfe0054a8e7a"): 3200000, s2h("0000000000001242d96bedebc9f45a2ecdd40d393ca0d725f500fb4977a50582"): 3100000, s2h("0000000000003c46fc60e56b9c2ae202b1efec83fcc7899d21de16757dea40a4"): 3000000, @@ -137,12 +140,50 @@ func (s *Server) mdHashHeight(ctx context.Context, key []byte) (*HashHeight, err // UtxoIndexHash returns the last hash that has been 
UTxO indexed. func (s *Server) UtxoIndexHash(ctx context.Context) (*HashHeight, error) { - return s.mdHashHeight(ctx, UtxoIndexHashKey) + h, err := s.mdHashHeight(ctx, UtxoIndexHashKey) + if err != nil { + if !errors.Is(err, database.ErrNotFound) { + return nil, err + } + h = &HashHeight{ + Hash: *s.chainParams.GenesisHash, + Height: 0, + } + } + return h, nil } // TxIndexHash returns the last hash that has been Tx indexed. func (s *Server) TxIndexHash(ctx context.Context) (*HashHeight, error) { - return s.mdHashHeight(ctx, TxIndexHashKey) + h, err := s.mdHashHeight(ctx, TxIndexHashKey) + if err != nil { + if !errors.Is(err, database.ErrNotFound) { + return nil, err + } + h = &HashHeight{ + Hash: *s.chainParams.GenesisHash, + Height: 0, + } + } + return h, nil +} + +// KeystoneIndexHash returns the last hash that has been Keystone indexed. +func (s *Server) KeystoneIndexHash(ctx context.Context) (*HashHeight, error) { + h, err := s.mdHashHeight(ctx, KeystoneIndexHashKey) + if err != nil { + if !errors.Is(err, database.ErrNotFound) { + return nil, err + } + // h = s.hemiGenesis // XXX disable this for now until we are + // sure we may or may not have to pass "genesis" around in the + // various functions. 
+ h = &HashHeight{ + Hash: *s.chainParams.GenesisHash, + Height: 0, + } + } + return h, nil } func (s *Server) findCommonParent(ctx context.Context, bhX, bhY *tbcd.BlockHeader) (*tbcd.BlockHeader, error) { @@ -478,13 +519,7 @@ func (s *Server) indexUtxosInBlocks(ctx context.Context, endHash *chainhash.Hash // Find start hash utxoHH, err := s.UtxoIndexHash(ctx) if err != nil { - if !errors.Is(err, database.ErrNotFound) { - return 0, last, fmt.Errorf("utxo index hash: %w", err) - } - utxoHH = &HashHeight{ - Hash: *s.chainParams.GenesisHash, - Height: 0, - } + return 0, last, fmt.Errorf("utxo index hash: %w", err) } utxosPercentage := 95 // flush cache at >95% capacity @@ -577,13 +612,7 @@ func (s *Server) unindexUtxosInBlocks(ctx context.Context, endHash *chainhash.Ha // Find start hash utxoHH, err := s.UtxoIndexHash(ctx) if err != nil { - if !errors.Is(err, database.ErrNotFound) { - return 0, last, fmt.Errorf("utxo index hash: %w", err) - } - utxoHH = &HashHeight{ - Hash: *s.chainParams.GenesisHash, - Height: 0, - } + return 0, last, fmt.Errorf("utxo index hash: %w", err) } utxosPercentage := 95 // flush cache at >95% capacity @@ -797,13 +826,7 @@ func (s *Server) UtxoIndexer(ctx context.Context, endHash *chainhash.Hash) error // Verify start point is not after the end point utxoHH, err := s.UtxoIndexHash(ctx) if err != nil { - if !errors.Is(err, database.ErrNotFound) { - return fmt.Errorf("utxo indexer: %w", err) - } - utxoHH = &HashHeight{ - Hash: *s.chainParams.GenesisHash, - Height: 0, - } + return fmt.Errorf("utxo index hash: %w", err) } // XXX make sure there is no gap between start and end or vice versa. 
@@ -868,13 +891,7 @@ func (s *Server) indexTxsInBlocks(ctx context.Context, endHash *chainhash.Hash, // Find start hash txHH, err := s.TxIndexHash(ctx) if err != nil { - if !errors.Is(err, database.ErrNotFound) { - return 0, last, fmt.Errorf("tx index hash: %w", err) - } - txHH = &HashHeight{ - Hash: *s.chainParams.GenesisHash, - Height: 0, - } + return 0, last, fmt.Errorf("tx index hash: %w", err) } txsPercentage := 95 // flush cache at >95% capacity @@ -959,13 +976,7 @@ func (s *Server) unindexTxsInBlocks(ctx context.Context, endHash *chainhash.Hash // Find start hash txHH, err := s.TxIndexHash(ctx) if err != nil { - if !errors.Is(err, database.ErrNotFound) { - return 0, last, fmt.Errorf("tx index hash: %w", err) - } - txHH = &HashHeight{ - Hash: *s.chainParams.GenesisHash, - Height: 0, - } + return 0, last, fmt.Errorf("tx index hash: %w", err) } txsPercentage := 95 // flush cache at >95% capacity @@ -1184,13 +1195,7 @@ func (s *Server) TxIndexer(ctx context.Context, endHash *chainhash.Hash) error { // Verify start point is not after the end point txHH, err := s.TxIndexHash(ctx) if err != nil { - if !errors.Is(err, database.ErrNotFound) { - return fmt.Errorf("tx indexer: %w", err) - } - txHH = &HashHeight{ - Hash: *s.chainParams.GenesisHash, - Height: 0, - } + return fmt.Errorf("tx index hash: %w", err) } // Make sure there is no gap between start and end or vice versa. 
@@ -1215,6 +1220,371 @@ func (s *Server) TxIndexer(ctx context.Context, endHash *chainhash.Hash) error { return fmt.Errorf("invalid direction: %v", direction) } +func processKeystones(blockHash *chainhash.Hash, txs []*btcutil.Tx, kssCache map[chainhash.Hash]tbcd.Keystone) error { + for _, tx := range txs { + if blockchain.IsCoinBase(tx) { + // Skip coinbase inputs + continue + } + + for _, txOut := range tx.MsgTx().TxOut { + aPoPTx, err := pop.ParseTransactionL2FromOpReturn(txOut.PkScript) + if err != nil { + // log.Tracef("error parsing tx l2: %s", err) + continue + } + if _, ok := kssCache[*aPoPTx.L2Keystone.Hash()]; ok { + // Multiple keystones may exist in block, only store first + continue + } + + abvKss := aPoPTx.L2Keystone.Serialize() + kssCache[*aPoPTx.L2Keystone.Hash()] = tbcd.Keystone{ + BlockHash: *blockHash, + AbbreviatedKeystone: abvKss, + } + } + } + return nil +} + +// indexKeystonesInBlocks indexes txs from the last processed block until the +// provided end hash, inclusive. It returns the number of blocks processed and +// the last hash it processed. +func (s *Server) indexKeystonesInBlocks(ctx context.Context, endHash *chainhash.Hash, kss map[chainhash.Hash]tbcd.Keystone) (int, *HashHeight, error) { + log.Tracef("indexKeystonesInBlocks") + defer log.Tracef("indexKeystonesInBlocks exit") + + // indicates if we have processed endHash and thus have hit the exit + // condition. 
+ var last *HashHeight + + // Find start hash + ksHH, err := s.KeystoneIndexHash(ctx) + if err != nil { + return 0, last, fmt.Errorf("keystone index hash: %w", err) + } + + kssPercentage := 95 // flush cache at >95% capacity + blocksProcessed := 0 + hh := ksHH + for { + log.Debugf("indexing keystones: %v", hh) + + hash := hh.Hash + bh, err := s.db.BlockHeaderByHash(ctx, &hash) + if err != nil { + return 0, last, fmt.Errorf("block header %v: %w", hash, err) + } + + // Index block + b, err := s.db.BlockByHash(ctx, &bh.Hash) + if err != nil { + return 0, last, fmt.Errorf("block by hash %v: %w", bh, err) + } + + err = processKeystones(b.Hash(), b.Transactions(), kss) + if err != nil { + return 0, last, fmt.Errorf("process keystones %v: %w", hh, err) + } + + blocksProcessed++ + + // Try not to overshoot the cache to prevent costly allocations + cp := len(kss) * 100 / s.cfg.MaxCachedKeystones + if bh.Height%10000 == 0 || cp > kssPercentage || blocksProcessed == 1 { + log.Infof("Tx indexer: %v tx cache %v%%", hh, cp) + } + if cp > kssPercentage { + // Set kssMax to the largest tx capacity seen + s.cfg.MaxCachedKeystones = max(len(kss), s.cfg.MaxCachedKeystones) + last = hh + // Flush + break + } + + // Exit if we processed the provided end hash + if endHash.IsEqual(&hash) { + last = hh + break + } + + // Move to next block + height := bh.Height + 1 + bhs, err := s.db.BlockHeadersByHeight(ctx, height) + if err != nil { + return 0, last, fmt.Errorf("block headers by height %v: %w", + height, err) + } + index, err := s.findPathFromHash(ctx, endHash, bhs) + if err != nil { + return 0, last, fmt.Errorf("could not determine canonical path %v: %w", + height, err) + } + // Verify it connects to parent + if !hash.IsEqual(bhs[index].ParentHash()) { + return 0, last, fmt.Errorf("%v does not connect to: %v", + bhs[index], hash) + } + hh.Hash = *bhs[index].BlockHash() + hh.Height = bhs[index].Height + } + + return blocksProcessed, last, nil +} + +// unindexKeystonesInBlocks unindexes 
keystones from the last processed block +// until the provided end hash, inclusive. It returns the number of blocks +// processed and the last hash it processed. +func (s *Server) unindexKeystonesInBlocks(ctx context.Context, endHash *chainhash.Hash, kss map[chainhash.Hash]tbcd.Keystone) (int, *HashHeight, error) { + log.Tracef("unindexKeystonesInBlocks") + defer log.Tracef("unindexKeystonesInBlocks exit") + + // indicates if we have processed endHash and thus have hit the exit + // condition. + var last *HashHeight + + // Find start hash + ksHH, err := s.KeystoneIndexHash(ctx) + if err != nil { + return 0, last, fmt.Errorf("keystone index hash: %w", err) + } + + kssPercentage := 95 // flush cache at >95% capacity + blocksProcessed := 0 + hh := ksHH + for { + log.Debugf("unindexing keystones: %v", hh) + + hash := hh.Hash + + // Exit if we processed the provided end hash + if endHash.IsEqual(&hash) { + last = hh + break + } + + bh, err := s.db.BlockHeaderByHash(ctx, &hash) + if err != nil { + return 0, last, fmt.Errorf("block header %v: %w", hash, err) + } + + // Index block + b, err := s.db.BlockByHash(ctx, &bh.Hash) + if err != nil { + return 0, last, fmt.Errorf("block by hash %v: %w", bh, err) + } + + err = processKeystones(b.Hash(), b.Transactions(), kss) + if err != nil { + return 0, last, fmt.Errorf("process keystones %v: %w", hh, err) + } + + blocksProcessed++ + + // Try not to overshoot the cache to prevent costly allocations + cp := len(kss) * 100 / s.cfg.MaxCachedKeystones + if bh.Height%10000 == 0 || cp > kssPercentage || blocksProcessed == 1 { + log.Infof("Keystone unindexer: %v keystone cache %v%%", hh, cp) + } + if cp > kssPercentage { + // Set kssMax to the largest keystone capacity seen + s.cfg.MaxCachedKeystones = max(len(kss), s.cfg.MaxCachedKeystones) + last = hh + // Flush + break + } + + // Move to previous block + height := bh.Height - 1 + pbh, err := s.db.BlockHeaderByHash(ctx, bh.ParentHash()) + if err != nil { + return 0, last, 
fmt.Errorf("block headers by height %v: %w", + height, err) + } + hh.Hash = *pbh.BlockHash() + hh.Height = pbh.Height + } + + return blocksProcessed, last, nil +} + +func (s *Server) KeystoneIndexerUnwind(ctx context.Context, startBH, endBH *tbcd.BlockHeader) error { + log.Tracef("KeystoneIndexerUnwind") + defer log.Tracef("KeystoneIndexerUnwind exit") + + // XXX dedup with KeystoneIndexerWind; it's basically the same code but with the direction, start anf endhas flipped + + s.mtx.Lock() + if !s.indexing { + // XXX this prob should be an error but pusnish bad callers for now + s.mtx.Unlock() + panic("KeystoneIndexerUnwind indexing not true") + } + s.mtx.Unlock() + // Allocate here so that we don't waste space when not indexing. + kss := make(map[chainhash.Hash]tbcd.Keystone, s.cfg.MaxCachedKeystones) + defer clear(kss) + + log.Infof("Start unwinding keystones at hash %v height %v", startBH, startBH.Height) + log.Infof("End unwinding keystones at hash %v height %v", endBH, endBH.Height) + endHash := endBH.BlockHash() + for { + start := time.Now() + blocksProcessed, last, err := s.unindexKeystonesInBlocks(ctx, endHash, kss) + if err != nil { + return fmt.Errorf("unindex keystones in blocks: %w", err) + } + if blocksProcessed == 0 { + return nil + } + kssCached := len(kss) + log.Infof("Keystone unwinder blocks processed %v in %v transactions cached %v cache unused %v avg tx/blk %v", + blocksProcessed, time.Since(start), kssCached, + s.cfg.MaxCachedKeystones-kssCached, kssCached/blocksProcessed) + + // Flush to disk + start = time.Now() + if err = s.db.BlockKeystoneUpdate(ctx, -1, kss); err != nil { + return fmt.Errorf("block keystone update: %w", err) + } + // leveldb does all kinds of allocations, force GC to lower + // memory pressure. 
+ logMemStats() + runtime.GC() + + log.Infof("Flushing unwind keystones complete %v took %v", + kssCached, time.Since(start)) + + // Record height in metadata + err = s.db.MetadataPut(ctx, KeystoneIndexHashKey, last.Hash[:]) + if err != nil { + return fmt.Errorf("metadata keystone hash: %w", err) + } + + if endHash.IsEqual(&last.Hash) { + break + } + + } + return nil +} + +func (s *Server) KeystoneIndexerWind(ctx context.Context, startBH, endBH *tbcd.BlockHeader) error { + log.Tracef("KeystoneIndexerWind") + defer log.Tracef("KeystoneIndexerWind exit") + + s.mtx.Lock() + if !s.indexing { + // XXX this prob should be an error but pusnish bad callers for now + s.mtx.Unlock() + panic("KeystoneIndexerWind not true") + } + s.mtx.Unlock() + + // Allocate here so that we don't waste space when not indexing. + kss := make(map[chainhash.Hash]tbcd.Keystone, s.cfg.MaxCachedKeystones) + defer clear(kss) + + log.Infof("Start indexing keystones at hash %v height %v", startBH, startBH.Height) + log.Infof("End indexing keystones at hash %v height %v", endBH, endBH.Height) + endHash := endBH.BlockHash() + for { + start := time.Now() + blocksProcessed, last, err := s.indexKeystonesInBlocks(ctx, endHash, kss) + if err != nil { + return fmt.Errorf("index blocks: %w", err) + } + if blocksProcessed == 0 { + return nil + } + kssCached := len(kss) + log.Infof("Keystone indexer blocks processed %v in %v transactions cached %v cache unused %v avg keystones/blk %v", + blocksProcessed, time.Since(start), kssCached, + s.cfg.MaxCachedKeystones-kssCached, kssCached/blocksProcessed) + + // Flush to disk + start = time.Now() + if err = s.db.BlockKeystoneUpdate(ctx, 1, kss); err != nil { + return fmt.Errorf("block hemi update: %w", err) + } + // leveldb does all kinds of allocations, force GC to lower + // memory pressure. 
+ logMemStats() + runtime.GC() + + log.Infof("Flushing keysyones complete %v took %v", + kssCached, time.Since(start)) + + // Record height in metadata + err = s.db.MetadataPut(ctx, KeystoneIndexHashKey, last.Hash[:]) + if err != nil { + return fmt.Errorf("metadata keystone hash: %w", err) + } + + if endHash.IsEqual(&last.Hash) { + break + } + + } + + return nil +} + +func (s *Server) KeystoneIndexer(ctx context.Context, endHash *chainhash.Hash) error { + log.Tracef("KeystoneIndexer") + defer log.Tracef("KeystoneIndexer exit") + + // XXX this is basically duplicate from TxIndexIsLinear + + if !s.cfg.HemiIndex { + return errors.New("disabled") + } + s.mtx.Lock() + if !s.indexing { + // XXX this prob should be an error but pusnish bad callers for now + s.mtx.Unlock() + panic("KeystoneIndexer not true") + } + s.mtx.Unlock() + + // Verify exit condition hash + if endHash == nil { + return errors.New("must provide an end hash") + } + endBH, err := s.db.BlockHeaderByHash(ctx, endHash) + if err != nil { + return fmt.Errorf("blockheader end hash: %w", err) + } + + // Verify start point is not after the end point + keystoneHH, err := s.KeystoneIndexHash(ctx) + if err != nil { + return fmt.Errorf("keystone index hash: %w", err) + } + + // Make sure there is no gap between start and end or vice versa. + startBH, err := s.db.BlockHeaderByHash(ctx, &keystoneHH.Hash) + if err != nil { + return fmt.Errorf("blockheader keystone hash: %w", err) + } + direction, err := s.KeystoneIndexIsLinear(ctx, endHash) + if err != nil { + return fmt.Errorf("keystone index is linear: %w", err) + } + switch direction { + case 1: + return s.KeystoneIndexerWind(ctx, startBH, endBH) + case -1: + return s.KeystoneIndexerUnwind(ctx, startBH, endBH) + case 0: + // Because we call KeystoneIndexIsLinear we know it's the same block. 
+ return nil + } + + return fmt.Errorf("invalid direction: %v", direction) +} + func (s *Server) UtxoIndexIsLinear(ctx context.Context, endHash *chainhash.Hash) (int, error) { log.Tracef("UtxoIndexIsLinear") defer log.Tracef("UtxoIndexIsLinear exit") @@ -1222,13 +1592,7 @@ func (s *Server) UtxoIndexIsLinear(ctx context.Context, endHash *chainhash.Hash) // Verify start point is not after the end point utxoHH, err := s.UtxoIndexHash(ctx) if err != nil { - if !errors.Is(err, database.ErrNotFound) { - return 0, fmt.Errorf("tx indexer: %w", err) - } - utxoHH = &HashHeight{ - Hash: *s.chainParams.GenesisHash, - Height: 0, - } + return 0, fmt.Errorf("utxo index hash: %w", err) } return s.IndexIsLinear(ctx, &utxoHH.Hash, endHash) @@ -1241,18 +1605,25 @@ func (s *Server) TxIndexIsLinear(ctx context.Context, endHash *chainhash.Hash) ( // Verify start point is not after the end point txHH, err := s.TxIndexHash(ctx) if err != nil { - if !errors.Is(err, database.ErrNotFound) { - return 0, fmt.Errorf("tx indexer: %w", err) - } - txHH = &HashHeight{ - Hash: *s.chainParams.GenesisHash, - Height: 0, - } + return 0, fmt.Errorf("tx index hash: %w", err) } return s.IndexIsLinear(ctx, &txHH.Hash, endHash) } +func (s *Server) KeystoneIndexIsLinear(ctx context.Context, endHash *chainhash.Hash) (int, error) { + log.Tracef("KeystoneIndexIsLinear") + defer log.Tracef("KeystoneIndexIsLinear exit") + + // Verify start point is not after the end point + keystoneHH, err := s.KeystoneIndexHash(ctx) + if err != nil { + return 0, fmt.Errorf("keystone index hash: %w", err) + } + + return s.IndexIsLinear(ctx, &keystoneHH.Hash, endHash) +} + func (s *Server) IndexIsLinear(ctx context.Context, startHash, endHash *chainhash.Hash) (int, error) { log.Tracef("IndexIsLinear") defer log.Tracef("IndexIsLinear exit") @@ -1356,6 +1727,14 @@ func (s *Server) SyncIndexersToHash(ctx context.Context, hash *chainhash.Hash) e if err := s.TxIndexer(ctx, hash); err != nil { return fmt.Errorf("tx indexer: %w", err) } + 
+ // Hemi indexes + if s.cfg.HemiIndex { + if err := s.KeystoneIndexer(ctx, hash); err != nil { + return fmt.Errorf("keystone indexer: %w", err) + } + } + log.Debugf("Done syncing to: %v", hash) bh, err := s.db.BlockHeaderByHash(ctx, hash) @@ -1368,27 +1747,14 @@ func (s *Server) SyncIndexersToHash(ctx context.Context, hash *chainhash.Hash) e return nil } -func (s *Server) syncIndexersToBest(ctx context.Context) error { - log.Tracef("syncIndexersToBest") - defer log.Tracef("syncIndexersToBest exit") +func (s *Server) utxoIndexersToBest(ctx context.Context, bhb *tbcd.BlockHeader) error { + log.Tracef("utxoIndexersToBest") + defer log.Tracef("utxoIndexersToBest exit") - bhb, err := s.db.BlockHeaderBest(ctx) - if err != nil { - return err - } - - log.Debugf("Sync indexers to best: %v @ %v", bhb, bhb.Height) - - // Index Utxo + // Index Utxos to best utxoHH, err := s.UtxoIndexHash(ctx) if err != nil { - if !errors.Is(err, database.ErrNotFound) { - return fmt.Errorf("utxo index hash: %w", err) - } - utxoHH = &HashHeight{ - Hash: *s.chainParams.GenesisHash, - Height: 0, - } + return fmt.Errorf("utxo index hash: %w", err) } utxoBH, err := s.db.BlockHeaderByHash(ctx, &utxoHH.Hash) if err != nil { @@ -1411,28 +1777,25 @@ func (s *Server) syncIndexersToBest(ctx context.Context) error { return fmt.Errorf("utxo indexer: %w", err) } + return nil +} + +func (s *Server) txIndexersToBest(ctx context.Context, bhb *tbcd.BlockHeader) error { + log.Tracef("txIndexersToBest") + defer log.Tracef("txIndexersToBest exit") + // Index Tx txHH, err := s.TxIndexHash(ctx) if err != nil { - if !errors.Is(err, database.ErrNotFound) { - return fmt.Errorf("tx index hash: %w", err) - } - txHH = &HashHeight{ - Hash: *s.chainParams.GenesisHash, - Height: 0, - } + return fmt.Errorf("tx index hash: %w", err) } txBH, err := s.db.BlockHeaderByHash(ctx, &txHH.Hash) if err != nil { return err } - // We can short circuit looking up canonical parent if txBH == utxoBH. 
- ptxHash := &txBH.Hash - if !ptxHash.IsEqual(&utxoBH.Hash) { - cp, err = s.findCanonicalParent(ctx, txBH) - if err != nil { - return err - } + cp, err := s.findCanonicalParent(ctx, txBH) + if err != nil { + return err } if !cp.Hash.IsEqual(&txBH.Hash) { log.Infof("Syncing tx index to: %v from: %v via: %v", @@ -1447,11 +1810,73 @@ func (s *Server) syncIndexersToBest(ctx context.Context) error { return fmt.Errorf("tx indexer: %w", err) } + return nil +} + +func (s *Server) keystoneIndexersToBest(ctx context.Context, bhb *tbcd.BlockHeader) error { + log.Tracef("keystoneIndexersToBest") + defer log.Tracef("keystoneIndexersToBest exit") + + // Index keystones to best + keystoneHH, err := s.KeystoneIndexHash(ctx) + if err != nil { + return fmt.Errorf("keystone index hash: %w", err) + } + keystoneBH, err := s.db.BlockHeaderByHash(ctx, &keystoneHH.Hash) + if err != nil { + return err + } + cp, err := s.findCanonicalParent(ctx, keystoneBH) + if err != nil { + return err + } + if !cp.Hash.IsEqual(&keystoneBH.Hash) { + log.Infof("Syncing keystone index to: %v from: %v via: %v", + bhb.HH(), keystoneBH.HH(), cp.HH()) + // keystoneBH is NOT on canonical chain, unwind first + if err := s.KeystoneIndexer(ctx, &cp.Hash); err != nil { + return fmt.Errorf("keystone indexer unwind: %w", err) + } + } + // Index keystones to best block + if err := s.KeystoneIndexer(ctx, &bhb.Hash); err != nil { + return fmt.Errorf("keystone indexer: %w", err) + } + + return nil +} + +func (s *Server) syncIndexersToBest(ctx context.Context) error { + log.Tracef("syncIndexersToBest") + defer log.Tracef("syncIndexersToBest exit") + + bhb, err := s.db.BlockHeaderBest(ctx) + if err != nil { + return err + } + + log.Debugf("Sync indexers to best: %v @ %v", bhb, bhb.Height) + + if err := s.utxoIndexersToBest(ctx, bhb); err != nil { + return err + } + + if err := s.txIndexersToBest(ctx, bhb); err != nil { + return err + } + + if s.cfg.HemiIndex { + if err := s.keystoneIndexersToBest(ctx, bhb); err != nil { + 
return err + } + } + + // Print nice message to indicate completion. bh, err := s.db.BlockHeaderByHash(ctx, &bhb.Hash) if err != nil { log.Errorf("block header by hash: %v", err) } else { - log.Infof("Syncing complete at: %v", bh.HH()) + log.Infof("Syncing complete at: %v", bh.HH()) // XXX this prints too often } return nil diff --git a/service/tbc/rpc.go b/service/tbc/rpc.go index d24a419f4..037b83e32 100644 --- a/service/tbc/rpc.go +++ b/service/tbc/rpc.go @@ -1,4 +1,4 @@ -// Copyright (c) 2024 Hemi Labs, Inc. +// Copyright (c) 2024-2025 Hemi Labs, Inc. // Use of this source code is governed by the MIT License, // which can be found in the LICENSE file. @@ -27,6 +27,7 @@ import ( "github.com/hemilabs/heminetwork/api/tbcapi" "github.com/hemilabs/heminetwork/database" "github.com/hemilabs/heminetwork/database/tbcd/level" + "github.com/hemilabs/heminetwork/hemi" ) func tx2Bytes(tx *wire.MsgTx) ([]byte, error) { @@ -190,6 +191,13 @@ func (s *Server) handleWebsocketRead(ctx context.Context, ws *tbcWs) { return s.handleBlockDownloadAsyncRawRequest(ctx, req) } + go s.handleRequest(ctx, ws, id, cmd, handler) + case tbcapi.CmdBlockKeystoneByL2KeystoneAbrevHashRequest: + handler := func(ctx context.Context) (any, error) { + req := payload.(*tbcapi.BlockKeystoneByL2KeystoneAbrevHashRequest) + return s.handleBlockKeystoneByL2KeystoneAbrevHashRequest(ctx, req) + } + go s.handleRequest(ctx, ws, id, cmd, handler) default: err = fmt.Errorf("unknown command: %v", cmd) @@ -550,8 +558,11 @@ func (s *Server) handleTxBroadcastRequest(ctx context.Context, req *tbcapi.TxBro txid, err := s.TxBroadcast(ctx, req.Tx, req.Force) if err != nil { - if errors.Is(err, ErrTxAlreadyBroadcast) || errors.Is(err, ErrTxBroadcastNoPeers) { - return &tbcapi.TxBroadcastResponse{Error: protocol.RequestError(err)}, err + if errors.Is(err, ErrTxAlreadyBroadcast) || + errors.Is(err, ErrTxBroadcastNoPeers) { + return &tbcapi.TxBroadcastResponse{ + Error: protocol.RequestError(err), + }, err } e := 
protocol.NewInternalError(err) return &tbcapi.TxBroadcastResponse{Error: e.ProtocolError()}, e @@ -573,8 +584,11 @@ func (s *Server) handleTxBroadcastRawRequest(ctx context.Context, req *tbcapi.Tx } txid, err := s.TxBroadcast(ctx, tx, req.Force) if err != nil { - if errors.Is(err, ErrTxAlreadyBroadcast) || errors.Is(err, ErrTxBroadcastNoPeers) { - return &tbcapi.TxBroadcastResponse{Error: protocol.RequestError(err)}, err + if errors.Is(err, ErrTxAlreadyBroadcast) || + errors.Is(err, ErrTxBroadcastNoPeers) { + return &tbcapi.TxBroadcastResponse{ + Error: protocol.RequestError(err), + }, err } e := protocol.NewInternalError(err) return &tbcapi.TxBroadcastResponse{Error: e.ProtocolError()}, e @@ -626,6 +640,33 @@ func (s *Server) handleBlockInsertRawRequest(ctx context.Context, req *tbcapi.Bl return &tbcapi.BlockInsertRawResponse{BlockHash: &hash}, nil } +func (s *Server) handleBlockKeystoneByL2KeystoneAbrevHashRequest(ctx context.Context, req *tbcapi.BlockKeystoneByL2KeystoneAbrevHashRequest) (any, error) { + log.Tracef("handleBlockKeystoneByL2KeystoneAbrevHashRequest") + defer log.Tracef("handleBlockKeystoneByL2KeystoneAbrevHashRequest exit") + + if req.L2KeystoneAbrevHash == nil { + return &tbcapi.BlockKeystoneByL2KeystoneAbrevHashResponse{ + Error: protocol.RequestErrorf("invalid nil abrev hash"), + }, nil + } + ks, err := s.db.BlockKeystoneByL2KeystoneAbrevHash(ctx, *req.L2KeystoneAbrevHash) + if err != nil { + if errors.Is(err, database.ErrNotFound) { + return &tbcapi.BlockKeystoneByL2KeystoneAbrevHashResponse{ + Error: protocol.RequestErrorf("could not find l2 keystone"), + }, nil + } + e := protocol.NewInternalError(err) + return &tbcapi.BlockKeystoneByL2KeystoneAbrevHashResponse{ + Error: e.ProtocolError(), + }, e + } + return &tbcapi.BlockKeystoneByL2KeystoneAbrevHashResponse{ + L2KeystoneAbrev: hemi.L2KeystoneAbrevDeserialize(hemi.RawAbreviatedL2Keystone(ks.AbbreviatedKeystone)), + BtcBlockHash: &ks.BlockHash, + }, nil +} + // 
handleBlockDownloadAsyncRequest handles tbcapi.BlockDownloadAsyncRequest. func (s *Server) handleBlockDownloadAsyncRequest(ctx context.Context, req *tbcapi.BlockDownloadAsyncRequest) (any, error) { log.Tracef("handleBlockAsyncDownloadRequest") diff --git a/service/tbc/rpc_test.go b/service/tbc/rpc_test.go index ab387b47d..ae9c78c13 100644 --- a/service/tbc/rpc_test.go +++ b/service/tbc/rpc_test.go @@ -15,10 +15,17 @@ import ( "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" + btcchaincfg "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/chaincfg/chainhash" + btcchainhash "github.com/btcsuite/btcd/chaincfg/chainhash" + btctxscript "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" + btcwire "github.com/btcsuite/btcd/wire" "github.com/coder/websocket" "github.com/coder/websocket/wsjson" "github.com/davecgh/go-spew/spew" + dcrsecp256k1 "github.com/decred/dcrd/dcrec/secp256k1/v4" + dcrecdsa "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa" "github.com/docker/go-connections/nat" "github.com/go-test/deep" "github.com/testcontainers/testcontainers-go" @@ -27,6 +34,9 @@ import ( "github.com/hemilabs/heminetwork/api/protocol" "github.com/hemilabs/heminetwork/api/tbcapi" "github.com/hemilabs/heminetwork/bitcoin" + "github.com/hemilabs/heminetwork/database/tbcd" + "github.com/hemilabs/heminetwork/hemi" + "github.com/hemilabs/heminetwork/hemi/pop" ) func bytes2Tx(b []byte) (*wire.MsgTx, error) { @@ -1489,6 +1499,146 @@ func TestTxByIdNotFound(t *testing.T) { } } +func TestL2BlockByAbrevHash(t *testing.T) { + l2Keystone := hemi.L2Keystone{ + Version: 1, + L1BlockNumber: 5, + L2BlockNumber: 44, + ParentEPHash: fillOutBytes("parentephash", 32), + PrevKeystoneEPHash: fillOutBytes("prevkeystoneephash", 32), + StateRoot: fillOutBytes("stateroot", 32), + EPHash: fillOutBytes("ephash", 32), + } + + popTx := pop.TransactionL2{ + L2Keystone: hemi.L2KeystoneAbbreviate(l2Keystone), + } + + popTxOpReturn, err := popTx.EncodeToOpReturn() 
+ if err != nil { + t.Fatal(err) + } + + t.Log(spew.Sdump(popTxOpReturn)) + + btcBlockHash := btcchainhash.Hash(fillOutBytes("blockhash", 32)) + + invalidL2KeystoneAbrevHash := chainhash.Hash(fillOutBytes("123", 32)) + + type testTableItem struct { + name string + l2KeystoneAbrevHash *chainhash.Hash + expectedError *protocol.Error + expectedL2KeystoneAbrev *hemi.L2KeystoneAbrev + expectedBTCBlockHash *chainhash.Hash + } + + testTable := []testTableItem{ + { + name: "nilL2KeystoneAbrevHash", + expectedError: protocol.RequestErrorf("invalid nil abrev hash"), + }, + { + name: "invalidL2KeystoneAbrevHash", + l2KeystoneAbrevHash: &invalidL2KeystoneAbrevHash, + expectedError: protocol.RequestErrorf("could not find l2 keystone"), + }, + { + name: "validL2KeystoneAbrevHash", + l2KeystoneAbrevHash: hemi.L2KeystoneAbbreviate(l2Keystone).Hash(), + expectedL2KeystoneAbrev: hemi.L2KeystoneAbbreviate(l2Keystone), + expectedBTCBlockHash: &btcBlockHash, + }, + } + + for _, tti := range testTable { + t.Run(tti.name, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + port, err := nat.NewPort("tcp", "9999") + if err != nil { + t.Fatal(err) + } + s, tbcUrl := createTbcServer(ctx, t, port) + + c, _, err := websocket.Dial(ctx, tbcUrl, nil) + if err != nil { + t.Fatal(err) + } + defer c.CloseNow() + + assertPing(ctx, t, c, tbcapi.CmdPingRequest) + + tws := &tbcWs{ + conn: protocol.NewWSConn(c), + } + + var response tbcapi.BlockKeystoneByL2KeystoneAbrevHashResponse + select { + case <-time.After(1 * time.Second): + case <-ctx.Done(): + t.Fatal(ctx.Err()) + } + + // 1 + btx := createBtcTx(t, 199, &l2Keystone, []byte{1, 2, 3}) + + aPoPTx, err := pop.ParseTransactionL2FromOpReturn(btx) + if err != nil { + t.Fatal(err) + } + + abrvKss := aPoPTx.L2Keystone.Serialize() + + kssCache := make(map[chainhash.Hash]tbcd.Keystone) + + kssCache[*hemi.L2KeystoneAbbreviate(l2Keystone).Hash()] = tbcd.Keystone{ + BlockHash: btcBlockHash, + 
AbbreviatedKeystone: abrvKss, + } + + if err := s.db.BlockKeystoneUpdate(ctx, 1, kssCache); err != nil { + t.Fatal(err) + } + + if err := tbcapi.Write(ctx, tws.conn, "someid", tbcapi.BlockKeystoneByL2KeystoneAbrevHashRequest{ + L2KeystoneAbrevHash: tti.l2KeystoneAbrevHash, + }); err != nil { + t.Fatal(err) + } + + var v protocol.Message + if err := wsjson.Read(ctx, c, &v); err != nil { + t.Fatal(err) + } + + if v.Header.Command != tbcapi.CmdBlockKeystoneByL2KeystoneAbrevHashResponse { + t.Fatalf("received unexpected command: %s", v.Header.Command) + } + + if err := json.Unmarshal(v.Payload, &response); err != nil { + t.Fatal(err) + } + + if diff := deep.Equal(response.Error, tti.expectedError); len(diff) > 0 { + t.Fatalf("unexpected error diff: %s", diff) + } + + if response.L2KeystoneAbrev != nil { + t.Logf("%s\n\n%s", spew.Sdump(response.L2KeystoneAbrev.Serialize()), spew.Sdump(tti.expectedL2KeystoneAbrev.Serialize())) + } + + if diff := deep.Equal(response.BtcBlockHash, tti.expectedBTCBlockHash); len(diff) > 0 { + t.Fatalf("unexpected retrieved block hash diff: %s", diff) + } + + if diff := deep.Equal(response.L2KeystoneAbrev, tti.expectedL2KeystoneAbrev); len(diff) > 0 { + t.Fatalf("unexpected retrieved keystone diff: %s", diff) + } + }) + } +} + func assertPing(ctx context.Context, t *testing.T, c *websocket.Conn, cmd protocol.Command) { var v protocol.Message err := wsjson.Read(ctx, c, &v) @@ -1513,3 +1663,62 @@ func indexAll(ctx context.Context, t *testing.T, tbcServer *Server) { t.Fatal(err) } } + +func fillOutBytes(prefix string, size int) []byte { + result := []byte(prefix) + for len(result) < size { + result = append(result, '_') + } + return result +} + +func createBtcTx(t *testing.T, btcHeight uint64, l2Keystone *hemi.L2Keystone, minerPrivateKeyBytes []byte) []byte { + btx := &btcwire.MsgTx{ + Version: 2, + LockTime: uint32(btcHeight), + } + + popTx := pop.TransactionL2{ + L2Keystone: hemi.L2KeystoneAbbreviate(*l2Keystone), + } + + popTxOpReturn, err 
:= popTx.EncodeToOpReturn() + if err != nil { + t.Fatal(err) + } + + privateKey := dcrsecp256k1.PrivKeyFromBytes(minerPrivateKeyBytes) + publicKey := privateKey.PubKey() + pubKeyBytes := publicKey.SerializeCompressed() + btcAddress, err := btcutil.NewAddressPubKey(pubKeyBytes, &btcchaincfg.TestNet3Params) + if err != nil { + t.Fatal(err) + } + + payToScript, err := btctxscript.PayToAddrScript(btcAddress.AddressPubKeyHash()) + if err != nil { + t.Fatal(err) + } + + if len(payToScript) != 25 { + t.Fatalf("incorrect length for pay to public key script (%d != 25)", len(payToScript)) + } + + outPoint := btcwire.OutPoint{Hash: btcchainhash.Hash(fillOutBytes("hash", 32)), Index: 0} + btx.TxIn = []*btcwire.TxIn{btcwire.NewTxIn(&outPoint, payToScript, nil)} + + changeAmount := int64(100) + btx.TxOut = []*btcwire.TxOut{btcwire.NewTxOut(changeAmount, payToScript)} + + btx.TxOut = append(btx.TxOut, btcwire.NewTxOut(0, popTxOpReturn)) + + sig := dcrecdsa.Sign(privateKey, []byte{}) + sigBytes := append(sig.Serialize(), byte(btctxscript.SigHashAll)) + sigScript, err := btctxscript.NewScriptBuilder().AddData(sigBytes).AddData(pubKeyBytes).Script() + if err != nil { + t.Fatal(err) + } + btx.TxIn[0].SignatureScript = sigScript + + return btx.TxOut[1].PkScript +} diff --git a/service/tbc/tbc.go b/service/tbc/tbc.go index 6368c024e..3610344e5 100644 --- a/service/tbc/tbc.go +++ b/service/tbc/tbc.go @@ -51,6 +51,8 @@ const ( minPeersRequired = 64 // minimum number of peers in good map before cache is purged defaultPendingBlocks = 128 // 128 * ~4MB max memory use + defaultMaxCachedKeystones = 1e5 // number of cached keystones prior to flush + defaultMaxCachedTxs = 1e6 // dual purpose cache, max key 69, max value 36 networkLocalnet = "localnet" // XXX this needs to be rethought @@ -71,6 +73,19 @@ var ( // upstreamStateIdKey is used for storing upstream state IDs // representing a unique state of an upstream system driving TBC state/ upstreamStateIdKey = []byte("upstreamstateid") + + 
mainnetHemiGenesis = &HashHeight{ + Hash: *chaincfg.MainNetParams.GenesisHash, // XXX fixme + Height: 0, + } + testnet3HemiGenesis = &HashHeight{ + Hash: s2h("000000000001323071f38f21ea5aae529ece491eadaccce506a59bcc2d968917"), + Height: 2815000, + } + localnetHemiGenesis = &HashHeight{ + Hash: *chaincfg.RegressionNetParams.GenesisHash, // XXX fixme + Height: 0, + } ) func init() { @@ -84,9 +99,11 @@ type Config struct { BlockCacheSize string BlockheaderCacheSize string BlockSanity bool + HemiIndex bool LevelDBHome string ListenAddress string LogLevel string + MaxCachedKeystones int MaxCachedTxs int MempoolEnabled bool Network string @@ -111,6 +128,7 @@ func NewDefaultConfig() *Config { BlockCacheSize: "1gb", BlockheaderCacheSize: "128mb", LogLevel: logLevel, + MaxCachedKeystones: defaultMaxCachedKeystones, MaxCachedTxs: defaultMaxCachedTxs, MempoolEnabled: false, // XXX default to false until it is fixed PeersWanted: defaultPeersWanted, @@ -145,6 +163,7 @@ type Server struct { chainParams *chaincfg.Params timeSource blockchain.MedianTimeSource checkpoints map[chainhash.Hash]uint64 + hemiGenesis *HashHeight pm *PeerManager blocks *ttl.TTL // outstanding block downloads [hash]when/where @@ -239,16 +258,19 @@ func NewServer(cfg *Config) (*Server, error) { s.wireNet = wire.MainNet s.chainParams = &chaincfg.MainNetParams s.checkpoints = mainnetCheckpoints + s.hemiGenesis = mainnetHemiGenesis case "testnet3": s.wireNet = wire.TestNet3 s.chainParams = &chaincfg.TestNet3Params s.checkpoints = testnet3Checkpoints + s.hemiGenesis = testnet3HemiGenesis case networkLocalnet: s.wireNet = wire.TestNet s.chainParams = &chaincfg.RegressionNetParams s.checkpoints = make(map[chainhash.Hash]uint64) + s.hemiGenesis = localnetHemiGenesis wanted = 1 default: @@ -2428,13 +2450,13 @@ func (s *Server) Run(pctx context.Context) error { log.Infof("Genesis: %v", s.chainParams.GenesisHash) // XXX make debug log.Infof("Starting block headers sync at %v height: %v time %v", bhb, bhb.Height, 
bhb.Timestamp()) - utxoHH, err := s.UtxoIndexHash(ctx) - if err == nil { - log.Infof("Utxo index %v", utxoHH) - } - txHH, err := s.TxIndexHash(ctx) - if err == nil { - log.Infof("Tx index %v", txHH) + utxoHH, _ := s.UtxoIndexHash(ctx) + log.Infof("Utxo index %v", utxoHH) + txHH, _ := s.TxIndexHash(ctx) + log.Infof("Tx index %v", txHH) + if s.cfg.HemiIndex { + hemiHH, _ := s.KeystoneIndexHash(ctx) + log.Infof("Keystone index %v", hemiHH) } // HTTP server @@ -2635,10 +2657,10 @@ func (s *Server) ExternalHeaderSetup(ctx context.Context, upstreamStateId []byte } gh := genesis.BlockHash() if !bytes.Equal(gb[0].Hash[:], gh[:]) { - return fmt.Errorf("effective genesis block hash mismatch, db has %x but genesis should be %x", gb[0].Hash, gh) + return fmt.Errorf("effective genesis block hash mismatch, db has %v but genesis should be %v", gb[0].Hash, gh) } } - log.Infof("TBC set up in External Header Mode, effectiveGenesis=%x, tip=%x", genesis.BlockHash(), bhb.Hash) + log.Infof("TBC set up in External Header Mode, effectiveGenesis=%v, tip=%v", genesis.BlockHash(), bhb.Hash) return nil } diff --git a/service/tbc/tbcfork_test.go b/service/tbc/tbcfork_test.go index 246370420..f3353c1e6 100644 --- a/service/tbc/tbcfork_test.go +++ b/service/tbc/tbcfork_test.go @@ -21,13 +21,22 @@ import ( "github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" + btcchaincfg "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/txscript" + btctxscript "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" + btcwire "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" + "github.com/decred/dcrd/dcrec/secp256k1/v4" + dcrsecp256k1 "github.com/decred/dcrd/dcrec/secp256k1/v4" + "github.com/go-test/deep" "github.com/juju/loggo" + "github.com/hemilabs/heminetwork/bitcoin" "github.com/hemilabs/heminetwork/database/tbcd" + "github.com/hemilabs/heminetwork/hemi" + 
"github.com/hemilabs/heminetwork/hemi/pop" "github.com/hemilabs/heminetwork/service/tbc/peer/rawpeer" ) @@ -35,7 +44,8 @@ type block struct { name string b *btcutil.Block - txs map[tbcd.TxKey]*tbcd.TxValue // Parsed Txs in cache format + txs map[tbcd.TxKey]*tbcd.TxValue // Parsed Txs in cache format + kss map[chainhash.Hash]tbcd.Keystone // Keystones passed in txs } func newBlock(params *chaincfg.Params, name string, b *btcutil.Block) *block { @@ -43,6 +53,7 @@ func newBlock(params *chaincfg.Params, name string, b *btcutil.Block) *block { name: name, b: b, txs: make(map[tbcd.TxKey]*tbcd.TxValue, 10), + kss: make(map[chainhash.Hash]tbcd.Keystone, 10), } err := processTxs(b.Hash(), b.Transactions(), blk.txs) if err != nil { @@ -145,7 +156,6 @@ func newFakeNode(t *testing.T, port string) (*btcNode, error) { } // lookupKey is used by the sign function. -// Must be called locked. func (b *btcNode) lookupKey(a btcutil.Address) (*btcec.PrivateKey, bool, error) { nk, ok := b.keys[a.String()] if !ok { @@ -154,7 +164,17 @@ func (b *btcNode) lookupKey(a btcutil.Address) (*btcec.PrivateKey, bool, error) return nk.key, true, nil } -// newKey creates and inserts a new key into thw lookup table. +func (b *btcNode) findKeyByName(name string) (*btcec.PrivateKey, error) { + for k, v := range b.keys { + b.t.Logf("findKeyByName %v == %v ---- %v", name, v.name, k) + if v.name == name { + return v.key, nil + } + } + return nil, errors.New("not found") +} + +// newKey creates and inserts a new key into the lookup table. 
// Must be called locked func (b *btcNode) newKey(name string) (*btcec.PrivateKey, *btcec.PublicKey, *btcutil.AddressPubKeyHash, error) { privateKey, err := btcec.NewPrivateKey() @@ -229,7 +249,7 @@ func (b *btcNode) newSignedTxFromTx(name string, inTx *btcutil.Tx, amount btcuti if err != nil { return nil, err } - b.t.Logf("rdeeem pkScript: %x", pkScript) + b.t.Logf("redeem pkScript (%v): %x", name, pkScript) b.t.Logf("redeem keys:") b.t.Logf(" private : %x", redeemPrivate.Serialize()) b.t.Logf(" public : %x", redeemPublic.SerializeCompressed()) @@ -270,12 +290,14 @@ func (b *btcNode) newSignedTxFromTx(name string, inTx *btcutil.Tx, amount btcuti change := value - left if change != 0 { payToAddress := as[0] + b.t.Logf("%v", spew.Sdump(as[0])) changeScript, err := txscript.PayToAddrScript(payToAddress) if err != nil { return nil, err } txOutChange := wire.NewTxOut(int64(change), changeScript) redeemTx.AddTxOut(txOutChange) + b.t.Logf("change address %v value %v", payToAddress, change) } break } @@ -490,7 +512,7 @@ func (b *btcNode) dumpChain(parent *chainhash.Hash) error { } } -func newBlockTemplate(params *chaincfg.Params, payToAddress btcutil.Address, nextBlockHeight int32, parent *chainhash.Hash, extraNonce uint64, mempool []*btcutil.Tx) (*btcutil.Block, error) { +func newBlockTemplate(t *testing.T, params *chaincfg.Params, payToAddress btcutil.Address, nextBlockHeight int32, parent *chainhash.Hash, extraNonce uint64, mempool []*btcutil.Tx) (*btcutil.Block, error) { coinbaseScript, err := standardCoinbaseScript(nextBlockHeight, extraNonce) if err != nil { return nil, err } @@ -500,7 +522,7 @@ func newBlockTemplate(params *chaincfg.Params, payToAddress btcutil.Address, nex if err != nil { return nil, err } - log.Infof("coinbase tx %v: %v", nextBlockHeight, coinbaseTx.Hash()) + t.Logf("coinbase tx %v: %v", nextBlockHeight, coinbaseTx.Hash()) reqDifficulty := uint32(0x1d00ffff) // XXX @@ -619,6 +641,111 @@ func mkGetScript(scripts map[string][]byte) txscript.ScriptDB 
{ }) } +var popPrivate *btcec.PrivateKey // xxx hardcode an actual miner key + +func executeTX(t *testing.T, dump bool, scriptPubKey []byte, tx *btcutil.Tx) error { + flags := txscript.ScriptBip16 | txscript.ScriptVerifyDERSignatures | + txscript.ScriptStrictMultiSig | txscript.ScriptDiscourageUpgradableNops + vm, err := txscript.NewEngine(scriptPubKey, tx.MsgTx(), 0, flags, nil, nil, -1, nil) + if err != nil { + return err + } + if dump { + t.Logf("=== executing tx %v", tx.Hash()) + } + for i := 0; ; i++ { + d, err := vm.DisasmPC() + if dump { + t.Logf("%v: %v", i, d) + } + done, err := vm.Step() + if err != nil { + return err + } + stack := vm.GetStack() + if dump { + t.Logf("%v: stack %v", i, spew.Sdump(stack)) + } + if done { + break + } + } + err = vm.CheckErrorCondition(true) + if err != nil { + return err + } + + if dump { + t.Logf("=== SUCCESS tx %v", tx.Hash()) + } + return nil +} + +func createPopTx(btcHeight uint64, l2Keystone *hemi.L2Keystone, minerPrivateKeyBytes []byte, recipient *secp256k1.PublicKey, inTx *btcutil.Tx) (*btcutil.Tx, error) { + btx := &btcwire.MsgTx{ + Version: 2, + LockTime: uint32(btcHeight), + } + + popTx := pop.TransactionL2{ + L2Keystone: hemi.L2KeystoneAbbreviate(*l2Keystone), + } + + popTxOpReturn, err := popTx.EncodeToOpReturn() + if err != nil { + return nil, err + } + + privateKey := dcrsecp256k1.PrivKeyFromBytes(minerPrivateKeyBytes) + publicKey := privateKey.PubKey() // just send it back to the miner + var btcAddress *btcutil.AddressPubKey + if recipient == nil { + pubKeyBytes := publicKey.SerializeCompressed() + btcAddress, err = btcutil.NewAddressPubKey(pubKeyBytes, &btcchaincfg.TestNet3Params) + if err != nil { + return nil, err + } + } else { + pubKeyBytes := recipient.SerializeCompressed() + btcAddress, err = btcutil.NewAddressPubKey(pubKeyBytes, &btcchaincfg.TestNet3Params) + if err != nil { + return nil, err + } + } + + payToScript, err := btctxscript.PayToAddrScript(btcAddress.AddressPubKeyHash()) + if err != nil { 
+ return nil, err + } + + if len(payToScript) != 25 { + return nil, fmt.Errorf("incorrect length for pay to public key script (%d != 25)", len(payToScript)) + } + + var ( + outPoint btcwire.OutPoint + changeAmount int64 + PkScript []byte + ) + + idx := uint32(1) + outPoint = *wire.NewOutPoint(inTx.Hash(), idx) // hardcoded index + changeAmount = inTx.MsgTx().TxOut[idx].Value // spend entire tx + PkScript = inTx.MsgTx().TxOut[idx].PkScript // Lift PkScript from utxo we are spending + + btx.TxIn = []*btcwire.TxIn{btcwire.NewTxIn(&outPoint, payToScript, nil)} + btx.TxOut = []*btcwire.TxOut{btcwire.NewTxOut(changeAmount, payToScript)} + btx.TxOut = append(btx.TxOut, btcwire.NewTxOut(0, popTxOpReturn)) + err = bitcoin.SignTx(btx, PkScript, privateKey, publicKey) + if err != nil { + return nil, fmt.Errorf("sign Bitcoin transaction: %w", err) + } + + tx := btcutil.NewTx(btx) + + return tx, nil +} + func (b *btcNode) mine(name string, from *chainhash.Hash, payToAddress btcutil.Address) (*block, error) { parent, ok := b.chain[from.String()] if !ok { @@ -630,6 +757,7 @@ func (b *btcNode) mine(name string, from *chainhash.Hash, payToAddress btcutil.A var mempool []*btcutil.Tx nextBlockHeight := parent.Height() + 1 + kssList := []hemi.L2Keystone{} switch nextBlockHeight { case 2: // spend block 1 coinbase @@ -640,9 +768,41 @@ func (b *btcNode) mine(name string, from *chainhash.Hash, payToAddress btcutil.A b.t.Logf("tx %v: %v spent from %v", nextBlockHeight, tx.Hash(), tx.MsgTx().TxIn[0].PreviousOutPoint) mempool = []*btcutil.Tx{tx} + + // Add keystone + l2Keystone := hemi.L2Keystone{ + Version: 1, + L1BlockNumber: 2, + L2BlockNumber: 44, + ParentEPHash: fillOutBytes("parentephash", 32), + PrevKeystoneEPHash: fillOutBytes("prevkeystoneephash", 32), + StateRoot: fillOutBytes("stateroot", 32), + EPHash: fillOutBytes("ephash", 32), + } + kssList = append(kssList, l2Keystone) + + signer, err := b.findKeyByName("miner") + if err != nil { + return nil, err + } + recipient, err := 
b.findKeyByName("pop") + if err != nil { + return nil, err + } + popTx, err := createPopTx(uint64(nextBlockHeight), &l2Keystone, signer.Serialize(), recipient.PubKey(), tx) + if err != nil { + return nil, err + } + + err = executeTX(b.t, true, tx.MsgTx().TxOut[1].PkScript, popTx) + if err != nil { + return nil, err + } + mempool = append(mempool, popTx) + // b.t.Logf("added popTx %v", popTx) case 3: // spend block 2 transaction 1 - tx, err := b.newSignedTxFromTx(name+":0", parent.TxByIndex(1), 1100000000) + tx, err := b.newSignedTxFromTx(name+":0", parent.TxByIndex(0), 1100000000) if err != nil { return nil, fmt.Errorf("new tx from tx: %w", err) } @@ -650,17 +810,41 @@ func (b *btcNode) mine(name string, from *chainhash.Hash, payToAddress btcutil.A tx.MsgTx().TxIn[0].PreviousOutPoint) mempool = []*btcutil.Tx{tx} - // spend above tx in same block - tx2, err := b.newSignedTxFromTx(name+":1", tx, 3000000000) + // Add keystone + l2Keystone := hemi.L2Keystone{ + Version: 1, + L1BlockNumber: 3, + L2BlockNumber: 55, + ParentEPHash: fillOutBytes("PARENTEPHASH", 32), + PrevKeystoneEPHash: fillOutBytes("PREVKEYSTONEEPHASH", 32), + StateRoot: fillOutBytes("STATEROOT", 32), + EPHash: fillOutBytes("EPHASH", 32), + } + kssList = append(kssList, l2Keystone) + + signer, err := b.findKeyByName("miner") if err != nil { - return nil, fmt.Errorf("new tx from tx: %w", err) + return nil, err + } + recipient, err := b.findKeyByName("pop") + if err != nil { + return nil, err + } + popTx, err := createPopTx(uint64(nextBlockHeight), &l2Keystone, + signer.Serialize(), recipient.PubKey(), tx) + if err != nil { + return nil, err } - b.t.Logf("tx %v: %v spent from %v", nextBlockHeight, tx2.Hash(), - tx2.MsgTx().TxIn[0].PreviousOutPoint) - mempool = []*btcutil.Tx{tx, tx2} + + err = executeTX(b.t, true, tx.MsgTx().TxOut[1].PkScript, popTx) + if err != nil { + return nil, err + } + mempool = append(mempool, popTx) + // b.t.Logf("added popTx %v", popTx) } - bt, err := newBlockTemplate(b.params, 
payToAddress, nextBlockHeight, + bt, err := newBlockTemplate(b.t, b.params, payToAddress, nextBlockHeight, parent.Hash(), extraNonce, mempool) if err != nil { return nil, fmt.Errorf("height %v: %w", nextBlockHeight, err) @@ -676,6 +860,15 @@ func (b *btcNode) mine(name string, from *chainhash.Hash, payToAddress btcutil.A b.height = blk.Height() } + // store values manually to check if keystone processing is correct + for _, l2Keystone := range kssList { + abrvKs := hemi.L2KeystoneAbbreviate(l2Keystone).Serialize() + blk.kss[*hemi.L2KeystoneAbbreviate(l2Keystone).Hash()] = tbcd.Keystone{ + BlockHash: *blk.Hash(), + AbbreviatedKeystone: abrvKs, + } + } + return blk, nil } @@ -716,7 +909,7 @@ func (b *btcNode) MineAndSend(ctx context.Context, name string, parent *chainhas if err != nil { return nil, err } - + b.t.Logf("mined %v: %v", blk.name, blk.MsgBlock().Header.BlockHash()) err = b.SendBlockheader(ctx, blk.MsgBlock().Header) if err != nil { return nil, err @@ -776,7 +969,55 @@ func newPKAddress(params *chaincfg.Params) (*btcec.PrivateKey, *btcec.PublicKey, return key, key.PubKey(), address, nil } -func mustHave(ctx context.Context, s *Server, blocks ...*block) error { +func checkKeystoneDB(ctx context.Context, t *testing.T, s *Server, kssAbrev ...chainhash.Hash) error { + for _, k := range kssAbrev { + _, err := s.db.BlockKeystoneByL2KeystoneAbrevHash(ctx, k) + if err != nil { + return err + } + t.Logf("%v: keystone in db", k) + } + return nil +} + +func mustHaveKss(ctx context.Context, t *testing.T, s *Server, blocks ...*block) ([]chainhash.Hash, error) { + processedKss := make([]chainhash.Hash, 0) + for _, b := range blocks { + txsInBlock := 0 + _, height, err := s.BlockHeaderByHash(ctx, b.Hash()) + if err != nil { + return nil, err + } + if height != uint64(b.Height()) { + return nil, fmt.Errorf("%v != %v", height, uint64(b.Height())) + } + + kssCache := make(map[chainhash.Hash]tbcd.Keystone, 10) + + err = processKeystones(b.b.Hash(), b.b.Transactions(), 
kssCache) + if err != nil { + panic(fmt.Errorf("processKeystones: %w", err)) + } + + for kkss, vkss := range kssCache { + if !bytes.Equal(b.Hash()[:], vkss.BlockHash[:]) { + return nil, fmt.Errorf("block mismatch %v", + vkss.BlockHash[:]) + } + if diff := deep.Equal(b.kss[kkss], vkss); len(diff) > 0 { + t.Fatalf("processKeystones: returned %v, expected %v", + spew.Sdump(b.kss[kkss]), spew.Sdump(vkss)) + } + txsInBlock++ + processedKss = append(processedKss, kkss) + } + t.Logf("got %v keystone txs in block %s", txsInBlock, b.name) + } + + return processedKss, nil +} + +func mustHave(ctx context.Context, t *testing.T, s *Server, blocks ...*block) error { for _, b := range blocks { _, height, err := s.BlockHeaderByHash(ctx, b.Hash()) if err != nil { @@ -786,7 +1027,7 @@ func mustHave(ctx context.Context, s *Server, blocks ...*block) error { return fmt.Errorf("%v != %v", height, uint64(b.Height())) } - log.Infof("mustHave: %v", b.Hash()) + t.Logf("mustHave: %v", b.Hash()) // Verify Txs cache for ktx, vtx := range b.txs { switch ktx[0] { @@ -809,10 +1050,10 @@ func mustHave(ctx context.Context, s *Server, blocks ...*block) error { break } if !found { - log.Infof("tx hash: %v", tx) - log.Infof("ktx: %v", spew.Sdump(ktx)) - log.Infof("vtx: %v", spew.Sdump(vtx)) - log.Infof(spew.Sdump(sis)) + t.Logf("tx hash: %v", tx) + t.Logf("ktx: %v", spew.Sdump(ktx)) + t.Logf("vtx: %v", spew.Sdump(vtx)) + t.Logf(spew.Sdump(sis)) return errors.New("block mismatch") } @@ -876,6 +1117,16 @@ func TestFork(t *testing.T) { } }() + popPriv, popPublic, popAddress, err := n.newKey("pop") + if err != nil { + t.Fatal(err) + } + popPrivate = popPriv + t.Logf("pop keys:") + t.Logf(" private : %x", popPrivate.Serialize()) + t.Logf(" public : %x", popPublic.SerializeCompressed()) + t.Logf(" address : %v", popAddress) + startHash := n.Best() count := 9 expectedHeight := uint64(count) @@ -912,8 +1163,6 @@ func TestFork(t *testing.T) { t.Fatal(err) } go func() { - log.Infof("s run") - defer log.Infof("s 
run done") err := s.Run(ctx) if err != nil && !errors.Is(err, context.Canceled) && !errors.Is(err, rawpeer.ErrNoConn) { panic(err) @@ -930,7 +1179,6 @@ func TestFork(t *testing.T) { // See if we are at the right height si := s.Synced(ctx) if si.BlockHeader.Height != expectedHeight { - log.Infof("not synced") continue } @@ -1121,6 +1369,16 @@ func TestIndexNoFork(t *testing.T) { } }() + popPriv, popPublic, popAddress, err := n.newKey("pop") + if err != nil { + t.Fatal(err) + } + popPrivate = popPriv + t.Logf("pop keys:") + t.Logf(" private : %x", popPrivate.Serialize()) + t.Logf(" public : %x", popPublic.SerializeCompressed()) + t.Logf(" address : %v", popAddress) + go func() { if err := n.Run(ctx); !errorIsOneOf(err, []error{net.ErrClosed, context.Canceled, rawpeer.ErrNoConn}) { panic(err) @@ -1134,13 +1392,16 @@ func TestIndexNoFork(t *testing.T) { BlockCacheSize: "10mb", BlockheaderCacheSize: "1mb", BlockSanity: false, + HemiIndex: true, // Test keystone index LevelDBHome: t.TempDir(), ListenAddress: "localhost:8882", // LogLevel: "tbcd=TRACE:tbc=TRACE:level=DEBUG", MaxCachedTxs: 1000, // XXX + MaxCachedKeystones: 1000, // XXX Network: networkLocalnet, PeersWanted: 1, PrometheusListenAddress: "", + MempoolEnabled: false, Seeds: []string{"127.0.0.1:18444"}, } _ = loggo.ConfigureLoggers(cfg.LogLevel) @@ -1185,13 +1446,12 @@ func TestIndexNoFork(t *testing.T) { if direction <= 0 { t.Fatalf("expected 1 going from genesis to b3, got %v", direction) } - // Index to b3 err = s.SyncIndexersToHash(ctx, b3.Hash()) if err != nil { t.Fatal(err) } - err = mustHave(ctx, s, n.genesis, b1, b2, b3) + err = mustHave(ctx, t, s, n.genesis, b1, b2, b3) if err != nil { t.Fatal(err) } @@ -1210,11 +1470,27 @@ func TestIndexNoFork(t *testing.T) { t.Logf("%v: %v", address, utxos) } + // check if keystones exist in txs + foundKss, err := mustHaveKss(ctx, t, s, n.genesis, b1, b2, b3) + if err != nil { + t.Fatal(err) + } + if len(foundKss) < 1 { + t.Fatal("no keystone txs found") + } + + // 
check if keystones exist in DB + err = checkKeystoneDB(ctx, t, s, foundKss...) + if err != nil { + t.Fatal(err) + } + // make sure genesis tx is in db _, err = s.TxById(ctx, n.gtx.Hash()) if err != nil { t.Fatalf("genesis not found: %v", err) } + // make sure genesis was not spent _, err = s.SpentOutputsByTxId(ctx, n.gtx.Hash()) if err == nil { @@ -1248,7 +1524,8 @@ func TestIndexNoFork(t *testing.T) { if err != nil { t.Fatalf("unwinding to genesis should have returned nil, got %v", err) } - err = mustHave(ctx, s, n.genesis, b1) + + err = mustHave(ctx, t, s, n.genesis, b1) if err != nil { t.Fatalf("expected an error from mustHave: %v", err) } @@ -1268,12 +1545,36 @@ func TestIndexNoFork(t *testing.T) { if err != nil { t.Fatal(err) } - log.Infof("balance address %v %v", address, btcutil.Amount(balance)) + t.Logf("balance address %v %v", address, btcutil.Amount(balance)) if balance != 0 { t.Fatalf("%v (%v) invalid balance expected 0, got %v", key.name, address, btcutil.Amount(balance)) } } + + lastKssHeight, err := s.KeystoneIndexHash(ctx) + if err != nil { + t.Fatal(err) + } + // check if keystones unwound to genesis + if lastKssHeight.Height != 0 { + t.Fatalf("expected keystone index hash 0, got %v", lastKssHeight.Height) + } + finalKss, err := mustHaveKss(ctx, t, s, n.blocksAtHeight[0]...) 
+ if err != nil { + t.Fatal(err) + } + if len(finalKss) > 0 { + t.Fatalf("expected no keystone txs, got %v", len(finalKss)) + } + + // check if keystones removed from DB + for _, k := range foundKss { + err = checkKeystoneDB(ctx, t, s, k) + if err == nil { + t.Fatal("expected fail in keystone db query") + } + } } func TestIndexFork(t *testing.T) { @@ -1292,6 +1593,17 @@ func TestIndexFork(t *testing.T) { t.Logf("node stop: %v", err) } }() + + popPriv, popPublic, popAddress, err := n.newKey("pop") + if err != nil { + t.Fatal(err) + } + popPrivate = popPriv + t.Logf("pop keys:") + t.Logf(" private : %x", popPrivate.Serialize()) + t.Logf(" public : %x", popPublic.SerializeCompressed()) + t.Logf(" address : %v", popAddress) + go func() { if err := n.Run(ctx); !errorIsOneOf(err, []error{net.ErrClosed, context.Canceled, rawpeer.ErrNoConn}) { panic(err) @@ -1305,10 +1617,12 @@ func TestIndexFork(t *testing.T) { BlockCacheSize: "10mb", BlockheaderCacheSize: "1mb", BlockSanity: false, + HemiIndex: true, // Test keystone index LevelDBHome: t.TempDir(), ListenAddress: "localhost:8883", // LogLevel: "tbcd=TRACE:tbc=TRACE:level=DEBUG", MaxCachedTxs: 1000, // XXX + MaxCachedKeystones: 1000, // XXX Network: networkLocalnet, PeersWanted: 1, PrometheusListenAddress: "", @@ -1372,6 +1686,8 @@ func TestIndexFork(t *testing.T) { t.Fatal(err) } + time.Sleep(2 * time.Second) + // Verify linear indexing. 
Current TxIndex is sitting at genesis // genesis -> b3 should work with negative direction (cdiff is less than target) @@ -1389,7 +1705,7 @@ func TestIndexFork(t *testing.T) { t.Fatal(err) } // XXX verify indexes - err = mustHave(ctx, s, n.genesis, b1, b2, b3) + err = mustHave(ctx, t, s, n.genesis, b1, b2, b3) if err != nil { t.Fatal(err) } @@ -1408,6 +1724,21 @@ func TestIndexFork(t *testing.T) { t.Logf("%v: %v", address, utxos) } + // check if keystones exist in txs + foundKss, err := mustHaveKss(ctx, t, s, n.genesis, b1, b2, b3) + if err != nil { + t.Fatal(err) + } + if len(foundKss) < 1 { + t.Fatal("no keystone txs found") + } + + // check if keystones exist in DB + err = checkKeystoneDB(ctx, t, s, foundKss...) + if err != nil { + t.Fatal(err) + } + // Verify linear indexing. Current TxIndex is sitting at b3 t.Logf("b3: %v", b3) @@ -1452,7 +1783,7 @@ func TestIndexFork(t *testing.T) { if err != nil { t.Fatalf("unwinding to genesis should have returned nil, got %v", err) } - err = mustHave(ctx, s, n.genesis, b1, b2, b3) + err = mustHave(ctx, t, s, n.genesis, b1, b2, b3) if err == nil { t.Fatalf("expected an error from mustHave") } @@ -1514,7 +1845,7 @@ func TestIndexFork(t *testing.T) { if err != nil { t.Fatalf("unwinding to genesis should have returned nil, got %v", err) } - err = mustHave(ctx, s, n.genesis, b1, b2, b3) + err = mustHave(ctx, t, s, n.genesis, b1, b2, b3) if err == nil { t.Fatalf("expected an error from mustHave") } @@ -1578,6 +1909,30 @@ func TestIndexFork(t *testing.T) { } t.Logf("%v: %v", address, utxos) } + + lastKssHeight, err := s.KeystoneIndexHash(ctx) + if err != nil { + t.Fatal(err) + } + // check if keystones unwound to genesis + if lastKssHeight.Height != 0 { + t.Fatalf("expected keystone index hash 0, got %v", lastKssHeight.Height) + } + finalKss, err := mustHaveKss(ctx, t, s, n.blocksAtHeight[0]...) 
+ if err != nil { + t.Fatal(err) + } + if len(finalKss) > 0 { + t.Fatalf("expected no keystone txs, got %v", len(finalKss)) + } + + // check if keystones removed from DB + for _, k := range foundKss { + err = checkKeystoneDB(ctx, t, s, k) + if err == nil { + t.Fatal("expected fail in keystone db query") + } + } // err = mustHave(ctx, s, n.genesis, b1, b2, b3) // if err == nil { // t.Fatalf("expected an error from mustHave") @@ -1697,7 +2052,7 @@ func TestTransactions(t *testing.T) { t.Fatal(err) } redeemTx.TxIn[0].SignatureScript = sigScript - log.Infof("redeem tx: %v", spew.Sdump(redeemTx)) + t.Logf("redeem tx: %v", spew.Sdump(redeemTx)) flags := txscript.ScriptBip16 | txscript.ScriptVerifyDERSignatures | txscript.ScriptStrictMultiSig |