From 62129cda785d6481a9ccd0c97958723beea3de68 Mon Sep 17 00:00:00 2001 From: ucwong Date: Mon, 21 Oct 2024 03:52:51 +0800 Subject: [PATCH 1/2] prefetcher heap escape --- core/state/state_object.go | 24 +++--- core/state/statedb.go | 16 ++-- core/state/trie_prefetcher.go | 150 ++++++++++++++++++++++------------ 3 files changed, 115 insertions(+), 75 deletions(-) diff --git a/core/state/state_object.go b/core/state/state_object.go index 6ba6c6dda..b32fd08e4 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -245,7 +245,7 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash { // Whilst this would be a bit weird if snapshots are disabled, but we still // want the trie nodes to end up in the prefetcher too, so just push through. if s.db.prefetcher != nil && s.data.Root != types.EmptyRootHash { - if err = s.db.prefetcher.prefetch(s.addrHash, s.origin.Root, s.address, [][]byte{key[:]}, true); err != nil { + if err = s.db.prefetcher.prefetch(s.addrHash, s.origin.Root, s.address, nil, []common.Hash{key}, true); err != nil { log.Error("Failed to prefetch storage slot", "addr", s.address, "key", key, "err", err) } } @@ -281,14 +281,14 @@ func (s *stateObject) setState(key common.Hash, value common.Hash, origin common // finalise moves all dirty storage slots into the pending area to be hashed or // committed later. It is invoked at the end of every transaction. func (s *stateObject) finalise() { - slotsToPrefetch := make([][]byte, 0, len(s.dirtyStorage)) + slotsToPrefetch := make([]common.Hash, 0, len(s.dirtyStorage)) for key, value := range s.dirtyStorage { // If the slot is different from its original value, move it into the // pending area to be committed at the end of the block (and prefetch // the pathways). 
if value != s.originStorage[key] { s.pendingStorage[key] = value - slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure + slotsToPrefetch = append(slotsToPrefetch, key) // Copy needed for closure } else { // Otherwise, the slot was reverted to its original value, remove it // from the pending area to avoid thrashing the data structure. @@ -296,7 +296,7 @@ func (s *stateObject) finalise() { } } if s.db.prefetcher != nil && len(slotsToPrefetch) > 0 && s.data.Root != types.EmptyRootHash { - if err := s.db.prefetcher.prefetch(s.addrHash, s.data.Root, s.address, slotsToPrefetch, false); err != nil { + if err := s.db.prefetcher.prefetch(s.addrHash, s.data.Root, s.address, nil, slotsToPrefetch, false); err != nil { log.Error("Failed to prefetch slots", "addr", s.address, "slots", len(slotsToPrefetch), "err", err) } } @@ -343,7 +343,6 @@ func (s *stateObject) updateTrie() (Trie, error) { origin map[common.Hash][]byte ) // Insert all the pending storage updates into the trie - usedStorage := make([][]byte, 0, len(s.pendingStorage)) hasher := hasherPool.Get().(crypto.KeccakState) defer hasherPool.Put(hasher) @@ -358,7 +357,10 @@ func (s *stateObject) updateTrie() (Trie, error) { // If the deletion is handled first, then `P` would be left with only one child, thus collapsed // into a shortnode. This requires `B` to be resolved from disk. // Whereas if the created node is handled first, then the collapse is avoided, and `B` is not resolved. 
- var deletions []common.Hash + var ( + deletions []common.Hash + used = make([]common.Hash, 0, len(s.pendingStorage)) + ) for key, value := range s.pendingStorage { // Skip noop changes, persist actual changes if value == s.originStorage[key] { @@ -411,7 +413,7 @@ func (s *stateObject) updateTrie() (Trie, error) { } } // Cache the items for preloading - usedStorage = append(usedStorage, common.CopyBytes(key[:])) // Copy needed for closure + used = append(used, key) // Copy needed for closure } for _, key := range deletions { if err := tr.TryDelete(key[:]); err != nil { @@ -420,13 +422,9 @@ func (s *stateObject) updateTrie() (Trie, error) { } s.db.StorageDeleted.Add(1) } - // If no slots were touched, issue a warning as we shouldn't have done all - // the above work in the first place - if len(usedStorage) == 0 { - log.Error("State object update was noop", "addr", s.address, "slots", len(s.pendingStorage)) - } + if s.db.prefetcher != nil { - s.db.prefetcher.used(s.addrHash, s.data.Root, usedStorage) + s.db.prefetcher.used(s.addrHash, s.data.Root, nil, used) } s.pendingStorage = make(Storage) // reset pending map diff --git a/core/state/statedb.go b/core/state/statedb.go index dd5cb6be8..267c58ece 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -215,7 +215,7 @@ func (s *StateDB) StartPrefetcher(namespace string, noreads bool) { // To prevent this, the account trie is always scheduled for prefetching once // the prefetcher is constructed. 
For more details, see: // https://github.com/ethereum/go-ethereum/issues/29880 - if err := s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, nil, false); err != nil { + if err := s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, nil, nil, false); err != nil { log.Error("Failed to prefetch account trie", "root", s.originalRoot, "err", err) } } @@ -690,7 +690,7 @@ func (s *StateDB) getStateObject(addr common.Address) *stateObject { // Whilst this would be a bit weird if snapshots are disabled, but we still // want the trie nodes to end up in the prefetcher too, so just push through. if s.prefetcher != nil { - if err := s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, [][]byte{addr[:]}, true); err != nil { + if err := s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, []common.Address{addr}, nil, true); err != nil { log.Error("Failed to prefetch account", "addr", addr, "err", err) } } @@ -859,7 +859,7 @@ func (s *StateDB) GetRefund() uint64 { // the journal as well as the refunds. Finalise, however, will not push any updates // into the tries just yet. Only IntermediateRoot or Commit will do that. func (s *StateDB) Finalise(deleteEmptyObjects bool) { - addressesToPrefetch := make([][]byte, 0, len(s.journal.dirties)) + addressesToPrefetch := make([]common.Address, 0, len(s.journal.dirties)) for addr := range s.journal.dirties { obj, exist := s.stateObjects[addr] if !exist { @@ -896,10 +896,10 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { // At this point, also ship the address off to the precacher. 
The precacher // will start loading tries, and when the change is eventually committed, // the commit-phase will be a lot faster - addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure + addressesToPrefetch = append(addressesToPrefetch, addr) // Copy needed for closure } if s.prefetcher != nil && len(addressesToPrefetch) > 0 { - if err := s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch, false); err != nil { + if err := s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch, nil, false); err != nil { log.Error("Failed to prefetch addresses", "addresses", len(addressesToPrefetch), "err", err) } } @@ -967,7 +967,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { // into a shortnode. This requires `B` to be resolved from disk. // Whereas if the created node is handled first, then the collapse is avoided, and `B` is not resolved. var ( - usedAddrs [][]byte + usedAddrs []common.Address deletedAddrs []common.Address ) for addr, op := range s.mutations { @@ -982,7 +982,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { s.updateStateObject(s.stateObjects[addr]) s.AccountUpdated += 1 } - usedAddrs = append(usedAddrs, common.CopyBytes(addr[:])) // Copy needed for closure + usedAddrs = append(usedAddrs, addr) // Copy needed for closure } for _, deletedAddr := range deletedAddrs { s.deleteStateObject(deletedAddr) @@ -991,7 +991,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { s.AccountUpdates += time.Since(start) if s.prefetcher != nil { - s.prefetcher.used(common.Hash{}, s.originalRoot, usedAddrs) + s.prefetcher.used(common.Hash{}, s.originalRoot, usedAddrs, nil) } // Track the amount of time wasted on hashing the account trie defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now()) diff --git a/core/state/trie_prefetcher.go 
b/core/state/trie_prefetcher.go index 9140e4a0e..a2284fabb 100644 --- a/core/state/trie_prefetcher.go +++ b/core/state/trie_prefetcher.go @@ -116,31 +116,31 @@ func (p *triePrefetcher) report() { fetcher.wait() // ensure the fetcher's idle before poking in its internals if fetcher.root == p.root { - p.accountLoadReadMeter.Mark(int64(len(fetcher.seenRead))) - p.accountLoadWriteMeter.Mark(int64(len(fetcher.seenWrite))) + p.accountLoadReadMeter.Mark(int64(len(fetcher.seenReadAddr))) + p.accountLoadWriteMeter.Mark(int64(len(fetcher.seenWriteAddr))) p.accountDupReadMeter.Mark(int64(fetcher.dupsRead)) p.accountDupWriteMeter.Mark(int64(fetcher.dupsWrite)) p.accountDupCrossMeter.Mark(int64(fetcher.dupsCross)) - for _, key := range fetcher.used { - delete(fetcher.seenRead, string(key)) - delete(fetcher.seenWrite, string(key)) + for _, key := range fetcher.usedAddr { + delete(fetcher.seenReadAddr, key) + delete(fetcher.seenWriteAddr, key) } - p.accountWasteMeter.Mark(int64(len(fetcher.seenRead) + len(fetcher.seenWrite))) + p.accountWasteMeter.Mark(int64(len(fetcher.seenReadAddr) + len(fetcher.seenWriteAddr))) } else { - p.storageLoadReadMeter.Mark(int64(len(fetcher.seenRead))) - p.storageLoadWriteMeter.Mark(int64(len(fetcher.seenWrite))) + p.storageLoadReadMeter.Mark(int64(len(fetcher.seenReadSlot))) + p.storageLoadWriteMeter.Mark(int64(len(fetcher.seenWriteSlot))) p.storageDupReadMeter.Mark(int64(fetcher.dupsRead)) p.storageDupWriteMeter.Mark(int64(fetcher.dupsWrite)) p.storageDupCrossMeter.Mark(int64(fetcher.dupsCross)) - for _, key := range fetcher.used { - delete(fetcher.seenRead, string(key)) - delete(fetcher.seenWrite, string(key)) + for _, key := range fetcher.usedSlot { + delete(fetcher.seenReadSlot, key) + delete(fetcher.seenWriteSlot, key) } - p.storageWasteMeter.Mark(int64(len(fetcher.seenRead) + len(fetcher.seenWrite))) + p.storageWasteMeter.Mark(int64(len(fetcher.seenReadSlot) + len(fetcher.seenWriteSlot))) } } } @@ -156,7 +156,7 @@ func (p *triePrefetcher) 
report() { // upon the same contract, the parameters invoking this method may be // repeated. // 2. Finalize of the main account trie. This happens only once per block. -func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr common.Address, keys [][]byte, read bool) error { +func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr common.Address, addrs []common.Address, slots []common.Hash, read bool) error { // If the state item is only being read, but reads are disabled, return if read && p.noreads { return nil @@ -173,7 +173,7 @@ func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr comm fetcher = newSubfetcher(p.db, p.root, owner, root, addr) p.fetchers[id] = fetcher } - return fetcher.schedule(keys, read) + return fetcher.schedule(addrs, slots, read) } // trie returns the trie matching the root hash, blocking until the fetcher of @@ -193,10 +193,12 @@ func (p *triePrefetcher) trie(owner common.Hash, root common.Hash) Trie { // used marks a batch of state items used to allow creating statistics as to // how useful or wasteful the fetcher is. -func (p *triePrefetcher) used(owner common.Hash, root common.Hash, used [][]byte) { +func (p *triePrefetcher) used(owner common.Hash, root common.Hash, usedAddr []common.Address, usedSlot []common.Hash) { if fetcher := p.fetchers[p.trieID(owner, root)]; fetcher != nil { fetcher.wait() // ensure the fetcher's idle before poking in its internals - fetcher.used = used + + fetcher.usedAddr = append(fetcher.usedAddr, usedAddr...) + fetcher.usedSlot = append(fetcher.usedSlot, usedSlot...) 
} } @@ -227,44 +229,50 @@ type subfetcher struct { stop chan struct{} // Channel to interrupt processing term chan struct{} // Channel to signal interruption - seenRead map[string]struct{} // Tracks the entries already loaded via read operations - seenWrite map[string]struct{} // Tracks the entries already loaded via write operations + seenReadAddr map[common.Address]struct{} // Tracks the accounts already loaded via read operations + seenWriteAddr map[common.Address]struct{} // Tracks the accounts already loaded via write operations + seenReadSlot map[common.Hash]struct{} // Tracks the storage already loaded via read operations + seenWriteSlot map[common.Hash]struct{} // Tracks the storage already loaded via write operations dupsRead int // Number of duplicate preload tasks via reads only dupsWrite int // Number of duplicate preload tasks via writes only dupsCross int // Number of duplicate preload tasks via read-write-crosses - used [][]byte // Tracks the entries used in the end + usedAddr []common.Address // Tracks the accounts used in the end + usedSlot []common.Hash // Tracks the storage used in the end } // subfetcherTask is a trie path to prefetch, tagged with whether it originates // from a read or a write request. type subfetcherTask struct { read bool - key []byte + addr *common.Address + slot *common.Hash } // newSubfetcher creates a goroutine to prefetch state items belonging to a // particular root hash. 
func newSubfetcher(db Database, state common.Hash, owner common.Hash, root common.Hash, addr common.Address) *subfetcher { sf := &subfetcher{ - db: db, - state: state, - owner: owner, - root: root, - addr: addr, - wake: make(chan struct{}, 1), - stop: make(chan struct{}), - term: make(chan struct{}), - seenRead: make(map[string]struct{}), - seenWrite: make(map[string]struct{}), + db: db, + state: state, + owner: owner, + root: root, + addr: addr, + wake: make(chan struct{}, 1), + stop: make(chan struct{}), + term: make(chan struct{}), + seenReadAddr: make(map[common.Address]struct{}), + seenWriteAddr: make(map[common.Address]struct{}), + seenReadSlot: make(map[common.Hash]struct{}), + seenWriteSlot: make(map[common.Hash]struct{}), } go sf.loop() return sf } // schedule adds a batch of trie keys to the queue to prefetch. -func (sf *subfetcher) schedule(keys [][]byte, read bool) error { +func (sf *subfetcher) schedule(addrs []common.Address, slots []common.Hash, read bool) error { // Ensure the subfetcher is still alive select { case <-sf.term: @@ -273,8 +281,11 @@ func (sf *subfetcher) schedule(keys [][]byte, read bool) error { } // Append the tasks to the current queue sf.lock.Lock() - for _, key := range keys { - sf.tasks = append(sf.tasks, &subfetcherTask{read: read, key: key}) + for _, addr := range addrs { + sf.tasks = append(sf.tasks, &subfetcherTask{read: read, addr: &addr}) + } + for _, slot := range slots { + sf.tasks = append(sf.tasks, &subfetcherTask{read: read, slot: &slot}) } sf.lock.Unlock() @@ -352,35 +363,66 @@ func (sf *subfetcher) loop() { sf.lock.Unlock() for _, task := range tasks { - key := string(task.key) - if task.read { - if _, ok := sf.seenRead[key]; ok { - sf.dupsRead++ - continue - } - if _, ok := sf.seenWrite[key]; ok { - sf.dupsCross++ - continue + if task.addr != nil { + key := *task.addr + if task.read { + if _, ok := sf.seenReadAddr[key]; ok { + sf.dupsRead++ + continue + } + if _, ok := sf.seenWriteAddr[key]; ok { + sf.dupsCross++ + 
continue + } + } else { + if _, ok := sf.seenReadAddr[key]; ok { + sf.dupsCross++ + continue + } + if _, ok := sf.seenWriteAddr[key]; ok { + sf.dupsWrite++ + continue + } + } } else { - if _, ok := sf.seenRead[key]; ok { - sf.dupsCross++ - continue - } - if _, ok := sf.seenWrite[key]; ok { - sf.dupsWrite++ - continue + key := *task.slot + if task.read { + if _, ok := sf.seenReadSlot[key]; ok { + sf.dupsRead++ + continue + } + if _, ok := sf.seenWriteSlot[key]; ok { + sf.dupsCross++ + continue + } + } else { + if _, ok := sf.seenReadSlot[key]; ok { + sf.dupsCross++ + continue + } + if _, ok := sf.seenWriteSlot[key]; ok { + sf.dupsWrite++ + continue + } } } - if len(task.key) == common.AddressLength { - sf.trie.GetAccount(common.BytesToAddress(task.key)) + if task.addr != nil { + sf.trie.GetAccount(*task.addr) } else { - sf.trie.GetStorage(sf.addr, task.key) + sf.trie.GetStorage(sf.addr, (*task.slot)[:]) } if task.read { - sf.seenRead[key] = struct{}{} + if task.addr != nil { + sf.seenReadAddr[*task.addr] = struct{}{} + } else { + sf.seenReadSlot[*task.slot] = struct{}{} + } } else { - sf.seenWrite[key] = struct{}{} + if task.addr != nil { + sf.seenWriteAddr[*task.addr] = struct{}{} + } else { + sf.seenWriteSlot[*task.slot] = struct{}{} + } } } } From dc3a41c09736d05ffdc162641160d2294e201d20 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Sun, 20 Oct 2024 14:41:51 +0300 Subject: [PATCH 2/2] get rid of custom MaxUint64 and MaxInt64 --- common/math/big_test.go | 45 --------------------------- common/math/integer.go | 16 ------------ common/math/integer_test.go | 9 ++++--- core/blockchain_test.go | 3 +++ core/rawdb/freezer_batch.go | 2 +- core/rawdb/freezer_memory.go | 2 +- core/state/snapshot/generate.go | 2 +- core/types/block_test.go | 1 + core/vm/common.go | 3 ++- core/vm/contracts.go | 5 ++-- internal/ctxcapi/api.go | 2 +- params/config_test.go | 4 +++ rlp/decode_test.go | 3 ++- rpc/types_test.go | 2 +- 14 files changed, 25 
insertions(+), 74 deletions(-) diff --git a/common/math/big_test.go b/common/math/big_test.go index 7cfbb6c08..5ab361816 100644 --- a/common/math/big_test.go +++ b/common/math/big_test.go @@ -270,48 +270,3 @@ func TestLittleEndianByteAt(t *testing.T) { } } - -func TestS256(t *testing.T) { - tests := []struct{ x, y *big.Int }{ - {x: big.NewInt(0), y: big.NewInt(0)}, - {x: big.NewInt(1), y: big.NewInt(1)}, - {x: big.NewInt(2), y: big.NewInt(2)}, - { - x: new(big.Int).Sub(BigPow(2, 255), big.NewInt(1)), - y: new(big.Int).Sub(BigPow(2, 255), big.NewInt(1)), - }, - { - x: BigPow(2, 255), - y: new(big.Int).Neg(BigPow(2, 255)), - }, - { - x: new(big.Int).Sub(BigPow(2, 256), big.NewInt(1)), - y: big.NewInt(-1), - }, - { - x: new(big.Int).Sub(BigPow(2, 256), big.NewInt(2)), - y: big.NewInt(-2), - }, - } - for _, test := range tests { - if y := S256(test.x); y.Cmp(test.y) != 0 { - t.Errorf("S256(%x) = %x, want %x", test.x, y, test.y) - } - } -} - -func TestExp(t *testing.T) { - tests := []struct{ base, exponent, result *big.Int }{ - {base: big.NewInt(0), exponent: big.NewInt(0), result: big.NewInt(1)}, - {base: big.NewInt(1), exponent: big.NewInt(0), result: big.NewInt(1)}, - {base: big.NewInt(1), exponent: big.NewInt(1), result: big.NewInt(1)}, - {base: big.NewInt(1), exponent: big.NewInt(2), result: big.NewInt(1)}, - {base: big.NewInt(3), exponent: big.NewInt(144), result: MustParseBig256("507528786056415600719754159741696356908742250191663887263627442114881")}, - {base: big.NewInt(2), exponent: big.NewInt(255), result: MustParseBig256("57896044618658097711785492504343953926634992332820282019728792003956564819968")}, - } - for _, test := range tests { - if result := Exp(test.base, test.exponent); result.Cmp(test.result) != 0 { - t.Errorf("Exp(%d, %d) = %d, want %d", test.base, test.exponent, result, test.result) - } - } -} diff --git a/common/math/integer.go b/common/math/integer.go index 5d9237f5b..c1c889033 100644 --- a/common/math/integer.go +++ b/common/math/integer.go 
@@ -22,22 +22,6 @@ import ( "strconv" ) -// Integer limit values. -const ( - MaxInt8 = 1<<7 - 1 - MinInt8 = -1 << 7 - MaxInt16 = 1<<15 - 1 - MinInt16 = -1 << 15 - MaxInt32 = 1<<31 - 1 - MinInt32 = -1 << 31 - MaxInt64 = 1<<63 - 1 - MinInt64 = -1 << 63 - MaxUint8 = 1<<8 - 1 - MaxUint16 = 1<<16 - 1 - MaxUint32 = 1<<32 - 1 - MaxUint64 = 1<<64 - 1 -) - // HexOrDecimal64 marshals uint64 as hex or decimal. type HexOrDecimal64 uint64 diff --git a/common/math/integer_test.go b/common/math/integer_test.go index 96f0e85a9..7bd5b8ee1 100644 --- a/common/math/integer_test.go +++ b/common/math/integer_test.go @@ -17,6 +17,7 @@ package math import ( + "math" "testing" ) @@ -36,8 +37,8 @@ func TestOverflow(t *testing.T) { op operation }{ // add operations - {MaxUint64, 1, true, add}, - {MaxUint64 - 1, 1, false, add}, + {math.MaxUint64, 1, true, add}, + {math.MaxUint64 - 1, 1, false, add}, // sub operations {0, 1, true, sub}, @@ -46,8 +47,8 @@ func TestOverflow(t *testing.T) { // mul operations {0, 0, false, mul}, {10, 10, false, mul}, - {MaxUint64, 2, true, mul}, - {MaxUint64, 1, false, mul}, + {math.MaxUint64, 2, true, mul}, + {math.MaxUint64, 1, false, mul}, } { var overflows bool switch test.op { diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 45e782e69..1efc5c042 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -17,6 +17,9 @@ package core import ( + "errors" + "fmt" + gomath "math" "math/big" "testing" diff --git a/core/rawdb/freezer_batch.go b/core/rawdb/freezer_batch.go index b72decea7..a36a8c252 100644 --- a/core/rawdb/freezer_batch.go +++ b/core/rawdb/freezer_batch.go @@ -18,10 +18,10 @@ package rawdb import ( "fmt" + "math" "github.com/golang/snappy" - "github.com/CortexFoundation/CortexTheseus/common/math" "github.com/CortexFoundation/CortexTheseus/rlp" ) diff --git a/core/rawdb/freezer_memory.go b/core/rawdb/freezer_memory.go index 28b850b5e..8ab7fe1cf 100644 --- a/core/rawdb/freezer_memory.go +++ b/core/rawdb/freezer_memory.go 
@@ -19,10 +19,10 @@ package rawdb import ( "errors" "fmt" + "math" "sync" "github.com/CortexFoundation/CortexTheseus/common" - "github.com/CortexFoundation/CortexTheseus/common/math" "github.com/CortexFoundation/CortexTheseus/ctxcdb" "github.com/CortexFoundation/CortexTheseus/log" "github.com/CortexFoundation/CortexTheseus/rlp" diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go index 82be1ae2d..53b434693 100644 --- a/core/state/snapshot/generate.go +++ b/core/state/snapshot/generate.go @@ -20,13 +20,13 @@ import ( "bytes" "encoding/binary" "fmt" + "math" "math/big" "time" "github.com/VictoriaMetrics/fastcache" "github.com/CortexFoundation/CortexTheseus/common" - "github.com/CortexFoundation/CortexTheseus/common/math" "github.com/CortexFoundation/CortexTheseus/core/rawdb" "github.com/CortexFoundation/CortexTheseus/core/types" "github.com/CortexFoundation/CortexTheseus/ctxcdb" diff --git a/core/types/block_test.go b/core/types/block_test.go index 362e53486..42d11d017 100644 --- a/core/types/block_test.go +++ b/core/types/block_test.go @@ -20,6 +20,7 @@ import ( "bytes" "encoding/binary" "hash" + gomath "math" "math/big" "reflect" "testing" diff --git a/core/vm/common.go b/core/vm/common.go index bfd285286..fd4b5364e 100644 --- a/core/vm/common.go +++ b/core/vm/common.go @@ -17,10 +17,11 @@ package vm import ( + "math" + "github.com/holiman/uint256" "github.com/CortexFoundation/CortexTheseus/common" - "github.com/CortexFoundation/CortexTheseus/common/math" ) // calcMemSize64 calculates the required memory size, and returns diff --git a/core/vm/contracts.go b/core/vm/contracts.go index 4bd91e2db..015f0bec2 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -21,6 +21,7 @@ import ( "encoding/binary" "errors" "fmt" + gomath "math" "math/big" "github.com/consensys/gnark-crypto/ecc" @@ -379,7 +380,7 @@ func (c *bigModExp) RequiredGas(input []byte) uint64 { // 2. 
Different divisor (`GQUADDIVISOR`) (3) gas.Div(gas, big3) if gas.BitLen() > 64 { - return math.MaxUint64 + return gomath.MaxUint64 } // 3. Minimum price of 200 gas if gas.Uint64() < 200 { @@ -392,7 +393,7 @@ func (c *bigModExp) RequiredGas(input []byte) uint64 { gas.Div(gas, big20) if gas.BitLen() > 64 { - return math.MaxUint64 + return gomath.MaxUint64 } return gas.Uint64() } diff --git a/internal/ctxcapi/api.go b/internal/ctxcapi/api.go index 2cb382d11..86c4fb3cc 100644 --- a/internal/ctxcapi/api.go +++ b/internal/ctxcapi/api.go @@ -21,6 +21,7 @@ import ( "context" "errors" "fmt" + "math" "math/big" "time" @@ -29,7 +30,6 @@ import ( "github.com/CortexFoundation/CortexTheseus/accounts/keystore" "github.com/CortexFoundation/CortexTheseus/common" "github.com/CortexFoundation/CortexTheseus/common/hexutil" - "github.com/CortexFoundation/CortexTheseus/common/math" "github.com/CortexFoundation/CortexTheseus/consensus/cuckoo" "github.com/CortexFoundation/CortexTheseus/core" "github.com/CortexFoundation/CortexTheseus/core/rawdb" diff --git a/params/config_test.go b/params/config_test.go index a94722981..11d037d05 100644 --- a/params/config_test.go +++ b/params/config_test.go @@ -17,9 +17,13 @@ package params import ( + "math" "math/big" "reflect" "testing" + "time" + + "github.com/stretchr/testify/require" ) func TestCheckCompatible(t *testing.T) { diff --git a/rlp/decode_test.go b/rlp/decode_test.go index caa895f6c..a6dccd90c 100644 --- a/rlp/decode_test.go +++ b/rlp/decode_test.go @@ -22,6 +22,7 @@ import ( "errors" "fmt" "io" + gomath "math" "math/big" "reflect" "strings" @@ -546,7 +547,7 @@ var decodeTests = []decodeTest{ // uint256 {input: "80", ptr: new(*uint256.Int), value: uint256.NewInt(0)}, {input: "01", ptr: new(*uint256.Int), value: uint256.NewInt(1)}, - {input: "88FFFFFFFFFFFFFFFF", ptr: new(*uint256.Int), value: uint256.NewInt(math.MaxUint64)}, + {input: "88FFFFFFFFFFFFFFFF", ptr: new(*uint256.Int), value: uint256.NewInt(gomath.MaxUint64)}, {input: 
"89FFFFFFFFFFFFFFFFFF", ptr: new(*uint256.Int), value: veryBigInt256}, {input: "10", ptr: new(uint256.Int), value: *uint256.NewInt(16)}, // non-pointer also works diff --git a/rpc/types_test.go b/rpc/types_test.go index 3fc98de0f..719e15dfe 100644 --- a/rpc/types_test.go +++ b/rpc/types_test.go @@ -18,11 +18,11 @@ package rpc import ( "encoding/json" + "math" "reflect" "testing" "github.com/CortexFoundation/CortexTheseus/common" - "github.com/CortexFoundation/CortexTheseus/common/math" ) func TestBlockNumberJSONUnmarshal(t *testing.T) {