diff --git a/core/rawdb/rawdbreset/reset_stages.go b/core/rawdb/rawdbreset/reset_stages.go
index 171d068d60a..36979b401de 100644
--- a/core/rawdb/rawdbreset/reset_stages.go
+++ b/core/rawdb/rawdbreset/reset_stages.go
@@ -163,7 +163,7 @@ func ResetExec(ctx context.Context, db kv.RwDB, agg *state.Aggregator, chain str
 	cleanupList = append(cleanupList, stateBuckets...)
 	cleanupList = append(cleanupList, stateHistoryBuckets...)
 	cleanupList = append(cleanupList, agg.DomainTables(kv.AccountsDomain, kv.StorageDomain, kv.CodeDomain, kv.CommitmentDomain, kv.ReceiptDomain)...)
-	cleanupList = append(cleanupList, agg.InvertedIndexTables(kv.LogAddrIdxPos, kv.LogTopicIdxPos, kv.TracesFromIdxPos, kv.TracesToIdxPos)...)
+	cleanupList = append(cleanupList, agg.InvertedIndexTables(kv.LogAddrIdx, kv.LogTopicIdx, kv.TracesFromIdx, kv.TracesToIdx)...)

 	return db.Update(ctx, func(tx kv.RwTx) error {
 		if err := clearStageProgress(tx, stages.Execution); err != nil {
diff --git a/core/state/rw_v3.go b/core/state/rw_v3.go
index a0f92c4e475..80e8694ec3a 100644
--- a/core/state/rw_v3.go
+++ b/core/state/rw_v3.go
@@ -211,23 +211,23 @@ func (rs *StateV3) ApplyState4(ctx context.Context, txTask *TxTask) error {

 func (rs *StateV3) ApplyLogsAndTraces4(txTask *TxTask, domains *libstate.SharedDomains) error {
 	for addr := range txTask.TraceFroms {
-		if err := domains.IndexAdd(kv.TblTracesFromIdx, addr[:]); err != nil {
+		if err := domains.IndexAdd(kv.TracesFromIdx, addr[:]); err != nil {
 			return err
 		}
 	}
 	for addr := range txTask.TraceTos {
-		if err := domains.IndexAdd(kv.TblTracesToIdx, addr[:]); err != nil {
+		if err := domains.IndexAdd(kv.TracesToIdx, addr[:]); err != nil {
 			return err
 		}
 	}
 	for _, lg := range txTask.Logs {
-		if err := domains.IndexAdd(kv.TblLogAddressIdx, lg.Address[:]); err != nil {
+		if err := domains.IndexAdd(kv.LogAddrIdx, lg.Address[:]); err != nil {
 			return err
 		}
 		for _, topic := range lg.Topics {
-			if err := domains.IndexAdd(kv.TblLogTopicsIdx, topic[:]); err != nil {
+			if err := domains.IndexAdd(kv.LogTopicIdx, topic[:]); err != nil {
 				return err
 			}
 		}
diff --git a/erigon-lib/kv/kv_interface.go b/erigon-lib/kv/kv_interface.go
index 8b7f8879137..0c100406dba 100644
--- a/erigon-lib/kv/kv_interface.go
+++ b/erigon-lib/kv/kv_interface.go
@@ -459,8 +459,6 @@ type (
 	Appendable  uint16
 	History     string
 	InvertedIdx string
-
-	InvertedIdxPos uint16
 )

 type TemporalGetter interface {
diff --git a/erigon-lib/kv/tables.go b/erigon-lib/kv/tables.go
index 3300ce829f5..8f1a21a18e3 100644
--- a/erigon-lib/kv/tables.go
+++ b/erigon-lib/kv/tables.go
@@ -745,12 +745,6 @@ const (
 	LogAddrIdx    InvertedIdx = "LogAddrIdx"
 	TracesFromIdx InvertedIdx = "TracesFromIdx"
 	TracesToIdx   InvertedIdx = "TracesToIdx"
-
-	LogAddrIdxPos    InvertedIdxPos = 0
-	LogTopicIdxPos   InvertedIdxPos = 1
-	TracesFromIdxPos InvertedIdxPos = 2
-	TracesToIdxPos   InvertedIdxPos = 3
-	StandaloneIdxLen InvertedIdxPos = 4
 )

 const (
@@ -758,21 +752,6 @@ const (
 	AppendableLen Appendable = 0
 )

-func (iip InvertedIdxPos) String() string {
-	switch iip {
-	case LogAddrIdxPos:
-		return "logAddr"
-	case LogTopicIdxPos:
-		return "logTopic"
-	case TracesFromIdxPos:
-		return "traceFrom"
-	case TracesToIdxPos:
-		return "traceTo"
-	default:
-		return "unknown inverted index"
-	}
-}
-
 func (d Domain) String() string {
 	switch d {
 	case AccountsDomain:
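Note: with the `InvertedIdxPos` enum and its `String()` method removed above, the string-typed `kv.InvertedIdx` constants are the only remaining identifiers for the standalone inverted indices, and they can key maps directly. A minimal, self-contained sketch of the idea (the `InvertedIdx` type and constant names mirror the diff; the table values in the map are illustrative placeholders, not the real table names):

```go
package main

import "fmt"

// InvertedIdx mirrors the string-typed identifier from erigon-lib/kv/tables.go.
type InvertedIdx string

const (
	LogAddrIdx    InvertedIdx = "LogAddrIdx"
	LogTopicIdx   InvertedIdx = "LogTopicIdx"
	TracesFromIdx InvertedIdx = "TracesFromIdx"
	TracesToIdx   InvertedIdx = "TracesToIdx"
)

func main() {
	// A map keyed by the string type replaces the old fixed-size array that was
	// indexed by InvertedIdxPos; the key itself is already the printable name,
	// so no positional enum or String() switch is needed.
	tables := map[InvertedIdx]string{ // values are illustrative only
		LogAddrIdx:    "LogAddrTable",
		LogTopicIdx:   "LogTopicTable",
		TracesFromIdx: "TracesFromTable",
		TracesToIdx:   "TracesToTable",
	}
	for name, tbl := range tables {
		fmt.Printf("%s -> %s\n", name, tbl)
	}
}
```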
diff --git a/erigon-lib/state/aggregator.go b/erigon-lib/state/aggregator.go
index 8f17d44acc9..c98ba7ff0f5 100644
--- a/erigon-lib/state/aggregator.go
+++ b/erigon-lib/state/aggregator.go
@@ -56,7 +56,7 @@ import (
 type Aggregator struct {
 	db  kv.RoDB
 	d   [kv.DomainLen]*Domain
-	iis [kv.StandaloneIdxLen]*InvertedIndex
+	iis map[kv.InvertedIdx]*InvertedIndex
 	dirs            datadir.Dirs
 	tmpdir          string
 	aggregationStep uint64
@@ -141,6 +141,7 @@ func NewAggregator(ctx context.Context, dirs datadir.Dirs, aggregationStep uint6
 		logger:                 logger,
 		collateAndBuildWorkers: 1,
 		mergeWorkers:           1,
+		iis:                    make(map[kv.InvertedIdx]*InvertedIndex),

 		commitmentValuesTransform: AggregatorSqueezeCommitmentValues,

@@ -209,7 +210,7 @@ func (a *Aggregator) registerDomain(name kv.Domain, salt *uint32, dirs datadir.D
 	return nil
 }

-func (a *Aggregator) registerII(idx kv.InvertedIdxPos, salt *uint32, dirs datadir.Dirs, aggregationStep uint64, filenameBase, indexKeysTable, indexTable string, logger log.Logger) error {
+func (a *Aggregator) registerII(idx kv.InvertedIdx, salt *uint32, dirs datadir.Dirs, aggregationStep uint64, filenameBase, indexKeysTable, indexTable string, logger log.Logger) error {
 	idxCfg := iiCfg{
 		salt: salt, dirs: dirs, aggregationStep: aggregationStep,
@@ -218,7 +219,12 @@ func (a *Aggregator) registerII(idx kv.InvertedIdxPos, salt *uint32, dirs datadi
 		valuesTable: indexTable,
 		compression: seg.CompressNone,
 	}
+
+	if _, ok := a.iis[idx]; ok {
+		return fmt.Errorf("inverted index %s already registered", idx)
+	}
 	var err error
+
 	a.iis[idx], err = NewInvertedIndex(idxCfg, logger)
 	if err != nil {
 		return err
 	}
@@ -490,7 +496,13 @@ func (c AggV3Collation) Close() {

 type AggV3StaticFiles struct {
 	d    [kv.DomainLen]StaticFiles
-	ivfs [kv.StandaloneIdxLen]InvertedFiles
+	ivfs map[kv.InvertedIdx]InvertedFiles
+}
+
+func NewAggV3StaticFiles() *AggV3StaticFiles {
+	return &AggV3StaticFiles{
+		ivfs: make(map[kv.InvertedIdx]InvertedFiles),
+	}
 }

 // CleanupOnError - call it on collation fail. It's closing all files
@@ -512,7 +524,7 @@ func (a *Aggregator) buildFiles(ctx context.Context, step uint64) error {
 		txTo          = a.FirstTxNumOfStep(step + 1)
 		stepStartedAt = time.Now()

-		static          AggV3StaticFiles
+		static          = NewAggV3StaticFiles()
 		closeCollations = true
 		collListMu      = sync.Mutex{}
 		collations      = make([]Collation, 0)
@@ -572,7 +584,7 @@ func (a *Aggregator) buildFiles(ctx context.Context, step uint64) error {
 	closeCollations = false

 	// indices are built concurrently
-	for _, ii := range a.iis {
+	for iikey, ii := range a.iis {
 		ii := ii
 		dc := ii.BeginFilesRo()
 		firstStepNotInFiles := dc.FirstStepNotInFiles()
@@ -599,18 +611,7 @@ func (a *Aggregator) buildFiles(ctx context.Context, step uint64) error {
 				return err
 			}

-			switch ii.keysTable {
-			case kv.TblLogTopicsKeys:
-				static.ivfs[kv.LogTopicIdxPos] = sf
-			case kv.TblLogAddressKeys:
-				static.ivfs[kv.LogAddrIdxPos] = sf
-			case kv.TblTracesFromKeys:
-				static.ivfs[kv.TracesFromIdxPos] = sf
-			case kv.TblTracesToKeys:
-				static.ivfs[kv.TracesToIdxPos] = sf
-			default:
-				panic("unknown index " + ii.keysTable)
-			}
+			static.ivfs[iikey] = sf
 			return nil
 		})
 	}
@@ -744,7 +745,7 @@ func (a *Aggregator) MergeLoop(ctx context.Context) error {
 	}
 }

-func (a *Aggregator) integrateDirtyFiles(sf AggV3StaticFiles, txNumFrom, txNumTo uint64) {
+func (a *Aggregator) integrateDirtyFiles(sf *AggV3StaticFiles, txNumFrom, txNumTo uint64) {
 	a.dirtyFilesLock.Lock()
 	defer a.dirtyFilesLock.Unlock()

@@ -762,7 +763,7 @@ func (a *Aggregator) DomainTables(domains ...kv.Domain) (tables []string) {
 	}
 	return tables
 }
-func (a *Aggregator) InvertedIndexTables(indices ...kv.InvertedIdxPos) (tables []string) {
+func (a *Aggregator) InvertedIndexTables(indices ...kv.InvertedIdx) (tables []string) {
 	for _, idx := range indices {
 		tables = append(tables, a.iis[idx].Tables()...)
 	}
@@ -1072,17 +1073,17 @@ func (ac *AggregatorRoTx) Prune(ctx context.Context, tx kv.RwTx, limit uint64, l
 			return aggStat, err
 		}
 	}
-	var stats [kv.StandaloneIdxLen]*InvertedIndexPruneStat
-	for i := 0; i < int(kv.StandaloneIdxLen); i++ {
-		stat, err := ac.iis[i].Prune(ctx, tx, txFrom, txTo, limit, logEvery, false, nil)
+
+	stats := make(map[kv.InvertedIdx]*InvertedIndexPruneStat)
+	for iikey := range ac.a.iis {
+		stat, err := ac.iis[iikey].Prune(ctx, tx, txFrom, txTo, limit, logEvery, false, nil)
 		if err != nil {
 			return nil, err
 		}
-		stats[i] = stat
+		stats[iikey] = stat
 	}
-
-	for i := 0; i < int(kv.StandaloneIdxLen); i++ {
-		aggStat.Indices[ac.iis[i].ii.filenameBase] = stats[i]
+	for iikey := range ac.a.iis {
+		aggStat.Indices[ac.iis[iikey].ii.filenameBase] = stats[iikey]
 	}

 	return aggStat, nil
@@ -1232,7 +1233,13 @@ func (a *Aggregator) recalcVisibleFilesMinimaxTxNum() {

 type RangesV3 struct {
 	domain        [kv.DomainLen]DomainRanges
-	invertedIndex [kv.StandaloneIdxLen]*MergeRange
+	invertedIndex map[kv.InvertedIdx]*MergeRange
+}
+
+func NewRangesV3() *RangesV3 {
+	return &RangesV3{
+		invertedIndex: make(map[kv.InvertedIdx]*MergeRange),
+	}
 }

 func (r RangesV3) String() string {
@@ -1246,7 +1253,7 @@ func (r RangesV3) String() string {
 	aggStep := r.domain[kv.AccountsDomain].aggStep
 	for p, mr := range r.invertedIndex {
 		if mr != nil && mr.needMerge {
-			ss = append(ss, mr.String(kv.InvertedIdxPos(p).String(), aggStep))
+			ss = append(ss, mr.String(string(p), aggStep))
 		}
 	}
 	return strings.Join(ss, ", ")
@@ -1266,8 +1273,8 @@ func (r RangesV3) any() bool {
 	return false
 }

-func (ac *AggregatorRoTx) findMergeRange(maxEndTxNum, maxSpan uint64) RangesV3 {
-	var r RangesV3
+func (ac *AggregatorRoTx) findMergeRange(maxEndTxNum, maxSpan uint64) *RangesV3 {
+	r := NewRangesV3()
 	if ac.a.commitmentValuesTransform {
 		lmrAcc := ac.d[kv.AccountsDomain].files.LatestMergedRange()
 		lmrSto := ac.d[kv.StorageDomain].files.LatestMergedRange()
@@ -1328,8 +1335,8 @@ func (ac *AggregatorRoTx) RestrictSubsetFileDeletions(b bool) {
 	ac.a.d[kv.CommitmentDomain].restrictSubsetFileDeletions = b
 }

-func (ac *AggregatorRoTx) mergeFiles(ctx context.Context, files SelectedStaticFilesV3, r RangesV3) (MergedFilesV3, error) {
-	var mf MergedFilesV3
+func (ac *AggregatorRoTx) mergeFiles(ctx context.Context, files *SelectedStaticFilesV3, r *RangesV3) (*MergedFilesV3, error) {
+	mf := NewMergedFilesV3()
 	g, ctx := errgroup.WithContext(ctx)
 	g.SetLimit(ac.a.mergeWorkers)
 	closeFiles := true
@@ -1404,7 +1411,7 @@ func (ac *AggregatorRoTx) mergeFiles(ctx context.Context, files SelectedStaticFi
 	return mf, err
 }

-func (a *Aggregator) integrateMergedDirtyFiles(outs SelectedStaticFilesV3, in MergedFilesV3) {
+func (a *Aggregator) integrateMergedDirtyFiles(outs *SelectedStaticFilesV3, in *MergedFilesV3) {
 	a.dirtyFilesLock.Lock()
 	defer a.dirtyFilesLock.Unlock()

@@ -1418,7 +1425,7 @@ func (a *Aggregator) integrateMergedDirtyFiles(outs SelectedStaticFilesV3, in Me
 }

-func (a *Aggregator) cleanAfterMerge(in MergedFilesV3) {
+func (a *Aggregator) cleanAfterMerge(in *MergedFilesV3) {
 	at := a.BeginFilesRo()
 	defer at.Close()
@@ -1564,17 +1571,12 @@ func (ac *AggregatorRoTx) IndexRange(name kv.InvertedIdx, k []byte, fromTs, toTs
 		return ac.d[kv.StorageDomain].ht.IdxRange(k, fromTs, toTs, asc, limit, tx)
 	case kv.ReceiptHistoryIdx:
 		return ac.d[kv.ReceiptDomain].ht.IdxRange(k, fromTs, toTs, asc, limit, tx)
-	//case kv.GasUsedHistoryIdx:
-	//	return ac.d[kv.GasUsedDomain].ht.IdxRange(k, fromTs, toTs, asc, limit, tx)
-	case kv.LogTopicIdx:
-		return ac.iis[kv.LogTopicIdxPos].IdxRange(k, fromTs, toTs, asc, limit, tx)
-	case kv.LogAddrIdx:
-		return ac.iis[kv.LogAddrIdxPos].IdxRange(k, fromTs, toTs, asc, limit, tx)
-	case kv.TracesFromIdx:
-		return ac.iis[kv.TracesFromIdxPos].IdxRange(k, fromTs, toTs, asc, limit, tx)
-	case kv.TracesToIdx:
-		return ac.iis[kv.TracesToIdxPos].IdxRange(k, fromTs, toTs, asc, limit, tx)
 	default:
+		// check the ii
+		if v, ok := ac.iis[name]; ok {
+			return v.IdxRange(k, fromTs, toTs, asc, limit, tx)
+		}
+
 		return nil, fmt.Errorf("unexpected history name: %s", name)
 	}
 }
@@ -1635,7 +1637,7 @@ func (ac *AggregatorRoTx) nastyFileRead(name kv.Domain, from, to uint64) (*seg.R
 type AggregatorRoTx struct {
 	a   *Aggregator
 	d   [kv.DomainLen]*DomainRoTx
-	iis [kv.StandaloneIdxLen]*InvertedIndexRoTx
+	iis map[kv.InvertedIdx]*InvertedIndexRoTx

 	id      uint64 // auto-increment id of ctx for logs
 	_leakID uint64 // set only if TRACE_AGG=true
@@ -1646,6 +1648,7 @@ func (a *Aggregator) BeginFilesRo() *AggregatorRoTx {
 		a:       a,
 		id:      a.ctxAutoIncrement.Add(1),
 		_leakID: a.leakDetector.Add(),
+		iis:     make(map[kv.InvertedIdx]*InvertedIndexRoTx),
 	}

 	a.visibleFilesLock.RLock()
diff --git a/erigon-lib/state/aggregator2.go b/erigon-lib/state/aggregator2.go
index aa8983a7cd6..f98b0ec4d67 100644
--- a/erigon-lib/state/aggregator2.go
+++ b/erigon-lib/state/aggregator2.go
@@ -37,16 +37,16 @@ func NewAggregator2(ctx context.Context, dirs datadir.Dirs, aggregationStep uint
 	if err := a.registerDomain(kv.ReceiptDomain, salt, dirs, aggregationStep, logger); err != nil {
 		return nil, err
 	}
-	if err := a.registerII(kv.LogAddrIdxPos, salt, dirs, aggregationStep, kv.FileLogAddressIdx, kv.TblLogAddressKeys, kv.TblLogAddressIdx, logger); err != nil {
+	if err := a.registerII(kv.LogAddrIdx, salt, dirs, aggregationStep, kv.FileLogAddressIdx, kv.TblLogAddressKeys, kv.TblLogAddressIdx, logger); err != nil {
 		return nil, err
 	}
-	if err := a.registerII(kv.LogTopicIdxPos, salt, dirs, aggregationStep, kv.FileLogTopicsIdx, kv.TblLogTopicsKeys, kv.TblLogTopicsIdx, logger); err != nil {
+	if err := a.registerII(kv.LogTopicIdx, salt, dirs, aggregationStep, kv.FileLogTopicsIdx, kv.TblLogTopicsKeys, kv.TblLogTopicsIdx, logger); err != nil {
 		return nil, err
 	}
-	if err := a.registerII(kv.TracesFromIdxPos, salt, dirs, aggregationStep, kv.FileTracesFromIdx, kv.TblTracesFromKeys, kv.TblTracesFromIdx, logger); err != nil {
+	if err := a.registerII(kv.TracesFromIdx, salt, dirs, aggregationStep, kv.FileTracesFromIdx, kv.TblTracesFromKeys, kv.TblTracesFromIdx, logger); err != nil {
 		return nil, err
 	}
-	if err := a.registerII(kv.TracesToIdxPos, salt, dirs, aggregationStep, kv.FileTracesToIdx, kv.TblTracesToKeys, kv.TblTracesToIdx, logger); err != nil {
+	if err := a.registerII(kv.TracesToIdx, salt, dirs, aggregationStep, kv.FileTracesToIdx, kv.TblTracesToKeys, kv.TblTracesToIdx, logger); err != nil {
 		return nil, err
 	}
 	a.KeepRecentTxnsOfHistoriesWithDisabledSnapshots(100_000) // ~1k blocks of history
diff --git a/erigon-lib/state/aggregator_files.go b/erigon-lib/state/aggregator_files.go
index 88825229fe5..d397a3ef3bb 100644
--- a/erigon-lib/state/aggregator_files.go
+++ b/erigon-lib/state/aggregator_files.go
@@ -24,11 +24,15 @@ type SelectedStaticFilesV3 struct {
 	d     [kv.DomainLen][]*filesItem
 	dHist [kv.DomainLen][]*filesItem
 	dIdx  [kv.DomainLen][]*filesItem
-	ii    [kv.StandaloneIdxLen][]*filesItem
+	ii    map[kv.InvertedIdx][]*filesItem
+}
+
+func NewSelectedStaticFilesV3() *SelectedStaticFilesV3 {
+	return &SelectedStaticFilesV3{ii: make(map[kv.InvertedIdx][]*filesItem)}
 }

 func (sf SelectedStaticFilesV3) Close() {
-	clist := make([][]*filesItem, 0, int(kv.DomainLen)+int(kv.StandaloneIdxLen))
+	clist := make([][]*filesItem, 0, int(kv.DomainLen)+len(sf.ii))
 	for id := range sf.d {
 		clist = append(clist, sf.d[id], sf.dIdx[id], sf.dHist[id])
 	}
@@ -50,7 +54,8 @@ func (sf SelectedStaticFilesV3) Close() {
 	}
 }

-func (ac *AggregatorRoTx) staticFilesInRange(r RangesV3) (sf SelectedStaticFilesV3, err error) {
+func (ac *AggregatorRoTx) staticFilesInRange(r *RangesV3) (*SelectedStaticFilesV3, error) {
+	sf := NewSelectedStaticFilesV3()
 	for id := range ac.d {
 		if !r.domain[id].any() {
 			continue
@@ -63,14 +68,18 @@ func (ac *AggregatorRoTx) staticFilesInRange(r RangesV3) (sf SelectedStaticFiles
 		}
 		sf.ii[id] = ac.iis[id].staticFilesInRange(rng.from, rng.to)
 	}
-	return sf, err
+	return sf, nil
 }

 type MergedFilesV3 struct {
 	d     [kv.DomainLen]*filesItem
 	dHist [kv.DomainLen]*filesItem
 	dIdx  [kv.DomainLen]*filesItem
-	iis   [kv.StandaloneIdxLen]*filesItem
+	iis   map[kv.InvertedIdx]*filesItem
+}
+
+func NewMergedFilesV3() *MergedFilesV3 {
+	return &MergedFilesV3{iis: make(map[kv.InvertedIdx]*filesItem)}
 }

 func (mf MergedFilesV3) FrozenList() (frozen []string) {
@@ -100,7 +109,9 @@ func (mf MergedFilesV3) Close() {
 	for id := range mf.d {
 		clist = append(clist, mf.d[id], mf.dHist[id], mf.dIdx[id])
 	}
-	clist = append(clist, mf.iis[:]...)
+	for _, ii := range mf.iis {
+		clist = append(clist, ii)
+	}

 	for _, item := range clist {
 		if item != nil {
diff --git a/erigon-lib/state/domain_shared.go b/erigon-lib/state/domain_shared.go
index 495caa8462e..557bb60e607 100644
--- a/erigon-lib/state/domain_shared.go
+++ b/erigon-lib/state/domain_shared.go
@@ -98,7 +98,7 @@ type SharedDomains struct {
 	storage *btree2.Map[string, dataWithPrevStep]

 	domainWriters [kv.DomainLen]*domainBufferedWriter
-	iiWriters     [kv.StandaloneIdxLen]*invertedIndexBufferedWriter
+	iiWriters     map[kv.InvertedIdx]*invertedIndexBufferedWriter

 	currentChangesAccumulator *StateChangeSet
 	pastChangesAccumulator    map[string]*StateChangeSet
@@ -114,8 +114,9 @@ type HasAgg interface {

 func NewSharedDomains(tx kv.Tx, logger log.Logger) (*SharedDomains, error) {
 	sd := &SharedDomains{
-		logger:  logger,
-		storage: btree2.NewMap[string, dataWithPrevStep](128),
+		logger:    logger,
+		storage:   btree2.NewMap[string, dataWithPrevStep](128),
+		iiWriters: map[kv.InvertedIdx]*invertedIndexBufferedWriter{},
 		//trace: true,
 	}
 	sd.SetTx(tx)
@@ -661,19 +662,10 @@ func (sd *SharedDomains) delAccountStorage(addr, loc []byte, preVal []byte, prev
 }

 func (sd *SharedDomains) IndexAdd(table kv.InvertedIdx, key []byte) (err error) {
-	switch table {
-	case kv.LogAddrIdx, kv.TblLogAddressIdx:
-		err = sd.iiWriters[kv.LogAddrIdxPos].Add(key)
-	case kv.LogTopicIdx, kv.TblLogTopicsIdx, kv.LogTopicIndex:
-		err = sd.iiWriters[kv.LogTopicIdxPos].Add(key)
-	case kv.TblTracesToIdx:
-		err = sd.iiWriters[kv.TracesToIdxPos].Add(key)
-	case kv.TblTracesFromIdx:
-		err = sd.iiWriters[kv.TracesFromIdxPos].Add(key)
-	default:
-		panic(fmt.Errorf("unknown shared index %s", table))
+	if writer, ok := sd.iiWriters[table]; ok {
+		return writer.Add(key)
 	}
-	return err
+	panic(fmt.Errorf("unknown index %s", table))
 }

 func (sd *SharedDomains) SetTx(tx kv.Tx) {
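Note: the `SharedDomains.IndexAdd` change above is representative of the whole refactor: the per-table switch collapses into a single lookup in a map keyed by `kv.InvertedIdx`, and alias handling moves to registration time. A self-contained sketch of that lookup pattern (the `indexWriter` interface and `sharedDomains` struct below are simplified stand-ins, not the real Erigon types):

```go
package main

import "fmt"

// InvertedIdx stands in for kv.InvertedIdx.
type InvertedIdx string

// indexWriter is a simplified stand-in for *invertedIndexBufferedWriter.
type indexWriter interface {
	Add(key []byte) error
}

type logWriter struct{ name InvertedIdx }

func (w logWriter) Add(key []byte) error {
	fmt.Printf("%s <- %x\n", w.name, key)
	return nil
}

type sharedDomains struct {
	iiWriters map[InvertedIdx]indexWriter
}

// IndexAdd resolves the writer by key, mirroring the post-refactor
// SharedDomains.IndexAdd: hit -> delegate, miss -> panic.
func (sd *sharedDomains) IndexAdd(table InvertedIdx, key []byte) error {
	if writer, ok := sd.iiWriters[table]; ok {
		return writer.Add(key)
	}
	panic(fmt.Errorf("unknown index %s", table))
}

func main() {
	sd := &sharedDomains{iiWriters: map[InvertedIdx]indexWriter{
		"LogAddrIdx": logWriter{"LogAddrIdx"},
	}}
	_ = sd.IndexAdd("LogAddrIdx", []byte{0xde, 0xad, 0xbe, 0xef})
}
```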
diff --git a/erigon-lib/state/integrity.go b/erigon-lib/state/integrity.go
index 6ac91d24943..b83b43e157a 100644
--- a/erigon-lib/state/integrity.go
+++ b/erigon-lib/state/integrity.go
@@ -58,32 +58,15 @@ func (ac *AggregatorRoTx) IntegrityInvertedIndexAllValuesAreInRange(ctx context.
 		if err != nil {
 			return err
 		}
-	//case kv.GasUsedHistoryIdx:
-	//	err := ac.d[kv.GasUsedDomain].ht.iit.IntegrityInvertedIndexAllValuesAreInRange(ctx)
-	//	if err != nil {
-	//		return err
-	//	}
-	case kv.TracesFromIdx:
-		err := ac.iis[kv.TracesFromIdxPos].IntegrityInvertedIndexAllValuesAreInRange(ctx, failFast, fromStep)
-		if err != nil {
-			return err
-		}
-	case kv.TracesToIdx:
-		err := ac.iis[kv.TracesToIdxPos].IntegrityInvertedIndexAllValuesAreInRange(ctx, failFast, fromStep)
-		if err != nil {
-			return err
-		}
-	case kv.LogAddrIdx:
-		err := ac.iis[kv.LogAddrIdxPos].IntegrityInvertedIndexAllValuesAreInRange(ctx, failFast, fromStep)
-		if err != nil {
-			return err
-		}
-	case kv.LogTopicIdx:
-		err := ac.iis[kv.LogTopicIdxPos].IntegrityInvertedIndexAllValuesAreInRange(ctx, failFast, fromStep)
-		if err != nil {
-			return err
-		}
 	default:
+		// check the ii
+		if v, ok := ac.iis[name]; ok {
+			err := v.IntegrityInvertedIndexAllValuesAreInRange(ctx, failFast, fromStep)
+			if err != nil {
+				return err
+			}
+			return nil
+		}
 		panic(fmt.Sprintf("unexpected: %s", name))
 	}
 	return nil
diff --git a/erigon-lib/state/inverted_index.go b/erigon-lib/state/inverted_index.go
index 3f1c1345662..99df82db5aa 100644
--- a/erigon-lib/state/inverted_index.go
+++ b/erigon-lib/state/inverted_index.go
@@ -731,7 +731,7 @@ func (iit *InvertedIndexRoTx) iterateRangeOnFiles(key []byte, startTxNum, endTxN
 			break
 		}
 		if iit.files[i].src.index == nil { // assert
-			err := fmt.Errorf("why file has not index: %s\n", iit.files[i].src.decompressor.FileName())
+			err := fmt.Errorf("why file has not index: %s", iit.files[i].src.decompressor.FileName())
 			panic(err)
 		}
 		if iit.files[i].src.index.KeyCount() == 0 {
diff --git a/erigon-lib/state/merge.go b/erigon-lib/state/merge.go
index bfa69b354a3..d6b6baf2c32 100644
--- a/erigon-lib/state/merge.go
+++ b/erigon-lib/state/merge.go
@@ -1081,5 +1081,5 @@ func hasCoverVisibleFile(visibleFiles []visibleFile, item *filesItem) bool {
 	return false
 }

-func (ac *AggregatorRoTx) DbgDomain(idx kv.Domain) *DomainRoTx            { return ac.d[idx] }
-func (ac *AggregatorRoTx) DbgII(idx kv.InvertedIdxPos) *InvertedIndexRoTx { return ac.iis[idx] }
+func (ac *AggregatorRoTx) DbgDomain(idx kv.Domain) *DomainRoTx         { return ac.d[idx] }
+func (ac *AggregatorRoTx) DbgII(idx kv.InvertedIdx) *InvertedIndexRoTx { return ac.iis[idx] }
diff --git a/erigon-lib/state/squeeze.go b/erigon-lib/state/squeeze.go
index 4a611627ed5..fe8cf6a663a 100644
--- a/erigon-lib/state/squeeze.go
+++ b/erigon-lib/state/squeeze.go
@@ -110,28 +110,26 @@ func (ac *AggregatorRoTx) SqueezeCommitmentFiles() error {
 		return nil
 	}

-	rng := RangesV3{
-		domain: [5]DomainRanges{
-			kv.AccountsDomain: {
-				name:    kv.AccountsDomain,
-				values:  MergeRange{true, 0, math.MaxUint64},
-				history: HistoryRanges{},
-				aggStep: ac.a.StepSize(),
-			},
-			kv.StorageDomain: {
-				name:    kv.StorageDomain,
-				values:  MergeRange{true, 0, math.MaxUint64},
-				history: HistoryRanges{},
-				aggStep: ac.a.StepSize(),
-			},
-			kv.CommitmentDomain: {
-				name:    kv.CommitmentDomain,
-				values:  MergeRange{true, 0, math.MaxUint64},
-				history: HistoryRanges{},
-				aggStep: ac.a.StepSize(),
-			},
+	rng := NewRangesV3()
+	rng.domain = [5]DomainRanges{
+		kv.AccountsDomain: {
+			name:    kv.AccountsDomain,
+			values:  MergeRange{true, 0, math.MaxUint64},
+			history: HistoryRanges{},
+			aggStep: ac.a.StepSize(),
+		},
+		kv.StorageDomain: {
+			name:    kv.StorageDomain,
+			values:  MergeRange{true, 0, math.MaxUint64},
+			history: HistoryRanges{},
+			aggStep: ac.a.StepSize(),
+		},
+		kv.CommitmentDomain: {
+			name:    kv.CommitmentDomain,
+			values:  MergeRange{true, 0, math.MaxUint64},
+			history: HistoryRanges{},
+			aggStep: ac.a.StepSize(),
 		},
-		invertedIndex: [4]*MergeRange{},
 	}
 	sf, err := ac.staticFilesInRange(rng)
 	if err != nil {
@@ -322,16 +320,14 @@ func (a *Aggregator) RebuildCommitmentFiles(ctx context.Context, rwDb kv.RwDB, t
 	acRo := a.BeginFilesRo() // this tx is used to read existing domain files and closed in the end
 	defer acRo.Close()

-	rng := RangesV3{
-		domain: [5]DomainRanges{
-			kv.AccountsDomain: {
-				name:    kv.AccountsDomain,
-				values:  MergeRange{true, 0, math.MaxUint64},
-				history: HistoryRanges{},
-				aggStep: a.StepSize(),
-			},
+	rng := NewRangesV3()
+	rng.domain = [5]DomainRanges{
+		kv.AccountsDomain: {
+			name:    kv.AccountsDomain,
+			values:  MergeRange{true, 0, math.MaxUint64},
+			history: HistoryRanges{},
+			aggStep: a.StepSize(),
 		},
-		invertedIndex: [4]*MergeRange{},
 	}
 	sf, err := acRo.staticFilesInRange(rng)
 	if err != nil {
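Note: one consequence of replacing fixed-size arrays with maps is the set of constructors introduced in this diff (`NewRangesV3`, `NewAggV3StaticFiles`, `NewSelectedStaticFilesV3`, `NewMergedFilesV3`): a zero-value map field is nil and assigning into it panics, whereas the old zero-value arrays were immediately usable. A small standalone demonstration of why the explicit `make` is now required (types below are illustrative stand-ins, not the real ones):

```go
package main

import "fmt"

// ranges is an illustrative stand-in for RangesV3 after the refactor.
type ranges struct {
	invertedIndex map[string]int // stands in for map[kv.InvertedIdx]*MergeRange
}

// newRanges mirrors the role of NewRangesV3: it initializes the map so
// callers can assign into it immediately.
func newRanges() *ranges {
	return &ranges{invertedIndex: make(map[string]int)}
}

func main() {
	good := newRanges()
	good.invertedIndex["LogAddrIdx"] = 1
	fmt.Println("constructed value works:", good.invertedIndex)

	defer func() { fmt.Println("recovered:", recover()) }()
	var bad ranges                      // zero value: invertedIndex is nil
	bad.invertedIndex["LogAddrIdx"] = 1 // panics: assignment to entry in nil map
	fmt.Println("never reached")
}
```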