diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index dd26da68f..52c72c115 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -44,3 +44,21 @@ jobs:
       run: |
         make test-unit-cover
       if: env.GIT_DIFF
+  test-unit-e2e:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/setup-go@v4
+        with:
+          go-version: '1.21'
+          check-latest: true
+      - uses: actions/checkout@v4
+      - uses: technote-space/get-diff-action@v6.1.2
+        with:
+          PATTERNS: |
+            **/**.go
+            go.mod
+            go.sum
+      - name: Test e2e cases
+        run: |
+          make test-unit-e2e
+        if: env.GIT_DIFF
diff --git a/Makefile b/Makefile
index 1ccac8b32..939995500 100644
--- a/Makefile
+++ b/Makefile
@@ -186,7 +186,7 @@ all: build
 
 build-all: tools build lint test vulncheck
 
-.PHONY: distclean clean build-all
+.PHONY: distclean clean build-all build
 
 ###############################################################################
 ###                          Tools & Dependencies                           ###
@@ -305,23 +305,27 @@ test-all: test-unit test-race
 # we want to include all unit tests in the subfolders (tests/e2e/*)
 # We also want to exclude the testutil folder because it contains only
 # helper functions for the tests.
-PACKAGES_UNIT=$(shell go list ./... | grep -v '/tests/e2e$$' | grep -v 'testutil')
+PACKAGES_UNIT=$(shell go list ./... | grep -v '/tests/e2e' | grep -v 'testutil')
+PACKAGES_UNIT_E2E=$(shell go list ./... | grep '/tests/e2e')
 TEST_PACKAGES=./...
-TEST_TARGETS := test-unit test-unit-cover test-race
+TEST_TARGETS := test-unit test-unit-cover test-race test-unit-e2e
 
 # Test runs-specific rules. To add a new test target, just add
 # a new rule, customise ARGS or TEST_PACKAGES ad libitum, and
 # append the new rule to the TEST_TARGETS list.
-test-unit: ARGS=-timeout=15m -gcflags=all=-l
+test-unit: ARGS=-timeout=15m -gcflags=all=-l --tags devmode
 test-unit: TEST_PACKAGES=$(PACKAGES_UNIT)
 
 test-race: ARGS=-race
 test-race: TEST_PACKAGES=$(PACKAGES_NOSIMULATION)
 $(TEST_TARGETS): run-tests
 
-test-unit-cover: ARGS=-timeout=15m -coverprofile=cover.out -covermode=atomic -gcflags=all=-l
+test-unit-cover: ARGS=-timeout=15m -coverprofile=cover.out -covermode=atomic -gcflags=all=-l --tags devmode
 test-unit-cover: TEST_PACKAGES=$(PACKAGES_UNIT)
 
+test-unit-e2e: ARGS=-timeout=15m --tags devmode
+test-unit-e2e: TEST_PACKAGES=$(PACKAGES_UNIT_E2E)
+
 test-e2e:
 	@if [ -z "$(TARGET_VERSION)" ]; then \
 		echo "Building docker image from local codebase"; \
@@ -634,4 +638,4 @@ check-licenses:
 	@python3 scripts/check_licenses.py .
 
 swagger-ui:
-	docker run -p 8080:8080 -e SWAGGER_JSON=/app/swagger.json -v $(pwd)/client/docs/swagger-ui:/app swaggerapi/swagger-ui
\ No newline at end of file
+	docker run -p 8080:8080 -e SWAGGER_JSON=/app/swagger.json -v $(pwd)/client/docs/swagger-ui:/app swaggerapi/swagger-ui
diff --git a/app/ante/cosmos/sigverify.go b/app/ante/cosmos/sigverify.go
index 3d91e1cdd..92986fe28 100644
--- a/app/ante/cosmos/sigverify.go
+++ b/app/ante/cosmos/sigverify.go
@@ -391,6 +391,8 @@ func (isd IncrementSequenceDecorator) AnteHandle(ctx sdk.Context, tx sdk.Tx, sim
 		msg := msg.(*oracletypes.MsgCreatePrice)
 		if accAddress, err := sdk.AccAddressFromBech32(msg.Creator); err != nil {
 			return ctx, errors.New("invalid address")
+			// #nosec G115 // safe conversion
+			// TODO: define msg.Nonce as uint32 to avoid conversion
 		} else if _, err := isd.oracleKeeper.CheckAndIncreaseNonce(ctx, sdk.ConsAddress(accAddress).String(), msg.FeederID, uint32(msg.Nonce)); err != nil {
 			return ctx, err
 		}
@@ -445,6 +447,7 @@ func (vscd ValidateSigCountDecorator) AnteHandle(ctx sdk.Context, tx sdk.Tx, sim
 	sigCount := 0
 	for _, pk := range pubKeys {
 		sigCount += CountSubKeys(pk)
+		// #nosec G115
 		if uint64(sigCount) > params.TxSigLimit {
 			return ctx, sdkerrors.ErrTooManySignatures.Wrapf("signatures: %d, limit: %d", sigCount, params.TxSigLimit)
 		}
diff --git a/app/ante/cosmos/txsize_gas.go b/app/ante/cosmos/txsize_gas.go
index b9684cd11..db448c75a 100644
--- a/app/ante/cosmos/txsize_gas.go
+++ b/app/ante/cosmos/txsize_gas.go
@@ -76,12 +76,14 @@ func (cgts ConsumeTxSizeGasDecorator) AnteHandle(ctx sdk.Context, tx sdk.Tx, sim
 			}
 
 			// use stdsignature to mock the size of a full signature
+			// #nosec G115
 			simSig := legacytx.StdSignature{ // nolint:staticcheck // this will be removed when proto is ready
 				Signature: simSecp256k1Sig[:],
 				PubKey:    pubkey,
 			}
 
 			sigBz := legacy.Cdc.MustMarshal(simSig)
+			// #nosec G115
 			cost := sdk.Gas(len(sigBz) + 6)
 
 			// If the pubkey is a multi-signature pubkey, then we estimate for the maximum
diff --git a/app/app.go b/app/app.go
index 4879a8a5c..dfec980b0 100644
--- a/app/app.go
+++ b/app/app.go
@@ -915,14 +915,15 @@ func NewExocoreApp(
 	app.mm.SetOrderBeginBlockers(
 		upgradetypes.ModuleName,    // to upgrade the chain
 		capabilitytypes.ModuleName, // before any module with capabilities like IBC
-		epochstypes.ModuleName,     // to update the epoch
-		feemarkettypes.ModuleName,  // set EIP-1559 gas prices
-		evmtypes.ModuleName,        // stores chain id in memory
-		slashingtypes.ModuleName,   // TODO after reward
-		evidencetypes.ModuleName,   // TODO after reward
-		stakingtypes.ModuleName,    // track historical info
-		ibcexported.ModuleName,     // handles upgrades of chain and hence client
-		authz.ModuleName,           // clear expired approvals
+		oracleTypes.ModuleName,
+		epochstypes.ModuleName,     // to update the epoch
+		feemarkettypes.ModuleName,  // set EIP-1559 gas prices
+		evmtypes.ModuleName,        // stores chain id in memory
+		slashingtypes.ModuleName,   // TODO after reward
+		evidencetypes.ModuleName,   // TODO after reward
+		stakingtypes.ModuleName,    // track historical info
+		ibcexported.ModuleName,     // handles upgrades of chain and hence client
+		authz.ModuleName,           // clear expired approvals
 		// no-op modules
 		ibctransfertypes.ModuleName,
 		icatypes.ModuleName,
@@ -943,7 +944,6 @@ func NewExocoreApp(
 		rewardTypes.ModuleName,
 		exoslashTypes.ModuleName,
 		avsManagerTypes.ModuleName,
-		oracleTypes.ModuleName,
 		distrtypes.ModuleName,
 	)
 
diff --git a/precompiles/avs/events.go b/precompiles/avs/events.go
index 0bf1a5234..8eac48bf3 100644
--- a/precompiles/avs/events.go
+++ b/precompiles/avs/events.go
@@ -271,6 +271,8 @@ func (p Precompile) EmitTaskSubmittedByOperator(ctx sdk.Context, stateDB vm.Stat
 	}
 	// Prepare the event data:sender,TaskResponse, BlsSignature, Phase
 	arguments := abi.Arguments{event.Inputs[2], event.Inputs[3], event.Inputs[4], event.Inputs[5]}
+	// #nosec G115
+	// TODO: consider changing the definition of Phase to uint8
 	packed, err := arguments.Pack(params.CallerAddress.String(), params.TaskResponse, params.BlsSignature, uint8(params.Phase))
 	if err != nil {
 		return err
diff --git a/tests/e2e/oracle/create_price.go b/tests/e2e/oracle/create_price.go
index c5ac70280..1294ab9e4 100644
--- a/tests/e2e/oracle/create_price.go
+++ b/tests/e2e/oracle/create_price.go
@@ -101,7 +101,10 @@ func (s *E2ETestSuite) testCreatePriceLST() {
 	// query final price
 	res, err := s.network.QueryOracle().LatestPrice(context.Background(), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1})
 	s.Require().NoError(err)
-	s.Require().Equal(priceTest1R1.getPriceTimeRound(1), res.Price)
+	// NOTE: overwrite the timestamp so the comparison ignores it
+	ret := priceTest1R1.getPriceTimeRound(1)
+	ret.Timestamp = res.Price.Timestamp
+	s.Require().Equal(ret, res.Price)
 
 	// TODO: there might be a small chance that the blockHeight grows to more than 13, try bigger price window(nonce>3) to be more confident
 	// send create-price from validator3 to avoid being slashed for downtime
@@ -134,8 +137,10 @@
 	res, err = s.network.QueryOracle().LatestPrice(context.Background(), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1})
 	s.Require().NoError(err)
 	// price update fail, round 2 still have price{p1}
-	s.Require().Equal(priceTest1R1.getPriceTimeRound(2), res.Price)
-
+	// NOTE: overwrite the timestamp so the comparison ignores it
+	ret = priceTest1R1.getPriceTimeRound(2)
+	ret.Timestamp = res.Price.Timestamp
+	s.Require().Equal(ret, res.Price)
 	// case_3. slashing_{miss_v3:2, window:3} [1.0.1]
 	// update timestamp
 	priceTest2R3 := price2.updateTimestamp()
@@ -167,7 +172,10 @@
 	res, err = s.network.QueryOracle().LatestPrice(context.Background(), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1})
 	s.Require().NoError(err)
 	// price updated, round 3 has price{p2}
-	s.Require().Equal(priceTest2R3.getPriceTimeRound(3), res.Price)
+	// NOTE: overwrite the timestamp so the comparison ignores it
+	ret = priceTest2R3.getPriceTimeRound(3)
+	ret.Timestamp = res.Price.Timestamp
+	s.Require().Equal(ret, res.Price)
 
 	// case_4. slashing_{miss_v3:2, window:4}.maxWindow=4 [1.0.1.0]
 	// update timestamp
@@ -186,7 +194,10 @@
 	res, err = s.network.QueryOracle().LatestPrice(context.Background(), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1})
 	s.Require().NoError(err)
 	// price updated, round 4 has price{p1}
-	s.Require().Equal(priceTest1R4.getPriceTimeRound(4), res.Price)
+	// NOTE: overwrite the timestamp so the comparison ignores it
+	ret = priceTest1R4.getPriceTimeRound(4)
+	ret.Timestamp = res.Price.Timestamp
+	s.Require().Equal(ret, res.Price)
 	// send create-price from validator3 to avoid being slashed for downtime
 	msg3 = oracletypes.NewMsgCreatePrice(creator3.String(), 1, []*oracletypes.PriceSource{&priceSource1R4}, 40, 1)
 	err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg3}, "valconskey3", kr3)
@@ -266,11 +277,13 @@ func (s *E2ETestSuite) testCreatePriceNST() {
 	s.Require().Equal([]*oracletypes.BalanceInfo{
 		{
 			Block:   6,
+			Index:   0,
 			Balance: 32,
 			Change:  oracletypes.Action_ACTION_DEPOSIT,
 		},
 		{
 			RoundID: 1,
+			Index:   1,
 			Block:   8,
 			Balance: 28,
 			Change:  oracletypes.Action_ACTION_SLASH_REFUND,
@@ -299,7 +312,10 @@ func (s *E2ETestSuite) testSlashing() {
 	res, err := s.network.QueryOracle().LatestPrice(context.Background(), &oracletypes.QueryGetLatestPriceRequest{TokenId: 1})
 	s.Require().NoError(err)
 	// price updated, round 4 has price{p1}
-	s.Require().Equal(priceTest1R5.getPriceTimeRound(5), res.Price)
+	// NOTE: overwrite the timestamp so the comparison ignores it
+	ret := priceTest1R5.getPriceTimeRound(5)
+	ret.Timestamp = res.Price.Timestamp
+	s.Require().Equal(ret, res.Price)
 	s.moveToAndCheck(60)
 	// slashing_{miss_v3:3, window:5} [0.1.0.1.1] -> {miss_v3:2, window:4} [1.0.1.1]
 	_, priceSource1R6 := price1.generateRealTimeStructs("14", 1)
@@ -312,7 +328,7 @@ func (s *E2ETestSuite) testSlashing() {
 	s.Require().NoError(err)
 	err = s.network.SendTxOracleCreateprice([]sdk.Msg{msg2}, "valconskey2", kr2)
 	s.Require().NoError(err)
-	s.moveToAndCheck(63)
+	s.moveToAndCheck(64)
 	resSigningInfo, err := s.network.QuerySlashing().SigningInfo(context.Background(), &slashingtypes.QuerySigningInfoRequest{ConsAddress: sdk.ConsAddress(s.network.Validators[3].PubKey.Address()).String()})
 	s.Require().NoError(err)
 	// validator3 is jailed
@@ -331,7 +347,6 @@ func (s *E2ETestSuite) testSlashing() {
 	s.moveNAndCheck(2)
 	resOperator, err = s.network.QueryOperator().QueryOptInfo(context.Background(), &operatortypes.QueryOptInfoRequest{OperatorAVSAddress: &operatortypes.OperatorAVSAddress{OperatorAddr: s.network.Validators[3].Address.String(), AvsAddress: avsAddr}})
 	s.Require().NoError(err)
-	fmt.Println("debug----->jailed:", resOperator.Jailed)
 	s.Require().False(resOperator.Jailed)
 }
 
@@ -353,7 +368,7 @@ func (s *E2ETestSuite) testRegisterTokenThroughPrecompile() {
 	// registerToken will automaticlly register that token into oracle module
 	res, err := s.network.QueryOracle().Params(context.Background(), &oracletypes.QueryParamsRequest{})
 	s.Require().NoError(err)
-	s.Require().Equal(name, res.Params.Tokens[3].Name)
+	s.Require().Equal(name, res.Params.Tokens[len(res.Params.Tokens)-1].Name)
 }
 
 func (s *E2ETestSuite) moveToAndCheck(height int64) {
diff --git a/tests/e2e/oracle/helper_nstconvert.go b/tests/e2e/oracle/helper_nstconvert.go
index 57dcb6910..3cae1a4ff 100644
--- a/tests/e2e/oracle/helper_nstconvert.go
+++ b/tests/e2e/oracle/helper_nstconvert.go
@@ -2,6 +2,7 @@ package oracle
 
 import (
 	"encoding/binary"
+	"math"
 	"strings"
 	"github.com/imroc/biu"
 )
@@ -10,8 +11,7 @@
 func convertBalanceChangeToBytes(stakerChanges [][]int) []byte {
 	if len(stakerChanges) == 0 {
 		// length equals to 0 means that alls takers have efb of 32 with 0 changes
-		ret := make([]byte, 32)
-		return ret
+		return make([]byte, 32)
 	}
 	str := ""
 	index := 0
@@ -23,6 +23,10 @@ func convertBalanceChangeToBytes(stakerChanges [][]int) []byte {
 
 		// change amount -> bytes
 		change := stakerChange[1]
+		if (change > 0 && change > math.MaxUint16) ||
+			(change < 0 && (-1*change) > math.MaxUint16) {
+			return make([]byte, 32)
+		}
 		var changeBytes []byte
 		symbol := 1
 		if change < 0 {
@@ -47,6 +51,7 @@ func convertBalanceChangeToBytes(stakerChanges [][]int) []byte {
 		} else {
 			// 2 byte
 			changeBytes = make([]byte, 2)
+			// #nosec G115 // change has been checked to make sure no overflow
 			binary.BigEndian.PutUint16(changeBytes, uint16(change))
 			moveLength := 16 - bits
 			changeBytes[0] <<= moveLength
diff --git a/testutil/utils.go b/testutil/utils.go
index 5671412fd..c6fbf9e03 100644
--- a/testutil/utils.go
+++ b/testutil/utils.go
@@ -504,6 +504,7 @@ func (suite *BaseTestSuite) DoSetupTest() {
 	queryHelperEvm := baseapp.NewQueryServerTestHelper(suite.Ctx, suite.App.InterfaceRegistry())
 	evmtypes.RegisterQueryServer(queryHelperEvm, suite.App.EvmKeeper)
 	suite.QueryClientEVM = evmtypes.NewQueryClient(queryHelperEvm)
+	suite.App.OracleKeeper.FeederManager.BeginBlock(suite.Ctx)
 }
 
 // DeployContract deploys a contract that calls the deposit precompile's methods for testing purposes.
diff --git a/x/avs/client/cli/tx.go b/x/avs/client/cli/tx.go
index b73524a26..7623857b7 100644
--- a/x/avs/client/cli/tx.go
+++ b/x/avs/client/cli/tx.go
@@ -116,7 +116,7 @@ func newBuildMsg(
 	taskContractAddress, _ := fs.GetString(FlagTaskContractAddress)
 	taskID, _ := fs.GetUint64(FlagTaskID)
-	phase, _ := fs.GetUint32(FlagPhase)
+	phase, _ := fs.GetInt32(FlagPhase)
 	if err := types.ValidatePhase(types.Phase(phase)); err != nil {
 		return nil, err
 	}
 
diff --git a/x/avs/keeper/keeper.go b/x/avs/keeper/keeper.go
index 6a29a4da1..99233cb41 100644
--- a/x/avs/keeper/keeper.go
+++ b/x/avs/keeper/keeper.go
@@ -447,12 +447,14 @@ func (k Keeper) RaiseAndResolveChallenge(ctx sdk.Context, params *types.Challeng
 		return errorsmod.Wrap(types.ErrEpochNotFound, fmt.Sprintf("epoch info not found %s", avsInfo.EpochIdentifier))
 	}
 
+	// #nosec G115
 	if epoch.CurrentEpoch <= int64(taskInfo.StartingEpoch)+int64(taskInfo.TaskResponsePeriod)+int64(taskInfo.TaskStatisticalPeriod) {
 		return errorsmod.Wrap(
 			types.ErrSubmitTooSoonError,
 			fmt.Sprintf("SetTaskResultInfo:the challenge period has not started , CurrentEpoch:%d", epoch.CurrentEpoch),
 		)
 	}
+	// #nosec G115
 	if epoch.CurrentEpoch > int64(taskInfo.StartingEpoch)+int64(taskInfo.TaskResponsePeriod)+int64(taskInfo.TaskStatisticalPeriod)+int64(taskInfo.TaskChallengePeriod) {
 		return errorsmod.Wrap(
 			types.ErrSubmitTooLateError,
@@ -589,6 +591,7 @@ func (k Keeper) SubmitTaskResult(ctx sdk.Context, addr string, info *types.TaskR
 			fmt.Sprintf("SetTaskResultInfo:the TaskResponse period has not started , CurrentEpoch:%d", epoch.CurrentEpoch),
 		)
 	}
+	// #nosec G115
 	if epoch.CurrentEpoch > int64(task.StartingEpoch)+int64(task.TaskResponsePeriod)+int64(task.TaskStatisticalPeriod) {
 		return errorsmod.Wrap(
 			types.ErrSubmitTooLateError,
diff --git a/x/evm/keeper/grpc_query.go b/x/evm/keeper/grpc_query.go
index 851ce1dd0..518611326 100644
--- a/x/evm/keeper/grpc_query.go
+++ b/x/evm/keeper/grpc_query.go
@@ -472,6 +472,7 @@ func (k Keeper) TraceTx(c context.Context, req *types.QueryTraceTxRequest) (*typ
 			continue
 		}
 		txConfig.TxHash = ethTx.Hash()
+		// #nosec G115
 		txConfig.TxIndex = uint(i)
 		// reset gas meter
for each transaction ctx = ctx.WithGasMeter(evmostypes.NewInfiniteGasMeterWithLimit(msg.Gas())) @@ -565,6 +566,7 @@ func (k Keeper) TraceBlock(c context.Context, req *types.QueryTraceBlockRequest) result := types.TxTraceResult{} ethTx := tx.AsTransaction() txConfig.TxHash = ethTx.Hash() + // #nosec G115 txConfig.TxIndex = uint(i) traceResult, logIndex, err := k.traceTx(ctx, cfg, txConfig, signer, ethTx, req.TraceConfig, true, nil) if err != nil { diff --git a/x/operator/keeper/slash.go b/x/operator/keeper/slash.go index ec7634117..80634d9a5 100644 --- a/x/operator/keeper/slash.go +++ b/x/operator/keeper/slash.go @@ -19,6 +19,7 @@ import ( // GetSlashIDForDogfood It use infractionType+'_'+'infractionHeight' as the slashID, because /* the slash */event occurs in dogfood doesn't have a TxID. It isn't submitted through an external transaction. func GetSlashIDForDogfood(infraction stakingtypes.Infraction, infractionHeight int64) string { // #nosec G701 + // #nosec G115 return strings.Join([]string{hexutil.EncodeUint64(uint64(infraction)), hexutil.EncodeUint64(uint64(infractionHeight))}, utils.DelimiterForID) } diff --git a/x/operator/types/keys.go b/x/operator/types/keys.go index ad63c5de5..24b0da57a 100644 --- a/x/operator/types/keys.go +++ b/x/operator/types/keys.go @@ -157,7 +157,7 @@ func KeyForVotingPowerSnapshot(avs common.Address, height int64) []byte { return utils.AppendMany( avs.Bytes(), // Append the height - sdk.Uint64ToBigEndian(uint64(height)), + sdk.Uint64ToBigEndian(uint64(height)), // #nosec G115 // height is not negative ) } diff --git a/x/oracle/keeper/aggregator/aggregator.go b/x/oracle/keeper/aggregator/aggregator.go deleted file mode 100644 index 1d21ceb00..000000000 --- a/x/oracle/keeper/aggregator/aggregator.go +++ /dev/null @@ -1,240 +0,0 @@ -package aggregator - -import ( - "math/big" - "sort" - - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/common" - "github.com/ExocoreNetwork/exocore/x/oracle/types" -) - -type priceWithTimeAndRound struct { - price string - decimal int32 - timestamp string - detRoundID string // roundId from source if exists -} - -type reportPrice struct { - validator string - // final price, set to -1 as initial - price string - // sourceId->priceWithTimeAndRound - prices map[uint64]*priceWithTimeAndRound - power *big.Int -} - -func (r *reportPrice) aggregate() string { - if len(r.price) > 0 { - return r.price - } - tmp := make([]*big.Int, 0, len(r.prices)) - for _, p := range r.prices { - priceInt, ok := new(big.Int).SetString(p.price, 10) - // price is not a number (NST), we will just return instead of calculation - if !ok { - return p.price - } - tmp = append(tmp, priceInt) - } - r.price = common.BigIntList(tmp).Median().String() - return r.price -} - -type aggregator struct { - finalPrice string - reports []*reportPrice - // total valiadtor power who has submitted price - reportPower *big.Int - totalPower *big.Int - // validator set total power - // totalPower string - // sourceId->roundId used to track the confirmed DS roundId - // updated by calculator, detId use string - dsPrices map[uint64]string -} - -func (agg *aggregator) copy4CheckTx() *aggregator { - ret := &aggregator{ - finalPrice: agg.finalPrice, - reportPower: copyBigInt(agg.reportPower), - totalPower: copyBigInt(agg.totalPower), - - reports: make([]*reportPrice, 0, len(agg.reports)), - dsPrices: make(map[uint64]string), - } - for k, v := range agg.dsPrices { - ret.dsPrices[k] = v - } - for _, report := range agg.reports { - rTmp := *report - rTmp.price = report.price - 
rTmp.power = copyBigInt(report.power) - - for k, v := range report.prices { - // prices are information submitted by validators, these data will not change under deterministic sources, but with non-deterministic sources they might be overwrite by later prices - tmpV := *v - tmpV.price = v.price - rTmp.prices[k] = &tmpV - } - - ret.reports = append(ret.reports, &rTmp) - } - - return ret -} - -// fill price from validator submitting into aggregator, and calculation the voting power and check with the consensus status of deterministic source value to decide when to do the aggregation -// TODO: currently apply mode=1 in V1, add swith modes -func (agg *aggregator) fillPrice(pSources []*types.PriceSource, validator string, power *big.Int) { - report := agg.getReport(validator) - if report == nil { - report = &reportPrice{ - validator: validator, - prices: make(map[uint64]*priceWithTimeAndRound), - power: power, - } - agg.reports = append(agg.reports, report) - agg.reportPower = new(big.Int).Add(agg.reportPower, power) - } - - for _, pSource := range pSources { - if len(pSource.Prices[0].DetID) == 0 { - // this is an NS price report, price will just be updated instead of append - if pTR := report.prices[pSource.SourceID]; pTR == nil { - pTmp := pSource.Prices[0] - pTR = &priceWithTimeAndRound{ - price: pTmp.Price, - decimal: pTmp.Decimal, - timestamp: pTmp.Timestamp, - } - report.prices[pSource.SourceID] = pTR - } else { - pTR.price = pSource.Prices[0].Price - } - } else { - // this is an DS price report - if pTR := report.prices[pSource.SourceID]; pTR == nil { - pTmp := pSource.Prices[0] - pTR = &priceWithTimeAndRound{ - decimal: pTmp.Decimal, - } - if len(agg.dsPrices[pSource.SourceID]) > 0 { - for _, reportTmp := range agg.reports { - if priceTmp := reportTmp.prices[pSource.SourceID]; priceTmp != nil && len(priceTmp.price) > 0 { - pTR.price = priceTmp.price - pTR.detRoundID = priceTmp.detRoundID - pTR.timestamp = priceTmp.timestamp - break - } - } - } - report.prices[pSource.SourceID] = pTR - } - // skip if this DS's slot exists, DS's value only updated by calculator - } - } -} - -// TODO: for v1 use mode=1, which means agg.dsPrices with each key only be updated once, switch modes -func (agg *aggregator) confirmDSPrice(confirmedRounds []*confirmedPrice) { - for _, priceSourceRound := range confirmedRounds { - // update the latest round-detId for DS, TODO: in v1 we only update this value once since calculator will just ignore any further value once a detId has reached consensus - // agg.dsPrices[priceSourceRound.sourceId] = priceSourceRound.detId - // this id's comparison need to format id to make sure them be the same length - if id := agg.dsPrices[priceSourceRound.sourceID]; len(id) == 0 || (len(id) > 0 && id < priceSourceRound.detID) { - agg.dsPrices[priceSourceRound.sourceID] = priceSourceRound.detID - for _, report := range agg.reports { - if len(report.price) > 0 { - // price of IVA has completed - continue - } - if price := report.prices[priceSourceRound.sourceID]; price != nil { - price.detRoundID = priceSourceRound.detID - price.timestamp = priceSourceRound.timestamp - price.price = priceSourceRound.price - } // else TODO: panic in V1 - } - } - } -} - -func (agg *aggregator) getReport(validator string) *reportPrice { - for _, r := range agg.reports { - if r.validator == validator { - return r - } - } - return nil -} - -func (agg *aggregator) aggregate() string { - if len(agg.finalPrice) > 0 { - return agg.finalPrice - } - // TODO: implemetn different MODE for definition of consensus, - 
// currently: use rule_1+MODE_1: {rule:specified source:`chainlink`, MODE: asap when power exceeds the threshold} - // 1. check OVA threshold - // 2. check IVA consensus with rule, TODO: for v1 we only implement with mode=1&rule=1 - if common.ExceedsThreshold(agg.reportPower, agg.totalPower) { - // TODO: this is kind of a mock way to suite V1, need update to check with params.rule - // check if IVA all reached consensus - if len(agg.dsPrices) > 0 { - validatorPrices := make([]*big.Int, 0, len(agg.reports)) - // do the aggregation to find out the 'final price' - for _, validatorReport := range agg.reports { - priceInt, ok := new(big.Int).SetString(validatorReport.aggregate(), 10) - if !ok { - // price is not number, we just return the price when power exceeds threshold - agg.finalPrice = validatorReport.aggregate() - return agg.finalPrice - } - validatorPrices = append(validatorPrices, priceInt) - } - // vTmp := bigIntList(validatorPrices) - agg.finalPrice = common.BigIntList(validatorPrices).Median().String() - // clear relative aggregator for this feeder, all the aggregator,calculator, filter can be removed since this round has been sealed - } - } - return agg.finalPrice -} - -// TODO: this only suites for DS. check source type for extension -// GetFinaPriceListForFeederIDs retrieve final price info as an array ordered by sourceID asc -func (agg *aggregator) getFinalPriceList(feederID uint64) []*types.AggFinalPrice { - sourceIDs := make([]uint64, 0, len(agg.dsPrices)) - for sID := range agg.dsPrices { - sourceIDs = append(sourceIDs, sID) - } - sort.Slice(sourceIDs, func(i, j int) bool { - return sourceIDs[i] < sourceIDs[j] - }) - ret := make([]*types.AggFinalPrice, 0, len(sourceIDs)) - for _, sID := range sourceIDs { - for _, report := range agg.reports { - price := report.prices[sID] - if price == nil || price.detRoundID != agg.dsPrices[sID] { - // the DetID mismatch should not happen - continue - } - ret = append(ret, &types.AggFinalPrice{ - FeederID: feederID, - SourceID: sID, - DetID: price.detRoundID, - Price: price.price, - }) - // {feederID, sourceID} has been found, skip rest reports - break - } - } - return ret -} - -func newAggregator(validatorSetLength int, totalPower *big.Int) *aggregator { - return &aggregator{ - reports: make([]*reportPrice, 0, validatorSetLength), - reportPower: big.NewInt(0), - dsPrices: make(map[uint64]string), - totalPower: totalPower, - } -} diff --git a/x/oracle/keeper/aggregator/aggregator_test.go b/x/oracle/keeper/aggregator/aggregator_test.go deleted file mode 100644 index 35adf0aa1..000000000 --- a/x/oracle/keeper/aggregator/aggregator_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package aggregator - -import ( - "math/big" - "testing" - - . 
"github.com/smartystreets/goconvey/convey" -) - -func TestAggregator(t *testing.T) { - Convey("fill prices into aggregator", t, func() { - a := newAggregator(5, big.NewInt(4)) - // a.fillPrice(pS1, "v1", one) //v1:{1, 2} - - Convey("fill v1's report", func() { - a.fillPrice(pS1, "v1", one) // v1:{1, 2} - report := a.getReport("v1") - So(report.prices[1].price, ShouldEqual, "") - Convey("fill v2's report", func() { - a.fillPrice(pS2, "v2", one) - report := a.getReport("v2") - So(report.prices[1].price, ShouldEqual, "") - Convey("fill more v1's report", func() { - a.fillPrice(pS21, "v1", one) - report := a.getReport("v1") - So(report.prices[1].price, ShouldEqual, "") - So(report.prices[2].price, ShouldEqual, "") - Convey("confirm deterministic source_1 and source 2", func() { - a.confirmDSPrice([]*confirmedPrice{ - { - sourceID: 1, - detID: "9", - price: "10", - timestamp: "-", - }, - { - sourceID: 2, - detID: "3", - price: "20", - timestamp: "-", - }, - }) - reportV1 := a.getReport("v1") - reportV2 := a.getReport("v2") - So(reportV1.prices[1].price, ShouldResemble, "10") - So(reportV1.prices[1].detRoundID, ShouldEqual, "9") - - So(reportV2.prices[1].price, ShouldResemble, "10") - So(reportV2.prices[1].detRoundID, ShouldEqual, "9") - - So(reportV1.prices[2].price, ShouldResemble, "20") - So(reportV1.prices[2].detRoundID, ShouldEqual, "3") - - // current implementation only support v1's single source - Convey("aggregate after all source confirmed", func() { - a.fillPrice(pS6, "v3", one) - a.aggregate() // v1:{s1:9-10, s2:3-20}:15, v2:{s1:9-10}:10 - So(a.getReport("v1").price, ShouldEqual, "15") - So(a.getReport("v2").price, ShouldEqual, "10") - So(a.getReport("v3").price, ShouldEqual, "20") - So(a.finalPrice, ShouldEqual, "15") - }) - }) - }) - }) - }) - }) -} diff --git a/x/oracle/keeper/aggregator/calculator.go b/x/oracle/keeper/aggregator/calculator.go deleted file mode 100644 index 5e1b8fd5c..000000000 --- a/x/oracle/keeper/aggregator/calculator.go +++ /dev/null @@ -1,197 +0,0 @@ -package aggregator - -import ( - "math/big" - - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/common" - "github.com/ExocoreNetwork/exocore/x/oracle/types" -) - -type confirmedPrice struct { - sourceID uint64 - detID string - price string - timestamp string -} - -// internal struct -type priceAndPower struct { - price string - power *big.Int -} - -// for a specific DS round, it could have multiple values provided by different validators(should not be true if there's no malicious validator) -type roundPrices struct { // 0 means NS - detID string - prices []*priceAndPower - price string - timestamp string - // confirmed bool -} - -// udpate priceAndPower for a specific DSRoundID, if the price exists, increase its power with provided data -// return confirmed=true, when detect power exceeds the threshold -func (r *roundPrices) updatePriceAndPower(pw *priceAndPower, totalPower *big.Int) (updated bool, confirmed bool) { - if len(r.price) > 0 { - confirmed = true - return - } - for _, item := range r.prices { - if item.price == pw.price { - item.power = new(big.Int).Add(item.power, pw.power) - updated = true - if common.ExceedsThreshold(item.power, totalPower) { - r.price = item.price - confirmed = true - } - return - } - } - if len(r.prices) < cap(r.prices) { - r.prices = append(r.prices, pw) - updated = true - if common.ExceedsThreshold(pw.power, totalPower) { - r.price = pw.price - confirmed = true - } - } - return -} - -// each DS corresponding a roundPriceList to represent its multiple rounds(DS round) in one 
oracle-round -type roundPricesList struct { - roundPricesList []*roundPrices - // each round can have at most roundPricesCount priceAndPower - roundPricesCount int -} - -func (r *roundPricesList) copy4CheckTx() *roundPricesList { - ret := &roundPricesList{ - roundPricesList: make([]*roundPrices, 0, len(r.roundPricesList)), - roundPricesCount: r.roundPricesCount, - } - - for _, v := range r.roundPricesList { - tmpRP := &roundPrices{ - detID: v.detID, - price: v.price, - prices: make([]*priceAndPower, 0, len(v.prices)), - timestamp: v.timestamp, - } - for _, pNP := range v.prices { - tmpPNP := *pNP - // power will be modified during execution - tmpPNP.power = copyBigInt(pNP.power) - tmpRP.prices = append(tmpRP.prices, &tmpPNP) - } - - ret.roundPricesList = append(ret.roundPricesList, tmpRP) - } - return ret -} - -// to tell if any round of this DS has reached consensus/confirmed -func (r *roundPricesList) hasConfirmedDetID() bool { - for _, round := range r.roundPricesList { - if len(round.price) > 0 { - return true - } - } - return false -} - -// get the roundPriceList correspond to specifid detID of a DS -// if no required data and the pricesList has not reach its limitation, we will add a new slot for this detId -func (r *roundPricesList) getOrNewRound(detID string, timestamp string) (round *roundPrices) { - for _, round = range r.roundPricesList { - if round.detID == detID { - if len(round.price) > 0 { - round = nil - } - return - } - } - - if len(r.roundPricesList) < cap(r.roundPricesList) { - round = &roundPrices{ - detID: detID, - prices: make([]*priceAndPower, 0, r.roundPricesCount), - timestamp: timestamp, - } - r.roundPricesList = append(r.roundPricesList, round) - return - } - return -} - -// calculator used to get consensus on deterministic source based data from validator set reports of price -type calculator struct { - // sourceId->{[]{roundId, []{price,power}, confirmed}}, confirmed value will be set in [0] - deterministicSource map[uint64]*roundPricesList - validatorLength int - totalPower *big.Int -} - -func (c *calculator) copy4CheckTx() *calculator { - ret := newCalculator(c.validatorLength, c.totalPower) - - // copy deterministicSource - for k, v := range c.deterministicSource { - ret.deterministicSource[k] = v.copy4CheckTx() - } - - return ret -} - -func (c *calculator) newRoundPricesList() *roundPricesList { - return &roundPricesList{ - roundPricesList: make([]*roundPrices, 0, int(common.MaxDetID)*c.validatorLength), - // for each DS-roundId, the count of prices provided is the number of validators at most - roundPricesCount: c.validatorLength, - } -} - -func (c *calculator) getOrNewSourceID(sourceID uint64) *roundPricesList { - rounds := c.deterministicSource[sourceID] - if rounds == nil { - rounds = c.newRoundPricesList() - c.deterministicSource[sourceID] = rounds - } - return rounds -} - -// fillPrice called upon new MsgCreatPrice arrived, to trigger the calculation to get to consensus on the same roundID_of_deterministic_source -// v1 use mode1, TODO: switch modes -func (c *calculator) fillPrice(pSources []*types.PriceSource, _ string, power *big.Int) (confirmedRounds []*confirmedPrice) { - for _, pSource := range pSources { - rounds := c.getOrNewSourceID(pSource.SourceID) - if rounds.hasConfirmedDetID() { - // TODO: this skip is just for V1 to do fast calculation and release EndBlocker pressure, may lead to 'not latest detId' be chosen - break - } - for _, pDetID := range pSource.Prices { - round := rounds.getOrNewRound(pDetID.DetID, pDetID.Timestamp) - if round == 
nil { - // this sourceId has reach the limitation of different detId, or has confirmed - continue - } - - updated, confirmed := round.updatePriceAndPower(&priceAndPower{pDetID.Price, power}, c.totalPower) - if updated && confirmed { - // sourceId, detId, price - confirmedRounds = append(confirmedRounds, &confirmedPrice{pSource.SourceID, round.detID, round.price, round.timestamp}) // TODO: just in v1 with mode==1, we use asap, so we just ignore any further data from this DS, even higher detId may get to consensus, in this way, in most case, we can complete the calculation in the transaction execution process. Release the pressure in EndBlocker - // TODO: this may delay to current block finish - break - } - } - } - return -} - -func newCalculator(validatorSetLength int, totalPower *big.Int) *calculator { - return &calculator{ - deterministicSource: make(map[uint64]*roundPricesList), - validatorLength: validatorSetLength, - totalPower: totalPower, - } -} diff --git a/x/oracle/keeper/aggregator/calculator_test.go b/x/oracle/keeper/aggregator/calculator_test.go deleted file mode 100644 index e5b85a46e..000000000 --- a/x/oracle/keeper/aggregator/calculator_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package aggregator - -import ( - "math/big" - "testing" - - . "github.com/smartystreets/goconvey/convey" -) - -/* - 1-10, 2-12, 3-15 - -ps1: 1-10, 2-12 -ps2: 2-12, 3-15 -ps3: 1-10, 2-11(m) ---- -ps4: 2-12, 3-19(m) -ps5: 1-10, 3-19(m) ----- -ps1, ps2, ps3, ps4 ---> 2-12 -ps2, ps2, ps3, ps5 ---> 1-10 -*/ -func TestCalculator(t *testing.T) { - one := big.NewInt(1) - Convey("fill prices into calculator", t, func() { - c := newCalculator(5, big.NewInt(4)) - Convey("fill prices from single deterministic source", func() { - c.fillPrice(pS1, "v1", one) // 1-10, 2-12 - c.fillPrice(pS2, "v2", one) // 2-12, 3-15 - c.fillPrice(pS3, "v3", one) // 1-10, 2-11 - Convey("consensus on detid=2 and price=12", func() { - confirmed := c.fillPrice(pS4, "v4", one) // 2-12, 3-19 - So(confirmed[0].detID, ShouldEqual, "2") - So(confirmed[0].price, ShouldResemble, "12") - }) - Convey("consensus on detid=1 and price=10", func() { - confirmed := c.fillPrice(pS5, "v5", one) // 1-10, 3-19 - So(confirmed[0].detID, ShouldEqual, "1") - So(confirmed[0].price, ShouldResemble, "10") - - confirmed = c.fillPrice(pS4, "v4", one) - So(confirmed, ShouldBeNil) - }) - }) - Convey("fill prices from multiple deterministic sources", func() { - c.fillPrice(pS21, "v1", one) - c.fillPrice(pS22, "v2", one) - c.fillPrice(pS23, "v3", one) - Convey("consensus on both source 1 and source 2", func() { - confirmed := c.fillPrice(pS24, "v4", one) - So(len(confirmed), ShouldEqual, 2) - i := 0 - if confirmed[0].sourceID == 2 { - i = 1 - } - So(confirmed[i].detID, ShouldEqual, "2") - So(confirmed[i].price, ShouldResemble, "12") - - So(confirmed[1-i].detID, ShouldEqual, "3") - So(confirmed[1-i].price, ShouldResemble, "15") - }) - Convey("consenus on source 1 only", func() { - confirmed := c.fillPrice(pS25, "v5", one) - So(len(confirmed), ShouldEqual, 1) - So(confirmed[0].detID, ShouldEqual, "1") - So(confirmed[0].price, ShouldResemble, "10") - }) - }) - }) -} diff --git a/x/oracle/keeper/aggregator/context.go b/x/oracle/keeper/aggregator/context.go deleted file mode 100644 index ffa3cedda..000000000 --- a/x/oracle/keeper/aggregator/context.go +++ /dev/null @@ -1,450 +0,0 @@ -package aggregator - -import ( - "errors" - "fmt" - "math/big" - "sort" - - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/cache" - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/common" 
- "github.com/ExocoreNetwork/exocore/x/oracle/types" - sdk "github.com/cosmos/cosmos-sdk/types" -) - -type PriceItemKV struct { - TokenID uint64 - PriceTR types.PriceTimeRound -} - -type roundInfo struct { - // this round of price will start from block basedBlock+1, the basedBlock served as a trigger to notify validators to submit prices - basedBlock uint64 - // next round id of the price oracle service, price with the id will be record on block basedBlock+1 if all prices submitted by validators(for v1, validators serve as oracle nodes) get to consensus immediately - nextRoundID uint64 - // indicate if this round is open for collecting prices or closed in either condition that success with a consensused price or not - // 1: open, 2: closed - status roundStatus -} - -// roundStatus is an enum type to indicate the status of a roundInfo -type roundStatus int32 - -const ( - // roundStatusOpen indicates the round is open for collecting prices - roundStatusOpen roundStatus = iota + 1 - // roundStatusClosed indicates the round is closed, either success with a consensused price or not - roundStatusClosed -) - -// AggregatorContext keeps memory cache for state params, validatorset, and updatedthese values as they updated on chain. And it keeps the information to track all tokenFeeders' status and data collection -// nolint -type AggregatorContext struct { - params *types.Params - - // validator->power - validatorsPower map[string]*big.Int - totalPower *big.Int - - // each active feederToken has a roundInfo - rounds map[uint64]*roundInfo - - // each roundInfo has a worker - aggregators map[uint64]*worker -} - -func (agc *AggregatorContext) Copy4CheckTx() *AggregatorContext { - ret := &AggregatorContext{ - // params, validatorsPower, totalPower, these values won't change during block executing - params: agc.params, - validatorsPower: agc.validatorsPower, - totalPower: agc.totalPower, - - rounds: make(map[uint64]*roundInfo), - aggregators: make(map[uint64]*worker), - } - - for k, v := range agc.rounds { - vTmp := *v - ret.rounds[k] = &vTmp - } - - for k, v := range agc.aggregators { - w := newWorker(k, ret) - w.sealed = v.sealed - w.price = v.price - - w.f = v.f.copy4CheckTx() - w.c = v.c.copy4CheckTx() - w.a = v.a.copy4CheckTx() - } - - return ret -} - -// sanity check for the msgCreatePrice -func (agc *AggregatorContext) sanityCheck(msg *types.MsgCreatePrice) error { - // sanity check - // TODO: check the msgCreatePrice's Decimal is correct with params setting - // TODO: check len(price.prices)>0, len(price.prices._range_eachPriceSource.Prices)>0, at least has one source, and for each source has at least one price - - if accAddress, err := sdk.AccAddressFromBech32(msg.Creator); err != nil { - return errors.New("invalid address") - } else if _, ok := agc.validatorsPower[sdk.ConsAddress(accAddress).String()]; !ok { - return errors.New("signer is not validator") - } - - if len(msg.Prices) == 0 { - return errors.New("msg should provide at least one price") - } - - for _, pSource := range msg.Prices { - if len(pSource.Prices) == 0 || len(pSource.Prices) > int(common.MaxDetID) || !agc.params.IsValidSource(pSource.SourceID) { - return errors.New("source should be valid and provide at least one price") - } - // check with params is coressponding source is deteministic - if agc.params.IsDeterministicSource(pSource.SourceID) { - for _, pDetID := range pSource.Prices { - // TODO: verify the format of DetId is correct, since this is string, and we will make consensus with validator's power, so it's ok not to 
verify the format - // just make sure the DetId won't mess up with NS's placeholder id, the limitation of maximum count one validator can submit will be check by filter - if len(pDetID.DetID) == 0 { - // deterministic must have specified deterministicId - return errors.New("ds should have roundid") - } - // DS's price value will go through consensus process, so it's safe to skip the check here - } - // sanity check: NS submit only one price with detId=="" - } else if len(pSource.Prices) > 1 || len(pSource.Prices[0].DetID) > 0 { - return errors.New("ns should not have roundid") - } - } - return nil -} - -func (agc *AggregatorContext) checkMsg(msg *types.MsgCreatePrice, isCheckMode bool) error { - if err := agc.sanityCheck(msg); err != nil { - return err - } - - // check feeder is active - feederContext := agc.rounds[msg.FeederID] - if feederContext == nil { - return fmt.Errorf("context not exist for feederID:%d", msg.FeederID) - } - // This round had been sealed but current window not closed - if feederContext.status != roundStatusOpen { - if feederWorker := agc.aggregators[msg.FeederID]; feederWorker != nil { - if _, list4Aggregator := feederWorker.filtrate(msg); list4Aggregator != nil { - // record this message for performance evaluation(used for slashing) - feederWorker.recordMessage(msg.Creator, msg.FeederID, list4Aggregator) - } - } - // if the validator send a tx inside an alive window but the status had been changed to closed by enough power collected - // we should ignore the error for simulation to complete - if !isCheckMode { - return fmt.Errorf("context is not available for feederID:%d", msg.FeederID) - } - } - - // senity check on basedBlock - if msg.BasedBlock != feederContext.basedBlock { - return errors.New("baseblock not match") - } - - // check sources rule matches - if ok, err := agc.params.CheckRules(msg.FeederID, msg.Prices); !ok { - return err - } - - for _, pSource := range msg.Prices { - for _, pTimeDetID := range pSource.Prices { - if ok := agc.params.CheckDecimal(msg.FeederID, pTimeDetID.Decimal); !ok { - return fmt.Errorf("decimal not match for source ID %d and price ID %s", pSource.SourceID, pTimeDetID.DetID) - } - } - } - return nil -} - -func (agc *AggregatorContext) FillPrice(msg *types.MsgCreatePrice) (*PriceItemKV, *cache.ItemM, error) { - feederWorker := agc.aggregators[msg.FeederID] - // worker initialzed here reduce workload for Endblocker - if feederWorker == nil { - feederWorker = newWorker(msg.FeederID, agc) - agc.aggregators[msg.FeederID] = feederWorker - } - - if feederWorker.sealed { - if _, list4Aggregator := feederWorker.filtrate(msg); list4Aggregator != nil { - // record this message for performance evaluation(used for slashing) - feederWorker.recordMessage(msg.Creator, msg.FeederID, list4Aggregator) - } - return nil, nil, types.ErrPriceProposalIgnored.Wrap("price aggregation for this round has sealed") - } - - if listFilled := feederWorker.do(msg); listFilled != nil { - feederWorker.recordMessage(msg.Creator, msg.FeederID, listFilled) - if finalPrice := feederWorker.aggregate(); len(finalPrice) > 0 { - agc.rounds[msg.FeederID].status = roundStatusClosed - feederWorker.seal() - return &PriceItemKV{agc.params.GetTokenFeeder(msg.FeederID).TokenID, types.PriceTimeRound{ - Price: finalPrice, - Decimal: agc.params.GetTokenInfo(msg.FeederID).Decimal, - // TODO: check the format - Timestamp: msg.Prices[0].Prices[0].Timestamp, - RoundID: agc.rounds[msg.FeederID].nextRoundID, - }}, &cache.ItemM{FeederID: msg.FeederID}, nil - } - return nil, 
&cache.ItemM{FeederID: msg.FeederID, PSources: listFilled, Validator: msg.Creator}, nil - } - - // return nil, nil, errors.New("no valid price proposal to add for aggregation") - return nil, nil, types.ErrPriceProposalIgnored -} - -// NewCreatePrice receives msgCreatePrice message, and goes process: filter->aggregator, filter->calculator->aggregator -// non-deterministic data will goes directly into aggregator, and deterministic data will goes into calculator first to get consensus on the deterministic id. -func (agc *AggregatorContext) NewCreatePrice(ctx sdk.Context, msg *types.MsgCreatePrice) (*PriceItemKV, *cache.ItemM, error) { - if err := agc.checkMsg(msg, ctx.IsCheckTx()); err != nil { - return nil, nil, types.ErrInvalidMsg.Wrap(err.Error()) - } - return agc.FillPrice(msg) -} - -// prepare for new roundInfo, just update the status kept in memory -// executed at EndBlock stage, seall all success or expired roundInfo -// including possible aggregation and state update -// when validatorSet update, set force to true, to seal all alive round -// returns: 1st successful sealed, need to be written to KVStore, 2nd: failed sealed tokenID, use previous price to write to KVStore -func (agc *AggregatorContext) SealRound(ctx sdk.Context, force bool) (success []*PriceItemKV, failed []uint64, sealed []uint64, windowClosed []uint64) { - logger := ctx.Logger() - feederIDs := make([]uint64, 0, len(agc.rounds)) - for fID := range agc.rounds { - feederIDs = append(feederIDs, fID) - } - sort.Slice(feederIDs, func(i, j int) bool { - return feederIDs[i] < feederIDs[j] - }) - height := uint64(ctx.BlockHeight()) - // make sure feederIDs are accessed in order to calculate the indexOffset for slashing - windowClosedMap := make(map[uint64]bool) - for _, feederID := range feederIDs { - if agc.windowEnd(feederID, height) { - windowClosed = append(windowClosed, feederID) - windowClosedMap[feederID] = true - } - round := agc.rounds[feederID] - if round.status == roundStatusOpen { - feeder := agc.params.GetTokenFeeder(feederID) - // TODO: for mode=1, we don't do aggregate() here, since if it donesn't success in the transaction execution stage, it won't success here - // but it's not always the same for other modes, switch modes - switch common.Mode { - case types.ConsensusModeASAP: - offset := height - round.basedBlock - expired := feeder.EndBlock > 0 && height >= feeder.EndBlock - outOfWindow := offset >= uint64(common.MaxNonce) - - // an open round reach its end of window, increase offsetIndex for active valdiator and chech the performance(missing/malicious) - - if expired || outOfWindow || force { - failed = append(failed, feeder.TokenID) - if !expired { - logger.Debug("set round status from open to closed", "feederID", feederID, "force", force, "block", height) - round.status = roundStatusClosed - } - // TODO: optimize operformance - sealed = append(sealed, feederID) - if !windowClosedMap[feederID] { - logger.Debug("remove aggregators(workers) force/expired", "feederID", feederID) - agc.RemoveWorker(feederID) - } - } - default: - logger.Info("mode other than 1 is not support now") - } - } - // all status: 1->2, remove its aggregator - if agc.aggregators[feederID] != nil && agc.aggregators[feederID].sealed { - sealed = append(sealed, feederID) - } - } - return success, failed, sealed, windowClosed -} - -// PrepareEndBlock is called at EndBlock stage, to prepare the roundInfo for the next block(of input block) -func (agc *AggregatorContext) PrepareRoundEndBlock(ctx sdk.Context, block int64, forceSealHeight 
uint64) (newRoundFeederIDs []uint64) { - if block < 1 { - return newRoundFeederIDs - } - logger := ctx.Logger() - blockUint64 := uint64(block) - - for feederID, feeder := range agc.params.GetTokenFeeders() { - if feederID == 0 { - continue - } - if (feeder.EndBlock > 0 && feeder.EndBlock <= blockUint64) || feeder.StartBaseBlock > blockUint64 { - // this feeder is inactive - continue - } - - delta := blockUint64 - feeder.StartBaseBlock - left := delta % feeder.Interval - count := delta / feeder.Interval - latestBasedblock := blockUint64 - left - latestNextRoundID := feeder.StartRoundID + count - - logger.Info("PrepareRoundEndBlock", "feederID", feederID, "block", block, "latestBasedblock", latestBasedblock, "forceSealHeight", forceSealHeight, "position_in_round", left) - - feederIDUint64 := uint64(feederID) - round := agc.rounds[feederIDUint64] - if round == nil { - logger.Info("PrepareRoundEndBlock: initialize round info") - round = &roundInfo{ - basedBlock: latestBasedblock, - nextRoundID: latestNextRoundID, - } - if left >= uint64(common.MaxNonce) { - // since do sealround properly before prepareRound, this only possible happens in node restart, and nonce has been taken care of in kvStore - round.status = roundStatusClosed - logger.Info("PrepareRoundEndBlock: status_closed") - } else { - round.status = roundStatusOpen - logger.Info("PrepareRoundEndBlock: status_open") - if latestBasedblock < forceSealHeight { - // debug - logger.Debug("PrepareRoundEndBlock: status_closed due to forceseal") - round.status = roundStatusClosed - } - if left == 0 { - logger.Info("PrepareRoundEndBlock: add a new round") - // set nonce for corresponding feederID for new roud start - newRoundFeederIDs = append(newRoundFeederIDs, feederIDUint64) - } - } - agc.rounds[feederIDUint64] = round - } else { - // prepare a new round for exist roundInfo - if left == 0 { - logger.Info("PrepareRoundEndBlock: set existing round status to open") - round.basedBlock = latestBasedblock - round.nextRoundID = latestNextRoundID - round.status = roundStatusOpen - // set nonce for corresponding feederID for new roud start - newRoundFeederIDs = append(newRoundFeederIDs, feederIDUint64) - // drop previous worker - agc.RemoveWorker(feederIDUint64) - } else if round.status == roundStatusOpen && left >= uint64(common.MaxNonce) { - logger.Info("PrepareRoundEndBlock: set existing round status to closed") - // this shouldn't happen, if do sealround properly before prepareRound, basically for test only - // TODO: print error log here - round.status = roundStatusClosed - // TODO: just modify the status here, since sealRound should do all the related seal actions already when parepare invoked - } - } - } - return newRoundFeederIDs -} - -// SetParams sets the params field of aggregatorContext“ -func (agc *AggregatorContext) SetParams(p *types.Params) { - agc.params = p -} - -// SetValidatorPowers sets the map of validator's power for aggreagtorContext -func (agc *AggregatorContext) SetValidatorPowers(vp map[string]*big.Int) { - // t := big.NewInt(0) - agc.totalPower = big.NewInt(0) - agc.validatorsPower = make(map[string]*big.Int) - for addr, power := range vp { - agc.validatorsPower[addr] = power - agc.totalPower = new(big.Int).Add(agc.totalPower, power) - } -} - -// GetValidatorPowers returns the map of validator's power stored in aggregatorContext -func (agc *AggregatorContext) GetValidatorPowers() (vp map[string]*big.Int) { - return agc.validatorsPower -} - -func (agc *AggregatorContext) GetValidators() (validators []string) { - for k := 
range agc.validatorsPower { - validators = append(validators, k) - } - return -} - -// GetTokenIDFromAssetID returns tokenID for corresponding tokenID, it returns 0 if agc.params is nil or assetID not found in agc.params -func (agc *AggregatorContext) GetTokenIDFromAssetID(assetID string) int { - if agc.params == nil { - return 0 - } - return agc.params.GetTokenIDFromAssetID(assetID) -} - -// GetParams returns the params field of aggregatorContext -func (agc *AggregatorContext) GetParams() types.Params { - return *agc.params -} - -func (agc *AggregatorContext) GetParamsMaxSizePrices() uint64 { - return uint64(agc.params.MaxSizePrices) -} - -// GetFinalPriceListForFeederIDs get final price list for required feederIDs in format []{feederID, sourceID, detID, price} with asc of {feederID, sourceID} -// feederIDs is required to be ordered asc -func (agc *AggregatorContext) GetFinalPriceListForFeederIDs(feederIDs []uint64) []*types.AggFinalPrice { - ret := make([]*types.AggFinalPrice, 0, len(feederIDs)) - for _, feederID := range feederIDs { - feederWorker := agc.aggregators[feederID] - if feederWorker != nil { - if pList := feederWorker.getFinalPriceList(feederID); len(pList) > 0 { - ret = append(ret, pList...) - } - } - } - return ret -} - -// PerformanceReview compare results to decide whether the validator is effective, honest -func (agc *AggregatorContext) PerformanceReview(ctx sdk.Context, finalPrice *types.AggFinalPrice, validator string) (exist, matched bool) { - feederWorker := agc.aggregators[finalPrice.FeederID] - if feederWorker == nil { - // Log unexpected nil feederWorker for debugging - ctx.Logger().Error( - "unexpected nil feederWorker in PerformanceReview", - "feederID", finalPrice.FeederID, - "validator", validator, - ) - // Treat validator as effective & honest to avoid unfair penalties - exist = true - matched = true - return - } - exist, matched = feederWorker.check(validator, finalPrice.FeederID, finalPrice.SourceID, finalPrice.Price, finalPrice.DetID) - return -} - -func (agc AggregatorContext) windowEnd(feederID, height uint64) bool { - feeder := agc.params.TokenFeeders[feederID] - if (feeder.EndBlock > 0 && feeder.EndBlock <= height) || feeder.StartBaseBlock > height { - return false - } - delta := height - feeder.StartBaseBlock - left := delta % feeder.Interval - return left == uint64(common.MaxNonce) -} - -func (agc *AggregatorContext) RemoveWorker(feederID uint64) { - delete(agc.aggregators, feederID) -} - -// NewAggregatorContext returns a new instance of AggregatorContext -func NewAggregatorContext() *AggregatorContext { - return &AggregatorContext{ - validatorsPower: make(map[string]*big.Int), - totalPower: big.NewInt(0), - rounds: make(map[uint64]*roundInfo), - aggregators: make(map[uint64]*worker), - } -} diff --git a/x/oracle/keeper/aggregator/context_test.go b/x/oracle/keeper/aggregator/context_test.go deleted file mode 100644 index ef97c5b73..000000000 --- a/x/oracle/keeper/aggregator/context_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package aggregator - -import ( - "math/big" - - . "github.com/agiledragon/gomonkey/v2" - sdk "github.com/cosmos/cosmos-sdk/types" - // . 
"github.com/smartystreets/goconvey/convey" -) - -// func TestAggregatorContext(t *testing.T) { -// Convey("init aggregatorContext with default params", t, func() { -// agc := initAggregatorContext4Test() -// var ctx sdk.Context -// Convey("prepare round to gengerate round info of feeders for next block", func() { -// Convey("pepare within the window", func() { -// p := patchBlockHeight(12) -// agc.PrepareRoundEndBlock(ctx, 11, 0) -// -// Convey("for empty round list", func() { -// So(*agc.rounds[1], ShouldResemble, roundInfo{10, 2, 1}) -// }) -// -// Convey("update already exist round info", func() { -// p.Reset() -// time.Sleep(1 * time.Second) -// patchBlockHeight(10 + int64(common.MaxNonce) + 1) -// -// agc.PrepareRoundEndBlock(ctx, 10+int64(common.MaxNonce), 0) -// So(agc.rounds[1].status, ShouldEqual, 2) -// }) -// p.Reset() -// time.Sleep(1 * time.Second) -// }) -// Convey("pepare outside the window", func() { -// Convey("for empty round list", func() { -// p := patchBlockHeight(10 + int64(common.MaxNonce) + 1) -// agc.PrepareRoundEndBlock(ctx, 10+int64(common.MaxNonce), 0) -// So(agc.rounds[1].status, ShouldEqual, 2) -// p.Reset() -// time.Sleep(1 * time.Second) -// }) -// }) -// }) -// -// Convey("seal existing round without any msg recieved", func() { -// p := patchBlockHeight(11) -// agc.PrepareRoundEndBlock(ctx, 10, 0) -// Convey("seal when exceed the window", func() { -// So(agc.rounds[1].status, ShouldEqual, 1) -// p.Reset() -// time.Sleep(1 * time.Second) -// patchBlockHeight(13) -// agc.SealRound(ctx, false) -// So(agc.rounds[1].status, ShouldEqual, 2) -// }) -// -// Convey("force seal by required", func() { -// p.Reset() -// time.Sleep(1 * time.Second) -// patchBlockHeight(12) -// agc.SealRound(ctx, false) -// So(agc.rounds[1].status, ShouldEqual, 1) -// agc.SealRound(ctx, true) -// So(agc.rounds[1].status, ShouldEqual, 2) -// }) -// p.Reset() -// time.Sleep(1 * time.Second) -// }) -// }) -// } - -func initAggregatorContext4Test() *AggregatorContext { - agc := NewAggregatorContext() - - validatorPowers := map[string]*big.Int{ - "v1": big.NewInt(1), - "v2": big.NewInt(1), - "v3": big.NewInt(1), - } - - p := defaultParams - - agc.SetValidatorPowers(validatorPowers) - agc.SetParams(&p) - return agc -} - -func patchBlockHeight(h int64) *Patches { - return ApplyMethod(sdk.Context{}, "BlockHeight", func(sdk.Context) int64 { - return h - }) -} diff --git a/x/oracle/keeper/aggregator/filter.go b/x/oracle/keeper/aggregator/filter.go deleted file mode 100644 index a179373a3..000000000 --- a/x/oracle/keeper/aggregator/filter.go +++ /dev/null @@ -1,102 +0,0 @@ -package aggregator - -import ( - "strconv" - - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/common" - "github.com/ExocoreNetwork/exocore/x/oracle/types" -) - -type filter struct { - maxNonce int - maxDetID int - // nonce start from 1 - validatorNonce map[string]*common.Set[int32] - // validator_sourceId -> roundID, NS use 0 - validatorSource map[string]*common.Set[string] -} - -func newFilter(maxNonce, maxDetID int) *filter { - return &filter{ - maxNonce: maxNonce, - maxDetID: maxDetID, - validatorNonce: make(map[string]*common.Set[int32]), - validatorSource: make(map[string]*common.Set[string]), - } -} - -func (f *filter) copy4CheckTx() *filter { - ret := *f - ret.validatorNonce = make(map[string]*common.Set[int32], len(f.validatorNonce)) - ret.validatorSource = make(map[string]*common.Set[string], len(f.validatorSource)) - - for k, v := range f.validatorNonce { - ret.validatorNonce[k] = v.Copy() - } - - for k, v := range 
f.validatorSource { - ret.validatorSource[k] = v.Copy() - } - - return &ret -} - -func (f *filter) newVNSet() *common.Set[int32] { - return common.NewSet[int32](f.maxNonce) -} - -func (f *filter) newVSSet() *common.Set[string] { - return common.NewSet[string](f.maxDetID) -} - -// add priceWithSource into calculator list and aggregator list depends on the source type(deterministic/non-deterministic) -func (f *filter) addPSource(pSources []*types.PriceSource, validator string) (list4Calculator []*types.PriceSource, list4Aggregator []*types.PriceSource) { - for _, pSource := range pSources { - // check conflicts or duplicate data for the same roundID within the same source - if len(pSource.Prices[0].DetID) > 0 { - // #nosec G115 - k := validator + strconv.Itoa(int(pSource.SourceID)) - detIDs := f.validatorSource[k] - if detIDs == nil { - detIDs = f.newVSSet() - f.validatorSource[k] = detIDs - } - - pSourceTmp := &types.PriceSource{ - SourceID: pSource.SourceID, - Prices: make([]*types.PriceTimeDetID, 0, len(pSource.Prices)), - Desc: pSource.Desc, - } - - for _, pDetID := range pSource.Prices { - if ok := detIDs.Add(pDetID.DetID); ok { - // deterministic id has not seen in filter and limitation of ids this souce has not reached - pSourceTmp.Prices = append(pSourceTmp.Prices, pDetID) - } - } - if len(pSourceTmp.Prices) > 0 { - list4Calculator = append(list4Calculator, pSourceTmp) - list4Aggregator = append(list4Aggregator, pSourceTmp) - } - } else { - // add non-deterministic pSource value into aggregator list - list4Aggregator = append(list4Aggregator, pSource) - } - } - return list4Calculator, list4Aggregator -} - -// filtrate checks data from MsgCreatePrice, and will drop the conflict or duplicate data, it will then fill data into calculator(for deterministic source data to get to consensus) and aggregator (for both deterministic and non0-deterministic source data run 2-layers aggregation to get the final price) -func (f *filter) filtrate(price *types.MsgCreatePrice) (list4Calculator []*types.PriceSource, list4Aggregator []*types.PriceSource) { - validator := price.Creator - nonces := f.validatorNonce[validator] - if nonces == nil { - nonces = f.newVNSet() - f.validatorNonce[validator] = nonces - } - - if ok := nonces.Add(price.Nonce); ok { - list4Calculator, list4Aggregator = f.addPSource(price.Prices, validator) - } - return -} diff --git a/x/oracle/keeper/aggregator/filter_test.go b/x/oracle/keeper/aggregator/filter_test.go deleted file mode 100644 index 085251f9b..000000000 --- a/x/oracle/keeper/aggregator/filter_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package aggregator - -import ( - "testing" - - "github.com/ExocoreNetwork/exocore/x/oracle/types" - . 
"github.com/smartystreets/goconvey/convey" -) - -func TestFilter(t *testing.T) { - Convey("test aggregator_filter", t, func() { - f := newFilter(3, 5) - ptd1 := newPTD("1", "600000") - ptd2 := newPTD("2", "600050") - ptd3 := newPTD("3", "600070") - ptd4 := newPTD("4", "601000") - ptd5 := newPTD("5", "602000") - ptd6 := newPTD("6", "603000") - - ps1 := &types.PriceSource{ - SourceID: 1, - Prices: []*types.PriceTimeDetID{ - ptd1, - ptd2, - }, - } - - ps := []*types.PriceSource{ps1} - msg := &types.MsgCreatePrice{ - Creator: "v1", - FeederID: 1, - Prices: ps, - BasedBlock: 10, - Nonce: 1, - } - l4c, l4a := f.filtrate(msg) - - Convey("add first valid msg", func() { - So(l4c, ShouldResemble, ps) - So(l4a, ShouldResemble, ps) - }) - - Convey("add duplicate nonce msg", func() { - ps1.Prices[0] = ptd3 - l4c, l4a = f.filtrate(msg) - So(l4c, ShouldBeNil) - So(l4a, ShouldBeNil) - }) - - Convey("add duplicate detId", func() { - msg.Nonce = 2 - l4c, l4a = f.filtrate(msg) - Convey("add with new nonce", func() { - So(l4c, ShouldBeNil) - So(l4a, ShouldBeNil) - }) - Convey("update with new detId but use duplicate nonce", func() { - msg.Nonce = 2 - ps1.Prices[0] = ptd3 - l4c, l4a := f.filtrate(msg) - So(l4c, ShouldBeNil) - So(l4a, ShouldBeNil) - }) - }) - - Convey("add new detId with new nonce", func() { - msg.Nonce = 2 - ps1.Prices[0] = ptd3 - l4c, l4a = f.filtrate(msg) - ps1.Prices = ps1.Prices[:1] - ps1.Prices[0] = ptd3 - psReturn := []*types.PriceSource{ps1} - So(l4c, ShouldResemble, psReturn) - So(l4a, ShouldResemble, psReturn) - }) - - Convey("add too many nonce", func() { - msg.Nonce = 2 - ps1.Prices[0] = ptd3 - f.filtrate(msg) - - msg.Nonce = 3 - ps1.Prices[0] = ptd4 - l4c, _ = f.filtrate(msg) - So(l4c[0].Prices, ShouldContain, ptd4) - - msg.Nonce = 4 - ps1.Prices[0] = ptd5 - l4c, _ = f.filtrate(msg) - So(l4c, ShouldBeNil) - }) - - Convey("add too many DetIds", func() { - msg.Nonce = 2 - ps1.Prices = []*types.PriceTimeDetID{ptd3, ptd4, ptd5, ptd6} - l4c, l4a = f.filtrate(msg) - So(l4c, ShouldResemble, l4a) - So(l4c[0].Prices, ShouldContain, ptd3) - So(l4c[0].Prices, ShouldContain, ptd4) - So(l4c[0].Prices, ShouldContain, ptd5) - So(l4c[0].Prices, ShouldNotContain, ptd6) - }) - }) -} diff --git a/x/oracle/keeper/aggregator/helper_test.go b/x/oracle/keeper/aggregator/helper_test.go deleted file mode 100644 index f993c6b8e..000000000 --- a/x/oracle/keeper/aggregator/helper_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package aggregator - -import "github.com/ExocoreNetwork/exocore/x/oracle/types" - -func newPTD(detID, price string) *types.PriceTimeDetID { - return &types.PriceTimeDetID{ - Price: price, - Decimal: 1, - Timestamp: "-", - DetID: detID, - } -} - -func newPS(sourceID uint64, prices ...*types.PriceTimeDetID) *types.PriceSource { - return &types.PriceSource{ - SourceID: sourceID, - Prices: prices, - } -} diff --git a/x/oracle/keeper/aggregator/info_test.go b/x/oracle/keeper/aggregator/info_test.go deleted file mode 100644 index dbab8f01c..000000000 --- a/x/oracle/keeper/aggregator/info_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package aggregator - -import ( - "math/big" - - "github.com/ExocoreNetwork/exocore/x/oracle/types" -) - -var ( - one = big.NewInt(1) - zero = big.NewInt(0) - ten = big.NewInt(10) - eleven = big.NewInt(11) - fifteen = big.NewInt(15) - twenty = big.NewInt(20) -) - -var ( - pTD1 = newPTD("1", "10") - pTD2 = newPTD("2", "12") - pTD3 = newPTD("3", "15") - pTD2M = newPTD("2", "11") - pTD3M = newPTD("3", "19") - // 1-10, 2-12 - pS1 = []*types.PriceSource{newPS(1, pTD1, pTD2)} - // 
2-12, 3-1 - pS2 = []*types.PriceSource{newPS(1, pTD3, pTD2)} - // 1-10, 2-11(m) - pS3 = []*types.PriceSource{newPS(1, pTD1, pTD2M)} - // 2-12, 3-19(m) - pS4 = []*types.PriceSource{newPS(1, pTD2, pTD3M)} - // 1-10, 3-19(m) - pS5 = []*types.PriceSource{newPS(1, pTD1, pTD3M)} - - pS6 = []*types.PriceSource{newPS(2, pTD1)} - - // 1-10, 2-12 - pS21 = []*types.PriceSource{newPS(1, pTD1, pTD2), newPS(2, pTD1, pTD3)} - // 2-12, 3-15 - pS22 = []*types.PriceSource{newPS(1, pTD3, pTD2), newPS(2, pTD2, pTD3)} - // 1-10, 2-11(m) - pS23 = []*types.PriceSource{newPS(1, pTD1, pTD2M), newPS(2, pTD2M, pTD1)} - // 2-12, 3-19(m) - pS24 = []*types.PriceSource{newPS(1, pTD2, pTD3M), newPS(2, pTD3, pTD2M)} - // 1-10, 3-19(m) - pS25 = []*types.PriceSource{newPS(1, pTD1, pTD3M), newPS(2, pTD2M, pTD3M)} -) - -var defaultParams = types.Params{ - Chains: []*types.Chain{{Name: "-", Desc: "-"}, {Name: "Ethereum", Desc: "-"}}, - Tokens: []*types.Token{{}, {Name: "eth", ChainID: 1, ContractAddress: "0xabc", Decimal: 18, Active: true, AssetID: ""}}, - Sources: []*types.Source{{}, {Name: "chainLink", Entry: &types.Endpoint{}, Valid: true, Deterministic: true}}, - Rules: []*types.RuleSource{{}, {SourceIDs: []uint64{1}}}, - TokenFeeders: []*types.TokenFeeder{{}, {TokenID: 1, RuleID: 1, StartRoundID: 1, StartBaseBlock: 0, Interval: 10, EndBlock: 0}}, - MaxNonce: 3, - ThresholdA: 2, - ThresholdB: 3, - Mode: types.ConsensusModeASAP, - MaxDetId: 5, -} diff --git a/x/oracle/keeper/aggregator/util.go b/x/oracle/keeper/aggregator/util.go deleted file mode 100644 index 0c1bbc47b..000000000 --- a/x/oracle/keeper/aggregator/util.go +++ /dev/null @@ -1,11 +0,0 @@ -package aggregator - -import "math/big" - -func copyBigInt(i *big.Int) *big.Int { - if i == nil { - return nil - } - - return big.NewInt(0).Set(i) -} diff --git a/x/oracle/keeper/aggregator/worker.go b/x/oracle/keeper/aggregator/worker.go deleted file mode 100644 index a676fd2f4..000000000 --- a/x/oracle/keeper/aggregator/worker.go +++ /dev/null @@ -1,121 +0,0 @@ -package aggregator - -import ( - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/common" - "github.com/ExocoreNetwork/exocore/x/oracle/types" - sdk "github.com/cosmos/cosmos-sdk/types" -) - -// worker is the actual instance used to calculate final price for each tokenFeeder's round. 
Which means, every tokenFeeder corresponds to a specified token, and for that tokenFeeder, each round we use a worker instance to calculate the final price -type worker struct { - sealed bool - price string - decimal int32 - // mainly used for deterministic source data to check conflicts and validation - f *filter - // used to get to consensus on deterministic source's data - c *calculator - // when enough data(exceeds threshold) collected, aggregate to conduct the final price - a *aggregator - ctx *AggregatorContext - // TODO: move outside into context through .ctx - records recordMsg -} - -// recordKey used to retrieve messages from records to evaluate that if a validator report proper price for a specific feederID+sourceID -type recordKey struct { - validator string - feederID uint64 - sourceID uint64 -} - -// recordMsg define wrap the map for fast access to validator's message info -type recordMsg map[recordKey][]*types.PriceTimeDetID - -func newRecordMsg() recordMsg { - return make(map[recordKey][]*types.PriceTimeDetID) -} - -func (r recordMsg) get(validator string, feederID, sourceID uint64) []*types.PriceTimeDetID { - v := r[recordKey{validator, feederID, sourceID}] - return v -} - -func (r recordMsg) check(validator string, feederID, sourceID uint64, price, detID string) (exist, matched bool) { - prices := r.get(validator, feederID, sourceID) - for _, p := range prices { - if p.DetID == detID { - exist = true - if p.Price == price { - matched = true - return - } - } - } - return -} - -func (r recordMsg) set(creator string, feederID uint64, priceSources []*types.PriceSource) { - accAddress, _ := sdk.AccAddressFromBech32(creator) - validator := sdk.ConsAddress(accAddress).String() - for _, price := range priceSources { - r[recordKey{validator, feederID, price.SourceID}] = price.Prices - } -} - -// GetFinalPriceList relies requirement to aggregator inside them to get final price list -// []{feederID, sourceID, detID, price} in asc order of {soruceID} -func (w *worker) getFinalPriceList(feederID uint64) []*types.AggFinalPrice { - return w.a.getFinalPriceList(feederID) -} - -func (w *worker) filtrate(msg *types.MsgCreatePrice) (list4Calculator []*types.PriceSource, list4Aggregator []*types.PriceSource) { - return w.f.filtrate(msg) -} - -func (w *worker) recordMessage(creator string, feederID uint64, priceSources []*types.PriceSource) { - w.records.set(creator, feederID, priceSources) -} - -func (w *worker) check(validator string, feederID, sourceID uint64, price, detID string) (exist, matched bool) { - return w.records.check(validator, feederID, sourceID, price, detID) -} - -func (w *worker) do(msg *types.MsgCreatePrice) []*types.PriceSource { - list4Calculator, list4Aggregator := w.f.filtrate(msg) - if list4Aggregator != nil { - accAddress, _ := sdk.AccAddressFromBech32(msg.Creator) - validator := sdk.ConsAddress(accAddress).String() - power := w.ctx.validatorsPower[validator] - w.a.fillPrice(list4Aggregator, validator, power) - if confirmedRounds := w.c.fillPrice(list4Calculator, validator, power); confirmedRounds != nil { - w.a.confirmDSPrice(confirmedRounds) - } - } - return list4Aggregator -} - -func (w *worker) aggregate() string { - return w.a.aggregate() -} - -// not concurrency safe -func (w *worker) seal() { - if w.sealed { - return - } - w.sealed = true - w.price = w.a.aggregate() -} - -// newWorker new a instance for a tokenFeeder's specific round -func newWorker(feederID uint64, agc *AggregatorContext) *worker { - return &worker{ - f: newFilter(int(common.MaxNonce), 
int(common.MaxDetID)), - c: newCalculator(len(agc.validatorsPower), agc.totalPower), - a: newAggregator(len(agc.validatorsPower), agc.totalPower), - decimal: agc.params.GetTokenInfo(feederID).Decimal, - ctx: agc, - records: newRecordMsg(), - } -} diff --git a/x/oracle/keeper/cache/caches.go b/x/oracle/keeper/cache/caches.go index 47db24b53..ce8b83081 100644 --- a/x/oracle/keeper/cache/caches.go +++ b/x/oracle/keeper/cache/caches.go @@ -53,6 +53,7 @@ func (c *cacheMsgs) remove(item *ItemM) { } func (c cacheMsgs) commit(ctx sdk.Context, k common.KeeperOracle) { + // #nosec G115 // block height is not negative block := uint64(ctx.BlockHeight()) recentMsgs := types.RecentMsg{ @@ -69,6 +70,7 @@ func (c cacheMsgs) commit(ctx sdk.Context, k common.KeeperOracle) { i := 0 for ; i < len(index.Index); i++ { b := index.Index[i] + // #nosec G115 // maxNonce must not be negative if b > block-uint64(common.MaxNonce) { break } @@ -101,6 +103,7 @@ func (c *cacheValidator) add(validators map[string]*big.Int) { } func (c *cacheValidator) commit(ctx sdk.Context, k common.KeeperOracle) { + // #nosec G115 // block height is not negative block := uint64(ctx.BlockHeight()) k.SetValidatorUpdateBlock(ctx, types.ValidatorUpdateBlock{Block: block}) } @@ -113,11 +116,13 @@ func (c *cacheParams) add(p ItemP) { } func (c *cacheParams) commit(ctx sdk.Context, k common.KeeperOracle) { + // #nosec G115 // block height is not negative block := uint64(ctx.BlockHeight()) index, _ := k.GetIndexRecentParams(ctx) i := 0 for ; i < len(index.Index); i++ { b := index.Index[i] + // #nosec G115 // maxNonce must not be negative if b >= block-uint64(common.MaxNonce) { break } diff --git a/x/oracle/keeper/common/expected_keepers.go b/x/oracle/keeper/common/expected_keepers.go index f668d139b..7a50c75d0 100644 --- a/x/oracle/keeper/common/expected_keepers.go +++ b/x/oracle/keeper/common/expected_keepers.go @@ -1,11 +1,14 @@ package common import ( + "time" + sdkmath "cosmossdk.io/math" dogfoodkeeper "github.com/ExocoreNetwork/exocore/x/dogfood/keeper" dogfoodtypes "github.com/ExocoreNetwork/exocore/x/dogfood/types" "github.com/ExocoreNetwork/exocore/x/oracle/types" abci "github.com/cometbft/cometbft/abci/types" + "github.com/cometbft/cometbft/libs/log" sdk "github.com/cosmos/cosmos-sdk/types" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" ) @@ -14,12 +17,36 @@ type Price struct { Value sdkmath.Int Decimal uint8 } - +type SlashingKeeper interface { + JailUntil(sdk.Context, sdk.ConsAddress, time.Time) +} type KeeperOracle interface { KeeperDogfood - + SlashingKeeper + + Logger(ctx sdk.Context) log.Logger + AddZeroNonceItemWithFeederIDForValidators(ctx sdk.Context, feederID uint64, validators []string) + InitValidatorReportInfo(ctx sdk.Context, validator string, height int64) + ClearAllValidatorReportInfo(ctx sdk.Context) + ClearAllValidatorMissedRoundBitArray(ctx sdk.Context) + GrowRoundID(ctx sdk.Context, tokenID uint64) (price string, roundID uint64) + AppendPriceTR(ctx sdk.Context, tokenID uint64, priceTR types.PriceTimeRound) bool + GetValidatorReportInfo(ctx sdk.Context, validator string) (info types.ValidatorReportInfo, found bool) + GetMaliciousJailDuration(ctx sdk.Context) (res time.Duration) + ClearValidatorMissedRoundBitArray(ctx sdk.Context, validator string) + GetReportedRoundsWindow(ctx sdk.Context) int64 + GetValidatorMissedRoundBitArray(ctx sdk.Context, validator string, index uint64) bool + SetValidatorMissedRoundBitArray(ctx sdk.Context, validator string, index uint64, missed bool) + 
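// The report-info and missed-round bit-array methods above appear to follow x/slashing-style
+ // downtime tracking, with one bit per round inside the reported-rounds window. A rough sketch
+ // (hypothetical usage built only from this interface, not code from this PR) of deriving a
+ // miss count for a validator:
+ //   missed := 0
+ //   for i := uint64(0); i < uint64(k.GetReportedRoundsWindow(ctx)); i++ {
+ //       if k.GetValidatorMissedRoundBitArray(ctx, validator, i) {
+ //           missed++
+ //       }
+ //   }
+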
GetMinReportedPerWindow(ctx sdk.Context) int64 + GetMissJailDuration(ctx sdk.Context) (res time.Duration) + SetValidatorReportInfo(ctx sdk.Context, validator string, info types.ValidatorReportInfo) + GetSlashFractionMalicious(ctx sdk.Context) (res sdk.Dec) + SetValidatorUpdateForCache(sdk.Context, types.ValidatorUpdateBlock) + SetParamsForCache(sdk.Context, types.RecentParams) + SetMsgItemsForCache(sdk.Context, types.RecentMsg) + GetRecentParamsWithinMaxNonce(ctx sdk.Context) (recentParamsList []*types.RecentParams, prev, latest types.RecentParams) + GetAllRecentMsg(ctx sdk.Context) (list []types.RecentMsg) GetParams(sdk.Context) types.Params - GetIndexRecentMsg(sdk.Context) (types.IndexRecentMsg, bool) GetAllRecentMsgAsMap(sdk.Context) map[int64][]*types.MsgItem diff --git a/x/oracle/keeper/feedermanagement/aggregator.go b/x/oracle/keeper/feedermanagement/aggregator.go new file mode 100644 index 000000000..e63f23251 --- /dev/null +++ b/x/oracle/keeper/feedermanagement/aggregator.go @@ -0,0 +1,486 @@ +package feedermanagement + +import ( + "fmt" + "math/big" + "reflect" + "slices" + + "golang.org/x/exp/maps" +) + +type sourceChecker interface { + IsDeterministic(sourceID int64) bool +} + +func newAggregator(t *threshold, algo AggAlgorithm) *aggregator { + return &aggregator{ + t: t, + algo: algo, + finalPrice: nil, + v: newRecordsValidators(), + ds: newRecordsDSs(t), + } +} + +func (a *aggregator) Equals(a2 *aggregator) bool { + if a == nil && a2 == nil { + return true + } + if a == nil || a2 == nil { + return false + } + + if !reflect.DeepEqual(a.finalPrice, a2.finalPrice) { + return false + } + + if !a.t.Equals(a2.t) { + return false + } + if !a.v.Equals(a2.v) { + return false + } + if !a.ds.Equals(a2.ds) { + return false + } + + return true +} + +func (a *aggregator) CopyForCheckTx() *aggregator { + if a == nil { + return nil + } + var finalPrice *PriceResult + if a.finalPrice != nil { + tmp := *a.finalPrice + finalPrice = &tmp + } + return &aggregator{ + t: a.t.Cpy(), + finalPrice: finalPrice, + v: a.v.Cpy(), + ds: a.ds.Cpy(), + } +} + +func (a *aggregator) GetFinalPrice() (*PriceResult, bool) { + if a.finalPrice != nil { + return a.finalPrice, true + } + if !a.exceedPowerLimit() { + return nil, false + } + finalPrice, ok := a.v.GetFinalPrice(a.algo) + if ok { + a.finalPrice = finalPrice + } + return finalPrice, ok +} + +func (a *aggregator) RecordMsg(msg *MsgItem) error { + // TODO: implement me + _, err := a.v.RecordMsg(msg) + return err +} + +// AddMsg records the message in a.v and do aggregation in a.ds +func (a *aggregator) AddMsg(msg *MsgItem) error { + // record into recordsValidators, validation for duplication + addedMsg, err := a.v.RecordMsg(msg) + // all prices failed to be recorded + if err != nil { + return fmt.Errorf("failed to add quote, error:%w", err) + } + // add into recordsDSs for DS aggregation + for _, ps := range addedMsg.PriceSources { + if ps.deterministic { + if a.ds.AddPriceSource(ps, msg.Power, msg.Validator) { + finalPrice, ok := a.ds.GetFinalPriceForSourceID(ps.sourceID) + if ok { + a.v.UpdateFinalPriceForDS(ps.sourceID, finalPrice) + } + } + } + } + return nil +} + +// TODO: V2: the accumulatedPower should corresponding to all valid validators which provides all sources required by rules(defined in oracle.Params) +func (a *aggregator) exceedPowerLimit() bool { + return a.t.Exceeds(a.v.accumulatedPower) +} + +func newRecordsValidators() *recordsValidators { + return &recordsValidators{ + finalPrice: nil, + accumulatedPower: big.NewInt(0), + records: 
make(map[string]*priceValidator), + } +} + +func (rv *recordsValidators) Equals(rv2 *recordsValidators) bool { + if rv == nil && rv2 == nil { + return true + } + if rv == nil || rv2 == nil { + return false + } + + if !reflect.DeepEqual(rv.finalPrice, rv2.finalPrice) { + return false + } + if rv.accumulatedPower.Cmp(rv2.accumulatedPower) != 0 { + return false + } + if !reflect.DeepEqual(rv.finalPrices, rv2.finalPrices) { + return false + } + if len(rv.records) != len(rv2.records) { + return false + } + for k, v := range rv.records { + if v2, ok := rv2.records[k]; !ok || !v.Equals(v2) { + return false + } + } + + return true +} + +func (rv *recordsValidators) Cpy() *recordsValidators { + if rv == nil { + return nil + } + var finalPrice *PriceResult + if rv.finalPrice != nil { + tmp := *rv.finalPrice + finalPrice = &tmp + } + var finalPrices map[string]*PriceResult + if len(rv.finalPrices) > 0 { + finalPrices = make(map[string]*PriceResult) + for v, p := range rv.finalPrices { + price := *p + finalPrices[v] = &price + } + } + records := make(map[string]*priceValidator) + for v, pv := range rv.records { + records[v] = pv.Cpy() + } + return &recordsValidators{ + finalPrice: finalPrice, + finalPrices: finalPrices, + accumulatedPower: new(big.Int).Set(rv.accumulatedPower), + records: records, + } +} + +func (rv *recordsValidators) RecordMsg(msg *MsgItem) (*MsgItem, error) { + record, ok := rv.records[msg.Validator] + rets := &MsgItem{ + FeederID: msg.FeederID, + Validator: msg.Validator, + Power: msg.Power, + PriceSources: make([]*priceSource, 0), + } + if !ok { + record = newPriceValidator(msg.Validator, msg.Power) + } + updated, added, err := record.TryAddPriceSources(msg.PriceSources) + if err != nil { + return nil, fmt.Errorf("failed to record msg, error:%w", err) + } + record.ApplyAddedPriceSources(updated) + if !ok { + rv.records[msg.Validator] = record + rv.accumulatedPower = new(big.Int).Add(rv.accumulatedPower, msg.Power) + } + rets.PriceSources = added + return rets, nil +} + +func (rv *recordsValidators) GetValidatorQuotePricesForSourceID(validator string, sourceID int64) ([]*PriceInfo, bool) { + record, ok := rv.records[validator] + if !ok { + return nil, false + } + pSource, ok := record.priceSources[sourceID] + if !ok { + return nil, false + } + return pSource.prices, true +} + +func (rv *recordsValidators) GetFinalPrice(algo AggAlgorithm) (*PriceResult, bool) { + if rv.finalPrice != nil { + return rv.finalPrice, true + } + if prices, ok := rv.GetFinalPriceForValidators(algo); ok { + keySlice := make([]string, 0, len(prices)) + for validator := range prices { + keySlice = append(keySlice, validator) + } + algo.Reset() + slices.Sort(keySlice) + for _, validator := range keySlice { + if !algo.Add(prices[validator]) { + algo.Reset() + return nil, false + } + } + rv.finalPrice = algo.GetResult() + if rv.finalPrice == nil { + return nil, false + } + return rv.finalPrice, true + } + return nil, false +} + +func (rv *recordsValidators) GetFinalPriceForValidators(algo AggAlgorithm) (map[string]*PriceResult, bool) { + if len(rv.finalPrices) > 0 { + return rv.finalPrices, true + } + ret := make(map[string]*PriceResult) + for validator, pv := range rv.records { + finalPrice, ok := pv.GetFinalPrice(algo) + if !ok { + return nil, false + } + ret[validator] = finalPrice + } + if len(ret) > 0 { + rv.finalPrices = ret + } + return ret, true +} + +func (rv *recordsValidators) UpdateFinalPriceForDS(sourceID int64, finalPrice *PriceResult) bool { + if finalPrice == nil { + return false + } + // it's 
safe to range map here, order does not matter + for _, record := range rv.records { + // ignore the fail cases for updating some pv' DS finalPrice + record.UpdateFinalPriceForDS(sourceID, finalPrice) + } + return true +} + +func newRecordsDSs(t *threshold) *recordsDSs { + return &recordsDSs{ + t: t, + dsMap: make(map[int64]*recordsDS), + } +} + +// type recordsDSs struct { +// } + +func (rdss *recordsDSs) Equals(rdss2 *recordsDSs) bool { + if rdss == nil && rdss2 == nil { + return true + } + if rdss == nil || rdss2 == nil { + return false + } + + if !rdss.t.Equals(rdss2.t) { + return false + } + if len(rdss.dsMap) != len(rdss2.dsMap) { + return false + } + for k, v := range rdss.dsMap { + if v2, ok := rdss2.dsMap[k]; !ok || !v.Equals(v2) { + return false + } + } + + return true +} + +func (rdss *recordsDSs) Cpy() *recordsDSs { + if rdss == nil { + return nil + } + dsMap := make(map[int64]*recordsDS) + for id, r := range rdss.dsMap { + dsMap[id] = r.Cpy() + } + return &recordsDSs{ + t: rdss.t.Cpy(), + dsMap: dsMap, + } +} + +// AddPriceSource adds prices for DS sources +func (rdss *recordsDSs) AddPriceSource(ps *priceSource, power *big.Int, validator string) bool { + if !ps.deterministic { + return false + } + price, ok := rdss.dsMap[ps.sourceID] + if !ok { + price = newRecordsDS() + rdss.dsMap[ps.sourceID] = price + } + for _, p := range ps.prices { + price.AddPrice(&PricePower{ + Price: p, + Power: power, + Validators: map[string]struct{}{validator: {}}, + }) + } + return true +} + +func (rdss *recordsDSs) GetFinalPriceForSourceID(sourceID int64) (*PriceResult, bool) { + rds, ok := rdss.dsMap[sourceID] + if !ok { + return nil, false + } + return rds.GetFinalPrice(rdss.t) +} + +func (rdss *recordsDSs) GetFinalPriceForSources() (map[int64]*PriceResult, bool) { + ret := make(map[int64]*PriceResult) + for sourceID, rds := range rdss.dsMap { + if finalPrice, ok := rds.GetFinalPrice(rdss.t); ok { + ret[sourceID] = finalPrice + } else { + return nil, false + } + } + return ret, true +} + +func (rdss *recordsDSs) GetFinalDetIDForSourceID(sourceID int64) string { + if rds, ok := rdss.dsMap[sourceID]; ok { + if rds.finalPrice != nil { + return rds.finalDetID + } + if _, ok := rds.GetFinalPrice(rdss.t); ok { + return rds.finalDetID + } + } + return "" +} + +func newRecordsDS() *recordsDS { + return &recordsDS{ + finalPrice: nil, + validators: make(map[string]struct{}), + finalDetID: "", + accumulatedPowers: big.NewInt(0), + records: make([]*PricePower, 0), + } +} + +func (rds *recordsDS) Equals(rds2 *recordsDS) bool { + if rds == nil && rds2 == nil { + return true + } + if rds == nil || rds2 == nil { + return false + } + + if !reflect.DeepEqual(rds.finalPrice, rds2.finalPrice) { + return false + } + if rds.finalDetID != rds2.finalDetID { + return false + } + if rds.accumulatedPowers.Cmp(rds2.accumulatedPowers) != 0 { + return false + } + if !reflect.DeepEqual(rds.validators, rds2.validators) { + return false + } + if len(rds.records) != len(rds2.records) { + return false + } + for i, r := range rds.records { + if !r.Equals(rds2.records[i]) { + return false + } + } + + return true +} + +func (rds *recordsDS) Cpy() *recordsDS { + if rds == nil { + return nil + } + var finalPrice *PriceResult + if rds.finalPrice != nil { + tmp := *rds.finalPrice + finalPrice = &tmp + } + validators := make(map[string]struct{}) + for v := range rds.validators { + validators[v] = struct{}{} + } + records := make([]*PricePower, 0, len(rds.records)) + for _, r := range rds.records { + records = append(records, r.Cpy()) + } 
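+ // records were deep-copied above (each PricePower holds a *big.Int power and a validator set),
+ // so copies taken for CheckTx simulation via CopyForCheckTx cannot mutate the consensus-state
+ // aggregation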
+ return &recordsDS{ + finalPrice: finalPrice, + finalDetID: rds.finalDetID, + accumulatedPowers: new(big.Int).Set(rds.accumulatedPowers), + validators: validators, + records: records, + } +} + +func (rds *recordsDS) GetFinalPrice(t *threshold) (*PriceResult, bool) { + if rds.finalPrice != nil { + return rds.finalPrice, true + } + if t.Exceeds(rds.accumulatedPowers) { + l := len(rds.records) + for i := l - 1; i >= 0; i-- { + pPower := rds.records[i] + if t.Exceeds(pPower.Power) { + rds.finalPrice = pPower.Price.PriceResult() + rds.finalDetID = pPower.Price.DetID + return rds.finalPrice, true + } + } + } + return nil, false +} + +func (rds *recordsDS) AddPrice(p *PricePower) { + validator := maps.Keys(p.Validators)[0] + biggestDetID := true + p = p.Cpy() + for i, record := range rds.records { + if record.Price.EqualDS(p.Price) { + if _, ok := record.Validators[validator]; !ok { + record.Power.Add(record.Power, p.Power) + record.Validators[validator] = struct{}{} + } + biggestDetID = false + break + } + if p.Price.DetID <= record.Price.DetID { + // insert before i + combined := append([]*PricePower{p}, rds.records[i:]...) + rds.records = append(rds.records[:i], combined...) + biggestDetID = false + break + } + } + if _, ok := rds.validators[validator]; !ok { + rds.accumulatedPowers.Add(rds.accumulatedPowers, p.Power) + rds.validators[validator] = struct{}{} + } + if biggestDetID { + rds.records = append(rds.records, p) + } +} diff --git a/x/oracle/keeper/feedermanagement/aggregator_test.go b/x/oracle/keeper/feedermanagement/aggregator_test.go new file mode 100644 index 000000000..bd6e635b1 --- /dev/null +++ b/x/oracle/keeper/feedermanagement/aggregator_test.go @@ -0,0 +1,313 @@ +package feedermanagement + +import ( + "testing" + + oracletypes "github.com/ExocoreNetwork/exocore/x/oracle/types" + . 
"github.com/smartystreets/goconvey/convey" + gomock "go.uber.org/mock/gomock" +) + +func TestAggregation(t *testing.T) { + Convey("aggregation", t, func() { + Convey("add priceSouce in priceSource", func() { + ps := newPriceSource(1, true) + Convey("add first priceSource, success", func() { + psAdded, err := ps.Add(ps1) + So(psAdded, ShouldResemble, ps1) + So(err, ShouldBeNil) + _, ok := ps.detIDs["1"] + So(ok, ShouldBeTrue) + Convey("add different sourceID, reject", func() { + psAdded, err := ps.Add(ps3) + So(psAdded, ShouldBeNil) + So(err, ShouldNotBeNil) + }) + Convey("add same sourceID with same DetID, reject", func() { + psAdded, err := ps.Add(ps1) + So(psAdded, ShouldBeNil) + So(err, ShouldNotBeNil) + }) + Convey("add same sourceID with different DetID, success", func() { + psAdded, err := ps.Add(ps2) + So(psAdded, ShouldResemble, ps2) + So(err, ShouldBeNil) + _, ok := ps.detIDs["2"] + So(ok, ShouldBeTrue) + }) + Convey("add same sourceID with different DetID, duplicated input, return the added one value", func() { + psAdded, err := ps.Add(ps4) + So(psAdded, ShouldResemble, ps2) + So(err, ShouldBeNil) + }) + }) + }) + Convey("add priceSource in priceValidator", func() { + // Try + pv := newPriceValidator("validator1", big1) + Convey("add source1 with 2 detIDs, try:success", func() { + // duplicated detID=1 in ps1_2 will be removed in returned 'added' + updated, added, err := pv.TryAddPriceSources([]*priceSource{ps1_2, ps2}) + So(updated, ShouldResemble, map[int64]*priceSource{1: ps5}) + So(added, ShouldResemble, []*priceSource{ps1, ps2}) + So(err, ShouldBeNil) + // 'try' will not actually update pv + So(pv.priceSources, ShouldHaveLength, 0) + Convey("apply changes, success", func() { + pv.ApplyAddedPriceSources(updated) + So(pv.priceSources, ShouldHaveLength, 1) + So(pv.priceSources, ShouldResemble, map[int64]*priceSource{1: ps5}) + Convey("add source1 with detID 3, try:success", func() { + updated, added, err := pv.TryAddPriceSources([]*priceSource{ps3_2}) + So(updated, ShouldResemble, map[int64]*priceSource{1: ps6}) + So(added, ShouldResemble, []*priceSource{ps3_2}) + So(err, ShouldBeNil) + So(pv.priceSources[1].prices, ShouldHaveLength, 2) + Convey("apply changes, success", func() { + pv.ApplyAddedPriceSources(updated) + So(pv.priceSources[1].prices, ShouldHaveLength, 3) + }) + }) + }) + }) + }) + Convey("record msgs in recordsValidators", func() { + rv := newRecordsValidators() + // TODO: multiple sources(for V2) + Convey("record valid msg, success", func() { + msgAdded, err := rv.RecordMsg(msgItem1) + So(msgAdded, ShouldResemble, msgItem1_2) + So(err, ShouldBeNil) + So(rv.records["validator1"], ShouldResemble, &priceValidator{validator: "validator1", power: big1, priceSources: map[int64]*priceSource{1: ps5}}) + So(rv.accumulatedPower, ShouldResemble, big1) + Convey("record duplicated msg, reject", func() { + msgAdded, err := rv.RecordMsg(msgItem1_3) + So(msgAdded, ShouldBeNil) + So(err, ShouldNotBeNil) + }) + Convey("record msg from another validator, success", func() { + msgAdded, err := rv.RecordMsg(msgItem2) + So(msgAdded, ShouldResemble, msgItem2_2) + So(err, ShouldBeNil) + So(rv.records["validator2"], ShouldResemble, &priceValidator{validator: "validator2", power: big1, priceSources: map[int64]*priceSource{1: ps5}}) + So(rv.accumulatedPower, ShouldResemble, big2) + Convey("calculate final price without confirmed ds price, fail", func() { + finalPrice, err := rv.GetFinalPrice(defaultAggMedian) + So(finalPrice, ShouldBeNil) + So(err, ShouldBeFalse) + }) + Convey("calculate 
final price with confirmed ds price, success", func() { + Convey("update final price of ds, success", func() { + So(rv.records["validator1"].priceSources[1].finalPrice, ShouldBeNil) + rv.UpdateFinalPriceForDS(1, pr1) + So(rv.records["validator1"].priceSources[1].finalPrice, ShouldResemble, pr1) + So(rv.records["validator2"].priceSources[1].finalPrice, ShouldResemble, pr1) + finalPrice, err := rv.GetFinalPrice(defaultAggMedian) + So(finalPrice, ShouldResemble, pr1_2) + So(err, ShouldBeTrue) + }) + }) + }) + }) + }) + Convey("add msgs in recordsDS", func() { + rds := newRecordsDS() + + Convey("add first msg with v1-power-1 for detID2, success", func() { + rds.AddPrice(pw2) + So(rds.accumulatedPowers, ShouldResemble, big1) + So(rds.validators["validator1"], ShouldNotBeNil) + So(rds.records, ShouldHaveLength, 1) + So(rds.records[0], ShouldResemble, pw2) + Convey("add second msg with v1-power- 1 for detID1", func() { + rds.AddPrice(pw1) + So(rds.accumulatedPowers, ShouldResemble, big1) + So(rds.records, ShouldHaveLength, 2) + So(rds.records[0], ShouldResemble, pw1) + So(rds.records[1], ShouldResemble, pw2) + Convey("add 3rd msg with v2-power-1 for detID2", func() { + rds.AddPrice(pw3) + So(rds.accumulatedPowers, ShouldResemble, big2) + So(rds.validators["validator2"], ShouldNotBeNil) + So(rds.records, ShouldHaveLength, 2) + So(rds.records[0], ShouldResemble, pw1) + So(rds.records[1], ShouldResemble, pw2_2) + finalPrice, ok := rds.GetFinalPrice(th) + So(finalPrice, ShouldBeNil) + So(ok, ShouldBeFalse) + Convey("add 4th msg with v3-power-1 for detID2", func() { + rds.AddPrice(pw4) + So(rds.accumulatedPowers, ShouldResemble, big3) + So(rds.validators["validator3"], ShouldNotBeNil) + So(rds.records, ShouldHaveLength, 2) + So(rds.records[1], ShouldResemble, pw3_2) + Convey("get finalPrice, success", func() { + finalPrice, ok = rds.GetFinalPrice(th) + So(finalPrice, ShouldResemble, &PriceResult{ + Price: "999", + Decimal: 8, + DetID: "2", + Timestamp: timestamp, + }) + So(ok, ShouldBeTrue) + Convey("add 5th msg with v4-power-1 for detID2", func() { + rds.AddPrice(pw5) + So(rds.accumulatedPowers, ShouldResemble, big4) + So(rds.validators["validator4"], ShouldNotBeNil) + So(rds.records, ShouldHaveLength, 2) + finalPrice, ok = rds.GetFinalPrice(th) + So(finalPrice, ShouldResemble, &PriceResult{ + Price: "999", + Decimal: 8, + DetID: "2", + Timestamp: timestamp, + }) + }) + }) + }) + }) + }) + }) + }) + Convey("add msgs in recordsDSs", func() { + rdss := newRecordsDSs(th) + Convey("add 3 same detId=1 prices from v1,v2,v3", func() { + rdss.AddPriceSource(ps1, big1, "validator1") + rdss.AddPriceSource(ps1, big1, "validator2") + finalPrice, ok := rdss.GetFinalPriceForSourceID(1) + So(finalPrice, ShouldBeNil) + So(ok, ShouldBeFalse) + rdss.AddPriceSource(ps1, big1, "validator3") + finalPrice, ok = rdss.GetFinalPriceForSourceID(1) + So(finalPrice, ShouldNotBeNil) + So(finalPrice, ShouldResemble, ps1.prices[0].PriceResult()) + So(ok, ShouldBeTrue) + }) + Convey("add 3 same detId=1 prices and 2 same detID=2 prices from v1,v2,v3", func() { + rdss.AddPriceSource(ps1, big1, "validator1") + rdss.AddPriceSource(ps2, big1, "validator2") + finalPrice, ok := rdss.GetFinalPriceForSourceID(1) + So(finalPrice, ShouldBeNil) + So(ok, ShouldBeFalse) + rdss.AddPriceSource(ps1_3, big1, "validator3") + finalPrice, ok = rdss.GetFinalPriceForSourceID(1) + So(finalPrice, ShouldBeNil) + So(ok, ShouldBeFalse) + rdss.AddPriceSource(ps2, big1, "validator4") + finalPrice, ok = rdss.GetFinalPriceForSourceID(1) + So(finalPrice, 
ShouldResemble, ps2.prices[0].PriceResult()) + So(ok, ShouldBeTrue) + }) + + }) + Convey("add msgs in aggregator", func() { + a := newAggregator(th, defaultAggMedian) + err := a.AddMsg(msgItem1) + So(err, ShouldBeNil) + finalPrice, ok := a.GetFinalPrice() + So(finalPrice, ShouldBeNil) + So(ok, ShouldBeFalse) + + err = a.AddMsg(msgItem2) + So(err, ShouldBeNil) + finalPrice, ok = a.GetFinalPrice() + So(finalPrice, ShouldBeNil) + So(ok, ShouldBeFalse) + + // failed to add duplicated msg + err = a.AddMsg(msgItem2) + So(err, ShouldNotBeNil) + + // powe exceeds 2/3 on detID=2 + err = a.AddMsg(msgItem3) + So(err, ShouldBeNil) + finalPrice, ok = a.GetFinalPrice() + So(finalPrice, ShouldResemble, &PriceResult{Price: "999", Decimal: 8}) + So(ok, ShouldBeTrue) + So(a.ds.GetFinalDetIDForSourceID(1), ShouldEqual, "2") + }) + Convey("tally in round", func() { + ctrl := gomock.NewController(t) + c := NewMockCacheReader(ctrl) + c.EXPECT(). + GetPowerForValidator(gomock.Any()). + Return(big1, true). + AnyTimes() + c.EXPECT(). + IsDeterministic(gomock.Eq(int64(1))). + Return(true). + AnyTimes() + c.EXPECT(). + GetThreshold(). + Return(th). + AnyTimes() + c.EXPECT(). + IsRuleV1(gomock.Any()). + Return(true). + AnyTimes() + + r := tData.NewRound(nil) + r.cache = c + feederID := r.feederID + Convey("add msg in closed quoting window", func() { + pmsg1 := protoMsgItem1 + pmsg1.FeederID = uint64(feederID) + finalPrice, addedMsgItem, err := r.Tally(pmsg1) + // quoting window not open + So(err, ShouldNotBeNil) + So(finalPrice, ShouldBeNil) + So(addedMsgItem, ShouldBeNil) + }) + Convey("open quotingWindow", func() { + r.PrepareForNextBlock(int64(params.TokenFeeders[r.feederID].StartBaseBlock)) + So(r.status, ShouldEqual, roundStatusOpen) + Convey("add msg-v1-detID1 for source1", func() { + pmsg1 := protoMsgItem1 + pmsg1.FeederID = uint64(feederID) + finalPrice, addedMsgItem, err := r.Tally(pmsg1) + So(finalPrice, ShouldBeNil) + So(addedMsgItem, ShouldResemble, pmsg1) + So(err, ShouldBeNil) + Convey("add msg-v1-detID2, success ", func() { + pmsg2 := protoMsgItem2 + pmsg2.FeederID = uint64(feederID) + finalPrice, addedMsgItem, err = r.Tally(pmsg2) + So(finalPrice, ShouldBeNil) + So(addedMsgItem, ShouldResemble, pmsg2) + So(err, ShouldBeNil) + Convey("add msg-v2-detID2, success", func() { + // v2,detID=2 + pmsg3 := protoMsgItem3 + pmsg3.FeederID = uint64(feederID) + finalPrice, addedMsgItem, err = r.Tally(pmsg3) + So(finalPrice, ShouldBeNil) + So(addedMsgItem, ShouldResemble, pmsg3) + So(err, ShouldBeNil) + Convey("add msg-v3-detID2, finalPrice", func() { + // v3,detID=2 + pmsg4 := protoMsgItem4 + pmsg4.FeederID = uint64(feederID) + finalPrice, addedMsgItem, err = r.Tally(pmsg4) + So(finalPrice, ShouldResemble, &PriceResult{ + Price: "999", + Decimal: 8, + DetID: "2", + }) + So(addedMsgItem, ShouldResemble, pmsg4) + So(err, ShouldBeNil) + Convey("add msg-v4-detID1, recordOnly", func() { + pmsg5 := protoMsgItem5 + pmsg5.FeederID = uint64(feederID) + finalPrice, addedMsgItem, err = r.Tally(pmsg5) + So(finalPrice, ShouldBeNil) + So(addedMsgItem, ShouldResemble, pmsg5) + So(err, ShouldBeError, oracletypes.ErrQuoteRecorded) + }) + }) + }) + }) + }) + }) + }) + }) +} diff --git a/x/oracle/keeper/feedermanagement/algo.go b/x/oracle/keeper/feedermanagement/algo.go new file mode 100644 index 000000000..4d1d049d6 --- /dev/null +++ b/x/oracle/keeper/feedermanagement/algo.go @@ -0,0 +1,131 @@ +package feedermanagement + +import ( + "math/big" + "sort" + "strings" +) + +type BigIntList []*big.Int + +func (b BigIntList) Len() int { 
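+ // BigIntList implements sort.Interface (Len/Less/Swap) so that Median below can sort the
+ // values in place; with an even number of entries Median returns the mean of the two middle
+ // values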
+ return len(b) +} + +func (b BigIntList) Less(i, j int) bool { + return b[i].Cmp(b[j]) < 0 +} + +func (b BigIntList) Swap(i, j int) { + b[i], b[j] = b[j], b[i] +} + +func (b BigIntList) Median() *big.Int { + sort.Sort(b) + l := len(b) + if l%2 == 1 { + return b[l/2] + } + return new(big.Int).Div(new(big.Int).Add(b[l/2], b[l/2-1]), big.NewInt(2)) +} + +type AggAlgorithm interface { + Add(*PriceResult) bool + GetResult() *PriceResult + Reset() +} + +type priceType int + +const ( + notSet priceType = iota + number + notNumber +) + +type AggMedian struct { + t priceType + finalString string + list []*big.Int + decimal int32 +} + +func NewAggMedian() *AggMedian { + return &AggMedian{ + list: make([]*big.Int, 0), + } +} + +func (a *AggMedian) Add(price *PriceResult) bool { + priceInt, ok := new(big.Int).SetString(price.Price, 10) + if ok { + if a.t == notNumber { + return false + } + if a.t == notSet { + a.t = number + a.list = append(a.list, priceInt) + a.decimal = price.Decimal + return true + } + if a.decimal != price.Decimal { + if a.decimal > price.Decimal { + price.Price += strings.Repeat("0", int(a.decimal-price.Decimal)) + priceInt, _ = new(big.Int).SetString(price.Price, 10) + } else { + delta := big.NewInt(int64(price.Decimal - a.decimal)) + for _, v := range a.list { + nv := new(big.Int).Mul(v, new(big.Int).Exp(big.NewInt(10), delta, nil)) + *v = *nv + } + a.decimal = price.Decimal + } + } + a.list = append(a.list, priceInt) + return true + } + // input is a string, not a number + if a.t == number { + return false + } + if a.t == notSet { + a.t = notNumber + a.finalString = price.Price + return true + } + if a.finalString != price.Price { + return false + } + return true +} + +func (a *AggMedian) GetResult() *PriceResult { + defer a.Reset() + if a.t == notSet { + return nil + } + if a.t == number { + result := BigIntList(a.list).Median().String() + decimal := a.decimal + return &PriceResult{ + Price: result, + Decimal: decimal, + } + } + if len(a.finalString) == 0 { + return nil + } + result := a.finalString + return &PriceResult{ + Price: result, + } +} + +func (a *AggMedian) Reset() { + a.list = make([]*big.Int, 0) + a.t = notSet + a.decimal = 0 + a.finalString = "" +} + +var defaultAggMedian = NewAggMedian() diff --git a/x/oracle/keeper/feedermanagement/caches.go b/x/oracle/keeper/feedermanagement/caches.go new file mode 100644 index 000000000..c8e64a00e --- /dev/null +++ b/x/oracle/keeper/feedermanagement/caches.go @@ -0,0 +1,365 @@ +package feedermanagement + +import ( + "fmt" + "math/big" + "reflect" + "slices" + + oracletypes "github.com/ExocoreNetwork/exocore/x/oracle/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +type ItemV map[string]*big.Int + +var zeroBig = big.NewInt(0) + +func (c *caches) CpyForSimulation() *caches { + ret := *c + msg := *(c.msg) + params := *(c.params) + // it's safe to do shallow copy on msg, params + ret.msg = &msg + ret.params = ¶ms + validators := make(map[string]*big.Int) + for v, p := range c.validators.validators { + validators[v] = new(big.Int).Set(p) + } + ret.validators = &cacheValidator{ + validators: validators, + update: c.validators.update, + } + + return &ret +} + +func (c *caches) Equals(c2 *caches) bool { + if c == nil && c2 == nil { + return true + } + if c == nil || c2 == nil { + return false + } + if !c.msg.Equals(c2.msg) { + return false + } + if !c.validators.Equals(c2.validators) { + return false + } + if !c.params.Equals(c2.params) { + return false + } + return true +} + +func (c *caches) Init(k Submitter, params 
*oracletypes.Params, validators map[string]*big.Int) { + c.ResetCaches() + c.k = k + + c.params.add(params) + + c.validators.add(validators) +} + +func (c *caches) IsDeterministic(sourceID int64) bool { + sources := c.params.params.Sources + if sourceID >= int64(len(sources)) { + return false + } + + return sources[sourceID].Deterministic +} + +// RuleV1, we restrict the source to be Chainlink and only that source is acceptable +func (c *caches) IsRuleV1(feederID int64) bool { + ruleID := c.params.params.TokenFeeders[feederID].RuleID + return ruleID == 1 && len(c.params.params.Sources) == 2 && c.params.params.Sources[1].Name == oracletypes.SourceChainlinkName +} + +func (c *caches) GetTokenIDForFeederID(feederID int64) (int64, bool) { + tf, ok := c.GetTokenFeederForFeederID(feederID) + if !ok { + return 0, false + } + // #nosec G115 // tokenID is index of slice + return int64(tf.TokenID), true +} + +func (c *caches) GetValidators() []string { + return c.validators.slice() +} + +func (cm *cacheMsgs) Equals(cm2 *cacheMsgs) bool { + if cm == nil && cm2 == nil { + return true + } + if cm == nil || cm2 == nil { + return false + } + for idx, v := range *cm { + v2 := (*cm2)[idx] + if !reflect.DeepEqual(v, v2) { + return false + } + } + return true +} + +func (cm *cacheMsgs) Cpy() *cacheMsgs { + ret := make([]*oracletypes.MsgItem, 0, len(*cm)) + for _, msg := range *cm { + msgCpy := *msg + ret = append(ret, &msgCpy) + } + cmNew := cacheMsgs(ret) + return &cmNew +} + +func (cm *cacheMsgs) add(item *oracletypes.MsgItem) { + *cm = append(*cm, item) +} + +func (cm *cacheMsgs) commit(ctx sdk.Context, k Submitter) { + if len(*cm) == 0 { + return + } + recentMsgs := oracletypes.RecentMsg{ + // #nosec G115 // height is not negative + Block: uint64(ctx.BlockHeight()), + Msgs: *cm, + } + + k.SetMsgItemsForCache(ctx, recentMsgs) + + *cm = make([]*oracletypes.MsgItem, 0) +} + +func (cv *cacheValidator) Equals(cv2 *cacheValidator) bool { + if cv == nil && cv2 == nil { + return true + } + if cv == nil || cv2 == nil { + return false + } + if cv.update != cv2.update { + return false + } + if len(cv.validators) != len(cv2.validators) { + return false + } + for k, v := range cv.validators { + if v2, ok := cv2.validators[k]; !ok { + return false + } else if v.Cmp(v2) != 0 { + return false + } + } + return true +} + +func (cv *cacheValidator) add(validators map[string]*big.Int) { + for operator, newPower := range validators { + if power, ok := cv.validators[operator]; ok { + if newPower.Cmp(zeroBig) == 0 { + delete(cv.validators, operator) + cv.update = true + } else if power.Cmp(newPower) != 0 { + cv.validators[operator].Set(newPower) + cv.update = true + } + } else { + cv.update = true + np := *newPower + cv.validators[operator] = &np + } + } +} + +func (cv *cacheValidator) commit(ctx sdk.Context, k Submitter) { + if !cv.update { + return + } + // #nosec blockHeight is not negative + // TODO: consider change the define of all height types in proto to int64(since cosmossdk defined block height as int64) to get avoid all these conversion + k.SetValidatorUpdateForCache(ctx, oracletypes.ValidatorUpdateBlock{Block: uint64(ctx.BlockHeight())}) + cv.update = false +} + +func (cv *cacheValidator) size() int { + return len(cv.validators) +} + +func (cv *cacheValidator) slice() []string { + if cv.size() == 0 { + return nil + } + validators := make([]string, 0, cv.size()) + for validator := range cv.validators { + validators = append(validators, validator) + } + slices.Sort(validators) + return validators +} + +func (cp 
*cacheParams) Equals(cp2 *cacheParams) bool { + if cp == nil && cp2 == nil { + return true + } + if cp == nil || cp2 == nil { + return false + } + if cp.update != cp2.update { + return false + } + p1 := cp.params + p2 := cp2.params + return reflect.DeepEqual(p1, p2) +} + +func (cp *cacheParams) add(p *oracletypes.Params) { + cp.params = p + cp.update = true +} + +func (cp *cacheParams) commit(ctx sdk.Context, k Submitter) { + if !cp.update { + return + } + k.SetParamsForCache(ctx, oracletypes.RecentParams{ + // #nosec G115 blockheight is not negative + Block: uint64(ctx.BlockHeight()), + Params: cp.params, + }) + cp.update = false +} + +// memory cache +func (c *caches) AddCache(i any) error { + switch item := i.(type) { + case *oracletypes.MsgItem: + c.msg.add(item) + case *oracletypes.Params: + c.params.add(item) + case ItemV: + c.validators.add(item) + default: + return fmt.Errorf("unsupported cache type: %T", i) + } + return nil +} + +// Read reads the cache +func (c *caches) Read(i any) bool { + switch item := i.(type) { + case ItemV: + if item == nil { + return false + } + for addr, power := range c.validators.validators { + item[addr] = power + } + return c.validators.update + case *oracletypes.Params: + if item == nil { + return false + } + *item = *c.params.params + return c.params.update + case *[]*oracletypes.MsgItem: + if item == nil { + return false + } + *item = *c.msg + return len(*c.msg) > 0 + default: + return false + } +} + +func (c *caches) GetThreshold() *threshold { + params := &oracletypes.Params{} + c.Read(params) + return &threshold{ + totalPower: c.GetTotalPower(), + thresholdA: big.NewInt(int64(params.ThresholdA)), + thresholdB: big.NewInt(int64(params.ThresholdB)), + } +} + +// GetPowerForValidator returns the power of a validator +func (c *caches) GetPowerForValidator(validator string) (power *big.Int, found bool) { + if c.validators != nil && + len(c.validators.validators) > 0 { + power = c.validators.validators[validator] + if power != nil { + found = true + } + } + // if caches not filled yet, we just return not-found instead of fetching from keeper + return +} + +// GetTotalPower returns the total power of all validators +func (c *caches) GetTotalPower() (totalPower *big.Int) { + totalPower = big.NewInt(0) + if c.validators == nil { + return + } + for _, power := range c.validators.validators { + totalPower.Add(totalPower, power) + } + return +} + +// GetTokenFeederForFeederID returns the token feeder for a feederID +func (c *caches) GetTokenFeederForFeederID(feederID int64) (tokenFeeder *oracletypes.TokenFeeder, found bool) { + if c.params != nil && + c.params.params != nil && + int64(len(c.params.params.TokenFeeders)) > feederID { + tokenFeeder = c.params.params.TokenFeeders[feederID] + found = true + } + return +} + +// SkipCommit skips the real commit by setting the update flags to false +func (c *caches) SkipCommit() { + c.validators.update = false + c.params.update = false +} + +// Commit commits the cache to the KVStore +func (c *caches) Commit(ctx sdk.Context, reset bool) (msgUpdated, validatorsUpdated, paramsUpdated bool) { + if len(*(c.msg)) > 0 { + c.msg.commit(ctx, c.k) + msgUpdated = true + } + + if c.validators.update { + c.validators.commit(ctx, c.k) + validatorsUpdated = true + } + + if c.params.update { + c.params.commit(ctx, c.k) + paramsUpdated = true + } + if reset { + c.ResetCaches() + } + return +} + +func (c *caches) ResetCaches() { + *c = *(newCaches()) +} + +func newCaches() *caches { + return &caches{ + msg: new(cacheMsgs), 
validators: &cacheValidator{ + validators: make(map[string]*big.Int), + }, + params: &cacheParams{}, + } +} diff --git a/x/oracle/keeper/feedermanagement/date_test.go b/x/oracle/keeper/feedermanagement/date_test.go new file mode 100644 index 000000000..0e861109f --- /dev/null +++ b/x/oracle/keeper/feedermanagement/date_test.go @@ -0,0 +1,257 @@ +package feedermanagement + +import oracletypes "github.com/ExocoreNetwork/exocore/x/oracle/types" + +var ( + ps1 = &priceSource{ + deterministic: true, + sourceID: 1, + prices: []*PriceInfo{{Price: "999", Decimal: 8, DetID: "1"}}, + } + ps1_2 = &priceSource{ + deterministic: true, + sourceID: 1, + prices: []*PriceInfo{{Price: "999", Decimal: 8, DetID: "1"}, {Price: "998", Decimal: 8, DetID: "1"}}, + } + ps1_3 = &priceSource{ + deterministic: true, + sourceID: 1, + prices: []*PriceInfo{{Price: "999", Decimal: 8, DetID: "1"}, {Price: "999", Decimal: 8, DetID: "2"}}, + } + ps2 = &priceSource{ + deterministic: true, + sourceID: 1, + prices: []*PriceInfo{{Price: "999", Decimal: 8, DetID: "2"}}, + } + ps3 = &priceSource{ + deterministic: true, + sourceID: 2, + prices: []*PriceInfo{{Price: "999", Decimal: 8, DetID: "3"}}, + } + ps3_2 = &priceSource{ + deterministic: true, + sourceID: 1, + prices: []*PriceInfo{{Price: "999", Decimal: 8, DetID: "3"}}, + } + ps4 = &priceSource{ + deterministic: true, + sourceID: 1, + prices: []*PriceInfo{{Price: "999", Decimal: 8, DetID: "2"}, {Price: "999", Decimal: 8, DetID: "2"}}, + } + ps5 = &priceSource{ + deterministic: true, + sourceID: 1, + detIDs: map[string]struct{}{"1": {}, "2": {}}, + prices: []*PriceInfo{{Price: "999", Decimal: 8, DetID: "1"}, {Price: "999", Decimal: 8, DetID: "2"}}, + } + ps6 = &priceSource{ + deterministic: true, + sourceID: 1, + detIDs: map[string]struct{}{"1": {}, "2": {}, "3": {}}, + prices: []*PriceInfo{{Price: "999", Decimal: 8, DetID: "1"}, {Price: "999", Decimal: 8, DetID: "2"}, {Price: "999", Decimal: 8, DetID: "3"}}, + } + msgItem1 = &MsgItem{ + FeederID: 1, + Validator: "validator1", + Power: big1, + PriceSources: []*priceSource{ps1_2, ps2}, + } + msgItem1_2 = &MsgItem{ + FeederID: 1, + Validator: "validator1", + Power: big1, + PriceSources: []*priceSource{ps1, ps2}, + } + msgItem1_3 = &MsgItem{ + FeederID: 1, + Validator: "validator1", + Power: big1, + PriceSources: []*priceSource{ps1}, + } + msgItem2 = &MsgItem{ + FeederID: 1, + Validator: "validator2", + Power: big1, + PriceSources: []*priceSource{ps1_2, ps2}, + } + msgItem2_2 = &MsgItem{ + FeederID: 1, + Validator: "validator2", + Power: big1, + PriceSources: []*priceSource{ps1, ps2}, + } + msgItem3 = &MsgItem{ + FeederID: 1, + Validator: "validator3", + Power: big1, + PriceSources: []*priceSource{ps2}, + } + protoMsgItem1 = &oracletypes.MsgItem{ + FeederID: 1, + PSources: []*oracletypes.PriceSource{ + { + SourceID: 1, + Prices: []*oracletypes.PriceTimeDetID{ + { + Price: "999", + Decimal: 8, + DetID: "1", + Timestamp: timestamp, + }, + }, + }, + }, + Validator: "validator1", + } + protoMsgItem2 = &oracletypes.MsgItem{ + FeederID: 1, + PSources: []*oracletypes.PriceSource{ + { + SourceID: 1, + Prices: []*oracletypes.PriceTimeDetID{ + { + Price: "999", + Decimal: 8, + DetID: "2", + Timestamp: timestamp, + }, + }, + }, + }, + Validator: "validator1", + } + protoMsgItem3 = &oracletypes.MsgItem{ + FeederID: 1, + PSources: []*oracletypes.PriceSource{ + { + SourceID: 1, + Prices: []*oracletypes.PriceTimeDetID{ + { + Price: "999", + Decimal: 8, + DetID: "2", + Timestamp: timestamp, + }, + }, + }, + }, + Validator: "validator2", + } + 
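// protoMsgItem4 and protoMsgItem5 repeat the same detID "2" quote from validator3 and
+ // validator4, so the tally/aggregator tests can accumulate enough power to cross the 2/3
+ // threshold on detID "2"
+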
protoMsgItem4 = &oracletypes.MsgItem{ + FeederID: 1, + PSources: []*oracletypes.PriceSource{ + { + SourceID: 1, + Prices: []*oracletypes.PriceTimeDetID{ + { + Price: "999", + Decimal: 8, + DetID: "2", + Timestamp: timestamp, + }, + }, + }, + }, + Validator: "validator3", + } + protoMsgItem5 = &oracletypes.MsgItem{ + FeederID: 1, + PSources: []*oracletypes.PriceSource{ + { + SourceID: 1, + Prices: []*oracletypes.PriceTimeDetID{ + { + Price: "999", + Decimal: 8, + DetID: "2", + Timestamp: timestamp, + }, + }, + }, + }, + Validator: "validator4", + } + + pr1 = &PriceResult{ + Price: "999", + Decimal: 8, + DetID: "1", + Timestamp: timestamp, + } + pr1_2 = &PriceResult{ + Price: "999", + Decimal: 8, + } + pw1 = &PricePower{ + Price: &PriceInfo{ + Price: "999", + Decimal: 8, + DetID: "1", + Timestamp: timestamp, + }, + Power: big1, + Validators: map[string]struct{}{"validator1": {}}, + } + pw2 = &PricePower{ + Price: &PriceInfo{ + Price: "999", + Decimal: 8, + DetID: "2", + Timestamp: timestamp, + }, + Power: big1, + Validators: map[string]struct{}{"validator1": {}}, + } + pw2_2 = &PricePower{ + Price: &PriceInfo{ + Price: "999", + Decimal: 8, + DetID: "2", + Timestamp: timestamp, + }, + Power: big2, + Validators: map[string]struct{}{"validator1": {}, "validator2": {}}, + } + + pw3 = &PricePower{ + Price: &PriceInfo{ + Price: "999", + Decimal: 8, + DetID: "2", + Timestamp: timestamp, + }, + Power: big1, + Validators: map[string]struct{}{"validator2": {}}, + } + pw3_2 = &PricePower{ + Price: &PriceInfo{ + Price: "999", + Decimal: 8, + DetID: "2", + Timestamp: timestamp, + }, + Power: big3, + Validators: map[string]struct{}{"validator1": {}, "validator2": {}, "validator3": {}}, + } + + pw4 = &PricePower{ + Price: &PriceInfo{ + Price: "999", + Decimal: 8, + DetID: "2", + Timestamp: timestamp, + }, + Power: big1, + Validators: map[string]struct{}{"validator3": {}}, + } + pw5 = &PricePower{ + Price: &PriceInfo{ + Price: "999", + Decimal: 8, + DetID: "2", + Timestamp: timestamp, + }, + Power: big1, + Validators: map[string]struct{}{"validator4": {}}, + } +) diff --git a/x/oracle/keeper/feedermanagement/feedermanager.go b/x/oracle/keeper/feedermanagement/feedermanager.go new file mode 100644 index 000000000..aac015f32 --- /dev/null +++ b/x/oracle/keeper/feedermanagement/feedermanager.go @@ -0,0 +1,834 @@ +package feedermanagement + +import ( + "fmt" + "math/big" + "sort" + "strconv" + "strings" + + sdkerrors "cosmossdk.io/errors" + "github.com/ExocoreNetwork/exocore/x/oracle/keeper/common" + oracletypes "github.com/ExocoreNetwork/exocore/x/oracle/types" + cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" +) + +func NewFeederManager(k common.KeeperOracle) *FeederManager { + return &FeederManager{ + k: k, + sortedFeederIDs: make([]int64, 0), + rounds: make(map[int64]*round), + cs: nil, + } +} + +//nolint:revive +func (f *FeederManager) GetCaches() *caches { + return f.cs +} + +func (f *FeederManager) GetParamsFromCache() *oracletypes.Params { + return f.cs.params.params +} + +func (f *FeederManager) GetTokenIDForFeederID(feederID int64) (int64, bool) { + return f.cs.GetTokenIDForFeederID(feederID) +} + +func (f *FeederManager) SetKeeper(k common.KeeperOracle) { + f.k = k +} + +func (f *FeederManager) SetNilCaches() { + f.cs = nil +} + +// BeginBlock initializes the caches and slashing records, and setup the rounds +func (f *FeederManager) BeginBlock(ctx sdk.Context) (recovered bool) { + // if 
the cache is nil and we are not in recovery mode, init the caches + if f.cs == nil { + recovered = f.recovery(ctx) + // init feederManager if recovery failed; this should only happen at block_height==1 + if !recovered { + f.initCaches(ctx) + if ctx.BlockHeight() < 2 { + f.SetParamsUpdated() + f.SetValidatorsUpdated() + } + } + f.initBehaviorRecords(ctx, ctx.BlockHeight()) + // in recovery mode, snapshot of feederManager is set in the beginblock instead of in the process of replaying endblockInrecovery + f.updateCheckTx() + } + return +} + +func (f *FeederManager) EndBlock(ctx sdk.Context) { + // update params and validator set if necessary in caches and commit all updated information + addedValidators := f.updateAndCommitCaches(ctx) + + // update Slashing related records (reportInfo, missCountBitArray), handle case for 1. resetSlashing, 2. new validators added for validatorset change + f.updateBehaviorRecords(ctx, addedValidators) + + // update rounds, including creating new rounds based on params changes and removing expired rounds + // handleQuoteBehavior for ending quotes of rounds + // commit state of mature rounds + f.updateAndCommitRounds(ctx) + + // set status to open for rounds before their quoting window + feederIDs := f.prepareRounds(ctx) + // remove nonces for closing quoting-window and set nonces for opening quoting-window + f.setupNonces(ctx, feederIDs) + + if f.validatorsUpdated { + f.initBehaviorRecords(ctx, ctx.BlockHeight()+1) + } + + f.ResetFlags() + + f.updateCheckTx() +} + +func (f *FeederManager) EndBlockInRecovery(ctx sdk.Context, params *oracletypes.Params) { + if params != nil { + f.SetParamsUpdated() + _ = f.cs.AddCache(params) + } + f.updateAndCommitRoundsInRecovery(ctx) + f.prepareRounds(ctx) + f.ResetFlags() +} + +func (f *FeederManager) setupNonces(ctx sdk.Context, feederIDs []int64) { + logger := f.k.Logger(ctx) + // remove nonces for closed quoting windows + height := ctx.BlockHeight() + if f.forceSeal { + for _, r := range f.rounds { + // #nosec G115 // feederID is index of slice + f.k.RemoveNonceWithFeederIDForAll(ctx, uint64(r.feederID)) + } + } else { + for _, r := range f.rounds { + if r.IsQuotingWindowEnd(height) { + logger.Debug("clear nonces for closing quoting window", "feederID", r.feederID, "roundID", r.roundID, "basedBlock", r.roundBaseBlock, "height", height) + // #nosec G115 // feederID is index of slice + f.k.RemoveNonceWithFeederIDForAll(ctx, uint64(r.feederID)) + } + } + } + // setup nonces for opening quoting windows + if len(feederIDs) == 0 { + return + } + validators := f.cs.GetValidators() + for _, feederID := range feederIDs { + r := f.rounds[feederID] + logger.Debug("init nonces for new quoting window", "feederID", feederID, "roundID", r.roundID, "basedBlock", r.roundBaseBlock, "height", height) + // #nosec G115 // feederID is index of slice + f.k.AddZeroNonceItemWithFeederIDForValidators(ctx, uint64(feederID), validators) + } +} + +func (f *FeederManager) initBehaviorRecords(ctx sdk.Context, height int64) { + if !f.validatorsUpdated { + return + } + validators := f.cs.GetValidators() + for _, validator := range validators { + // f.k.InitValidatorReportInfo(ctx, validator, ctx.BlockHeight()) + f.k.InitValidatorReportInfo(ctx, validator, height) + } +} + +func (f *FeederManager) updateBehaviorRecords(ctx sdk.Context, addedValidators []string) { + height := ctx.BlockHeight() + if f.resetSlashing { + // reset all validators' reportInfo + f.k.ClearAllValidatorReportInfo(ctx) + f.k.ClearAllValidatorMissedRoundBitArray(ctx) + validators := 
f.cs.GetValidators() + for _, validator := range validators { + f.k.InitValidatorReportInfo(ctx, validator, height) + } + } else if f.validatorsUpdated { + for _, validator := range addedValidators { + // add possible new added validator info for slashing tracking + f.k.InitValidatorReportInfo(ctx, validator, height) + } + } +} + +func (f *FeederManager) prepareRounds(ctx sdk.Context) []int64 { + logger := f.k.Logger(ctx) + feederIDs := make([]int64, 0) + for _, r := range f.rounds { + if open := r.PrepareForNextBlock(ctx.BlockHeight()); open { + feederIDs = append(feederIDs, r.feederID) + } + } + sort.Slice(feederIDs, func(i, j int) bool { + return feederIDs[i] < feederIDs[j] + }) + height := ctx.BlockHeight() + for _, feederID := range feederIDs { + r := f.rounds[feederID] + logger.Info("[mem] open quoting window for round", "feederID", feederID, "roundID", r.roundID, "basedBlock", r.roundBaseBlock, "height", height) + } + return feederIDs +} + +// 1. update and commit Params if updated +// 2. update and commit validatorPowers if updated +// forceSeal: 1. params has some modifications related to quoting. 2.validatorSet changed +// resetSlashing: params has some modifications related to oracle_slashing +// func (f *FeederManager) updateAndCommitCaches(ctx sdk.Context) (forceSeal, resetSlashing bool, prevValidators, addedValidators []string) { +func (f *FeederManager) updateAndCommitCaches(ctx sdk.Context) (addedValidators []string) { + // update params in caches + if f.paramsUpdated { + paramsOld := &oracletypes.Params{} + f.cs.Read(paramsOld) + params := f.k.GetParams(ctx) + if paramsOld.IsForceSealingUpdate(¶ms) { + f.SetForceSeal() + } + if paramsOld.IsSlashingResetUpdate(¶ms) { + f.SetResetSlasing() + } + _ = f.cs.AddCache(¶ms) + } + + // update validators + validatorUpdates := f.k.GetValidatorUpdates(ctx) + if len(validatorUpdates) > 0 { + f.SetValidatorsUpdated() + f.SetForceSeal() + addedValidators = make([]string, 0) + validatorMap := make(map[string]*big.Int) + for _, vu := range validatorUpdates { + pubKey, _ := cryptocodec.FromTmProtoPublicKey(vu.PubKey) + validatorStr := sdk.ConsAddress(pubKey.Address()).String() + validatorMap[validatorStr] = big.NewInt(vu.Power) + if vu.Power > 0 { + addedValidators = append(addedValidators, validatorStr) + } + } + // update validator set information in cache + _ = f.cs.AddCache(ItemV(validatorMap)) + } + + // commit caches: msgs is exists, params if updated, validatorPowers is updated + _, vUpdated, pUpdated := f.cs.Commit(ctx, false) + if vUpdated || pUpdated { + f.k.Logger(ctx).Info("update caches", "validatorUpdates", vUpdated, "paramsUpdated", pUpdated) + } + return addedValidators +} + +func (f *FeederManager) commitRoundsInRecovery() { + for _, r := range f.rounds { + if r.Committable() { + r.FinalPrice() + r.status = roundStatusClosed + } + // close all quotingWindow to skip current rounds' 'handlQuotingMisBehavior' + if f.forceSeal { + r.closeQuotingWindow() + } + } +} + +func (f *FeederManager) commitRounds(ctx sdk.Context) { + logger := f.k.Logger(ctx) + height := ctx.BlockHeight() + successFeederIDs := make([]string, 0) + for _, feederID := range f.sortedFeederIDs { + r := f.rounds[feederID] + if r.Committable() { + finalPrice, ok := r.FinalPrice() + if !ok { + logger.Info("commit round with price from previous", "feederID", r.feederID, "roundID", r.roundID, "baseBlock", r.roundBaseBlock, "heigth", height) + // #nosec G115 // tokenID is index of slice + f.k.GrowRoundID(ctx, uint64(r.tokenID)) + } else { + if 
f.cs.IsRuleV1(r.feederID) { + priceCommit := finalPrice.ProtoPriceTimeRound(r.roundID, ctx.BlockTime().Format(oracletypes.TimeLayout)) + logger.Info("commit round with aggregated price", "feederID", r.feederID, "roundID", r.roundID, "baseBlock", r.roundBaseBlock, "price", priceCommit, "heigth", height) + + // #nosec G115 // tokenID is index of slice + f.k.AppendPriceTR(ctx, uint64(r.tokenID), *priceCommit) + + fstr := strconv.FormatInt(feederID, 10) + successFeederIDs = append(successFeederIDs, fstr) // there's no valid price for any round yet + } else { + logger.Error("We currently only support rules under oracle V1: only allow price from source Chainlink", "feederID", r.feederID) + } + } + // keep aggregator for possible 'handlQuotingMisBehavior' at quotingWindowEnd + r.status = roundStatusClosed + } + // close all quotingWindow to skip current rounds' 'handlQuotingMisBehavior' + if f.forceSeal { + r.closeQuotingWindow() + } + } + if len(successFeederIDs) > 0 { + feederIDsStr := strings.Join(successFeederIDs, "_") + ctx.EventManager().EmitEvent(sdk.NewEvent( + oracletypes.EventTypeCreatePrice, + sdk.NewAttribute(oracletypes.AttributeKeyPriceUpdated, oracletypes.AttributeValuePriceUpdatedSuccess), + sdk.NewAttribute(oracletypes.AttributeKeyFeederIDs, feederIDsStr), + )) + } +} + +func (f *FeederManager) handleQuotingMisBehaviorInRecovery(ctx sdk.Context) { + height := ctx.BlockHeight() + logger := f.k.Logger(ctx) + + for _, r := range f.rounds { + if r.IsQuotingWindowEnd(height) && r.a != nil { + validators := f.cs.GetValidators() + for _, validator := range validators { + _, found := f.k.GetValidatorReportInfo(ctx, validator) + if !found { + logger.Error(fmt.Sprintf("Expected report info for validator %s but not found", validator)) + continue + } + _, malicious := r.PerformanceReview(validator) + if malicious { + r.getFinalDetIDForSourceID(oracletypes.SourceChainlinkID) + r.FinalPrice() + } + } + r.closeQuotingWindow() + } + } +} + +func (f *FeederManager) handleQuotingMisBehavior(ctx sdk.Context) { + height := ctx.BlockHeight() + logger := f.k.Logger(ctx) + + for _, r := range f.rounds { + if r.IsQuotingWindowEnd(height) { + if _, found := r.FinalPrice(); !found { + r.closeQuotingWindow() + continue + } + validators := f.cs.GetValidators() + for _, validator := range validators { + reportedInfo, found := f.k.GetValidatorReportInfo(ctx, validator) + if !found { + logger.Error(fmt.Sprintf("Expected report info for validator %s but not found", validator)) + continue + } + miss, malicious := r.PerformanceReview(validator) + if malicious { + detID := r.getFinalDetIDForSourceID(oracletypes.SourceChainlinkID) + finalPrice, _ := r.FinalPrice() + logger.Info( + "confirmed malicious price", + "validator", validator, + "infraction_height", height, + "infraction_time", ctx.BlockTime(), + "feederID", r.feederID, + "detID", detID, + "sourceID", oracletypes.SourceChainlinkID, + "finalPrice", finalPrice, + ) + consAddr, err := sdk.ConsAddressFromBech32(validator) + if err != nil { + panic("invalid consAddr string") + } + + operator := f.k.ValidatorByConsAddr(ctx, consAddr) + if operator != nil && !operator.IsJailed() { + power, _ := f.cs.GetPowerForValidator(validator) + coinsBurned := f.k.SlashWithInfractionReason(ctx, consAddr, height, power.Int64(), f.k.GetSlashFractionMalicious(ctx), stakingtypes.Infraction_INFRACTION_UNSPECIFIED) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + oracletypes.EventTypeOracleSlash, + sdk.NewAttribute(oracletypes.AttributeKeyValidatorKey, validator), + 
sdk.NewAttribute(oracletypes.AttributeKeyPower, fmt.Sprintf("%d", power)), + sdk.NewAttribute(oracletypes.AttributeKeyReason, oracletypes.AttributeValueMaliciousReportPrice), + sdk.NewAttribute(oracletypes.AttributeKeyJailed, validator), + sdk.NewAttribute(oracletypes.AttributeKeyBurnedCoins, coinsBurned.String()), + ), + ) + f.k.Jail(ctx, consAddr) + jailUntil := ctx.BlockHeader().Time.Add(f.k.GetMaliciousJailDuration(ctx)) + f.k.JailUntil(ctx, consAddr, jailUntil) + + reportedInfo.MissedRoundsCounter = 0 + reportedInfo.IndexOffset = 0 + f.k.ClearValidatorMissedRoundBitArray(ctx, validator) + } + continue + } + reportedRoundsWindow := f.k.GetReportedRoundsWindow(ctx) + // #nosec G115 + index := uint64(reportedInfo.IndexOffset % reportedRoundsWindow) + reportedInfo.IndexOffset++ + // Update reported round bit array & counter + // This counter just tracks the sum of the bit array + // That way we avoid needing to read/write the whole array each time + previous := f.k.GetValidatorMissedRoundBitArray(ctx, validator, index) + switch { + case !previous && miss: + // Array value has changed from not missed to missed, increment counter + f.k.SetValidatorMissedRoundBitArray(ctx, validator, index, true) + reportedInfo.MissedRoundsCounter++ + case previous && !miss: + // Array value has changed from missed to not missed, decrement counter + f.k.SetValidatorMissedRoundBitArray(ctx, validator, index, false) + reportedInfo.MissedRoundsCounter-- + default: + // Array value at this index has not changed, no need to update counter + } + + minReportedPerWindow := f.k.GetMinReportedPerWindow(ctx) + + if miss { + ctx.EventManager().EmitEvent( + sdk.NewEvent( + oracletypes.EventTypeOracleLiveness, + sdk.NewAttribute(oracletypes.AttributeKeyValidatorKey, validator), + sdk.NewAttribute(oracletypes.AttributeKeyMissedRounds, fmt.Sprintf("%d", reportedInfo.MissedRoundsCounter)), + sdk.NewAttribute(oracletypes.AttributeKeyHeight, fmt.Sprintf("%d", height)), + ), + ) + + logger.Info( + "oracle_absent validator", + "height", ctx.BlockHeight(), + "validator", validator, + "missed", reportedInfo.MissedRoundsCounter, + "threshold", minReportedPerWindow, + ) + } + + minHeight := reportedInfo.StartHeight + reportedRoundsWindow + maxMissed := reportedRoundsWindow - minReportedPerWindow + // if we are past the minimum height and the validator has missed too many rounds reporting prices, punish them + if height > minHeight && reportedInfo.MissedRoundsCounter > maxMissed { + consAddr, err := sdk.ConsAddressFromBech32(validator) + if err != nil { + f.k.Logger(ctx).Error("when do orale_performance_review, got invalid consAddr string. This should never happen", "validatorStr", validator) + continue + } + operator := f.k.ValidatorByConsAddr(ctx, consAddr) + if operator != nil && !operator.IsJailed() { + // missing rounds confirmed: just jail the validator + f.k.Jail(ctx, consAddr) + jailUntil := ctx.BlockHeader().Time.Add(f.k.GetMissJailDuration(ctx)) + f.k.JailUntil(ctx, consAddr, jailUntil) + + // We need to reset the counter & array so that the validator won't be immediately slashed for miss report info upon rebonding. 
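+ // The same reset is done by cosmos-sdk's x/slashing after a downtime jail: zeroing the counter and clearing the bit array gives the validator a clean reporting window once it rebonds.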
+ reportedInfo.MissedRoundsCounter = 0 + reportedInfo.IndexOffset = 0 + f.k.ClearValidatorMissedRoundBitArray(ctx, validator) + + logger.Info( + "jailing validator due to oracle_liveness fault", + "height", height, + "validator", consAddr.String(), + "min_height", minHeight, + "threshold", minReportedPerWindow, + "jailed_until", jailUntil, + ) + } else { + // validator was (a) not found or (b) already jailed so we do not slash + logger.Info( + "validator would have been slashed for too many missed reporting price, but was either not found in store or already jailed", + "validator", validator, + ) + } + } + // Set the updated reportInfo + f.k.SetValidatorReportInfo(ctx, validator, reportedInfo) + } + r.closeQuotingWindow() + } + } +} + +func (f *FeederManager) setCommittableState(ctx sdk.Context) { + if f.forceSeal { + for _, r := range f.rounds { + if r.status == roundStatusOpen { + r.status = roundStatusCommittable + } + } + } else { + height := ctx.BlockHeight() + for _, r := range f.rounds { + if r.IsQuotingWindowEnd(height) && r.status == roundStatusOpen { + r.status = roundStatusCommittable + } + } + } +} + +func (f *FeederManager) updateRoundsParamsAndAddNewRounds(ctx sdk.Context) { + height := ctx.BlockHeight() + logger := f.k.Logger(ctx) + + if f.paramsUpdated { + params := &oracletypes.Params{} + f.cs.Read(params) + existsFeederIDs := make(map[int64]struct{}) + for _, r := range f.rounds { + r.UpdateParams(params.TokenFeeders[r.feederID], int64(params.MaxNonce)) + existsFeederIDs[r.feederID] = struct{}{} + } + // add new rounds + for feederID, tokenFeeder := range params.TokenFeeders { + if feederID == 0 { + continue + } + feederID := int64(feederID) + if _, ok := existsFeederIDs[feederID]; !ok && (tokenFeeder.EndBlock == 0 || tokenFeeder.EndBlock > uint64(height)) { + logger.Info("[mem] add new round", "feederID", feederID, "height", height) + f.sortedFeederIDs = append(f.sortedFeederIDs, feederID) + f.rounds[feederID] = newRound(feederID, tokenFeeder, int64(params.MaxNonce), f.cs, NewAggMedian()) + } + } + f.sortedFeederIDs.sort() + } +} + +func (f *FeederManager) removeExpiredRounds(ctx sdk.Context) { + height := ctx.BlockHeight() + expiredFeederIDs := make([]int64, 0) + for _, r := range f.rounds { + if r.endBlock > 0 && r.endBlock <= height { + expiredFeederIDs = append(expiredFeederIDs, r.feederID) + } + } + for _, feederID := range expiredFeederIDs { + if r := f.rounds[feederID]; r.status != roundStatusClosed { + r.closeQuotingWindow() + f.k.RemoveNonceWithFeederIDForAll(ctx, uint64(r.feederID)) + } + delete(f.rounds, feederID) + f.sortedFeederIDs.remove(feederID) + } +} + +func (f *FeederManager) updateAndCommitRoundsInRecovery(ctx sdk.Context) { + f.setCommittableState(ctx) + f.commitRoundsInRecovery() + f.handleQuotingMisBehaviorInRecovery(ctx) + f.updateRoundsParamsAndAddNewRounds(ctx) + f.removeExpiredRounds(ctx) +} + +func (f *FeederManager) updateAndCommitRounds(ctx sdk.Context) { + f.setCommittableState(ctx) + f.commitRounds(ctx) + // behaviors review and close quotingWindow + f.handleQuotingMisBehavior(ctx) + f.updateRoundsParamsAndAddNewRounds(ctx) + f.removeExpiredRounds(ctx) +} + +func (f *FeederManager) ResetFlags() { + f.paramsUpdated = false + f.validatorsUpdated = false + f.forceSeal = false + f.resetSlashing = false +} + +func (f *FeederManager) SetParamsUpdated() { + f.paramsUpdated = true +} + +func (f *FeederManager) SetValidatorsUpdated() { + f.validatorsUpdated = true +} + +func (f *FeederManager) SetResetSlasing() { + f.resetSlashing = true +} + +func 
(f *FeederManager) SetForceSeal() { + f.forceSeal = true +} + +//nolint:revive +func (f *FeederManager) ValidateMsg(msg *oracletypes.MsgCreatePrice) error { + // TODO: implement me + return nil +} + +func (f *FeederManager) ProcessQuote(ctx sdk.Context, msg *oracletypes.MsgCreatePrice, isCheckTx bool) (*oracletypes.PriceTimeRound, error) { + if isCheckTx { + f = f.getCheckTx() + } + msgItem := getProtoMsgItemFromQuote(msg) + + // #nosec G115 // feederID is index of slice + r, ok := f.rounds[int64(msgItem.FeederID)] + if !ok { + // This should not happened since we do check the nonce in anthHandle + return nil, fmt.Errorf("round not exists for feederID:%d, porposer:%s", msgItem.FeederID, msgItem.Validator) + } + + // #nosec G115 // baseBlock is block height which is not negative + if valid := r.ValidQuotingBaseBlock(int64(msg.BasedBlock)); !valid { + return nil, fmt.Errorf("failed to process price-feed msg for feederID:%d, round is quoting:%t,quotingWindow is open:%t, expected baseBlock:%d, got baseBlock:%d", msgItem.FeederID, r.IsQuoting(), r.IsQuotingWindowOpen(), r.roundBaseBlock, msg.BasedBlock) + } + + // tally msgItem + finalPrice, validMsgItem, err := r.Tally(msgItem) + + // record msgItem in caches if needed + defer func() { + if !isCheckTx && + validMsgItem != nil && + (err == nil || sdkerrors.IsOf(err, oracletypes.ErrQuoteRecorded)) { + _ = f.cs.AddCache(validMsgItem) + } + }() + + if err != nil { + return nil, err + } + + if finalPrice == nil { + return nil, nil + } + return finalPrice.ProtoPriceTimeRound(r.roundID, ctx.BlockTime().Format(oracletypes.TimeLayout)), nil +} + +func (f *FeederManager) getCheckTx() *FeederManager { + fCheckTx := f.fCheckTx + ret := *fCheckTx + ret.fCheckTx = nil + + // rounds + rounds := make(map[int64]*round) + for id, r := range fCheckTx.rounds { + rounds[id] = r.CopyForCheckTx() + } + ret.rounds = rounds + + return &ret +} + +func (f *FeederManager) updateCheckTx() { + // flgas are taken care of + // sortedFeederIDs will not be modified except in abci.EndBlock + // successFeedereIDs will not be modifed except in abci.EndBlock + // caches will not be modifed except in abci.EndBlock, abci.DeliverTx (in abci.Query_simulate, or abci.CheckTx the update in ProcessQuote is forbided) + // shallow copy is good enough for these fields + + ret := *f + ret.fCheckTx = nil + + // rounds + rounds := make(map[int64]*round) + for id, r := range f.rounds { + rounds[id] = r.CopyForCheckTx() + } + ret.rounds = rounds + f.fCheckTx = &ret +} + +func (f *FeederManager) ProcessQuoteInRecovery(msgItems []*oracletypes.MsgItem) { + for _, msgItem := range msgItems { + // #nosec G115 // feederID is index of slice + r, ok := f.rounds[int64(msgItem.FeederID)] + if !ok { + continue + } + // error deos not need to be handled in recovery mode + //nolint:all + r.Tally(msgItem) + } +} + +// initCaches initializes the caches of the FeederManager with keeper, params, validatorPowers +func (f *FeederManager) initCaches(ctx sdk.Context) { + f.cs = newCaches() + params := f.k.GetParams(ctx) + validatorSet := f.k.GetAllExocoreValidators(ctx) + validatorPowers := make(map[string]*big.Int) + for _, v := range validatorSet { + validatorPowers[sdk.ConsAddress(v.Address).String()] = big.NewInt(v.Power) + } + f.cs.Init(f.k, ¶ms, validatorPowers) +} + +func (f *FeederManager) recovery(ctx sdk.Context) bool { + height := ctx.BlockHeight() + recentParamsList, prevRecentParams, latestRecentParams := f.k.GetRecentParamsWithinMaxNonce(ctx) + if latestRecentParams.Block == 0 { + return false + } + 
validatorUpdateBlock, found := f.k.GetValidatorUpdateBlock(ctx) + if !found { + // on recovery mode, the validator update block must be found, otherwise we just panic to stop the node start + // it's safe to panic since this will only happen when the node is starting with something wrong in the store + panic("validator update block not found in recovery mode for feeder manager") + } + // #nosec G115 // validatorUpdateBlock.Block represents blockheight + startHeight, replayRecentParamsList := getRecoveryStartPoint(height, recentParamsList, &prevRecentParams, &latestRecentParams, int64(validatorUpdateBlock.Block)) + + f.cs = newCaches() + params := replayRecentParamsList[0].Params + replayRecentParamsList = replayRecentParamsList[1:] + + validatorSet := f.k.GetAllExocoreValidators(ctx) + validatorPowers := make(map[string]*big.Int) + for _, v := range validatorSet { + validatorPowers[sdk.ConsAddress(v.Address).String()] = big.NewInt(v.Power) + } + + f.cs.Init(f.k, params, validatorPowers) + + replayHeight := startHeight - 1 + + ctxReplay := ctx.WithBlockHeight(replayHeight) + for tfID, tf := range params.TokenFeeders { + if tfID == 0 { + continue + } + // #nosec G115 // safe conversion + if tf.EndBlock > 0 && int64(tf.EndBlock) <= replayHeight { + continue + } + tfID := int64(tfID) + f.rounds[tfID] = newRound(tfID, tf, int64(params.MaxNonce), f.cs, NewAggMedian()) + f.sortedFeederIDs.add(tfID) + } + f.prepareRounds(ctxReplay) + + params = nil + recentMsgs := f.k.GetAllRecentMsg(ctxReplay) + for ; startHeight < height; startHeight++ { + ctxReplay = ctxReplay.WithBlockHeight(startHeight) + // only execute msgItems corresponding to rounds opened on or after replayHeight, since any rounds opened before replay height must be closed on or before height-1 + // which means no memory state need to be updated for thoes rounds + // and we don't need to take care of 'close quoting-window' since the size of replay window t most equals to maxNonce + // #nosec G115 // block is not negative + if len(recentMsgs) > 0 && int64(recentMsgs[0].Block) <= startHeight { + i := 0 + for idx, recentMsg := range recentMsgs { + // #nosec G115 // block height is defined as int64 in cosmossdk + if int64(recentMsg.Block) > startHeight { + break + } + i = idx + if int64(recentMsg.Block) == startHeight { + f.ProcessQuoteInRecovery(recentMsg.Msgs) + break + } + } + recentMsgs = recentMsgs[i+1:] + } + // #nosec G115 + if len(replayRecentParamsList) > 0 && int64(replayRecentParamsList[0].Block) == startHeight { + params = replayRecentParamsList[0].Params + replayRecentParamsList = replayRecentParamsList[1:] + } + f.EndBlockInRecovery(ctxReplay, params) + } + + f.cs.SkipCommit() + + return true +} + +func (f *FeederManager) Equals(fm *FeederManager) bool { + if f == nil && fm == nil { + return true + } + if f == nil || fm == nil { + return false + } + if f.fCheckTx == nil && fm.fCheckTx != nil { + return false + } + if f.fCheckTx != nil && fm.fCheckTx == nil { + return false + } + if !f.fCheckTx.Equals(fm.fCheckTx) { + return false + } + if f.paramsUpdated != fm.paramsUpdated || + f.validatorsUpdated != fm.validatorsUpdated || + f.resetSlashing != fm.resetSlashing || + f.forceSeal != fm.forceSeal { + return false + } + if !f.sortedFeederIDs.Equals(fm.sortedFeederIDs) { + return false + } + if !f.cs.Equals(fm.cs) { + return false + } + if len(f.rounds) != len(fm.rounds) { + return false + } + for id, r := range f.rounds { + if r2, ok := fm.rounds[id]; !ok { + return false + } else if !r.Equals(r2) { + return false + } + } + return 
true +} + +// recoveryStartPoint returns the height to start the recovery process +func getRecoveryStartPoint(currentHeight int64, recentParamsList []*oracletypes.RecentParams, prevRecentParams, latestRecentParams *oracletypes.RecentParams, validatorUpdateHeight int64) (height int64, replayRecentParamsList []*oracletypes.RecentParams) { + if currentHeight > int64(latestRecentParams.Params.MaxNonce) { + height = currentHeight - int64(latestRecentParams.Params.MaxNonce) + } + // there is no params updated in the recentParamsList, we can start from the validator update block if it's not too old(out of the distance of maxNonce from current height) + if len(recentParamsList) == 0 { + if height < validatorUpdateHeight { + height = validatorUpdateHeight + } + // for empty recetParamsList, use latestrecentParams as the start point + replayRecentParamsList = append(replayRecentParamsList, latestRecentParams) + height++ + return height, replayRecentParamsList + } + + if prevRecentParams.Block > 0 && prevRecentParams.Params.IsForceSealingUpdate(recentParamsList[0].Params) { + // #nosec G115 + height = int64(recentParamsList[0].Block) + } + idx := 0 + for i := 1; i < len(recentParamsList); i++ { + if recentParamsList[i-1].Params.IsForceSealingUpdate(recentParamsList[i].Params) { + // #nosec G115 + height = int64(recentParamsList[i].Block) + idx = i + } + } + replayRecentParamsList = recentParamsList[idx:] + + if height < validatorUpdateHeight { + height = validatorUpdateHeight + } + height++ + return height, replayRecentParamsList +} + +func getProtoMsgItemFromQuote(msg *oracletypes.MsgCreatePrice) *oracletypes.MsgItem { + // address has been valid before + validator, _ := oracletypes.ConsAddrStrFromCreator(msg.Creator) + + return &oracletypes.MsgItem{ + FeederID: msg.FeederID, + // validator's consAddr + Validator: validator, + PSources: msg.Prices, + } +} diff --git a/x/oracle/keeper/feedermanagement/feedermanager_test.go b/x/oracle/keeper/feedermanagement/feedermanager_test.go new file mode 100644 index 000000000..c6e9c1376 --- /dev/null +++ b/x/oracle/keeper/feedermanagement/feedermanager_test.go @@ -0,0 +1,110 @@ +package feedermanagement + +import ( + "math/big" + "testing" + + oracletypes "github.com/ExocoreNetwork/exocore/x/oracle/types" + gomock "go.uber.org/mock/gomock" + + . "github.com/smartystreets/goconvey/convey" +) + +//go:generate mockgen -destination mock_cachereader_test.go -package feedermanagement github.com/ExocoreNetwork/exocore/x/oracle/keeper/feedermanagement CacheReader + +func TestFeederManagement(t *testing.T) { + Convey("compare FeederManager", t, func() { + fm := NewFeederManager(nil) + ctrl := gomock.NewController(t) + c := NewMockCacheReader(ctrl) + c.EXPECT(). + GetThreshold(). + Return(&threshold{big.NewInt(4), big.NewInt(1), big.NewInt(3)}). 
+ AnyTimes() + Convey("add a new round", func() { + ps1 := priceSource{deterministic: true, prices: []*PriceInfo{{Price: "123"}}} + ps2 := ps1 + fm2 := *fm + + fm.rounds[1] = newRound(1, oracletypes.DefaultParams().TokenFeeders[1], 3, c, defaultAggMedian) + fm.rounds[1].PrepareForNextBlock(20) + fm.sortedFeederIDs.add(1) + fm.rounds[1].a.ds.AddPriceSource(&ps1, big.NewInt(1), "v1") + + fm2.rounds = make(map[int64]*round) + fm2.sortedFeederIDs = make([]int64, 0) + fm2.rounds[1] = newRound(1, oracletypes.DefaultParams().TokenFeeders[1], 3, c, defaultAggMedian) + fm2.rounds[1].PrepareForNextBlock(20) + fm2.sortedFeederIDs.add(1) + fm2.rounds[1].a.ds.AddPriceSource(&ps2, big.NewInt(1), "v1") + + So(fm.Equals(&fm2), ShouldBeTrue) + }) + }) + Convey("check copy results", t, func() { + ctrl := gomock.NewController(t) + c := NewMockCacheReader(ctrl) + c.EXPECT(). + GetThreshold(). + Return(&threshold{big.NewInt(4), big.NewInt(1), big.NewInt(3)}). + AnyTimes() + + // feedermanager + Convey("copy of feedermanager", func() { + f := tData.NewFeederManager(c) + f.updateCheckTx() + fc := f.fCheckTx + f.fCheckTx = nil + So(f.Equals(fc), ShouldBeTrue) + // So(reflect.DeepEqual(a, ac), ShouldBeTrue) + }) + Convey("copy of round", func() { + r := tData.NewRound(nil) + rc := r.CopyForCheckTx() + So(r.Equals(rc), ShouldBeTrue) + // So(reflect.DeepEqual(r, rc), ShouldBeTrue) + }) + Convey("copy of aggregagtor", func() { + a := tData.NewAggregator(true) + ac := a.CopyForCheckTx() + So(a.Equals(ac), ShouldBeTrue) + // So(reflect.DeepEqual(a, ac), ShouldBeTrue) + }) + Convey("copy of recordsValidators", func() { + v := tData.NewRecordsValidators(true) + vc := v.Cpy() + So(v.Equals(vc), ShouldBeTrue) + // So(reflect.DeepEqual(v, vc), ShouldBeTrue) + }) + Convey("copy of recordsDSs", func() { + dss := tData.NewRecordsDSs(true) + dssc := dss.Cpy() + So(dss.Equals(dssc), ShouldBeTrue) + // So(reflect.DeepEqual(dss, dssc), ShouldBeTrue) + }) + Convey("copy of recordsDS", func() { + ds := tData.NewRecordsDS(true) + dsc := ds.Cpy() + // So(reflect.DeepEqual(ds, dsc), ShouldBeTrue) + So(ds.Equals(dsc), ShouldBeTrue) + }) + Convey("copy of priceValidator", func() { + pv := tData.NewPriceValidator(true) + pvc := pv.Cpy() + So(pv.Equals(pvc), ShouldBeTrue) + // So(reflect.DeepEqual(pv, pvc), ShouldBeTrue) + }) + Convey("copy of priceSource", func() { + ps := tData.NewPriceSource(true, true) + psc := ps.Cpy() + So(ps.Equals(psc), ShouldBeTrue) + // So(reflect.DeepEqual(ps, psc), ShouldBeTrue) + }) + Convey("copy of pricePower", func() { + pw := tData.NewPricePower() + pwc := pw.Cpy() + So(pw.Equals(pwc), ShouldBeTrue) + // So(reflect.DeepEqual(pw, pwc), ShouldBeTrue) + }) + }) +} diff --git a/x/oracle/keeper/feedermanagement/helper_test.go b/x/oracle/keeper/feedermanagement/helper_test.go new file mode 100644 index 000000000..50bedf209 --- /dev/null +++ b/x/oracle/keeper/feedermanagement/helper_test.go @@ -0,0 +1,111 @@ +package feedermanagement + +import ( + "math/big" + "math/rand" + + oracletypes "github.com/ExocoreNetwork/exocore/x/oracle/types" +) + +type Test struct { +} + +var ( + tData *Test + params = oracletypes.DefaultParams() + r = rand.New(rand.NewSource(1)) + timestamp = "2025-01-01 00:01:02" + decimal = int32(8) + big1 = big.NewInt(1) + big2 = big.NewInt(2) + big3 = big.NewInt(3) + big4 = big.NewInt(4) + th = &threshold{big4, big2, big3} +) + +func (t *Test) NewFeederManager(cs CacheReader) *FeederManager { + f := NewFeederManager(nil) + round := t.NewRound(cs) + f.rounds[round.feederID] = round + // prepare 
this Round
+	round.PrepareForNextBlock(int64(params.TokenFeeders[int(round.feederID)].StartBaseBlock))
+	return f
+}
+
+func (t *Test) NewPricePower() *PricePower {
+	return &PricePower{
+		Price:      t.NewPriceInfo("999", "1"),
+		Power:      big1,
+		Validators: map[string]struct{}{"validator1": {}},
+	}
+}
+
+func (t *Test) NewPriceSource(deterministic bool, filled bool) *priceSource {
+	ret := newPriceSource(oracletypes.SourceChainlinkID, deterministic)
+	if filled {
+		price := t.NewPriceInfo("999", "1")
+		ret.prices = append(ret.prices, price)
+	}
+	return ret
+}
+
+func (t *Test) NewPriceValidator(filled bool) *priceValidator {
+	ret := newPriceValidator("validator1", big1)
+	if filled {
+		ps := t.NewPriceSource(true, true)
+		ret.priceSources[oracletypes.SourceChainlinkID] = ps
+	}
+	return ret
+}
+
+func (t *Test) NewRecordsDS(filled bool) *recordsDS {
+	ret := newRecordsDS()
+	if filled {
+		ret.validators["validator1"] = struct{}{}
+		ret.accumulatedPowers = big1
+		ret.records = append(ret.records, t.NewPricePower())
+	}
+	return ret
+}
+
+func (t *Test) NewRecordsDSs(filled bool) *recordsDSs {
+	ret := newRecordsDSs(th)
+	if filled {
+		rds := t.NewRecordsDS(filled)
+		ret.dsMap[oracletypes.SourceChainlinkID] = rds
+	}
+	return ret
+}
+
+func (t *Test) NewRecordsValidators(filled bool) *recordsValidators {
+	ret := newRecordsValidators()
+	if filled {
+		ret.accumulatedPower = big1
+		ret.records["validator1"] = t.NewPriceValidator(filled)
+	}
+	return ret
+}
+
+func (t *Test) NewAggregator(filled bool) *aggregator {
+	ret := newAggregator(th, defaultAggMedian)
+	if filled {
+		ret.v = t.NewRecordsValidators(filled)
+		ret.ds = t.NewRecordsDSs(filled)
+	}
+	return ret
+}
+
+func (t *Test) NewRound(c CacheReader) *round {
+	feederID := r.Intn(len(params.TokenFeeders)-1) + 1
+	round := newRound(int64(feederID), params.TokenFeeders[feederID], int64(params.MaxNonce), c, defaultAggMedian)
+	return round
+}
+
+func (f *Test) NewPriceInfo(price string, detID string) *PriceInfo {
+	return &PriceInfo{
+		Price:     price,
+		Decimal:   decimal,
+		DetID:     detID,
+		Timestamp: timestamp,
+	}
+}
diff --git a/x/oracle/keeper/feedermanagement/mock_cachereader_test.go b/x/oracle/keeper/feedermanagement/mock_cachereader_test.go
new file mode 100644
index 000000000..dd5afb4f3
--- /dev/null
+++ b/x/oracle/keeper/feedermanagement/mock_cachereader_test.go
@@ -0,0 +1,126 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/ExocoreNetwork/exocore/x/oracle/keeper/feedermanagement (interfaces: CacheReader)
+//
+// Generated by this command:
+//
+//	mockgen -destination mock_cachereader_test.go -package feedermanagement github.com/ExocoreNetwork/exocore/x/oracle/keeper/feedermanagement CacheReader
+//
+
+// Package feedermanagement is a generated GoMock package.
+package feedermanagement
+
+import (
+	big "math/big"
+	reflect "reflect"
+
+	gomock "go.uber.org/mock/gomock"
+)
+
+// MockCacheReader is a mock of CacheReader interface.
+type MockCacheReader struct {
+	ctrl     *gomock.Controller
+	recorder *MockCacheReaderMockRecorder
+	isgomock struct{}
+}
+
+// MockCacheReaderMockRecorder is the mock recorder for MockCacheReader.
+type MockCacheReaderMockRecorder struct {
+	mock *MockCacheReader
+}
+
+// NewMockCacheReader creates a new mock instance.
+func NewMockCacheReader(ctrl *gomock.Controller) *MockCacheReader {
+	mock := &MockCacheReader{ctrl: ctrl}
+	mock.recorder = &MockCacheReaderMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockCacheReader) EXPECT() *MockCacheReaderMockRecorder { + return m.recorder +} + +// GetPowerForValidator mocks base method. +func (m *MockCacheReader) GetPowerForValidator(validator string) (*big.Int, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPowerForValidator", validator) + ret0, _ := ret[0].(*big.Int) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// GetPowerForValidator indicates an expected call of GetPowerForValidator. +func (mr *MockCacheReaderMockRecorder) GetPowerForValidator(validator any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPowerForValidator", reflect.TypeOf((*MockCacheReader)(nil).GetPowerForValidator), validator) +} + +// GetThreshold mocks base method. +func (m *MockCacheReader) GetThreshold() *threshold { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetThreshold") + ret0, _ := ret[0].(*threshold) + return ret0 +} + +// GetThreshold indicates an expected call of GetThreshold. +func (mr *MockCacheReaderMockRecorder) GetThreshold() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetThreshold", reflect.TypeOf((*MockCacheReader)(nil).GetThreshold)) +} + +// GetTotalPower mocks base method. +func (m *MockCacheReader) GetTotalPower() *big.Int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTotalPower") + ret0, _ := ret[0].(*big.Int) + return ret0 +} + +// GetTotalPower indicates an expected call of GetTotalPower. +func (mr *MockCacheReaderMockRecorder) GetTotalPower() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTotalPower", reflect.TypeOf((*MockCacheReader)(nil).GetTotalPower)) +} + +// GetValidators mocks base method. +func (m *MockCacheReader) GetValidators() []string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetValidators") + ret0, _ := ret[0].([]string) + return ret0 +} + +// GetValidators indicates an expected call of GetValidators. +func (mr *MockCacheReaderMockRecorder) GetValidators() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValidators", reflect.TypeOf((*MockCacheReader)(nil).GetValidators)) +} + +// IsDeterministic mocks base method. +func (m *MockCacheReader) IsDeterministic(sournceID int64) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsDeterministic", sournceID) + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsDeterministic indicates an expected call of IsDeterministic. +func (mr *MockCacheReaderMockRecorder) IsDeterministic(sournceID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsDeterministic", reflect.TypeOf((*MockCacheReader)(nil).IsDeterministic), sournceID) +} + +// IsRuleV1 mocks base method. +func (m *MockCacheReader) IsRuleV1(feederID int64) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsRuleV1", feederID) + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsRuleV1 indicates an expected call of IsRuleV1. 
+func (mr *MockCacheReaderMockRecorder) IsRuleV1(feederID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsRuleV1", reflect.TypeOf((*MockCacheReader)(nil).IsRuleV1), feederID) +} diff --git a/x/oracle/keeper/feedermanagement/prices.go b/x/oracle/keeper/feedermanagement/prices.go new file mode 100644 index 000000000..22fb22491 --- /dev/null +++ b/x/oracle/keeper/feedermanagement/prices.go @@ -0,0 +1,348 @@ +package feedermanagement + +import ( + "errors" + "fmt" + "math/big" + "reflect" + "slices" + "sort" + + oracletypes "github.com/ExocoreNetwork/exocore/x/oracle/types" +) + +func GetPriceInfoFromProtoPriceTimeDetID(p *oracletypes.PriceTimeDetID) *PriceInfo { + return (*PriceInfo)(p) +} + +func (p *PriceInfo) ProtoPriceTimeDetID() *oracletypes.PriceTimeDetID { + return (*oracletypes.PriceTimeDetID)(p) +} + +func (p *PriceInfo) EqualDS(pi *PriceInfo) bool { + return p.Price == pi.Price && p.DetID == pi.DetID && p.Decimal == pi.Decimal +} + +func (p *PriceInfo) PriceResult() *PriceResult { + return (*PriceResult)(p) +} + +func (p *PriceResult) PriceInfo() *PriceInfo { + return (*PriceInfo)(p) +} + +func (p *PriceResult) ProtoPriceTimeRound(roundID int64, timestamp string) *oracletypes.PriceTimeRound { + return &oracletypes.PriceTimeRound{ + Price: p.Price, + Decimal: p.Decimal, + Timestamp: timestamp, + RoundID: uint64(roundID), + } +} + +func getPriceSourceFromProto(ps *oracletypes.PriceSource, checker sourceChecker) *priceSource { + prices := make([]*PriceInfo, 0, len(ps.Prices)) + for _, p := range ps.Prices { + prices = append(prices, GetPriceInfoFromProtoPriceTimeDetID(p)) + } + return &priceSource{ + // #nosec G115 + deterministic: checker.IsDeterministic(int64(ps.SourceID)), + // #nosec G115 + sourceID: int64(ps.SourceID), + prices: prices, + } +} + +func newPriceValidator(validator string, power *big.Int) *priceValidator { + return &priceValidator{ + finalPrice: nil, + validator: validator, + power: new(big.Int).Set(power), + priceSources: make(map[int64]*priceSource), + } +} + +func (pv *priceValidator) Cpy() *priceValidator { + if pv == nil { + return nil + } + var finalPrice *PriceResult + if pv.finalPrice != nil { + tmp := *pv.finalPrice + finalPrice = &tmp + } + priceSources := make(map[int64]*priceSource) + for id, ps := range pv.priceSources { + priceSources[id] = ps.Cpy() + } + return &priceValidator{ + finalPrice: finalPrice, + validator: pv.validator, + power: new(big.Int).Set(pv.power), + priceSources: priceSources, + } +} + +func (pv *priceValidator) Equals(pv2 *priceValidator) bool { + if pv == nil && pv2 == nil { + return true + } + if pv == nil || pv2 == nil { + return false + } + if pv.validator != pv2.validator || pv.power.Cmp(pv2.power) != 0 { + return false + } + if len(pv.priceSources) != len(pv2.priceSources) { + return false + } + for id, ps := range pv.priceSources { + ps2, ok := pv2.priceSources[id] + if !ok || !ps.Equals(ps2) { + return false + } + } + return true +} + +func (pv *priceValidator) GetPSCopy(sourceID int64, deterministic bool) *priceSource { + if ps, ok := pv.priceSources[sourceID]; ok { + return ps.Cpy() + } + return newPriceSource(sourceID, deterministic) +} + +func (pv *priceValidator) TryAddPriceSources(pSs []*priceSource) (updated map[int64]*priceSource, added []*priceSource, err error) { + var es errorStr + updated = make(map[int64]*priceSource) + for _, psNew := range pSs { + ps, ok := updated[psNew.sourceID] + if !ok { + ps, ok = pv.priceSources[psNew.sourceID] + if !ok { + ps = 
newPriceSource(psNew.sourceID, psNew.deterministic) + } else { + ps = ps.Cpy() + } + } + psAdded, err := ps.Add(psNew) + if err != nil { + es.add(fmt.Sprintf("sourceID:%d, error:%s", psNew.sourceID, err.Error())) + } else { + updated[psNew.sourceID] = ps + added = append(added, psAdded) + } + } + if len(updated) > 0 { + return updated, added, nil + } + return nil, nil, fmt.Errorf("failed to add priceSource listi, error:%s", es) +} + +func (pv *priceValidator) ApplyAddedPriceSources(psMap map[int64]*priceSource) { + for id, ps := range psMap { + pv.priceSources[id] = ps + } +} + +// TODO: V2: check valdiator has provided all sources required by rules(defined in oracle.params) +func (pv *priceValidator) GetFinalPrice(algo AggAlgorithm) (*PriceResult, bool) { + if pv.finalPrice != nil { + return pv.finalPrice, true + } + if len(pv.priceSources) == 0 { + return nil, false + } + keySlice := make([]int64, 0, len(pv.priceSources)) + for sourceID := range pv.priceSources { + keySlice = append(keySlice, sourceID) + } + slices.Sort(keySlice) + // safe to call multiple times + algo.Reset() + for _, sourceID := range keySlice { + price := pv.priceSources[sourceID] + if price.finalPrice == nil { + algo.Reset() + return nil, false + } + if !algo.Add(price.finalPrice) { + algo.Reset() + return nil, false + } + } + if ret := algo.GetResult(); ret != nil { + pv.finalPrice = ret + return ret, true + } + return nil, false +} + +func (pv *priceValidator) UpdateFinalPriceForDS(sourceID int64, finalPrice *PriceResult) bool { + if finalPrice == nil { + return false + } + if price, ok := pv.priceSources[sourceID]; ok { + price.finalPrice = finalPrice + return true + } + return false +} + +func newPriceSource(sourceID int64, deterministic bool) *priceSource { + return &priceSource{ + deterministic: deterministic, + finalPrice: nil, + sourceID: sourceID, + detIDs: make(map[string]struct{}), + prices: make([]*PriceInfo, 0), + } +} + +func (ps *priceSource) Equals(ps2 *priceSource) bool { + if ps == nil && ps2 == nil { + return true + } + if ps == nil || ps2 == nil { + return false + } + if ps.sourceID != ps2.sourceID || ps.deterministic != ps2.deterministic { + return false + } + if !reflect.DeepEqual(ps.detIDs, ps2.detIDs) { + return false + } + if !reflect.DeepEqual(ps.finalPrice, ps2.finalPrice) { + return false + } + if len(ps.prices) != len(ps2.prices) { + return false + } + if !reflect.DeepEqual(ps.prices, ps2.prices) { + return false + } + return true +} + +func (ps *priceSource) Cpy() *priceSource { + if ps == nil { + return nil + } + var finalPrice *PriceResult + if ps.finalPrice != nil { + tmp := *ps.finalPrice + finalPrice = &tmp + } + // deterministic, sourceID + detIDs := make(map[string]struct{}) + for detID := range ps.detIDs { + detIDs[detID] = struct{}{} + } + prices := make([]*PriceInfo, 0, len(ps.prices)) + for _, p := range ps.prices { + pCpy := *p + prices = append(prices, &pCpy) + } + return &priceSource{ + deterministic: ps.deterministic, + finalPrice: finalPrice, + sourceID: ps.sourceID, + detIDs: detIDs, + prices: prices, + } +} + +// Add adds prices of a source from priceSource +// we don't verify the input is DS or NS, it's just handled under the rule restrict by p.deterministic +func (ps *priceSource) Add(psNew *priceSource) (*priceSource, error) { + if ps.sourceID != psNew.sourceID { + return nil, fmt.Errorf("failed to add priceSource, sourceID mismatch, expected:%d, got:%d", ps.sourceID, psNew.sourceID) + } + + if !ps.deterministic { + // this is not ds, then just set the final price or 
overwrite if the input has a later timestamp + if ps.finalPrice == nil { + ps.finalPrice = psNew.prices[0].PriceResult() + ps.prices = append(ps.prices, psNew.prices[0]) + psNew.prices = psNew.prices[:1] + return psNew, nil + } + // equivalent to After, just overwrite the old value + if psNew.prices[0].Timestamp > ps.finalPrice.Timestamp { + ps.finalPrice = psNew.prices[0].PriceResult() + ps.prices = append(ps.prices, psNew.prices[0]) + psNew.prices = psNew.prices[:1] + return ps, nil + } + return nil, errors.New("failed to add ProtoPriceSource for NS, timestamp is old") + } + + var es errorStr + added := false + ret := &priceSource{ + deterministic: ps.deterministic, + sourceID: ps.sourceID, + prices: make([]*PriceInfo, 0), + } + for _, pNew := range psNew.prices { + if _, ok := ps.detIDs[pNew.DetID]; ok { + es.add(fmt.Sprintf("duplicated DetID:%s", pNew.DetID)) + continue + } + added = true + ps.detIDs[pNew.DetID] = struct{}{} + ps.prices = append(ps.prices, pNew) + ret.prices = append(ret.prices, pNew) + } + + if !added { + return nil, fmt.Errorf("failed to add ProtoPriceSource, sourceID:%d, errors:%s", ps.sourceID, es) + } + + sort.Slice(ps.prices, func(i, j int) bool { + return ps.prices[i].DetID < ps.prices[j].DetID + }) + return ret, nil +} + +func (p *PricePower) Equals(p2 *PricePower) bool { + if p == nil && p2 == nil { + return true + } + if p == nil || p2 == nil { + return false + } + if !reflect.DeepEqual(p.Price, p2.Price) || p.Power.Cmp(p2.Power) != 0 { + return false + } + if len(p.Validators) != len(p2.Validators) { + return false + } + for v := range p.Validators { + if _, ok := p2.Validators[v]; !ok { + return false + } + } + return true +} + +func (p *PricePower) Cpy() *PricePower { + price := *p.Price + validators := make(map[string]struct{}) + for v := range p.Validators { + validators[v] = struct{}{} + } + return &PricePower{ + Price: &price, + Power: new(big.Int).Set(p.Power), + Validators: validators, + } +} + +type errorStr string + +func (e *errorStr) add(s string) { + es := string(*e) + *e = errorStr(fmt.Sprintf("%s[%s]", es, s)) +} diff --git a/x/oracle/keeper/feedermanagement/round.go b/x/oracle/keeper/feedermanagement/round.go new file mode 100644 index 000000000..85d41b533 --- /dev/null +++ b/x/oracle/keeper/feedermanagement/round.go @@ -0,0 +1,259 @@ +package feedermanagement + +import ( + "fmt" + + oracletypes "github.com/ExocoreNetwork/exocore/x/oracle/types" +) + +func newRound(feederID int64, tokenFeeder *oracletypes.TokenFeeder, quoteWindowSize int64, cache CacheReader, algo AggAlgorithm) *round { + return &round{ + // #nosec G115 + startBaseBlock: int64(tokenFeeder.StartBaseBlock), + // #nosec G115 + startRoundID: int64(tokenFeeder.StartRoundID), + // #nosec G115 + endBlock: int64(tokenFeeder.EndBlock), + // #nosec G115 + interval: int64(tokenFeeder.Interval), + quoteWindowSize: quoteWindowSize, + feederID: feederID, + // #nosec G115 + tokenID: int64(tokenFeeder.TokenID), + cache: cache, + + // default value + status: roundStatusClosed, + a: nil, + roundBaseBlock: 0, + roundID: 0, + algo: algo, + } +} + +func (r *round) Equals(r2 *round) bool { + if r == nil && r2 == nil { + return true + } + if r == nil || r2 == nil { + return false + } + if r.startBaseBlock != r2.startBaseBlock || + r.startRoundID != r2.startRoundID || + r.endBlock != r2.endBlock || + r.interval != r2.interval || + r.quoteWindowSize != r2.quoteWindowSize || + r.feederID != r2.feederID || + r.tokenID != r2.tokenID || + r.roundBaseBlock != r2.roundBaseBlock || + r.roundID != r2.roundID 
|| + r.status != r2.status { + return false + } + if !r.a.Equals(r2.a) { + return false + } + + return true +} + +func (r *round) CopyForCheckTx() *round { + // flags has been taken care of + ret := *r + // cache does not need to be copied since it's a readonly interface, + // and there's no race condition since abci requests are not executing concurrntly + ret.a = ret.a.CopyForCheckTx() + return &ret +} + +func (r *round) getMsgItemFromProto(msg *oracletypes.MsgItem) (*MsgItem, error) { + power, found := r.cache.GetPowerForValidator(msg.Validator) + if !found { + return nil, fmt.Errorf("failed to get power for validator:%s", msg.Validator) + } + priceSources := make([]*priceSource, 0, len(msg.PSources)) + for _, ps := range msg.PSources { + priceSources = append(priceSources, getPriceSourceFromProto(ps, r.cache)) + } + return &MsgItem{ + // #nosec G115 + FeederID: int64(msg.FeederID), + Validator: msg.Validator, + Power: power, + PriceSources: priceSources, + }, nil +} + +func (r *round) ValidQuotingBaseBlock(height int64) bool { + return r.IsQuotingWindowOpen() && r.roundBaseBlock == height +} + +// Tally process information to get the final price +// it does not verify if the msg is for the corresponding round(roundid/roundBaseBlock) +// TODO: use valid value instead of the original protoMsg in return +func (r *round) Tally(protoMsg *oracletypes.MsgItem) (*PriceResult, *oracletypes.MsgItem, error) { + if !r.IsQuotingWindowOpen() { + return nil, nil, fmt.Errorf("quoting window is not open, feederID:%d", r.feederID) + } + + msg, err := r.getMsgItemFromProto(protoMsg) + if err != nil { + return nil, nil, fmt.Errorf("failed to get msgItem from proto, error:%w", err) + } + if !r.IsQuoting() { + // record msg for 'handlQuotingMisBehavior' + err := r.a.RecordMsg(msg) + if err == nil { + return nil, protoMsg, oracletypes.ErrQuoteRecorded + } + return nil, nil, fmt.Errorf("failed to record quote for aggregated round, error:%w", err) + } + + err = r.a.AddMsg(msg) + if err != nil { + return nil, nil, fmt.Errorf("failed to add quote for aggregation of feederID:%d, roundID:%d, error:%w", r.feederID, r.roundID, err) + } + + finalPrice, ok := r.FinalPrice() + if ok { + r.status = roundStatusCommittable + // NOTE: for V1, we need return the DetID as well since chainlink is the only source + if r.cache.IsRuleV1(r.feederID) { + detID := r.getFinalDetIDForSourceID(oracletypes.SourceChainlinkID) + finalPrice.DetID = detID + } + return finalPrice, protoMsg, nil + } + + return nil, protoMsg, nil +} + +func (r *round) UpdateParams(tokenFeeder *oracletypes.TokenFeeder, quoteWindowSize int64) { + // #nosec G115 + r.startBaseBlock = int64(tokenFeeder.StartBaseBlock) + // #nosec G115 + r.endBlock = int64(tokenFeeder.EndBlock) + // #nosec G115 + r.interval = int64(tokenFeeder.Interval) + r.quoteWindowSize = quoteWindowSize +} + +// PrepareForNextBlock sets status to Open and create a new aggregator on the block before the first block of quoting +func (r *round) PrepareForNextBlock(currentHeight int64) (open bool) { + if currentHeight < r.roundBaseBlock && r.IsQuoting() { + r.closeQuotingWindow() + return open + } + // currentHeight euqls to baseBlock + if currentHeight == r.roundBaseBlock && !r.IsQuoting() { + r.openQuotingWindow() + open = true + return open + } + baseBlock, roundID, delta, expired := r.getPosition(currentHeight) + + if expired && r.IsQuoting() { + r.closeQuotingWindow() + return open + } + // open a new round + if baseBlock > r.roundBaseBlock { + // move to next round + r.roundBaseBlock = 
baseBlock + r.roundID = roundID + // the first block in the quoting window + if delta == 0 && !r.IsQuoting() { + r.openQuotingWindow() + open = true + } + } + return open +} + +func (r *round) openQuotingWindow() { + r.status = roundStatusOpen + r.algo.Reset() + r.a = newAggregator(r.cache.GetThreshold(), r.algo) +} + +// IsQuotingWindowOpen returns if the round is inside its current quoting window including status of {open, committable, close} +func (r *round) IsQuotingWindowOpen() bool { + // aggregator is set when quoting window open and removed when the window reaches the end or be force sealed + return r.a != nil +} + +func (r *round) IsQuotingWindowEnd(currentHeight int64) bool { + _, _, delta, _ := r.getPosition(currentHeight) + return delta == r.quoteWindowSize +} + +func (r *round) IsQuoting() bool { + return r.status == roundStatusOpen +} + +func (r *round) FinalPrice() (*PriceResult, bool) { + if r.a == nil { + return nil, false + } + return r.a.GetFinalPrice() +} + +// Close sets round status to roundStatusClosed and remove current aggregator +func (r *round) closeQuotingWindow() { + r.status = roundStatusClosed + r.a = nil +} + +func (r *round) PerformanceReview(validator string) (miss, malicious bool) { + finalPrice, ok := r.FinalPrice() + if !ok { + return + } + if !r.cache.IsRuleV1(r.feederID) { + // only rulev1 is supported for now + return + } + miss = true + detID := r.getFinalDetIDForSourceID(oracletypes.SourceChainlinkID) + price := finalPrice.PriceInfo() + price.DetID = detID + prices, ok := r.a.v.GetValidatorQuotePricesForSourceID(validator, oracletypes.SourceChainlinkID) + if !ok { + return + } + for _, p := range prices { + if p.EqualDS(price) { + miss = false + } else if p.DetID == price.DetID { + miss = false + malicious = true + } + } + return +} + +//nolint:unparam +func (r *round) getFinalDetIDForSourceID(sourceID int64) string { + return r.a.ds.GetFinalDetIDForSourceID(sourceID) +} + +func (r *round) Committable() bool { + return r.status == roundStatusCommittable +} + +func (r *round) getPosition(currentHeight int64) (baseBlock, roundID, delta int64, expired bool) { + // endBlock is included + if r.endBlock > 0 && currentHeight > r.endBlock { + expired = true + return + } + if currentHeight < r.startBaseBlock { + return + } + delta = currentHeight - r.startBaseBlock + rounds := delta / r.interval + roundID = r.startRoundID + rounds + delta -= rounds * r.interval + baseBlock = currentHeight - delta + return +} diff --git a/x/oracle/keeper/feedermanagement/types.go b/x/oracle/keeper/feedermanagement/types.go new file mode 100644 index 000000000..adfa9196f --- /dev/null +++ b/x/oracle/keeper/feedermanagement/types.go @@ -0,0 +1,225 @@ +package feedermanagement + +import ( + "math/big" + "sort" + + "github.com/ExocoreNetwork/exocore/x/oracle/keeper/common" + oracletypes "github.com/ExocoreNetwork/exocore/x/oracle/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +type Submitter interface { + SetValidatorUpdateForCache(sdk.Context, oracletypes.ValidatorUpdateBlock) + SetParamsForCache(sdk.Context, oracletypes.RecentParams) + SetMsgItemsForCache(sdk.Context, oracletypes.RecentMsg) +} + +type CacheReader interface { + GetPowerForValidator(validator string) (*big.Int, bool) + GetTotalPower() (totalPower *big.Int) + GetValidators() []string + IsRuleV1(feederID int64) bool + IsDeterministic(sournceID int64) bool + GetThreshold() *threshold +} + +// used to track validator change +type cacheValidator struct { + validators map[string]*big.Int + update bool +} + +// 
used to track params change +type cacheParams struct { + params *oracletypes.Params + update bool +} + +type cacheMsgs []*oracletypes.MsgItem + +type caches struct { + k Submitter + + msg *cacheMsgs + validators *cacheValidator + params *cacheParams +} + +type MsgItem struct { + FeederID int64 + Validator string + Power *big.Int + PriceSources []*priceSource +} + +type PriceInfo oracletypes.PriceTimeDetID + +type PricePower struct { + Price *PriceInfo + Power *big.Int + Validators map[string]struct{} +} + +// type PriceResult oracletypes.PriceTimeRound +type PriceResult PriceInfo + +type priceSource struct { + deterministic bool + finalPrice *PriceResult + sourceID int64 + detIDs map[string]struct{} + // ordered by detID + prices []*PriceInfo +} + +type priceValidator struct { + finalPrice *PriceResult + validator string + power *big.Int + // each source will get a single final price independently, the order of sources does not matter, map is safe + priceSources map[int64]*priceSource +} + +type recordsValidators struct { + finalPrice *PriceResult + finalPrices map[string]*PriceResult + // TODO: V2: accumulatedValidPower only includes validators who prividing all sources required by rules(defined in oracle.Params) + // accumulatedValidVpower: map[string]*big.Int + accumulatedPower *big.Int + // each validator will get a single final price independently, the order of validators does not matter, map is safe + records map[string]*priceValidator +} + +// price records for deteministic source +type recordsDS struct { + finalPrice *PriceResult + // TODO: remove this + finalDetID string + accumulatedPowers *big.Int + validators map[string]struct{} + // ordered by detID + records []*PricePower +} + +// each source will get a final price independently, the order of sources does not matter, map is safe +// type recordsDSMap map[int64]*recordsDS +type recordsDSs struct { + t *threshold + dsMap map[int64]*recordsDS +} + +type threshold struct { + totalPower *big.Int + thresholdA *big.Int + thresholdB *big.Int +} + +func (t *threshold) Equals(t2 *threshold) bool { + if t == nil && t2 == nil { + return true + } + if t == nil || t2 == nil { + return false + } + return t.totalPower.Cmp(t2.totalPower) == 0 && t.thresholdA.Cmp(t2.thresholdA) == 0 && t.thresholdB.Cmp(t2.thresholdB) == 0 +} + +func (t *threshold) Cpy() *threshold { + return &threshold{ + totalPower: new(big.Int).Set(t.totalPower), + thresholdA: new(big.Int).Set(t.thresholdA), + thresholdB: new(big.Int).Set(t.thresholdB), + } +} + +func (t *threshold) Exceeds(power *big.Int) bool { + return new(big.Int).Mul(t.thresholdB, power).Cmp(new(big.Int).Mul(t.thresholdA, t.totalPower)) > 0 +} + +type aggregator struct { + t *threshold + finalPrice *PriceResult + v *recordsValidators + ds *recordsDSs + algo AggAlgorithm +} +type roundStatus int32 + +const ( + // define closed as default value 0 + roundStatusClosed roundStatus = iota + roundStatusOpen + roundStatusCommittable +) + +type round struct { + startBaseBlock int64 + startRoundID int64 + endBlock int64 + interval int64 + quoteWindowSize int64 + + feederID int64 + tokenID int64 + + roundBaseBlock int64 + roundID int64 + status roundStatus + a *aggregator + cache CacheReader + algo AggAlgorithm +} + +type orderedSliceInt64 []int64 + +func (osi orderedSliceInt64) Equals(o2 orderedSliceInt64) bool { + if len(osi) == 0 && len(o2) == 0 { + return true + } + if len(osi) == 0 || len(o2) == 0 { + return false + } + for idx, v := range osi { + if v != (o2)[idx] { + return false + } + } + return true +} + 
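+// add inserts i while keeping the slice sorted in ascending order, so ranging over feeder IDs stays deterministic.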
+func (osi *orderedSliceInt64) add(i int64) { + result := append(*osi, i) + sort.Slice(result, func(i, j int) bool { + return result[i] < result[j] + }) + *osi = result +} + +func (osi *orderedSliceInt64) remove(i int64) { + for idx, v := range *osi { + if v == i { + *osi = append((*osi)[:idx], (*osi)[idx+1:]...) + return + } + } +} + +func (osi *orderedSliceInt64) sort() { + sort.Slice(*osi, func(i, j int) bool { + return (*osi)[i] < (*osi)[j] + }) +} + +type FeederManager struct { + fCheckTx *FeederManager + k common.KeeperOracle + sortedFeederIDs orderedSliceInt64 + // this will not be ranged, map is safe + rounds map[int64]*round + cs *caches + paramsUpdated bool + validatorsUpdated bool + forceSeal bool + resetSlashing bool +} diff --git a/x/oracle/keeper/keeper.go b/x/oracle/keeper/keeper.go index 6738b8b37..97ca41238 100644 --- a/x/oracle/keeper/keeper.go +++ b/x/oracle/keeper/keeper.go @@ -11,20 +11,12 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/aggregator" - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/cache" "github.com/ExocoreNetwork/exocore/x/oracle/keeper/common" + "github.com/ExocoreNetwork/exocore/x/oracle/keeper/feedermanagement" "github.com/ExocoreNetwork/exocore/x/oracle/types" ) type ( - memoryStore struct { - cs *cache.Cache - agc *aggregator.AggregatorContext - agcCheckTx *aggregator.AggregatorContext - updatedFeederIDs []string - } - Keeper struct { cdc codec.BinaryCodec storeKey storetypes.StoreKey @@ -36,7 +28,8 @@ type ( assetsKeeper types.AssetsKeeper types.SlashingKeeper // wrap all four memory cache into one pointer to track them among cpoies of Keeper (msgServer, module) - memStore *memoryStore + // TODO: remove this + *feedermanagement.FeederManager } ) @@ -62,7 +55,7 @@ func NewKeeper( ps = ps.WithKeyTable(types.ParamKeyTable()) } - return Keeper{ + ret := Keeper{ cdc: cdc, storeKey: storeKey, memKey: memKey, @@ -72,8 +65,11 @@ func NewKeeper( assetsKeeper: assetsKeeper, authority: authority, SlashingKeeper: slashingKeeper, - memStore: new(memoryStore), + // fm: feedermanagement.NewFeederManager(nil), + FeederManager: feedermanagement.NewFeederManager(nil), } + ret.SetKeeper(ret) + return ret } func (k Keeper) Logger(ctx sdk.Context) log.Logger { diff --git a/x/oracle/keeper/keeper_suite_test.go b/x/oracle/keeper/keeper_suite_test.go index 3bc01c6b0..10023e65e 100644 --- a/x/oracle/keeper/keeper_suite_test.go +++ b/x/oracle/keeper/keeper_suite_test.go @@ -67,7 +67,6 @@ func TestKeeper(t *testing.T) { suite.Run(t, ks) - resetSingle(ks.App.OracleKeeper) RegisterFailHandler(Fail) RunSpecs(t, "Keeper Suite") } @@ -76,10 +75,10 @@ func (suite *KeeperSuite) Reset() { p4Test := types.DefaultParams() p4Test.TokenFeeders[1].StartBaseBlock = 1 suite.k.SetParams(suite.ctx, p4Test) + suite.k.FeederManager.SetNilCaches() + suite.k.FeederManager.BeginBlock(suite.ctx) suite.ctx = suite.ctx.WithBlockHeight(12) - suite.ctrl = gomock.NewController(suite.t) - resetSingle(suite.App.OracleKeeper) } func (suite *KeeperSuite) SetupTest() { @@ -101,7 +100,6 @@ func (suite *KeeperSuite) SetupTest() { validators := suite.ValSet.Validators suite.valAddr1, _ = sdk.ValAddressFromBech32(sdk.ValAddress(validators[0].Address).String()) suite.valAddr2, _ = sdk.ValAddressFromBech32(sdk.ValAddress(validators[1].Address).String()) - resetSingle(suite.App.OracleKeeper) suite.k = suite.App.OracleKeeper suite.ms = keeper.NewMsgServerImpl(suite.App.OracleKeeper) @@ -111,9 
+109,5 @@ func (suite *KeeperSuite) SetupTest() { p4Test.TokenFeeders[1].StartBaseBlock = 1 suite.k.SetParams(suite.ctx, p4Test) suite.ctx = suite.ctx.WithBlockHeight(12) -} - -func resetSingle(k keeper.Keeper) { - k.ResetAggregatorContext() - k.ResetCache() + suite.k.FeederManager.BeginBlock(suite.ctx) } diff --git a/x/oracle/keeper/msg_server_create_price.go b/x/oracle/keeper/msg_server_create_price.go index a2c903abb..b83487f06 100644 --- a/x/oracle/keeper/msg_server_create_price.go +++ b/x/oracle/keeper/msg_server_create_price.go @@ -2,10 +2,13 @@ package keeper import ( "context" - "errors" + "crypto/sha256" + "encoding/base64" "strconv" + "strings" "time" + sdkerrors "cosmossdk.io/errors" "github.com/ExocoreNetwork/exocore/x/oracle/types" sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -24,78 +27,66 @@ func (ms msgServer) CreatePrice(goCtx context.Context, msg *types.MsgCreatePrice defer func() { ctx = ctx.WithGasMeter(gasMeter) }() - logger := ms.Keeper.Logger(ctx) + + logger := ms.Logger(ctx) + + validator, _ := types.ConsAddrStrFromCreator(msg.Creator) + logQuote := []interface{}{"feederID", msg.FeederID, "baseBlock", msg.BasedBlock, "proposer", validator, "msg-nonce", msg.Nonce, "height", ctx.BlockHeight()} + if err := checkTimestamp(ctx, msg); err != nil { - logger.Info("price proposal timestamp check failed", "error", err, "height", ctx.BlockHeight()) + logger.Error("quote has invalid timestamp", append(logQuote, "error", err)...) return nil, types.ErrPriceProposalFormatInvalid.Wrap(err.Error()) } - agc := ms.Keeper.GetAggregatorContext(ctx) - newItem, caches, err := agc.NewCreatePrice(ctx, msg) + if err := ms.ValidateMsg(msg); err != nil { + logger.Error("failed to validate msg", append(logQuote, "error", err)...) + return nil, err + } + // core logic of price aggregation + finalPrice, err := ms.ProcessQuote(ctx, msg, ctx.IsCheckTx()) if err != nil { - logger.Info("price proposal failed", "error", err, "height", ctx.BlockHeight(), "feederID", msg.FeederID) + if sdkerrors.IsOf(err, types.ErrQuoteRecorded) { + // the quote is only recorded; this happens when the quoting window is no longer available because the final price was already aggregated successfully before the window ended + // we will still record this msg if it's valid + logger.Info("recorded quote for oracle-behavior evaluation", logQuote...) + return &types.MsgCreatePriceResponse{}, nil + } + logger.Error("failed to process quote", append(logQuote, "error", err)...) return nil, err } - logger.Info("add price proposal for aggregation", "feederID", msg.FeederID, "basedBlock", msg.BasedBlock, "proposer", msg.Creator, "height", ctx.BlockHeight()) - + logger.Info("added quote for aggregation", logQuote...)
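+ // emit an event recording the accepted quote (feederID, basedBlock, proposer)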
+ // TODO: use another type ctx.EventManager().EmitEvent(sdk.NewEvent( types.EventTypeCreatePrice, sdk.NewAttribute(types.AttributeKeyFeederID, strconv.FormatUint(msg.FeederID, 10)), sdk.NewAttribute(types.AttributeKeyBasedBlock, strconv.FormatUint(msg.BasedBlock, 10)), - sdk.NewAttribute(types.AttributeKeyProposer, msg.Creator), - ), - ) + sdk.NewAttribute(types.AttributeKeyProposer, validator), + )) - if caches == nil { - return &types.MsgCreatePriceResponse{}, nil - } - if newItem != nil { - if success := ms.AppendPriceTR(ctx, newItem.TokenID, newItem.PriceTR); !success { - // This case should not exist, keep this line to avoid consensus fail if this happens - prevPrice, nextRoundID := ms.GrowRoundID(ctx, newItem.TokenID) - logger.Error("append new price round fail for mismatch roundID, and will just grow roundID with previous price", "roundID from finalPrice", newItem.PriceTR.RoundID, "expect nextRoundID", nextRoundID, "prevPrice", prevPrice) - } else { - logger.Info("final price aggregation done", "feederID", msg.FeederID, "roundID", newItem.PriceTR.RoundID, "price", newItem.PriceTR.Price) + if finalPrice != nil { + logger.Info("final price successfully aggregated", "price", finalPrice, "feederID", msg.FeederID, "height", ctx.BlockHeight()) + decimalStr := strconv.FormatInt(int64(finalPrice.Decimal), 10) + // #nosec G115 + tokenID, _ := ms.GetTokenIDForFeederID(int64(msg.FeederID)) + tokenIDStr := strconv.FormatInt(tokenID, 10) + roundIDStr := strconv.FormatUint(finalPrice.RoundID, 10) + priceStr := finalPrice.Price + + if len(priceStr) >= 32 { + hash := sha256.New() + hash.Write([]byte(priceStr)) + priceStr = base64.StdEncoding.EncodeToString(hash.Sum(nil)) } - decimalStr := strconv.FormatInt(int64(newItem.PriceTR.Decimal), 10) - tokenIDStr := strconv.FormatUint(newItem.TokenID, 10) - roundIDStr := strconv.FormatUint(newItem.PriceTR.RoundID, 10) + // emit event to tell price is updated for current round of corresponding feederID ctx.EventManager().EmitEvent(sdk.NewEvent( types.EventTypeCreatePrice, sdk.NewAttribute(types.AttributeKeyRoundID, roundIDStr), - sdk.NewAttribute(types.AttributeKeyFinalPrice, tokenIDStr+"_"+roundIDStr+"_"+newItem.PriceTR.Price+"_"+decimalStr), + sdk.NewAttribute(types.AttributeKeyFinalPrice, strings.Join([]string{tokenIDStr, roundIDStr, priceStr, decimalStr}, "_")), sdk.NewAttribute(types.AttributeKeyPriceUpdated, types.AttributeValuePriceUpdatedSuccess)), ) - if !ctx.IsCheckTx() { - ms.Keeper.GetCaches().RemoveCache(caches) - ms.Keeper.AppendUpdatedFeederIDs(msg.FeederID) - } - } else if !ctx.IsCheckTx() { - ms.Keeper.GetCaches().AddCache(caches) } return &types.MsgCreatePriceResponse{}, nil } - -func checkTimestamp(goCtx context.Context, msg *types.MsgCreatePrice) error { - ctx := sdk.UnwrapSDKContext(goCtx) - now := ctx.BlockTime().UTC() - for _, ps := range msg.Prices { - for _, price := range ps.Prices { - ts := price.Timestamp - if len(ts) == 0 { - return errors.New("timestamp should not be empty") - } - t, err := time.ParseInLocation(layout, ts, time.UTC) - if err != nil { - return errors.New("timestamp format invalid") - } - if now.Add(maxFutureOffset).Before(t) { - return errors.New("timestamp is in the future") - } - } - } - return nil -} diff --git a/x/oracle/keeper/msg_server_create_price_test.go b/x/oracle/keeper/msg_server_create_price_test.go deleted file mode 100644 index 3b5b3121f..000000000 --- a/x/oracle/keeper/msg_server_create_price_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package keeper_test - -import ( - reflect "reflect" - - math 
"cosmossdk.io/math" - dogfoodkeeper "github.com/ExocoreNetwork/exocore/x/dogfood/keeper" - dogfoodtypes "github.com/ExocoreNetwork/exocore/x/dogfood/types" - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/cache" - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/testdata" - "github.com/ExocoreNetwork/exocore/x/oracle/types" - . "github.com/agiledragon/gomonkey/v2" - sdk "github.com/cosmos/cosmos-sdk/types" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -//go:generate mockgen -destination mock_validator_test.go -package keeper_test github.com/cosmos/cosmos-sdk/x/staking/types ValidatorI - -var _ = Describe("MsgCreatePrice", func() { - var c *cache.Cache - var p *Patches - BeforeEach(func() { - ks.Reset() - Expect(ks.ms).ToNot(BeNil()) - - // TODO: remove monkey patch for test - p = ApplyMethod(reflect.TypeOf(dogfoodkeeper.Keeper{}), "GetLastTotalPower", func(k dogfoodkeeper.Keeper, ctx sdk.Context) math.Int { return math.NewInt(3) }) - p.ApplyMethod(reflect.TypeOf(dogfoodkeeper.Keeper{}), "GetAllExocoreValidators", func(k dogfoodkeeper.Keeper, ctx sdk.Context) []dogfoodtypes.ExocoreValidator { - return []dogfoodtypes.ExocoreValidator{ - { - Address: ks.mockValAddr1, - Power: 1, - }, - { - Address: ks.mockValAddr2, - Power: 1, - }, - { - Address: ks.mockValAddr3, - Power: 1, - }, - } - }) - - Expect(ks.ctx.BlockHeight()).To(Equal(int64(12))) - }) - - AfterEach(func() { - ks.ctrl.Finish() - if p != nil { - p.Reset() - } - }) - - Context("3 validators with 1 voting power each", func() { - BeforeEach(func() { - ks.ms.CreatePrice(ks.ctx, &types.MsgCreatePrice{ - Creator: ks.mockConsAddr1.String(), - FeederID: 1, - Prices: testdata.PS1, - BasedBlock: 11, - Nonce: 1, - }) - - c = ks.App.OracleKeeper.GetCaches() - // c = ks.ms.Keeper.GetCaches() - var pRes cache.ItemP - c.GetCache(&pRes) - p4Test := types.DefaultParams() - p4Test.TokenFeeders[1].StartBaseBlock = 1 - Expect(pRes).Should(BeEquivalentTo(p4Test)) - }) - - It("success on 3rd message", func() { - iRes := make([]*cache.ItemM, 0) - c.GetCache(&iRes) - Expect(iRes[0].Validator).Should(Equal(ks.mockConsAddr1.String())) - - ks.ms.CreatePrice(ks.ctx, &types.MsgCreatePrice{ - Creator: ks.mockConsAddr2.String(), - FeederID: 1, - Prices: testdata.PS2, - BasedBlock: 11, - Nonce: 1, - }, - ) - ks.ms.CreatePrice(ks.ctx, &types.MsgCreatePrice{}) - c.GetCache(&iRes) - Expect(len(iRes)).Should(Equal(2)) - - ks.ms.CreatePrice(ks.ctx, &types.MsgCreatePrice{ - Creator: ks.mockConsAddr3.String(), - FeederID: 1, - Prices: testdata.PS4, - BasedBlock: 11, - Nonce: 1, - }, - ) - c.GetCache(&iRes) - Expect(len(iRes)).Should(Equal(0)) - prices := ks.k.GetAllPrices(sdk.UnwrapSDKContext(ks.ctx)) - Expect(prices[0]).Should(BeEquivalentTo(types.Prices{ - TokenID: 1, - NextRoundID: 3, - PriceList: []*types.PriceTimeRound{ - { - Price: "1", - Decimal: 0, - Timestamp: "", - RoundID: 1, - }, - { - Price: testdata.PTD2.Price, - Decimal: testdata.PTD2.Decimal, - Timestamp: prices[0].PriceList[1].Timestamp, - RoundID: 2, - }, - }, - })) - }) - }) -}) diff --git a/x/oracle/keeper/msg_server_update_params.go b/x/oracle/keeper/msg_server_update_params.go index 87f35f2c4..3a8051303 100644 --- a/x/oracle/keeper/msg_server_update_params.go +++ b/x/oracle/keeper/msg_server_update_params.go @@ -4,7 +4,6 @@ import ( "context" utils "github.com/ExocoreNetwork/exocore/utils" - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/cache" "github.com/ExocoreNetwork/exocore/x/oracle/types" sdk "github.com/cosmos/cosmos-sdk/types" govtypes 
"github.com/cosmos/cosmos-sdk/x/gov/types" @@ -66,7 +65,8 @@ func (ms msgServer) UpdateParams(goCtx context.Context, msg *types.MsgUpdatePara } // set updated new params ms.SetParams(ctx, p) - _ = ms.Keeper.GetAggregatorContext(ctx) - ms.Keeper.GetCaches().AddCache(cache.ItemP(p)) + if !ctx.IsCheckTx() { + ms.SetParamsUpdated() + } return &types.MsgUpdateParamsResponse{}, nil } diff --git a/x/oracle/keeper/native_token.go b/x/oracle/keeper/native_token.go index e67b9beb0..d111c1891 100644 --- a/x/oracle/keeper/native_token.go +++ b/x/oracle/keeper/native_token.go @@ -254,11 +254,13 @@ func (k Keeper) UpdateNSTValidatorListForStaker(ctx sdk.Context, assetID, staker } else { eventValue = fmt.Sprintf("%s_%s", types.AttributeValueNativeTokenWithdraw, eventValue) } - // emit an event to tell a new valdiator added/or a validator is removed for the staker + // emit an event to tell the details that a new valdiator added/or a validator is removed for the staker + // deposit_stakerID_validatorKey ctx.EventManager().EmitEvent(sdk.NewEvent( types.EventTypeCreatePrice, sdk.NewAttribute(types.AttributeKeyNativeTokenChange, eventValue), )) + return nil } @@ -295,13 +297,10 @@ func (k Keeper) UpdateNSTByBalanceChange(ctx sdk.Context, assetID string, rawDat newBalance = *(stakerInfo.BalanceList[length-1]) } newBalance.Block = uint64(ctx.BlockHeight()) - if newBalance.RoundID == roundID { - newBalance.Index++ - } else { - newBalance.RoundID = roundID - newBalance.Index = 0 - } + // we set index as a global reference used through all rounds + newBalance.Index++ newBalance.Change = types.Action_ACTION_SLASH_REFUND + newBalance.RoundID = roundID // balance update are based on initial/max effective balance: 32 maxBalance := maxEffectiveBalance(assetID) * (len(stakerInfo.ValidatorPubkeyList)) balance := maxBalance + change diff --git a/x/oracle/keeper/native_token_test.go b/x/oracle/keeper/native_token_test.go index ec02bb94c..234575ecb 100644 --- a/x/oracle/keeper/native_token_test.go +++ b/x/oracle/keeper/native_token_test.go @@ -83,6 +83,7 @@ func (ks *KeeperSuite) TestNSTLifeCycleOneStaker() { stakerInfo = ks.App.OracleKeeper.GetStakerInfo(ks.Ctx, assetID, stakerStr) ks.Equal(types.BalanceInfo{ Block: 1, + Index: 1, RoundID: 9, Change: types.Action_ACTION_SLASH_REFUND, // this is expected to be 32-10=22, not 100-10 @@ -121,7 +122,7 @@ func (ks *KeeperSuite) TestNSTLifeCycleOneStaker() { ks.Equal(types.BalanceInfo{ Block: 1, RoundID: 9, - Index: 1, + Index: 2, Change: types.Action_ACTION_DEPOSIT, Balance: 54, }, *stakerInfo.BalanceList[2]) @@ -141,7 +142,7 @@ func (ks *KeeperSuite) TestNSTLifeCycleOneStaker() { Balance: 59, Block: 1, RoundID: 11, - Index: 0, + Index: 3, Change: types.Action_ACTION_SLASH_REFUND, }, *stakerInfo.BalanceList[3]) // check stakerAssetInfo is updated correctly in assets module, this should be triggered in assets module by oracle module's UpdateNSTByBalanceChange @@ -162,7 +163,7 @@ func (ks *KeeperSuite) TestNSTLifeCycleOneStaker() { Balance: 29, Block: 1, RoundID: 11, - Index: 1, + Index: 4, Change: types.Action_ACTION_WITHDRAW, }, *stakerInfo.BalanceList[4]) // withdraw will remove this validator diff --git a/x/oracle/keeper/nonce.go b/x/oracle/keeper/nonce.go index 447d8133b..ddd1509f6 100644 --- a/x/oracle/keeper/nonce.go +++ b/x/oracle/keeper/nonce.go @@ -110,6 +110,7 @@ func (k Keeper) RemoveNonceWithFeederIDForAll(ctx sdk.Context, feederID uint64) // CheckAndIncreaseNonce check and increase the nonce for a specific validator and feederID func (k Keeper) 
CheckAndIncreaseNonce(ctx sdk.Context, validator string, feederID uint64, nonce uint32) (prevNonce uint32, err error) { + // #nosec G115 // safe conversion if nonce > uint32(common.MaxNonce) { return 0, fmt.Errorf("nonce_check_failed: max_exceeded: limit=%d received=%d", common.MaxNonce, nonce) } diff --git a/x/oracle/keeper/params.go b/x/oracle/keeper/params.go index 9f7ccff5a..6a189b9db 100644 --- a/x/oracle/keeper/params.go +++ b/x/oracle/keeper/params.go @@ -5,7 +5,6 @@ import ( "strconv" "strings" - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/cache" "github.com/ExocoreNetwork/exocore/x/oracle/types" sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -40,6 +39,7 @@ func (k Keeper) RegisterNewTokenAndSetTokenFeeder(ctx sdk.Context, oInfo *types. chainID := uint64(0) for id, c := range p.Chains { if c.Name == oInfo.Chain.Name { + // #nosec G115 chainID = uint64(id) break } @@ -50,6 +50,7 @@ func (k Keeper) RegisterNewTokenAndSetTokenFeeder(ctx sdk.Context, oInfo *types. Name: oInfo.Chain.Name, Desc: oInfo.Chain.Desc, }) + // #nosec G115 chainID = uint64(len(p.Chains) - 1) } decimalInt, err := strconv.ParseInt(oInfo.Token.Decimal, 10, 32) @@ -67,16 +68,18 @@ func (k Keeper) RegisterNewTokenAndSetTokenFeeder(ctx sdk.Context, oInfo *types. intervalInt = defaultInterval } + defer func() { + if !ctx.IsCheckTx() { + k.SetParamsUpdated() + } + }() + for _, t := range p.Tokens { // token exists, bind assetID for this token // it's possible for one price bonded with multiple assetID, like ETHUSDT from sepolia/mainnet if t.Name == oInfo.Token.Name && t.ChainID == chainID { t.AssetID = strings.Join([]string{t.AssetID, oInfo.AssetID}, ",") k.SetParams(ctx, p) - if !ctx.IsCheckTx() { - _ = k.GetAggregatorContext(ctx) - k.GetCaches().AddCache(cache.ItemP(p)) - } // there should have been existing tokenFeeder running(currently we register tokens from assets-module and with infinite endBlock) return nil } @@ -94,10 +97,12 @@ func (k Keeper) RegisterNewTokenAndSetTokenFeeder(ctx sdk.Context, oInfo *types. // set a tokenFeeder for the new token p.TokenFeeders = append(p.TokenFeeders, &types.TokenFeeder{ + // #nosec G115 // len(p.Tokens) must be positive since we just append an element for it TokenID: uint64(len(p.Tokens) - 1), // we only support rule_1 for v1 - RuleID: 1, - StartRoundID: 1, + RuleID: 1, + StartRoundID: 1, + // #nosec G115 StartBaseBlock: uint64(ctx.BlockHeight() + startAfterBlocks), Interval: intervalInt, // we don't end feeders for v1 @@ -105,11 +110,5 @@ func (k Keeper) RegisterNewTokenAndSetTokenFeeder(ctx sdk.Context, oInfo *types. }) k.SetParams(ctx, p) - // skip cache update if this is not deliverTx - // for normal cosmostx, checkTx will skip actual message exucution and do anteHandler only, but from ethc.callContract the message will be executed without anteHandler check as checkTx mode. 
- if !ctx.IsCheckTx() { - _ = k.GetAggregatorContext(ctx) - k.GetCaches().AddCache(cache.ItemP(p)) - } return nil } diff --git a/x/oracle/keeper/params_test.go b/x/oracle/keeper/params_test.go index 592a962bc..8ffa02517 100644 --- a/x/oracle/keeper/params_test.go +++ b/x/oracle/keeper/params_test.go @@ -269,7 +269,7 @@ func TestTokenFeederValidate(t *testing.T) { }, { name: "valid case with two feeders", - prevEndBlock: 1000015, + prevEndBlock: 35, feeder: &types.TokenFeeder{ TokenID: 1, RuleID: 1, diff --git a/x/oracle/keeper/prices.go b/x/oracle/keeper/prices.go index 3a3862c67..26a80e52d 100644 --- a/x/oracle/keeper/prices.go +++ b/x/oracle/keeper/prices.go @@ -34,10 +34,12 @@ func (k Keeper) GetPrices( val.TokenID = tokenID val.NextRoundID = nextRoundID var i uint64 + // #nosec G115 if nextRoundID <= uint64(common.MaxSizePrices) { i = 1 val.PriceList = make([]*types.PriceTimeRound, 0, nextRoundID) } else { + // #nosec G115 i = nextRoundID - uint64(common.MaxSizePrices) val.PriceList = make([]*types.PriceTimeRound, 0, common.MaxSizePrices) } @@ -63,17 +65,13 @@ func (k Keeper) GetSpecifiedAssetsPrice(ctx sdk.Context, assetID string) (types. }, nil } - var p types.Params // get params from cache if exists - if k.memStore.agc != nil { - p = k.memStore.agc.GetParams() - } else { - p = k.GetParams(ctx) - } + p := k.GetParamsFromCache() tokenID := p.GetTokenIDFromAssetID(assetID) if tokenID == 0 { return types.Price{}, types.ErrGetPriceAssetNotFound.Wrapf("assetID does not exist in oracle %s", assetID) } + // #nosec G115 price, found := k.GetPriceTRLatest(ctx, uint64(tokenID)) if !found { return types.Price{ @@ -97,13 +95,8 @@ func (k Keeper) GetSpecifiedAssetsPrice(ctx sdk.Context, assetID string) (types. // return latest price for assets func (k Keeper) GetMultipleAssetsPrices(ctx sdk.Context, assets map[string]interface{}) (prices map[string]types.Price, err error) { - var p types.Params // get params from cache if exists - if k.memStore.agc != nil { - p = k.memStore.agc.GetParams() - } else { - p = k.GetParams(ctx) - } + p := k.GetParamsFromCache() // ret := make(map[string]types.Price) prices = make(map[string]types.Price) info := "" @@ -122,6 +115,7 @@ func (k Keeper) GetMultipleAssetsPrices(ctx sdk.Context, assets map[string]inter prices = nil break } + // #nosec G115 price, found := k.GetPriceTRLatest(ctx, uint64(tokenID)) if !found { info = info + assetID + " " @@ -209,26 +203,21 @@ func (k Keeper) AppendPriceTR(ctx sdk.Context, tokenID uint64, priceTR types.Pri store := k.getPriceTRStore(ctx, tokenID) b := k.cdc.MustMarshal(&priceTR) store.Set(types.PricesRoundKey(nextRoundID), b) - if expiredRoundID := nextRoundID - k.memStore.agc.GetParamsMaxSizePrices(); expiredRoundID > 0 { + p := *k.GetParamsFromCache() + // #nosec G115 // maxSizePrices is not negative + if expiredRoundID := nextRoundID - uint64(p.MaxSizePrices); expiredRoundID > 0 { store.Delete(types.PricesRoundKey(expiredRoundID)) } roundID := k.IncreaseNextRoundID(ctx, tokenID) - - // update for native tokens - // TODO: set hooks as a genral approach - var p types.Params - // get params from cache if exists - if k.memStore.agc != nil { - p = k.memStore.agc.GetParams() - } else { - p = k.GetParams(ctx) - } - assetIDs := p.GetAssetIDsFromTokenID(tokenID) - for _, assetID := range assetIDs { - if nstChain, ok := strings.CutPrefix(assetID, types.NSTIDPrefix); ok { - if err := k.UpdateNSTByBalanceChange(ctx, fmt.Sprintf("%s%s", NSTETHAssetAddr, nstChain), []byte(priceTR.Price), roundID); err != nil { - // we just report this 
error in log to notify validators - k.Logger(ctx).Error(types.ErrUpdateNativeTokenVirtualPriceFail.Error(), "error", err) + // we dont' update empty value for nst records + if len(priceTR.Price) > 0 { + assetIDs := p.GetAssetIDsFromTokenID(tokenID) + for _, assetID := range assetIDs { + if nstChain, ok := strings.CutPrefix(strings.ToLower(assetID), types.NSTIDPrefix); ok { + if err := k.UpdateNSTByBalanceChange(ctx, fmt.Sprintf("%s%s", NSTETHAssetAddr, nstChain), []byte(priceTR.Price), roundID); err != nil { + // we just report this error in log to notify validators + k.Logger(ctx).Error(types.ErrUpdateNativeTokenVirtualPriceFail.Error(), "error", err) + } } } } @@ -237,6 +226,7 @@ func (k Keeper) AppendPriceTR(ctx sdk.Context, tokenID uint64, priceTR types.Pri } // GrowRoundID Increases roundID with the previous price +// func (k Keeper) GrowRoundID(ctx sdk.Context, tokenID uint64) (price *types.PriceTimeRound, roundID uint64) { func (k Keeper) GrowRoundID(ctx sdk.Context, tokenID uint64) (price string, roundID uint64) { if pTR, ok := k.GetPriceTRLatest(ctx, tokenID); ok { pTR.RoundID++ @@ -248,8 +238,8 @@ func (k Keeper) GrowRoundID(ctx sdk.Context, tokenID uint64) (price string, roun k.AppendPriceTR(ctx, tokenID, types.PriceTimeRound{ RoundID: nextRoundID, }) - price = "" roundID = nextRoundID + price = "" } return } diff --git a/x/oracle/keeper/prices_test.go b/x/oracle/keeper/prices_test.go index cad34ee6b..4d84f4e7a 100644 --- a/x/oracle/keeper/prices_test.go +++ b/x/oracle/keeper/prices_test.go @@ -48,6 +48,8 @@ func TestPricesGet(t *testing.T) { func TestPricesGetMultiAssets(t *testing.T) { keeper, ctx := keepertest.OracleKeeper(t) + keeper.FeederManager.SetNilCaches() + keeper.FeederManager.BeginBlock(ctx) keeper.SetPrices(ctx, testdata.P1) assets := make(map[string]interface{}) assets["0x0b34c4d876cd569129cf56bafabb3f9e97a4ff42_0x9ce1"] = new(interface{}) diff --git a/x/oracle/keeper/recent_msg.go b/x/oracle/keeper/recent_msg.go index bf7f25983..3c15d0f9b 100644 --- a/x/oracle/keeper/recent_msg.go +++ b/x/oracle/keeper/recent_msg.go @@ -6,6 +6,29 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" ) +// SetMsgItemsForCache set a specific recentMsg with its height as index in the store +func (k Keeper) SetMsgItemsForCache(ctx sdk.Context, recentMsg types.RecentMsg) { + index, found := k.GetIndexRecentMsg(ctx) + block := uint64(ctx.BlockHeight()) + if found { + i := 0 + maxNonce := k.GetParams(ctx).MaxNonce + for ; i < len(index.Index); i++ { + b := index.Index[i] + // #nosec G115 // maxNonce is not negative + if b > block-uint64(maxNonce) { + break + } + // remove old recentMsg + k.RemoveRecentMsg(ctx, b) + } + index.Index = index.Index[i:] + } + index.Index = append(index.Index, block) + k.SetIndexRecentMsg(ctx, index) + k.SetRecentMsg(ctx, recentMsg) +} + // SetRecentMsg set a specific recentMsg in the store from its index func (k Keeper) SetRecentMsg(ctx sdk.Context, recentMsg types.RecentMsg) { store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.RecentMsgKeyPrefix)) diff --git a/x/oracle/keeper/recent_params.go b/x/oracle/keeper/recent_params.go index efd4af316..231e29df6 100644 --- a/x/oracle/keeper/recent_params.go +++ b/x/oracle/keeper/recent_params.go @@ -6,6 +6,29 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" ) +func (k Keeper) SetParamsForCache(ctx sdk.Context, params types.RecentParams) { + block := uint64(ctx.BlockHeight()) + index, found := k.GetIndexRecentParams(ctx) + if found { + i := 0 + // if the maxNonce is changed in this block, all 
rounds would be force sealed, so it's ok to use either the old or new maxNonce + maxNonce := k.GetParams(ctx).MaxNonce + for ; i < len(index.Index); i++ { + b := index.Index[i] + // #nosec G115 // maxNonce is not negative + if b > block-uint64(maxNonce) { + break + } + // remove old recentParams + k.RemoveRecentParams(ctx, b) + } + index.Index = index.Index[i:] + } + index.Index = append(index.Index, block) + k.SetIndexRecentParams(ctx, index) + k.SetRecentParams(ctx, params) +} + // SetRecentParams set a specific recentParams in the store from its index func (k Keeper) SetRecentParams(ctx sdk.Context, recentParams types.RecentParams) { store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.RecentParamsKeyPrefix)) @@ -77,3 +100,42 @@ func (k Keeper) GetAllRecentParamsAsMap(ctx sdk.Context) (result map[int64]*type return } + +// GetRecentParamsWithinMaxNonce returns all recentParams within the maxNonce and the latest recentParams separately +func (k Keeper) GetRecentParamsWithinMaxNonce(ctx sdk.Context) (recentParamsList []*types.RecentParams, prev, latest types.RecentParams) { + maxNonce := k.GetParams(ctx).MaxNonce + var startHeight uint64 + if uint64(ctx.BlockHeight()) > uint64(maxNonce) { + startHeight = uint64(ctx.BlockHeight()) - uint64(maxNonce) + } + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.RecentParamsKeyPrefix)) + iterator := sdk.KVStorePrefixIterator(store, []byte{}) + + defer iterator.Close() + + recentParamsList = make([]*types.RecentParams, 0, maxNonce) + notFound := true + for ; iterator.Valid(); iterator.Next() { + var val types.RecentParams + k.cdc.MustUnmarshal(iterator.Value(), &val) + latest = val + if notFound { + prev = val + } + if val.Block >= startHeight { + if notFound { + notFound = false + } + recentParamsList = append(recentParamsList, &val) + } + if notFound { + prev = val + } + } + if len(recentParamsList) > 0 { + if prev.Block == recentParamsList[0].Block { + prev = types.RecentParams{} + } + } + return recentParamsList, prev, latest +} diff --git a/x/oracle/keeper/single.go b/x/oracle/keeper/single.go deleted file mode 100644 index b40668333..000000000 --- a/x/oracle/keeper/single.go +++ /dev/null @@ -1,225 +0,0 @@ -package keeper - -import ( - "math/big" - "strconv" - - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/aggregator" - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/cache" - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/common" - "github.com/ExocoreNetwork/exocore/x/oracle/types" - sdk "github.com/cosmos/cosmos-sdk/types" -) - -func (k *Keeper) GetCaches() *cache.Cache { - if k.memStore.cs != nil { - return k.memStore.cs - } - k.memStore.cs = cache.NewCache() - return k.memStore.cs -} - -// GetAggregatorContext returns singleton aggregatorContext used to calculate final price for each round of each tokenFeeder -func (k *Keeper) GetAggregatorContext(ctx sdk.Context) *aggregator.AggregatorContext { - if ctx.IsCheckTx() { - if k.memStore.agcCheckTx != nil { - return k.memStore.agcCheckTx - } - if k.memStore.agc == nil { - c := k.GetCaches() - c.ResetCaches() - k.memStore.agcCheckTx = aggregator.NewAggregatorContext() - if ok := k.recacheAggregatorContext(ctx, k.memStore.agcCheckTx, c); !ok { - // this is the very first time oracle has been started, fill relalted info as initialization - initAggregatorContext(ctx, k.memStore.agcCheckTx, k, c) - } - return k.memStore.agcCheckTx - } - k.memStore.agcCheckTx = k.memStore.agc.Copy4CheckTx() - return k.memStore.agcCheckTx - } - - if k.memStore.agc 
!= nil { - return k.memStore.agc - } - - c := k.GetCaches() - c.ResetCaches() - k.memStore.agc = aggregator.NewAggregatorContext() - if ok := k.recacheAggregatorContext(ctx, k.memStore.agc, c); !ok { - // this is the very first time oracle has been started, fill relalted info as initialization - initAggregatorContext(ctx, k.memStore.agc, k, c) - } else { - // this is when a node restart and use the persistent state to refill cache, we don't need to commit these data again - c.SkipCommit() - } - return k.memStore.agc -} - -func (k Keeper) recacheAggregatorContext(ctx sdk.Context, agc *aggregator.AggregatorContext, c *cache.Cache) bool { - logger := k.Logger(ctx) - oracleParams := k.GetParams(ctx) - from := ctx.BlockHeight() - int64(oracleParams.MaxNonce) + 1 - to := ctx.BlockHeight() - - h, ok := k.GetValidatorUpdateBlock(ctx) - recentParamsMap := k.GetAllRecentParamsAsMap(ctx) - if !ok || len(recentParamsMap) == 0 { - logger.Info("recacheAggregatorContext: no validatorUpdateBlock found, go to initial process", "height", ctx.BlockHeight()) - // no cache, this is the very first running, so go to initial process instead - return false - } - - forceSealHeight := h.Block - // #nosec G115 - if int64(forceSealHeight) >= from { - from = int64(h.Block) + 1 - logger.Info("recacheAggregatorContext: with validatorSet updated recently", "latestValidatorUpdateBlock", h.Block, "currentHeight", ctx.BlockHeight()) - } - - logger.Info("recacheAggregatorContext", "from", from, "to", to, "height", ctx.BlockHeight()) - totalPower := big.NewInt(0) - validatorPowers := make(map[string]*big.Int) - validatorSet := k.GetAllExocoreValidators(ctx) - for _, v := range validatorSet { - validatorPowers[sdk.ConsAddress(v.Address).String()] = big.NewInt(v.Power) - totalPower = new(big.Int).Add(totalPower, big.NewInt(v.Power)) - } - agc.SetValidatorPowers(validatorPowers) - - // reset validators - c.AddCache(cache.ItemV(validatorPowers)) - - recentMsgs := k.GetAllRecentMsgAsMap(ctx) - var p *types.Params - var b int64 - if from >= to { - // backwards compatible for that the validatorUpdateBlock updated every block - prev := int64(0) - for b = range recentParamsMap { - if b > prev { - prev = b - } - } - p = recentParamsMap[prev] - agc.SetParams(p) - setCommonParams(p) - } else { - prev := int64(0) - for ; from < to; from++ { - // fill params - for b, p = range recentParamsMap { - // find the params which is the latest one before the replayed block height since prepareRoundEndBlock will use it and it should be the latest one before current block - if b < from && b > prev { - agc.SetParams(p) - prev = b - setCommonParams(p) - delete(recentParamsMap, b) - } - } - - logger.Info("recacheAggregatorContext: prepareRoundEndBlock", "baseBlock", from-1, "forceSealHeight", forceSealHeight) - agc.PrepareRoundEndBlock(ctx, from-1, forceSealHeight) - - if msgs := recentMsgs[from]; msgs != nil { - for _, msg := range msgs { - // these messages are retreived for recache, just skip the validation check and fill the memory cache - //nolint - agc.FillPrice(&types.MsgCreatePrice{ - Creator: msg.Validator, - FeederID: msg.FeederID, - Prices: msg.PSources, - }) - } - } - ctxReplay := ctx.WithBlockHeight(from) - logger.Info("recacheAggregatorContext: sealRound", "blockEnd", from) - agc.SealRound(ctxReplay, false) - } - - for b, p = range recentParamsMap { - // use the latest params before the current block height - if b < to && b > prev { - agc.SetParams(p) - prev = b - setCommonParams(p) - } - } - } - logger.Info("recacheAggregatorContext: 
PrepareRoundEndBlock", "baseBlock", to-1) - agc.PrepareRoundEndBlock(ctx, to-1, forceSealHeight) - - var pRet cache.ItemP - if updated := c.GetCache(&pRet); !updated { - c.AddCache(cache.ItemP(*p)) - } - // TODO: these 4 lines are mainly used for hot fix - // since the latest params stored in KV for recache should be the same with the latest params, so these lines are just duplicated actions if everything is fine. - *p = k.GetParams(ctx) - agc.SetParams(p) - setCommonParams(p) - c.AddCache(cache.ItemP(*p)) - - return true -} - -func initAggregatorContext(ctx sdk.Context, agc *aggregator.AggregatorContext, k *Keeper, c *cache.Cache) { - ctx.Logger().Info("initAggregatorContext", "height", ctx.BlockHeight()) - // set params - p := k.GetParams(ctx) - agc.SetParams(&p) - // set params cache - c.AddCache(cache.ItemP(p)) - setCommonParams(&p) - - totalPower := big.NewInt(0) - validatorPowers := make(map[string]*big.Int) - validatorSet := k.GetAllExocoreValidators(ctx) - for _, v := range validatorSet { - validatorPowers[sdk.ConsAddress(v.Address).String()] = big.NewInt(v.Power) - totalPower = new(big.Int).Add(totalPower, big.NewInt(v.Power)) - } - - agc.SetValidatorPowers(validatorPowers) - // set validatorPower cache - c.AddCache(cache.ItemV(validatorPowers)) - - agc.PrepareRoundEndBlock(ctx, ctx.BlockHeight()-1, 0) -} - -func (k *Keeper) ResetAggregatorContext() { - k.memStore.agc = nil -} - -func (k *Keeper) ResetCache() { - k.memStore.cs = nil -} - -func (k *Keeper) ResetAggregatorContextCheckTx() { - k.memStore.agcCheckTx = nil -} - -// setCommonParams save static fields in params in memory cache since these fields will not change during node running -// TODO: further when params is abled to be updated through tx/gov, this cache should be taken care if any is available to be changed -func setCommonParams(p *types.Params) { - common.MaxNonce = p.MaxNonce - common.ThresholdA = p.ThresholdA - common.ThresholdB = p.ThresholdB - common.MaxDetID = p.MaxDetId - common.Mode = p.Mode - common.MaxSizePrices = int(p.MaxSizePrices) -} - -func (k *Keeper) ResetUpdatedFeederIDs() { - if k.memStore.updatedFeederIDs != nil { - k.memStore.updatedFeederIDs = nil - } -} - -func (k Keeper) GetUpdatedFeederIDs() []string { - return k.memStore.updatedFeederIDs -} - -func (k *Keeper) AppendUpdatedFeederIDs(id uint64) { - k.memStore.updatedFeederIDs = append(k.memStore.updatedFeederIDs, strconv.FormatUint(id, 10)) -} diff --git a/x/oracle/keeper/slashing.go b/x/oracle/keeper/slashing.go index b05c761e8..af64c6557 100644 --- a/x/oracle/keeper/slashing.go +++ b/x/oracle/keeper/slashing.go @@ -26,6 +26,16 @@ func (k Keeper) InitValidatorReportInfo(ctx sdk.Context, validator string, heigh } } +func (k Keeper) ClearAllValidatorReportInfo(ctx sdk.Context) { + // k.ClearAllValidatorMissedRoundBitArray(ctx) + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.ValidatorReportInfoPrefix) + iterator := sdk.KVStorePrefixIterator(store, []byte{}) + for ; iterator.Valid(); iterator.Next() { + store.Delete(iterator.Key()) + } + iterator.Close() +} + // SetValidatorReportInfo sets the validator reporting info for a validator func (k Keeper) SetValidatorReportInfo(ctx sdk.Context, validator string, info types.ValidatorReportInfo) { store := ctx.KVStore(k.storeKey) @@ -118,7 +128,7 @@ func (k Keeper) IterateValidatorReportInfos(ctx sdk.Context, handler func(addres // IterateValidatorMissedRoundBitArrray iterates all missed rounds in one performance window of rounds func (k Keeper) IterateValidatorMissedRoundBitArray(ctx 
sdk.Context, validator string, handler func(index uint64, missed bool) (stop bool)) { - store := prefix.NewStore(ctx.KVStore(k.storeKey), types.SlashingMissedBitArrayPrefix(validator)) + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.SlashingMissedBitArrayValidatorPrefix(validator)) iterator := sdk.KVStorePrefixIterator(store, []byte{}) defer iterator.Close() for ; iterator.Valid(); iterator.Next() { @@ -145,7 +155,17 @@ func (k Keeper) GetValidatorMissedRounds(ctx sdk.Context, address string) []*typ // ClearValidatorMissedBlockBitArray deletes every instance of ValidatorMissedBlockBitArray in the store func (k Keeper) ClearValidatorMissedRoundBitArray(ctx sdk.Context, validator string) { store := ctx.KVStore(k.storeKey) - iterator := sdk.KVStorePrefixIterator(store, types.SlashingMissedBitArrayPrefix(validator)) + iterator := sdk.KVStorePrefixIterator(store, types.SlashingMissedBitArrayValidatorPrefix(validator)) + defer iterator.Close() + for ; iterator.Valid(); iterator.Next() { + store.Delete(iterator.Key()) + } +} + +// ClearAllValidatorMissedRoundBitArray clear all instances of ValidatorMissedBlockBitArray in the store +func (k Keeper) ClearAllValidatorMissedRoundBitArray(ctx sdk.Context) { + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, types.MissedBitArrayPrefix) defer iterator.Close() for ; iterator.Valid(); iterator.Next() { store.Delete(iterator.Key()) diff --git a/x/oracle/keeper/tokens.go b/x/oracle/keeper/tokens.go index 6b97e8733..3221af141 100644 --- a/x/oracle/keeper/tokens.go +++ b/x/oracle/keeper/tokens.go @@ -12,6 +12,7 @@ func (k Keeper) GetTokens(ctx sdk.Context) []*types.TokenIndex { for idx, token := range params.Tokens { ret = append(ret, &types.TokenIndex{ Token: token.Name, + // #nosec G115 Index: uint64(idx), }) } diff --git a/x/oracle/keeper/validate_timestamp.go b/x/oracle/keeper/validate_timestamp.go new file mode 100644 index 000000000..4fb0f8ac7 --- /dev/null +++ b/x/oracle/keeper/validate_timestamp.go @@ -0,0 +1,31 @@ +package keeper + +import ( + "context" + "fmt" + "time" + + "github.com/ExocoreNetwork/exocore/x/oracle/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func checkTimestamp(goCtx context.Context, msg *types.MsgCreatePrice) error { + ctx := sdk.UnwrapSDKContext(goCtx) + now := ctx.BlockTime().UTC() + for _, ps := range msg.Prices { + for _, price := range ps.Prices { + ts := price.Timestamp + if len(ts) == 0 { + return fmt.Errorf("timestamp should not be empty, blockTime:%s, got:%s", now.Format(layout), ts) + } + t, err := time.ParseInLocation(layout, ts, time.UTC) + if err != nil { + return fmt.Errorf("timestamp format invalid, blockTime:%s, got:%s", now.Format(layout), ts) + } + if now.Add(maxFutureOffset).Before(t) { + return fmt.Errorf("timestamp is in the future, blockTime:%s, got:%s", now.Format(layout), ts) + } + } + } + return nil +} diff --git a/x/oracle/keeper/validator_update_block.go b/x/oracle/keeper/validator_update_block.go index e5e6ea96d..03e3dcbab 100644 --- a/x/oracle/keeper/validator_update_block.go +++ b/x/oracle/keeper/validator_update_block.go @@ -6,6 +6,12 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" ) +func (k Keeper) SetValidatorUpdateForCache(ctx sdk.Context, validatorUpdateBlock types.ValidatorUpdateBlock) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.ValidatorUpdateBlockKey)) + b := k.cdc.MustMarshal(&validatorUpdateBlock) + store.Set(types.BlockKey, b) +} + // SetValidatorUpdateBlock set validatorUpdateBlock in the store func 
(k Keeper) SetValidatorUpdateBlock(ctx sdk.Context, validatorUpdateBlock types.ValidatorUpdateBlock) { store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.ValidatorUpdateBlockKey)) diff --git a/x/oracle/module.go b/x/oracle/module.go index 097376d29..b2745b2ae 100644 --- a/x/oracle/module.go +++ b/x/oracle/module.go @@ -4,9 +4,6 @@ import ( "context" "encoding/json" "fmt" - "math/big" - "sort" - "strings" // this line is used by starport scaffolding # 1 @@ -15,16 +12,13 @@ import ( "github.com/ExocoreNetwork/exocore/x/oracle/client/cli" "github.com/ExocoreNetwork/exocore/x/oracle/keeper" - "github.com/ExocoreNetwork/exocore/x/oracle/keeper/cache" "github.com/ExocoreNetwork/exocore/x/oracle/types" abci "github.com/cometbft/cometbft/abci/types" "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/codec" cdctypes "github.com/cosmos/cosmos-sdk/codec/types" - cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/module" - stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" ) var ( @@ -151,244 +145,8 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.Raw // ConsensusVersion is a sequence number for state-breaking change of the module. It should be incremented on each consensus-breaking change introduced by the module. To avoid wrong/empty versions, the initial version should be set to 1 func (AppModule) ConsensusVersion() uint64 { return 1 } -// BeginBlock contains the logic that is automatically triggered at the beginning of each block -func (am AppModule) BeginBlock(ctx sdk.Context, _ abci.RequestBeginBlock) { - // init caches and aggregatorContext for node restart - // TODO: try better way to init caches and aggregatorContext than beginBlock - _ = am.keeper.GetCaches() - agc := am.keeper.GetAggregatorContext(ctx) - validatorPowers := agc.GetValidatorPowers() - // set validatorReportInfo to track performance - for validator := range validatorPowers { - am.keeper.InitValidatorReportInfo(ctx, validator, ctx.BlockHeight()) - } -} - // EndBlock contains the logic that is automatically triggered at the end of each block func (am AppModule) EndBlock(ctx sdk.Context, _ abci.RequestEndBlock) []abci.ValidatorUpdate { - cs := am.keeper.GetCaches() - validatorUpdates := am.keeper.GetValidatorUpdates(ctx) - forceSeal := false - agc := am.keeper.GetAggregatorContext(ctx) - - logger := am.keeper.Logger(ctx) - height := ctx.BlockHeight() - if len(validatorUpdates) > 0 { - validatorList := make(map[string]*big.Int) - for _, vu := range validatorUpdates { - pubKey, _ := cryptocodec.FromTmProtoPublicKey(vu.PubKey) - validatorStr := sdk.ConsAddress(pubKey.Address()).String() - validatorList[validatorStr] = big.NewInt(vu.Power) - // add possible new added validator info for slashing tracking - if vu.Power > 0 { - am.keeper.InitValidatorReportInfo(ctx, validatorStr, height) - } - } - // update validator set information in cache - cs.AddCache(cache.ItemV(validatorList)) - validatorPowers := make(map[string]*big.Int) - cs.GetCache(cache.ItemV(validatorPowers)) - // update validatorPowerList in aggregatorContext - agc.SetValidatorPowers(validatorPowers) - // TODO: seal all alive round since validatorSet changed here - forceSeal = true - logger.Info("validator set changed, force seal all active rounds", "height", height) - } - - // TODO: for v1 use mode==1, just check the failed feeders - _, failed, _, windowClosed := agc.SealRound(ctx, forceSeal) - defer func() { - 
logger.Debug("remove aggregators(workers) on window closed", "feederIDs", windowClosed) - for _, feederID := range windowClosed { - agc.RemoveWorker(feederID) - am.keeper.RemoveNonceWithFeederIDForValidators(ctx, feederID, agc.GetValidators()) - } - }() - // update&check slashing info - validatorPowers := agc.GetValidatorPowers() - validators := make([]string, 0, len(validatorPowers)) - for validator := range validatorPowers { - validators = append(validators, validator) - } - sort.Strings(validators) - for _, validator := range validators { - power := validatorPowers[validator] - reportedInfo, found := am.keeper.GetValidatorReportInfo(ctx, validator) - if !found { - logger.Error(fmt.Sprintf("Expected report info for validator %s but not found", validator)) - continue - } - // TODO: for the round calculation, now only sourceID=1 is used so {feederID, sourceID} have only one value for each feederID which corresponding to one round. - // But when we came to multiple sources, we should consider the round corresponding to feedeerID instead of {feederID, sourceID} - for _, finalPrice := range agc.GetFinalPriceListForFeederIDs(windowClosed) { - exist, matched := agc.PerformanceReview(ctx, finalPrice, validator) - if exist && !matched { - // TODO: malicious price, just slash&jail immediately - logger.Info( - "confirmed malicious price", - "validator", validator, - "infraction_height", height, - "infraction_time", ctx.BlockTime(), - "feederID", finalPrice.FeederID, - "detID", finalPrice.DetID, - "sourceID", finalPrice.SourceID, - "finalPrice", finalPrice.Price, - ) - consAddr, err := sdk.ConsAddressFromBech32(validator) - if err != nil { - panic("invalid consAddr string") - } - - operator := am.keeper.ValidatorByConsAddr(ctx, consAddr) - if operator != nil && !operator.IsJailed() { - coinsBurned := am.keeper.SlashWithInfractionReason(ctx, consAddr, height, power.Int64(), am.keeper.GetSlashFractionMalicious(ctx), stakingtypes.Infraction_INFRACTION_UNSPECIFIED) - ctx.EventManager().EmitEvent( - sdk.NewEvent( - types.EventTypeOracleSlash, - sdk.NewAttribute(types.AttributeKeyValidatorKey, validator), - sdk.NewAttribute(types.AttributeKeyPower, fmt.Sprintf("%d", power)), - sdk.NewAttribute(types.AttributeKeyReason, types.AttributeValueMaliciousReportPrice), - sdk.NewAttribute(types.AttributeKeyJailed, validator), - sdk.NewAttribute(types.AttributeKeyBurnedCoins, coinsBurned.String()), - ), - ) - am.keeper.Jail(ctx, consAddr) - jailUntil := ctx.BlockHeader().Time.Add(am.keeper.GetMaliciousJailDuration(ctx)) - am.keeper.JailUntil(ctx, consAddr, jailUntil) - reportedInfo.MissedRoundsCounter = 0 - reportedInfo.IndexOffset = 0 - am.keeper.ClearValidatorMissedRoundBitArray(ctx, validator) - } - continue - } - - reportedRoundsWindow := am.keeper.GetReportedRoundsWindow(ctx) - index := uint64(reportedInfo.IndexOffset % reportedRoundsWindow) - reportedInfo.IndexOffset++ - // Update reported round bit array & counter - // This counter just tracks the sum of the bit array - // That way we avoid needing to read/write the whole array each time - previous := am.keeper.GetValidatorMissedRoundBitArray(ctx, validator, index) - missed := !exist - switch { - case !previous && missed: - // Array value has changed from not missed to missed, increment counter - am.keeper.SetValidatorMissedRoundBitArray(ctx, validator, index, true) - reportedInfo.MissedRoundsCounter++ - case previous && !missed: - // Array value has changed from missed to not missed, decrement counter - am.keeper.SetValidatorMissedRoundBitArray(ctx, 
validator, index, false) - reportedInfo.MissedRoundsCounter-- - default: - // Array value at this index has not changed, no need to update counter - } - - minReportedPerWindow := am.keeper.GetMinReportedPerWindow(ctx) - - if missed { - ctx.EventManager().EmitEvent( - sdk.NewEvent( - types.EventTypeOracleLiveness, - sdk.NewAttribute(types.AttributeKeyValidatorKey, validator), - sdk.NewAttribute(types.AttributeKeyMissedRounds, fmt.Sprintf("%d", reportedInfo.MissedRoundsCounter)), - sdk.NewAttribute(types.AttributeKeyHeight, fmt.Sprintf("%d", height)), - ), - ) - - logger.Debug( - "absent validator", - "height", ctx.BlockHeight(), - "validator", validator, - "missed", reportedInfo.MissedRoundsCounter, - "threshold", minReportedPerWindow, - ) - } - - minHeight := reportedInfo.StartHeight + reportedRoundsWindow - maxMissed := reportedRoundsWindow - minReportedPerWindow - // if we are past the minimum height and the validator has missed too many rounds reporting prices, punish them - if height > minHeight && reportedInfo.MissedRoundsCounter > maxMissed { - consAddr, err := sdk.ConsAddressFromBech32(validator) - if err != nil { - panic("invalid consAddr string") - } - operator := am.keeper.ValidatorByConsAddr(ctx, consAddr) - if operator != nil && !operator.IsJailed() { - // missing rounds confirmed: slash and jail the validator - coinsBurned := am.keeper.SlashWithInfractionReason(ctx, consAddr, height, power.Int64(), am.keeper.GetSlashFractionMiss(ctx), stakingtypes.Infraction_INFRACTION_UNSPECIFIED) - ctx.EventManager().EmitEvent( - sdk.NewEvent( - types.EventTypeOracleSlash, - sdk.NewAttribute(types.AttributeKeyValidatorKey, validator), - sdk.NewAttribute(types.AttributeKeyPower, fmt.Sprintf("%d", power)), - sdk.NewAttribute(types.AttributeKeyReason, types.AttributeValueMissingReportPrice), - sdk.NewAttribute(types.AttributeKeyJailed, validator), - sdk.NewAttribute(types.AttributeKeyBurnedCoins, coinsBurned.String()), - ), - ) - am.keeper.Jail(ctx, consAddr) - jailUntil := ctx.BlockHeader().Time.Add(am.keeper.GetMissJailDuration(ctx)) - am.keeper.JailUntil(ctx, consAddr, jailUntil) - - // We need to reset the counter & array so that the validator won't be immediately slashed for miss report info upon rebonding. 
- reportedInfo.MissedRoundsCounter = 0 - reportedInfo.IndexOffset = 0 - am.keeper.ClearValidatorMissedRoundBitArray(ctx, validator) - - logger.Info( - "slashing and jailing validator due to liveness fault", - "height", height, - "validator", consAddr.String(), - "min_height", minHeight, - "threshold", minReportedPerWindow, - "slashed", am.keeper.GetSlashFractionMiss(ctx).String(), - "jailed_until", jailUntil, - ) - } else { - // validator was (a) not found or (b) already jailed so we do not slash - logger.Info( - "validator would have been slashed for too many missed repoerting price, but was either not found in store or already jailed", - "validator", validator, - ) - } - } - // Set the updated reportInfo - am.keeper.SetValidatorReportInfo(ctx, validator, reportedInfo) - } - } - - // append new round with previous price for fail-sealed token - for _, tokenID := range failed { - prevPrice, nextRoundID := am.keeper.GrowRoundID(ctx, tokenID) - logger.Info("add new round with previous price under fail aggregation", "tokenID", tokenID, "roundID", nextRoundID, "price", prevPrice) - } - - am.keeper.ResetAggregatorContextCheckTx() - - if _, _, paramsUpdated := cs.CommitCache(ctx, false, am.keeper); paramsUpdated { - var p cache.ItemP - cs.GetCache(&p) - params := types.Params(p) - agc.SetParams(¶ms) - ctx.EventManager().EmitEvent(sdk.NewEvent( - types.EventTypeCreatePrice, - sdk.NewAttribute(types.AttributeKeyParamsUpdated, types.AttributeValueParamsUpdatedSuccess), - )) - } - - if feederIDs := am.keeper.GetUpdatedFeederIDs(); len(feederIDs) > 0 { - feederIDsStr := strings.Join(feederIDs, "_") - ctx.EventManager().EmitEvent(sdk.NewEvent( - types.EventTypeCreatePrice, - sdk.NewAttribute(types.AttributeKeyPriceUpdated, types.AttributeValuePriceUpdatedSuccess), - sdk.NewAttribute(types.AttributeKeyFeederIDs, feederIDsStr), - )) - am.keeper.ResetUpdatedFeederIDs() - } - - newRoundFeederIDs := agc.PrepareRoundEndBlock(ctx, ctx.BlockHeight(), 0) - for _, feederID := range newRoundFeederIDs { - am.keeper.AddZeroNonceItemWithFeederIDForValidators(ctx, feederID, agc.GetValidators()) - } + am.keeper.EndBlock(ctx) return []abci.ValidatorUpdate{} } diff --git a/x/oracle/module_beginblock.go b/x/oracle/module_beginblock.go new file mode 100644 index 000000000..c010b6669 --- /dev/null +++ b/x/oracle/module_beginblock.go @@ -0,0 +1,13 @@ +//go:build !devmode + +package oracle + +import ( + abci "github.com/cometbft/cometbft/abci/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// BeginBlock contains the logic that is automatically triggered at the beginning of each block +func (am AppModule) BeginBlock(ctx sdk.Context, _ abci.RequestBeginBlock) { + am.keeper.BeginBlock(ctx) +} diff --git a/x/oracle/module_beginblock_devmode.go b/x/oracle/module_beginblock_devmode.go new file mode 100644 index 000000000..55cda7e7e --- /dev/null +++ b/x/oracle/module_beginblock_devmode.go @@ -0,0 +1,34 @@ +//go:build devmode + +package oracle + +import ( + "fmt" + + "github.com/ExocoreNetwork/exocore/x/oracle/keeper" + "github.com/ExocoreNetwork/exocore/x/oracle/keeper/feedermanagement" + abci "github.com/cometbft/cometbft/abci/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// BeginBlock contains the logic that is automatically triggered at the beginning of each block +func (am AppModule) BeginBlock(ctx sdk.Context, _ abci.RequestBeginBlock) { + logger := am.keeper.Logger(ctx) + am.keeper.BeginBlock(ctx) + + logger.Info("start simulating recovery in BeginBlock", "height", ctx.BlockHeight()) + // check the result 
of recovery + f := recoveryFeederManagerOnNextBlock(ctx, am.keeper) + if ok := am.keeper.FeederManager.Equals(f); !ok { + panic(fmt.Sprintf("there's something wrong in the recovery logic of feedermanager, block:%d", ctx.BlockHeight())) + } +} + +func recoveryFeederManagerOnNextBlock(ctx sdk.Context, k keeper.Keeper) *feedermanagement.FeederManager { + f := feedermanagement.NewFeederManager(k) + recovered := f.BeginBlock(ctx) + if ctx.BlockHeight() > 1 && !recovered { + panic(fmt.Sprintf("failed to do recovery for feedermanager, block:%d", ctx.BlockHeight())) + } + return f +} diff --git a/x/oracle/types/errors.go b/x/oracle/types/errors.go index 79598e6aa..d8409eada 100644 --- a/x/oracle/types/errors.go +++ b/x/oracle/types/errors.go @@ -15,16 +15,20 @@ const ( getPriceFailedRoundNotFound updateNativeTokenVirtualPriceFail nstAssetNotSurpported + failedInAggregation + quoteRecorded ) // x/oracle module sentinel errors var ( ErrInvalidMsg = sdkerrors.Register(ModuleName, invalidMsg, "invalid input create price") - ErrPriceProposalIgnored = sdkerrors.Register(ModuleName, priceProposalIgnored, "price proposal ignored") + ErrPriceProposalIgnored = sdkerrors.Register(ModuleName, priceProposalIgnored, "quote is ignored") ErrPriceProposalFormatInvalid = sdkerrors.Register(ModuleName, priceProposalFormatInvalid, "price proposal message format invalid") ErrInvalidParams = sdkerrors.Register(ModuleName, invalidParams, "invalid params") ErrGetPriceAssetNotFound = sdkerrors.Register(ModuleName, getPriceFailedAssetNotFound, "get price failed for asset not found") ErrGetPriceRoundNotFound = sdkerrors.Register(ModuleName, getPriceFailedRoundNotFound, "get price failed for round not found") ErrUpdateNativeTokenVirtualPriceFail = sdkerrors.Register(ModuleName, updateNativeTokenVirtualPriceFail, "update native token balance change failed") ErrNSTAssetNotSupported = sdkerrors.Register(ModuleName, nstAssetNotSurpported, "nstAsset not supported") + ErrFailedInAggregation = sdkerrors.Register(ModuleName, failedInAggregation, "failed in aggregation") + ErrQuoteRecorded = sdkerrors.Register(ModuleName, quoteRecorded, "quote recorded") ) diff --git a/x/oracle/types/key_slashing.go b/x/oracle/types/key_slashing.go index ec5f13ee2..f296f0ebe 100644 --- a/x/oracle/types/key_slashing.go +++ b/x/oracle/types/key_slashing.go @@ -6,15 +6,23 @@ var ( MissedBitArrayPrefix = append(SlashingPrefix, []byte("missed/value/")...) ) +// func SlashingValidatorReportInfoPrefix() []byte { +// return ValidatorReportInfoPrefix +// } + func SlashingValidatorReportInfoKey(validator string) []byte { return append(ValidatorReportInfoPrefix, []byte(validator)...) } -func SlashingMissedBitArrayPrefix(validator string) []byte { +func SlashingMissedBitArrayPrefix() []byte { + return MissedBitArrayPrefix +} + +func SlashingMissedBitArrayValidatorPrefix(validator string) []byte { key := append([]byte(validator), DelimiterForCombinedKey) return append(MissedBitArrayPrefix, key...) } func SlashingMissedBitArrayKey(validator string, index uint64) []byte { - return append(SlashingMissedBitArrayPrefix(validator), Uint64Bytes(index)...) + return append(SlashingMissedBitArrayValidatorPrefix(validator), Uint64Bytes(index)...) 
} diff --git a/x/oracle/types/params.go b/x/oracle/types/params.go index 1e72de69e..b5262d0dc 100644 --- a/x/oracle/types/params.go +++ b/x/oracle/types/params.go @@ -60,7 +60,7 @@ func DefaultParams() Params { Name: "ETH", ChainID: 1, ContractAddress: "0x", - Decimal: 18, + Decimal: 8, Active: true, AssetID: "0x0b34c4d876cd569129cf56bafabb3f9e97a4ff42_0x9ce1", }, @@ -71,7 +71,7 @@ func DefaultParams() Params { Name: "0 position is reserved", }, { - Name: "Chainlink", + Name: SourceChainlinkName, Entry: &Endpoint{ Offchain: map[uint64]string{0: ""}, }, @@ -95,7 +95,7 @@ func DefaultParams() Params { TokenID: 1, RuleID: 1, StartRoundID: 1, - StartBaseBlock: 1000000, + StartBaseBlock: 20, Interval: 10, }, }, @@ -322,6 +322,8 @@ func (p Params) UpdateTokens(currentHeight uint64, tokens ...*Token) (Params, er if len(t.AssetID) > 0 { token.AssetID = t.AssetID } + // tokenID is actually uint since it's the index of an array + // #nosec G115 if !p.TokenStarted(uint64(tokenID), currentHeight) { // contractAddres is mainly used as a description information if len(t.ContractAddress) > 0 { @@ -553,7 +555,8 @@ func (p Params) IsValidSource(sourceID uint64) bool { func (p Params) GetTokenFeeder(feederID uint64) *TokenFeeder { for k, v := range p.TokenFeeders { - if uint64(k) == feederID { + // #nosec G115 // index of array is uint + if k >= 0 && uint64(k) == feederID { return v } } @@ -562,6 +565,7 @@ func (p Params) GetTokenFeeder(feederID uint64) *TokenFeeder { func (p Params) GetTokenInfo(feederID uint64) *Token { for k, v := range p.TokenFeeders { + // #nosec G115 // index of array is uint if uint64(k) == feederID { return p.Tokens[v.TokenID] } @@ -587,6 +591,7 @@ func (p Params) CheckRules(feederID uint64, prices []*PriceSource) (bool, error) if source.Valid { notFound = true for _, p := range prices { + // #nosec G115 // index of array is uint if p.SourceID == uint64(sID) { notFound = false break @@ -622,3 +627,22 @@ func (p Params) CheckDecimal(feederID uint64, decimal int32) bool { token := p.Tokens[feeder.TokenID] return token.Decimal == decimal } + +func (p Params) IsForceSealingUpdate(params *Params) bool { + if p.MaxNonce != params.MaxNonce || + p.MaxDetId != params.MaxDetId || + p.ThresholdA != params.ThresholdA || + p.ThresholdB != params.ThresholdB || + p.Mode != params.Mode { + return true + } + return false +} + +func (p Params) IsSlashingResetUpdate(params *Params) bool { + if p.Slashing.ReportedRoundsWindow != params.Slashing.ReportedRoundsWindow || + p.Slashing.MinReportedPerWindow != params.Slashing.MinReportedPerWindow { + return true + } + return false +} diff --git a/x/oracle/types/price.pb.go b/x/oracle/types/price.pb.go index 3c9f6aaac..295b4e36c 100644 --- a/x/oracle/types/price.pb.go +++ b/x/oracle/types/price.pb.go @@ -172,7 +172,7 @@ type PriceTimeRound struct { Decimal int32 `protobuf:"varint,2,opt,name=decimal,proto3" json:"decimal,omitempty"` // timestamp when the price is corresponded Timestamp string `protobuf:"bytes,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // roundid of the price if the source is deteministic + // round id in Exocore of the price RoundID uint64 `protobuf:"varint,4,opt,name=round_id,json=roundId,proto3" json:"round_id,omitempty"` } diff --git a/x/oracle/types/types.go b/x/oracle/types/types.go index b1b6c2bc1..e811252cb 100644 --- a/x/oracle/types/types.go +++ b/x/oracle/types/types.go @@ -4,6 +4,7 @@ import ( "encoding/binary" sdkmath "cosmossdk.io/math" + sdk "github.com/cosmos/cosmos-sdk/types" ) type OracleInfo struct { @@ -47,7 +48,11 
@@ const ( DefaultPriceValue = 1 DefaultPriceDecimal = 0 - NSTIDPrefix = "NST" + NSTIDPrefix = "nst" + + SourceChainlinkName = "Chainlink" + SourceChainlinkID = 1 + TimeLayout = "2006-01-02 15:04:05" ) var DelimiterForCombinedKey = byte('/') @@ -57,3 +62,11 @@ func Uint64Bytes(value uint64) []byte { binary.BigEndian.PutUint64(valueBytes, value) return valueBytes } + +func ConsAddrStrFromCreator(creator string) (string, error) { + accAddress, err := sdk.AccAddressFromBech32(creator) + if err != nil { + return "", err + } + return sdk.ConsAddress(accAddress).String(), nil +}
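+ +// note: ConsAddrStrFromCreator derives the consensus-address string by re-encoding the creator's account address bytes with the consensus bech32 prefix; it relies on the account and consensus addresses sharing the same underlying bytes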