diff --git a/.dockerignore b/.dockerignore
index 6b7ebf648..2886a059a 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,5 +1,6 @@
 .devcontainer
 .github
 .vscode
-!scripts/init.sh
-target
\ No newline at end of file
+target/
+.dockerignore
+Dockerfile
diff --git a/.github/workflows/benchmark-weights.yml b/.github/workflows/benchmark-weights.yml
new file mode 100644
index 000000000..e69de29bb
diff --git a/.github/workflows/check-devnet.yml b/.github/workflows/check-devnet.yml
index fcc9809d3..3d7f17723 100644
--- a/.github/workflows/check-devnet.yml
+++ b/.github/workflows/check-devnet.yml
@@ -2,7 +2,7 @@ name: Devnet Deploy Check
 
 on:
   pull_request:
-    branches: [devnet]
+    branches: [devnet, devnet-ready]
 
 env:
   CARGO_TERM_COLOR: always
@@ -11,6 +11,7 @@ jobs:
   check-spec-version:
     name: Check spec_version bump
     runs-on: SubtensorCI
+    if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-spec-version-bump') }}
     steps:
       - name: Dependencies
         run: |
@@ -37,7 +38,7 @@
           echo "network spec_version: $spec_version"
           if (( $(echo "$local_spec_version <= $spec_version" | bc -l) )); then echo "$local_spec_version ≯ $spec_version ❌"; exit 1; fi
           echo "$local_spec_version > $spec_version ✅"
-      
+
   check-devnet-migrations:
     name: check devnet migrations
     runs-on: ubuntu-22.04
@@ -51,4 +52,4 @@
         runtime-package: "node-subtensor-runtime"
         node-uri: "wss://dev.chain.opentensor.ai:443"
         checks: "pre-and-post"
-        extra-args: "--disable-spec-version-check --no-weight-warnings"
\ No newline at end of file
+        extra-args: "--disable-spec-version-check --no-weight-warnings"
diff --git a/.github/workflows/check-finney.yml b/.github/workflows/check-finney.yml
index 3e9fb5994..642e02c0a 100644
--- a/.github/workflows/check-finney.yml
+++ b/.github/workflows/check-finney.yml
@@ -30,7 +30,7 @@
 
       - name: Check that spec_version has been bumped
         run: |
-          spec_version=$(PATH=$PATH:$HOME/.cargo/.bin substrate-spec-version wss://entrypoint-finney.opentensor.ai:443 | tr -d '\n')
+          spec_version=$(PATH=$PATH:$HOME/.cargo/.bin substrate-spec-version ${{ secrets.NUCLEUS_ARCHIVE_NODE }} | tr -d '\n')
          echo "network spec_version: $spec_version"
          : ${spec_version:?bad spec version}
          local_spec_version=$(cargo run -p node-subtensor-runtime --bin spec_version | tr -d '\n')
@@ -49,6 +49,6 @@
        uses: "paritytech/try-runtime-gha@v0.1.0"
        with:
          runtime-package: "node-subtensor-runtime"
-          node-uri: "wss://entrypoint-finney.opentensor.ai:443"
+          node-uri: ${{ secrets.NUCLEUS_ARCHIVE_NODE }}
          checks: "pre-and-post"
-          extra-args: "--disable-spec-version-check --no-weight-warnings"
\ No newline at end of file
+          extra-args: "--disable-spec-version-check --no-weight-warnings"
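Both workflow changes above gate deploy PRs on the runtime `spec_version`: the on-chain value is read with `substrate-spec-version`, the local value is built with the runtime's `spec_version` binary, and the job fails unless the local value is strictly greater. The same gate can be reproduced locally before opening a deploy PR; this is a minimal sketch, assuming `substrate-spec-version` is already installed via cargo and that `NODE_URI` is set to the relevant archive-node endpoint (the workflows now take this from the `NUCLEUS_ARCHIVE_NODE` secret, whose value is not shown here):

```bash
#!/usr/bin/env bash
# Sketch of the CI spec_version check, run locally.
# Assumes substrate-spec-version is on PATH and NODE_URI points at the target network.
set -euo pipefail

NODE_URI="${NODE_URI:?set NODE_URI to the ws(s) endpoint of the target network}"

# spec_version currently live on the network
remote_spec_version=$(substrate-spec-version "$NODE_URI" | tr -d '\n')
: "${remote_spec_version:?bad spec version}"

# spec_version built from the local runtime
local_spec_version=$(cargo run -p node-subtensor-runtime --bin spec_version | tr -d '\n')

echo "local spec_version:   $local_spec_version"
echo "network spec_version: $remote_spec_version"

# Fail unless the local runtime bumps past what is on-chain (same rule as the workflow's bc check).
if (( local_spec_version <= remote_spec_version )); then
  echo "spec_version has not been bumped ❌"
  exit 1
fi
echo "spec_version bump looks good ✅"
```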
diff --git a/.github/workflows/check-rust.yml b/.github/workflows/check-rust.yml
index e95308861..797ad4df4 100644
--- a/.github/workflows/check-rust.yml
+++ b/.github/workflows/check-rust.yml
@@ -113,6 +113,54 @@ jobs:
       - name: cargo clippy --workspace --all-targets -- -D warnings
         run: cargo clippy --workspace --all-targets -- -D warnings
 
+  cargo-check-lints:
+    name: check custom lints
+    runs-on: SubtensorCI
+    strategy:
+      matrix:
+        rust-branch:
+          - stable
+        rust-target:
+          - x86_64-unknown-linux-gnu
+          # - x86_64-apple-darwin
+        os:
+          - ubuntu-latest
+          # - macos-latest
+    env:
+      RELEASE_NAME: development
+      RUSTV: ${{ matrix.rust-branch }}
+      RUSTFLAGS: -D warnings
+      RUST_BACKTRACE: full
+      RUST_BIN_DIR: target/${{ matrix.rust-target }}
+      SKIP_WASM_BUILD: 1
+      TARGET: ${{ matrix.rust-target }}
+    steps:
+      - name: Check-out repository under $GITHUB_WORKSPACE
+        uses: actions/checkout@v4
+
+      - name: Install dependencies
+        run: |
+          sudo apt-get update &&
+          sudo apt-get install -y clang curl libssl-dev llvm libudev-dev protobuf-compiler
+
+      - name: Install Rust ${{ matrix.rust-branch }}
+        uses: actions-rs/toolchain@v1.0.6
+        with:
+          toolchain: ${{ matrix.rust-branch }}
+          components: rustfmt, clippy
+          profile: minimal
+
+      - name: Utilize Shared Rust Cache
+        uses: Swatinem/rust-cache@v2.2.1
+        with:
+          key: ${{ matrix.os }}-${{ env.RUST_BIN_DIR }}
+
+      - name: check lints
+        run: |
+          set -o pipefail  # Ensure the pipeline fails if any command in the pipeline fails
+          cargo check 2>&1 | sed -r "s/\x1B\[[0-9;]*[mK]//g" | tee /dev/tty | grep -q "^warning:" && \
+            (echo "Build emitted the following warnings:" >&2 && exit 1) || echo "No warnings found."
+
   cargo-clippy-all-features:
     name: cargo clippy --all-features
     runs-on: SubtensorCI
@@ -338,7 +386,3 @@
 
       - name: Check features
         run: zepter run check
-
-
-
-
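The new `cargo-check-lints` job fails whenever `cargo check` prints a warning: it strips ANSI colour codes and greps the output for lines beginning with `warning:`. Roughly the same gate can be run locally; the sketch below follows the CI step but, as an assumption, tees to a temporary log file rather than `/dev/tty` so it also works where no terminal is attached:

```bash
#!/usr/bin/env bash
# Sketch of the warning gate from the cargo-check-lints job: run cargo check,
# strip ANSI colour codes, and fail if any output line starts with "warning:".
set -euo pipefail

log=$(mktemp)

# SKIP_WASM_BUILD=1 matches the job's environment and keeps the check fast.
SKIP_WASM_BUILD=1 cargo check 2>&1 | sed -r "s/\x1B\[[0-9;]*[mK]//g" | tee "$log"

if grep -q "^warning:" "$log"; then
  echo "Build emitted warnings (see above)" >&2
  exit 1
fi
echo "No warnings found."
```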
diff --git a/.github/workflows/check-testnet.yml b/.github/workflows/check-testnet.yml
index 71c46557c..c18b45ac2 100644
--- a/.github/workflows/check-testnet.yml
+++ b/.github/workflows/check-testnet.yml
@@ -2,7 +2,7 @@ name: Testnet Deploy Check
 
 on:
   pull_request:
-    branches: [testnet]
+    branches: [testnet, testnet-ready]
 
 env:
   CARGO_TERM_COLOR: always
@@ -11,6 +11,7 @@ jobs:
   check-spec-version:
     name: Check spec_version bump
     runs-on: SubtensorCI
+    if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-spec-version-bump') }}
     steps:
       - name: Dependencies
         run: |
diff --git a/.github/workflows/devnet-labels.yml b/.github/workflows/devnet-labels.yml
deleted file mode 100644
index 85d9e7eed..000000000
--- a/.github/workflows/devnet-labels.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-name: Tested on Devnet
-on:
-  pull_request:
-    types: [opened, labeled, unlabeled, synchronize]
-    branches: [main]
-jobs:
-  check-labels:
-    runs-on: ubuntu-latest
-    permissions:
-      issues: write
-      pull-requests: write
-    steps:
-      - uses: mheap/github-action-required-labels@v5
-        with:
-          mode: minimum
-          count: 1
-          labels: |
-            devnet-pass
-            devnet-skip
diff --git a/.github/workflows/devnet-ready-labels.yml b/.github/workflows/devnet-ready-labels.yml
deleted file mode 100644
index ab53327e7..000000000
--- a/.github/workflows/devnet-ready-labels.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-name: devnet-companion Label Check
-on:
-  pull_request:
-    types: [opened, labeled, unlabeled, synchronize]
-    branches: [devnet-ready]
-jobs:
-  check-labels:
-    runs-on: ubuntu-latest
-    permissions:
-      issues: write
-      pull-requests: write
-    steps:
-      - uses: mheap/github-action-required-labels@v5
-        with:
-          mode: minimum
-          count: 1
-          labels: devnet-companion
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 19bda7463..72e3f1b12 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -1,49 +1,42 @@
 name: Publish Docker Image
 
 on:
-  push:
-    branches:
-      - main
-    tags:
-      - '*'
-  pull_request:
-    branches:
-      - main
-  workflow_dispatch:
+  release:
+    types: [published]
 
 permissions:
-    contents: read
-    packages: write
-    actions: read
-    security-events: write
+  contents: read
+  packages: write
+  actions: read
+  security-events: write
 
 jobs:
   publish:
     runs-on: SubtensorCI
-      
+
     steps:
       - name: Checkout code
         uses: actions/checkout@v4
-      
+
       - name: Set up QEMU
         uses: docker/setup-qemu-action@v2
-      
+
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
-      
+
      - name: Login to GHCR
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
-      
+
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ghcr.io/${{ github.repository }}
-      
+
      - name: Build and push Docker image
        uses: docker/build-push-action@v4
        with:
@@ -52,4 +45,4 @@
          tags: |
            ${{ steps.meta.outputs.tags }}
            ghcr.io/${{ github.repository }}:latest
-          labels: ${{ steps.meta.outputs.labels }}
\ No newline at end of file
+          labels: ${{ steps.meta.outputs.labels }}
diff --git a/.github/workflows/testnet-labels.yml b/.github/workflows/testnet-labels.yml
deleted file mode 100644
index b4aabd958..000000000
--- a/.github/workflows/testnet-labels.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-name: Tested on Testnet
-on:
-  pull_request:
-    types: [opened, labeled, unlabeled, synchronize]
-    branches: [main]
-jobs:
-  check-labels:
-    runs-on: ubuntu-latest
-    permissions:
-      issues: write
-      pull-requests: write
-    steps:
-      - uses: mheap/github-action-required-labels@v5
-        with:
-          mode: minimum
-          count: 1
-          labels: |
-            testnet-pass
-            testnet-skip
diff --git a/.github/workflows/testnet-ready-labels.yml b/.github/workflows/testnet-ready-labels.yml
deleted file mode 100644
index 8570d2011..000000000
--- a/.github/workflows/testnet-ready-labels.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-name: testnet-companion Label Check
-on:
-  pull_request:
-    types: [opened, labeled, unlabeled, synchronize]
-    branches: [testnet-ready]
-jobs:
-  check-labels:
-    runs-on: ubuntu-latest
-    permissions:
-      issues: write
-      pull-requests: write
-    steps:
-      - uses: mheap/github-action-required-labels@v5
-        with:
-          mode: minimum
-          count: 1
-          labels: testnet-companion
diff --git a/.gitignore b/.gitignore
index f394de80c..5921b6b93 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,6 +6,8 @@
 
 **/*.lock
 
+*.ipynb
+
 # Generated by code coverage
 *.profraw
 *.profdata
diff --git a/.rustfmt.toml b/.rustfmt.toml
index 24876acd9..9fd9af831 100644
--- a/.rustfmt.toml
+++ b/.rustfmt.toml
@@ -6,7 +6,7 @@
 ##
 # rustup run nightly -- rustfmt node/src/main.rs
 
-# max_width = 100
+# max_width = 180
 # hard_tabs = false
 # tab_spaces = 4
 # newline_style = "Auto"
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 132d360b8..8ac806367 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -3,7 +3,7 @@
 ## Lifecycle of a Pull Request
 
 1. Individuals wishing to contribute to subtensor should develop their change/feature/fix in a
-   [Pull Request](https://github.com/opentensor/subtensor/compare) (PR) targeting the `main`
+   [Pull Request](https://github.com/opentensor/subtensor/compare) (PR) targeting the `devnet-ready`
    branch of the subtensor GitHub repository. It is recommended to start your pull request as a
    draft initially until you are ready to have other developers actively look at it. Any changes
    to pallet/runtime code should be accompanied by integration and/or unit tests fully
@@ -13,69 +13,29 @@
    Review" and request review from "Nucleus".
 3. Core Nucleus team members will review your PR, possibly requesting changes, and will also
    add appropriate labels to your PR as shown below. Three positive reviews are required.
-4. Once the required passing reviews have been obtained, you are ready to request that your PR
-   be included in the next `devnet` deploy. To do this, you should open a companion PR merging
-   a copy of your branch into the `devnet-ready` branch. You must include a link to the parent
-   PR in the description and preface your PR title with "(Devnet Ready)" or the PR will be
-   closed/ignored. Your companion PR should have the `devnet-companion` label.
-5. A core team administrator will review your "(Devnet Ready)" PR, verifying that it logically
-   matches the changes introduced in the parent PR (there will sometimes be minor differences
-   due to merge conflicts) and will either request changes or approve the PR and merge it. Once
-   your companion PR is merged, the administrator will add the `devnet-ready` label to the
-   parent PR, indicating that the PR is on the `devnet-ready` branch and will be included in
-   the next deploy to `devnet`.
-6. At some point, a core team administrator will open a PR merging the current `devnet-ready`
+4. Once the required passing reviews have been obtained, you or an administrator may merge the
+   PR into the `devnet-ready` branch.
+5. At some point, a core team administrator will open a PR merging the current `devnet-ready`
   branch into `devnet`, and the CI will enforce some additional safety checks on this PR
   including a requirement that the new `spec_version` be greater than the current on-chain
   `spec_version`. The PR should include a bulleted list of all PRs included in the deploy so
-   they can be easily found after the fact (TODO: automate this). This PR will require two
-   reviews from the core team as a sanity check. After merging, the administrator will then
-   need to update all PRs with the `devnet-ready` label to instead have the `on-devnet` label
-   (TODO: automate this upon merge). The administrator will then deploy `devnet`.
-7. Once the `on-devnet` label appears on your PR, if you are a core team member it is your
-   responsibility to verify that the features/changes/fixes introduced by your PR are
-   functioning properly on `devnet` by interacting with the live network. If you are an
-   external contributor, a core team member will be assigned to test this for you.
-8. If your feature/change/fix is confirmed working on `devnet`, the `devnet-pass` label should
-   be added. Otherwise if there are issues, the `devnet-fail` label should be added and you
-   will need to make changes to your PR and repeat the previous steps in this process. In some
-   cases a revert PR will need to be created reverting your changes from the `pre-devnet` and
-   `devnet` branches, respectively.
-9. Once `devnet-pass` has been added to your PR, it is eligible for inclusion in the next
-   `testnet` deploy. We typically run `testnet` deploys every other wednesday.
-10. On the appropriate date, an administrator will open a PR merging the current `devnet`
+   they can be easily found after the fact.
+6. Once your feature/change/fix is on `devnet`, it is your responsibility to confirm it is
+   working properly. If it is not working and additional changes are needed, please coordinate
+   with a core team administrator and/or open a new PR into `devnet` that either reverts your
+   changes or makes the changes required for the feature to function properly.
+7. On the appropriate date, an administrator will open a PR merging the current `devnet`
    branch into `testnet`. This PR should include a bulleted list of all PRs included in the
-   deploy so they can be easily found after the fact (TODO: automate this). The PR should
-   exclude any PRs that currently have the `devnet-fail` label via a revert (TODO: enforce via
-   CI). This PR will require two reviews from the core team as a sanity check. After merging
-   into `testnet`, the administrator will then need to run the deploy and update all PRs
-   included in the deploy with the `on-testnet` label (TODO: automate this upon merge). Next
-   the administrator must cut a (pre-release) release in GitHub for `testnet` (TODO: github
-   action to generate the release and release notes).
-11. Once the `on-testnet` label appears on your PR, if you are a core team member it is your
-   responsibility to once again verify that the features/changes/fixes introduced by your PR
-   are functioning properly on `testnet` by interacting with the live network, if applicable.
-   If you are an external contributor, a core team member may be assigned to do this testing
-   for you but otherwise it will be your responsibility to show evidence on the PR that the
-   testing is successful. Once this has been verified, the `testnet-pass` label should be
-   added. If testing fails, the `testnet-fail` label should be added and PRs should be opened
-   reverting the change from `devnet-ready`, and then a PR should be opened merging the
-   modified `devnet` into `testnet`. These revert PRs, if they occur, _must_ be merged before
-   a new deploy can be run (TODO: enforce this via CI).
-12. After the SOP period (1 week on `testnet`) has passed and the `testnet-pass` label has been
-   added, the CI checks on your PR should now turn all green and a core team member will be
-   able to merge your PR into `main`. At this point your PR is done and is eligible to be
-   included in the next `finney` deploy (TODO: track and enforce SOP compliance on a per-PR
-   basis in CI based on the timestamps of label changes). We typically run `finney` deploys
-   every other Wednesday, so this will typically happen the Wednesday following the Wednesday
-   your PR was deployed to `testnet`. An administrator will run this deploy. The process the
-   administrator follows is to open a PR merging `main` into the `finney` branch, which will
-   always track the current state of `finney`. This PR automatically has some additional
-   checks on it such as asserting that the spec_version gets bumped properly and other sanity
-   checks designed to stop a bad deploy. Once the PR is reviewed and merged, the administrator
-   will run the actual deploy. Once that is successful, the administrator will cut a new
-   GitHub release tagged off of the latest `main` branch commit that was included in the
-   deploy, and announcements will be made regarding the release.
+   deploy so they can be easily found after the fact (TODO: automate this). Once this PR is merged,
+   the administrator will deploy `testnet` and cut a (pre-release) release in GitHub for
+   `testnet` (TODO: github action to generate the release and release notes).
+8. It is now your responsibility to once again check that your feature/change/fix is working
+   properly, this time on `testnet`. Once again, if it is not working or additional changes are
+   needed, please coordinate with a core team administrator ASAP and/or open a new PR into
+   `testnet` that either reverts your changes or makes the changes required for the
+   feature to function properly.
+9. At some point, an administrator will merge the current `testnet` branch into `main` and cut a new
+   deploy to mainnet (`finney`).
 
 ## PR Labels
 
@@ -85,40 +45,24 @@
 | `blue-team` | PR is focused on preventative/safety measures and/or dev UX improvements | none |
 | `runtime` | PR contains substantive changes to runtime / pallet code | none |
 | `breaking-change` | PR requires synchronized changes with bittensor | Triggers an automatic bot message so the relevant teams are made aware of the change well in advance |
-| `migration` | PR contains one or more migrations | none |
-| `devnet-companion` | Designates a devnet companion PR | Presence of `devnet-companion` label is checked |
-| `devnet-ready` | PR's branch has been merged into the `devnet-ready` branch and will be included in the next `devnet` deploy | none |
-| `on-devnet` | PR has been deployed to `devnet` | Removes `devnet-ready` |
-| `devnet-pass` | PR has passed manual testing on `devnet` | `devnet-pass` or `devnet-skip` required |
-| `devnet-skip` | Allows a critical hotfix PR to skip required testing on `devnet` | `devnet-pass` or `devnet-skip` required |
-| `devnet-fail` | PR has failed manual testing on `devnet` and requires modification | none |
-| `testnet-companion` | Designates a testnet companion PR | Presence of `testnet-companion` label is checked |
-| `on-testnet` | PR has been deployed to `testnet` | none |
-| `testnet-pass` | PR has passed manual testing on `testnet` | `testnet-pass` or `testnet-skip` required |
-| `testnet-skip` | Allows a critical hotfix PR to skip required manual testing and SOP on `testnet` | `testnet-pass` or `testnet-skip` required |
-| `testnet-fail` | PR has failed manual testing on `testnet` and requires modification | none |
-
 ## Branches
 
 ### `devnet-ready`
 
-Companion PRs merge into this branch, eventually accumulating into a merge of `devnet-ready`
-into `devnet`, coinciding with a deploy of `devnet`.
+All new feature/change/fix PRs should merge into this branch.
 
 #### Restrictions
 * no deleting the branch
 * no force pushes
 * no direct pushes
-* require 1 positive review from an administrator
-* new code changes invalidate existing reviews
+* require 3 positive reviews from administrators
+* new code changes do _not_ invalidate existing reviews
 * only merge commit style merging allowed
 
 #### CI-Enforced Restrictions
 * `check-rust.yml` must pass
-* TODO: parent PR must be linked to in description
-* TODO: parent PR must have the required number of positive reviews
 
 ### `devnet`
 
@@ -164,33 +108,8 @@ tags for `testnet` releases.
 
 ### `main`
 
-Default branch for all new PRs. Slightly ahead of what is currently on `finney`. When a PR is all
-green and "done", meaning it has been tested on `devnet` and `testnet`, it can be merged into
-`main`. Contains tags for `finney` releases.
-
-#### Restrictions
-* no deleting the branch
-* no force pushes
-* no direct pushes
-* require 3 positive reviews from core team members
-* new code changes invalidate existing reviews
-* all conversations must be resolved
-* only merge commit style merging allowed
-
-#### CI-Enforced Restrictions
-* `check-rust.yml` must pass
-* `check-labels.yml` must pass
-* must have `devnet-skip` or `devnet-pass` label
-* must have `testnet-skip` or `testnet-pass` label
-* if `breaking-change` label is present, bot will message the appropriate teams
-* TODO: when we get auditing, presence of `needs-audit` label = require a review from auditor
-* TODO: track SOP on PR based on label age
-
-
-### `finney`
-
 Tracks the current state of what is deployed to `finney` (mainnet).
Updated via an -administrator-submitted PR merging `main` into `finney` in concert with a `finney` deploy. +administrator-submitted PR merging `testnet` into `main` in concert with a `finney` deploy. #### Restrictions * no deleting the branch @@ -203,5 +122,5 @@ administrator-submitted PR merging `main` into `finney` in concert with a `finne #### CI-Enforced Restrictions * `check-rust.yml` must pass * `check-finney.yml` must pass -* spec_version must be greater than what is currently on live `finney` +* `spec_version` must be greater than what is currently on live `finney` * TODO: other pre-deploy sanity checks here diff --git a/Cargo.lock b/Cargo.lock index 868e85b89..ee0933379 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -223,7 +223,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -625,7 +625,7 @@ checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -751,7 +751,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -1159,7 +1159,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -1198,7 +1198,7 @@ dependencies = [ [[package]] name = "common" version = "0.1.0" -source = "git+https://github.com/w3f/ring-proof#b273d33f9981e2bb3375ab45faeb537f7ee35224" +source = "git+https://github.com/w3f/ring-proof#665f5f51af5734c7b6d90b985dd6861d4c5b4752" dependencies = [ "ark-ec", "ark-ff", @@ -1553,7 +1553,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -1580,7 +1580,7 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -1597,7 +1597,7 @@ checksum = "4b2c1c1776b986979be68bb2285da855f8d8a35851a769fca8740df7c3d07877" dependencies = [ "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -1621,7 +1621,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -1632,7 +1632,7 @@ checksum = "733cabb43482b1a1b53eee8583c2b9e8684d592215ea83efd305dd31bc2f0178" dependencies = [ "darling_core", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -1738,7 +1738,7 @@ checksum = "d65d7ce8132b7c0e54497a4d9a55a1c2a0912a0d786cf894472ba818fba45762" dependencies = [ "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -1751,7 +1751,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -1840,7 +1840,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -1880,7 +1880,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.67", + "syn 2.0.71", "termcolor", "toml 0.8.14", "walkdir", @@ -2040,7 +2040,7 @@ checksum = "de0d48a183585823424a4ce1aa132d174a6a81bd540895822eb4c8373a8e49e8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -2153,7 +2153,7 @@ dependencies = [ "prettyplease 0.2.20", "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -2308,7 +2308,7 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fork-tree" version = "12.0.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "parity-scale-codec", ] @@ -2331,7 +2331,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frame-benchmarking" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "frame-support", "frame-support-procedural", @@ -2347,16 +2347,16 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-runtime-interface 24.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", - "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-runtime-interface 24.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", + "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "static_assertions", ] [[package]] name = "frame-benchmarking-cli" version = "32.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "Inflector", "array-bytes 6.2.3", @@ -2388,15 +2388,15 @@ dependencies = [ "sp-blockchain", "sp-core", "sp-database", - "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-inherents", "sp-io", "sp-keystore", "sp-runtime", "sp-state-machine", - "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-trie", - "sp-wasm-interface 20.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-wasm-interface 20.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "thiserror", "thousands", ] @@ -2404,7 +2404,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "aquamarine 0.3.3", "frame-support", @@ -2416,8 +2416,8 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", - "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", + "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", ] [[package]] @@ -2432,10 +2432,25 @@ dependencies = [ "serde", ] +[[package]] +name = "frame-metadata-hash-extension" +version = "0.1.0" +source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" +dependencies = [ + "array-bytes 6.2.3", + "docify", + "frame-support", + "frame-system", + "log", + "parity-scale-codec", + "scale-info", + "sp-runtime", +] + [[package]] name = "frame-support" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "aquamarine 0.5.0", "array-bytes 6.2.3", @@ -2458,7 +2473,7 @@ dependencies = [ "sp-arithmetic", "sp-core", "sp-crypto-hashing-proc-macro", - "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-genesis-builder", "sp-inherents", "sp-io", @@ -2466,8 +2481,8 @@ dependencies = [ "sp-runtime", "sp-staking", "sp-state-machine", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", - "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", + "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-weights", "static_assertions", "tt-call", @@ -2476,7 +2491,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "23.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "Inflector", "cfg-expr", @@ -2489,35 +2504,35 @@ dependencies = [ "proc-macro2", "quote", "sp-crypto-hashing", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] name = "frame-support-procedural-tools" version = "10.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] name = "frame-support-procedural-tools-derive" version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] name = "frame-system" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "cfg-if", "docify", @@ -2529,7 +2544,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-version", "sp-weights", ] @@ -2537,7 +2552,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version 
= "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "frame-benchmarking", "frame-support", @@ -2546,13 +2561,13 @@ dependencies = [ "scale-info", "sp-core", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", ] [[package]] name = "frame-system-rpc-runtime-api" version = "26.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "parity-scale-codec", "sp-api", @@ -2561,13 +2576,13 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.34.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "frame-support", "parity-scale-codec", "sp-api", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", ] [[package]] @@ -2662,7 +2677,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -3453,7 +3468,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -4135,9 +4150,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.21" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" [[package]] name = "lru" @@ -4204,7 +4219,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -4218,7 +4233,7 @@ dependencies = [ "macro_magic_core_macros", "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -4229,7 +4244,7 @@ checksum = "9ea73aa640dc01d62a590d48c0c3521ed739d53b27f919b25c3551e233481654" dependencies = [ "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -4240,7 +4255,7 @@ checksum = "ef9d79ae96aaba821963320eb2b6e34d17df1e5a83d8a1985c29cc5be59577b3" dependencies = [ "macro_magic_core", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -4334,6 +4349,20 @@ dependencies = [ "hash-db", ] +[[package]] +name = "merkleized-metadata" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f313fcff1d2a4bcaa2deeaa00bf7530d77d5f7bd0467a117dde2e29a75a7a17a" +dependencies = [ + "array-bytes 6.2.3", + "blake3", + "frame-metadata", + "parity-scale-codec", + "scale-decode", + "scale-info", +] + [[package]] name = "merlin" version = "3.0.0" @@ -4644,6 +4673,7 @@ dependencies = [ "clap", "frame-benchmarking", "frame-benchmarking-cli", + "frame-metadata-hash-extension", "frame-system", "futures", "jsonrpsee", @@ -4699,6 +4729,7 @@ dependencies = [ "frame-benchmarking", 
"frame-executive", "frame-metadata", + "frame-metadata-hash-extension", "frame-support", "frame-system", "frame-system-benchmarking", @@ -4739,9 +4770,9 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", - "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", - "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", + "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", + "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-transaction-pool", "sp-version", "substrate-wasm-builder", @@ -4945,6 +4976,7 @@ dependencies = [ "frame-system", "log", "pallet-balances", + "pallet-scheduler", "pallet-subtensor", "parity-scale-codec", "scale-info", @@ -4952,7 +4984,8 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", + "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-weights", "substrate-fixed", "subtensor-macros", @@ -4961,7 +4994,7 @@ dependencies = [ [[package]] name = "pallet-aura" version = "27.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "frame-support", "frame-system", @@ -4972,13 +5005,13 @@ dependencies = [ "sp-application-crypto", "sp-consensus-aura", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", ] [[package]] name = "pallet-authorship" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "frame-support", "frame-system", @@ -4986,13 +5019,13 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", ] [[package]] name = "pallet-balances" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "docify", "frame-benchmarking", @@ -5002,7 +5035,7 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", ] [[package]] @@ -5018,7 +5051,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 
(git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "subtensor-macros", ] @@ -5036,14 +5069,14 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "subtensor-macros", ] [[package]] name = "pallet-grandpa" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "frame-benchmarking", "frame-support", @@ -5060,13 +5093,13 @@ dependencies = [ "sp-runtime", "sp-session", "sp-staking", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", ] [[package]] name = "pallet-insecure-randomness-collective-flip" version = "16.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "frame-support", "frame-system", @@ -5074,13 +5107,13 @@ dependencies = [ "safe-mix", "scale-info", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", ] [[package]] name = "pallet-membership" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "frame-benchmarking", "frame-support", @@ -5091,13 +5124,13 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", ] [[package]] name = "pallet-multisig" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "frame-benchmarking", "frame-support", @@ -5107,13 +5140,13 @@ dependencies = [ "scale-info", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", ] [[package]] name = "pallet-preimage" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "frame-benchmarking", "frame-support", @@ -5124,13 +5157,13 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", ] [[package]] name = "pallet-proxy" version = "28.0.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "frame-benchmarking", "frame-support", @@ -5139,7 +5172,7 @@ dependencies = [ "scale-info", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", ] [[package]] @@ -5155,14 +5188,14 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "subtensor-macros", ] [[package]] name = "pallet-safe-mode" version = "9.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "docify", "frame-benchmarking", @@ -5175,13 +5208,13 @@ dependencies = [ "scale-info", "sp-arithmetic", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", ] [[package]] name = "pallet-scheduler" version = "29.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "docify", "frame-benchmarking", @@ -5192,14 +5225,14 @@ dependencies = [ "scale-info", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-weights", ] [[package]] name = "pallet-session" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "frame-support", "frame-system", @@ -5214,7 +5247,7 @@ dependencies = [ "sp-session", "sp-staking", "sp-state-machine", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-trie", ] @@ -5233,6 +5266,8 @@ dependencies = [ "pallet-balances", "pallet-collective", "pallet-membership", + "pallet-preimage", + "pallet-scheduler", "pallet-transaction-payment", "pallet-utility", "parity-scale-codec", @@ -5242,12 +5277,13 @@ dependencies = [ "serde", "serde-tuple-vec-map", "serde_bytes", + "serde_json", "serde_with", "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", - "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", + "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-version", "substrate-fixed", "subtensor-macros", @@ -5256,7 +5292,7 @@ dependencies = [ [[package]] 
name = "pallet-sudo" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "docify", "frame-benchmarking", @@ -5266,13 +5302,13 @@ dependencies = [ "scale-info", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", ] [[package]] name = "pallet-timestamp" version = "27.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "docify", "frame-benchmarking", @@ -5284,15 +5320,15 @@ dependencies = [ "sp-inherents", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", - "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", + "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-timestamp", ] [[package]] name = "pallet-transaction-payment" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "frame-support", "frame-system", @@ -5302,13 +5338,13 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", ] [[package]] name = "pallet-transaction-payment-rpc" version = "30.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", @@ -5324,7 +5360,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "pallet-transaction-payment", "parity-scale-codec", @@ -5336,7 +5372,7 @@ dependencies = [ [[package]] name = "pallet-utility" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "frame-benchmarking", "frame-support", @@ -5346,7 +5382,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", ] [[package]] @@ -5590,7 +5626,7 @@ 
dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -5631,7 +5667,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -5717,7 +5753,7 @@ dependencies = [ "polkavm-common", "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -5727,7 +5763,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ba81f7b5faac81e528eb6158a6f3c9e0bb1008e0ffa19653bc8dea925ecb429" dependencies = [ "polkavm-derive-impl", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -5854,7 +5890,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" dependencies = [ "proc-macro2", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -5922,7 +5958,7 @@ checksum = "834da187cfe638ae8abb0203f0b33e5ccdb02a28e7199f2f47b3e2754f50edca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -5968,7 +6004,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -6036,7 +6072,7 @@ dependencies = [ "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -6304,7 +6340,7 @@ checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -6399,13 +6435,14 @@ dependencies = [ [[package]] name = "ring" version = "0.1.0" -source = "git+https://github.com/w3f/ring-proof#b273d33f9981e2bb3375ab45faeb537f7ee35224" +source = "git+https://github.com/w3f/ring-proof#665f5f51af5734c7b6d90b985dd6861d4c5b4752" dependencies = [ "ark-ec", "ark-ff", "ark-poly", "ark-serialize", "ark-std", + "arrayvec", "blake2 0.10.6", "common", "fflonk", @@ -6684,18 +6721,18 @@ dependencies = [ [[package]] name = "sc-allocator" version = "23.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "log", "sp-core", - "sp-wasm-interface 20.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-wasm-interface 20.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "thiserror", ] [[package]] name = "sc-basic-authorship" version = "0.34.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "futures", "futures-timer", @@ -6717,7 +6754,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.33.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "parity-scale-codec", "sp-api", @@ -6732,7 +6769,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "27.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "array-bytes 6.2.3", "docify", @@ -6758,18 +6795,18 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] name = "sc-cli" version = "0.36.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "array-bytes 6.2.3", "chrono", @@ -6810,7 +6847,7 @@ dependencies = [ [[package]] name = "sc-client-api" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "fnv", "futures", @@ -6825,11 +6862,11 @@ dependencies = [ "sp-consensus", "sp-core", "sp-database", - "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-runtime", "sp-state-machine", "sp-statement-store", - "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-trie", "substrate-prometheus-endpoint", ] @@ -6837,7 +6874,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.35.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "hash-db", "kvdb", @@ -6863,7 +6900,7 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.33.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "async-trait", "futures", @@ -6888,7 +6925,7 @@ dependencies = [ [[package]] name = "sc-consensus-aura" version = "0.34.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "async-trait", "futures", @@ -6917,7 +6954,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa" version = "0.19.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "ahash 0.8.11", "array-bytes 6.2.3", @@ -6960,7 +6997,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa-rpc" version = "0.19.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "finality-grandpa", "futures", @@ -6980,7 +7017,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.33.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "async-trait", "futures", @@ -7003,7 +7040,7 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.32.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "parity-scale-codec", "parking_lot 0.12.3", @@ -7013,25 +7050,25 @@ dependencies = [ "schnellru", "sp-api", "sp-core", - "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-io", "sp-panic-handler", - "sp-runtime-interface 24.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-runtime-interface 24.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-trie", "sp-version", - "sp-wasm-interface 20.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-wasm-interface 20.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "tracing", ] [[package]] name = "sc-executor-common" version = "0.29.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "polkavm", "sc-allocator", "sp-maybe-compressed-blob", - "sp-wasm-interface 20.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-wasm-interface 20.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "thiserror", "wasm-instrument", ] @@ -7039,18 +7076,18 @@ dependencies = [ [[package]] name = "sc-executor-polkavm" version = "0.29.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "log", "polkavm", "sc-executor-common", - "sp-wasm-interface 20.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-wasm-interface 20.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", ] [[package]] name = "sc-executor-wasmtime" version = "0.29.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "anyhow", "cfg-if", @@ -7060,15 +7097,15 @@ dependencies = [ "rustix 0.36.17", "sc-allocator", "sc-executor-common", - "sp-runtime-interface 24.0.0 
(git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", - "sp-wasm-interface 20.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-runtime-interface 24.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", + "sp-wasm-interface 20.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "wasmtime", ] [[package]] name = "sc-informant" version = "0.33.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "ansi_term", "futures", @@ -7085,7 +7122,7 @@ dependencies = [ [[package]] name = "sc-keystore" version = "25.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "array-bytes 6.2.3", "parking_lot 0.12.3", @@ -7099,7 +7136,7 @@ dependencies = [ [[package]] name = "sc-mixnet" version = "0.4.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "array-bytes 4.2.0", "arrayvec", @@ -7128,7 +7165,7 @@ dependencies = [ [[package]] name = "sc-network" version = "0.34.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "array-bytes 6.2.3", "async-channel", @@ -7171,7 +7208,7 @@ dependencies = [ [[package]] name = "sc-network-bitswap" version = "0.33.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "async-channel", "cid", @@ -7191,7 +7228,7 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.33.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "async-trait", "bitflags 1.3.2", @@ -7208,7 +7245,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.34.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "ahash 0.8.11", "futures", @@ -7227,7 +7264,7 @@ dependencies = [ [[package]] name = "sc-network-light" version = "0.33.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "array-bytes 6.2.3", "async-channel", @@ -7248,7 +7285,7 @@ dependencies = [ [[package]] name = "sc-network-sync" version = 
"0.33.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "array-bytes 6.2.3", "async-channel", @@ -7284,7 +7321,7 @@ dependencies = [ [[package]] name = "sc-network-transactions" version = "0.33.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "array-bytes 6.2.3", "futures", @@ -7303,7 +7340,7 @@ dependencies = [ [[package]] name = "sc-offchain" version = "29.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "array-bytes 6.2.3", "bytes", @@ -7326,7 +7363,7 @@ dependencies = [ "sc-utils", "sp-api", "sp-core", - "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-keystore", "sp-offchain", "sp-runtime", @@ -7337,7 +7374,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.17.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -7346,7 +7383,7 @@ dependencies = [ [[package]] name = "sc-rpc" version = "29.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "futures", "jsonrpsee", @@ -7378,7 +7415,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.33.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -7398,7 +7435,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "futures", "governor", @@ -7416,7 +7453,7 @@ dependencies = [ [[package]] name = "sc-rpc-spec-v2" version = "0.34.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "array-bytes 6.2.3", "futures", @@ -7447,7 +7484,7 @@ dependencies = [ [[package]] name = "sc-service" version = "0.35.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source 
= "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "async-trait", "directories", @@ -7489,12 +7526,12 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-core", - "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-keystore", "sp-runtime", "sp-session", "sp-state-machine", - "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", @@ -7511,7 +7548,7 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.30.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "log", "parity-scale-codec", @@ -7522,7 +7559,7 @@ dependencies = [ [[package]] name = "sc-sysinfo" version = "27.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "derive_more", "futures", @@ -7537,13 +7574,13 @@ dependencies = [ "sp-core", "sp-crypto-hashing", "sp-io", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", ] [[package]] name = "sc-telemetry" version = "15.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "chrono", "futures", @@ -7562,7 +7599,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "ansi_term", "chrono", @@ -7582,7 +7619,7 @@ dependencies = [ "sp-core", "sp-rpc", "sp-runtime", - "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "thiserror", "tracing", "tracing-log 0.1.4", @@ -7592,18 +7629,18 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] name = "sc-transaction-pool" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "async-trait", "futures", @@ -7621,7 +7658,7 @@ dependencies = [ "sp-core", "sp-crypto-hashing", "sp-runtime", - "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-transaction-pool", "substrate-prometheus-endpoint", "thiserror", @@ -7630,7 +7667,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "async-trait", "futures", @@ -7646,7 +7683,7 @@ dependencies = [ [[package]] name = "sc-utils" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "async-channel", "futures", @@ -7658,6 +7695,29 @@ dependencies = [ "sp-arithmetic", ] +[[package]] +name = "scale-bits" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e57b1e7f6b65ed1f04e79a85a57d755ad56d76fdf1e9bddcc9ae14f71fcdcf54" +dependencies = [ + "parity-scale-codec", + "scale-type-resolver", +] + +[[package]] +name = "scale-decode" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e98f3262c250d90e700bb802eb704e1f841e03331c2eb815e46516c4edbf5b27" +dependencies = [ + "derive_more", + "parity-scale-codec", + "scale-bits", + "scale-type-resolver", + "smallvec", +] + [[package]] name = "scale-info" version = "2.11.3" @@ -7684,6 +7744,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "scale-type-resolver" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0cded6518aa0bd6c1be2b88ac81bf7044992f0f154bfbabd5ad34f43512abcb" + [[package]] name = "schannel" version = "0.1.23" @@ -7845,9 +7911,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.203" +version = "1.0.204" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" +checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" dependencies = [ "serde_derive", ] @@ -7872,13 +7938,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.203" +version = "1.0.204" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" +checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" dependencies = [ "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -7926,7 +7992,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -8129,7 +8195,7 @@ dependencies = [ [[package]] name = "sp-api" version = "26.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "hash-db", "log", @@ -8137,12 +8203,12 @@ dependencies = [ "scale-info", "sp-api-proc-macro", "sp-core", - "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-metadata-ir", "sp-runtime", - "sp-runtime-interface 24.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-runtime-interface 24.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-state-machine", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-trie", "sp-version", "thiserror", @@ -8151,7 +8217,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "15.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "Inflector", "blake2 0.10.6", @@ -8159,26 +8225,26 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] name = "sp-application-crypto" version = "30.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "parity-scale-codec", "scale-info", "serde", "sp-core", "sp-io", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", ] [[package]] name = "sp-arithmetic" version = "23.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "docify", "integer-sqrt", @@ -8186,7 +8252,7 @@ dependencies = [ "parity-scale-codec", "scale-info", "serde", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "static_assertions", ] @@ -8211,7 +8277,7 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "26.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "sp-api", "sp-inherents", @@ -8221,7 +8287,7 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "futures", "log", @@ -8239,7 +8305,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.32.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "async-trait", "futures", @@ -8254,7 +8320,7 @@ dependencies = [ [[package]] name = "sp-consensus-aura" version = "0.32.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "async-trait", "parity-scale-codec", @@ -8270,7 +8336,7 @@ dependencies = [ [[package]] name = "sp-consensus-grandpa" version = "13.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "finality-grandpa", "log", @@ -8287,7 +8353,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.32.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "parity-scale-codec", "scale-info", @@ -8298,7 +8364,7 @@ dependencies = [ [[package]] name = "sp-core" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "array-bytes 6.2.3", "bandersnatch_vrfs", @@ -8329,11 +8395,11 @@ dependencies = [ "secrecy", "serde", "sp-crypto-hashing", - "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", - "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", - "sp-runtime-interface 24.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", - "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", + "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", + "sp-runtime-interface 24.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", + "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "ss58-registry", "substrate-bip39", "thiserror", @@ -8345,7 +8411,7 @@ dependencies = [ [[package]] name = "sp-crypto-ec-utils" version = "0.10.0" -source = "git+https://github.com/paritytech/polkadot-sdk#c4b3c1c6c6e492c4196e06fbba824a58e8119a3b" +source = "git+https://github.com/paritytech/polkadot-sdk#6a5b6e03bfc8d0c6f5f05f3180313902c15aee84" dependencies = [ "ark-bls12-377", "ark-bls12-377-ext", @@ -8365,7 +8431,7 @@ dependencies = [ [[package]] name = "sp-crypto-hashing" version = "0.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "blake2b_simd", "byteorder", @@ -8378,17 +8444,17 @@ dependencies = [ [[package]] name = "sp-crypto-hashing-proc-macro" version = "0.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "quote", "sp-crypto-hashing", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] name = "sp-database" version = "10.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "kvdb", "parking_lot 0.12.3", @@ -8397,37 +8463,37 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] name = "sp-debug-derive" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#c4b3c1c6c6e492c4196e06fbba824a58e8119a3b" +source = "git+https://github.com/paritytech/polkadot-sdk#6a5b6e03bfc8d0c6f5f05f3180313902c15aee84" dependencies = [ "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] name = "sp-externalities" version = "0.25.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "environmental", "parity-scale-codec", - "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", ] [[package]] name = "sp-externalities" version = "0.25.0" -source = "git+https://github.com/paritytech/polkadot-sdk#c4b3c1c6c6e492c4196e06fbba824a58e8119a3b" +source = "git+https://github.com/paritytech/polkadot-sdk#6a5b6e03bfc8d0c6f5f05f3180313902c15aee84" dependencies = [ "environmental", "parity-scale-codec", @@ -8437,7 +8503,7 @@ dependencies = [ [[package]] name = "sp-genesis-builder" version = "0.7.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "serde_json", "sp-api", @@ -8447,7 +8513,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "26.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -8460,7 +8526,7 @@ dependencies = [ [[package]] name = "sp-io" version = "30.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "bytes", "ed25519-dalek", @@ -8472,12 +8538,12 @@ dependencies = [ "secp256k1", "sp-core", "sp-crypto-hashing", - "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-keystore", - "sp-runtime-interface 24.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-runtime-interface 24.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-state-machine", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", - "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", + "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-trie", "tracing", "tracing-core", @@ -8486,7 +8552,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "31.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "sp-core", "sp-runtime", @@ -8496,18 +8562,18 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.34.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "parity-scale-codec", "parking_lot 0.12.3", "sp-core", - "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", ] [[package]] name = "sp-maybe-compressed-blob" version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "thiserror", "zstd 0.12.4", @@ -8516,7 +8582,7 @@ dependencies = [ [[package]] name = "sp-metadata-ir" version = "0.6.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "frame-metadata", "parity-scale-codec", @@ -8526,7 +8592,7 @@ dependencies = [ [[package]] name = "sp-mixnet" version = "0.4.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "parity-scale-codec", "scale-info", @@ -8537,7 +8603,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "26.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" 
dependencies = [ "sp-api", "sp-core", @@ -8547,7 +8613,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "13.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "backtrace", "lazy_static", @@ -8557,7 +8623,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "26.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "rustc-hash", "serde", @@ -8567,7 +8633,7 @@ dependencies = [ [[package]] name = "sp-runtime" version = "31.0.1" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "docify", "either", @@ -8584,33 +8650,33 @@ dependencies = [ "sp-arithmetic", "sp-core", "sp-io", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-weights", ] [[package]] name = "sp-runtime-interface" version = "24.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "bytes", "impl-trait-for-tuples", "parity-scale-codec", "polkavm-derive", "primitive-types", - "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", - "sp-runtime-interface-proc-macro 17.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", - "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", - "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", - "sp-wasm-interface 20.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", + "sp-runtime-interface-proc-macro 17.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", + "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", + "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", + "sp-wasm-interface 20.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "static_assertions", ] [[package]] name = "sp-runtime-interface" version = "24.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#c4b3c1c6c6e492c4196e06fbba824a58e8119a3b" +source = "git+https://github.com/paritytech/polkadot-sdk#6a5b6e03bfc8d0c6f5f05f3180313902c15aee84" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -8629,33 +8695,33 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "17.0.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "Inflector", "expander", "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] name = "sp-runtime-interface-proc-macro" version = "17.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#c4b3c1c6c6e492c4196e06fbba824a58e8119a3b" +source = "git+https://github.com/paritytech/polkadot-sdk#6a5b6e03bfc8d0c6f5f05f3180313902c15aee84" dependencies = [ "Inflector", "expander", "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] name = "sp-session" version = "27.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "parity-scale-codec", "scale-info", @@ -8669,7 +8735,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "26.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -8682,7 +8748,7 @@ dependencies = [ [[package]] name = "sp-state-machine" version = "0.35.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "hash-db", "log", @@ -8691,7 +8757,7 @@ dependencies = [ "rand", "smallvec", "sp-core", - "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-panic-handler", "sp-trie", "thiserror", @@ -8702,7 +8768,7 @@ dependencies = [ [[package]] name = "sp-statement-store" version = "10.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "aes-gcm", "curve25519-dalek 4.1.3", @@ -8716,9 +8782,9 @@ dependencies = [ "sp-application-crypto", "sp-core", "sp-crypto-hashing", - "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-runtime", - "sp-runtime-interface 24.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-runtime-interface 24.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "thiserror", "x25519-dalek 2.0.1", ] @@ -8726,29 +8792,29 @@ dependencies = [ [[package]] name = "sp-std" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" [[package]] name = "sp-std" version = "14.0.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk#c4b3c1c6c6e492c4196e06fbba824a58e8119a3b" +source = "git+https://github.com/paritytech/polkadot-sdk#6a5b6e03bfc8d0c6f5f05f3180313902c15aee84" [[package]] name = "sp-storage" version = "19.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "impl-serde", "parity-scale-codec", "ref-cast", "serde", - "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", ] [[package]] name = "sp-storage" version = "19.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#c4b3c1c6c6e492c4196e06fbba824a58e8119a3b" +source = "git+https://github.com/paritytech/polkadot-sdk#6a5b6e03bfc8d0c6f5f05f3180313902c15aee84" dependencies = [ "impl-serde", "parity-scale-codec", @@ -8760,7 +8826,7 @@ dependencies = [ [[package]] name = "sp-timestamp" version = "26.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "async-trait", "parity-scale-codec", @@ -8772,7 +8838,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "16.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "parity-scale-codec", "tracing", @@ -8783,7 +8849,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "16.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#c4b3c1c6c6e492c4196e06fbba824a58e8119a3b" +source = "git+https://github.com/paritytech/polkadot-sdk#6a5b6e03bfc8d0c6f5f05f3180313902c15aee84" dependencies = [ "parity-scale-codec", "tracing", @@ -8794,7 +8860,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "26.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "sp-api", "sp-runtime", @@ -8803,7 +8869,7 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" version = "26.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "async-trait", "parity-scale-codec", @@ -8817,7 +8883,7 @@ dependencies = [ [[package]] name = "sp-trie" version = "29.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "ahash 0.8.11", "hash-db", @@ -8830,7 +8896,7 @@ dependencies = [ "scale-info", "schnellru", "sp-core", - "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-externalities 
0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "thiserror", "tracing", "trie-db", @@ -8840,7 +8906,7 @@ dependencies = [ [[package]] name = "sp-version" version = "29.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "impl-serde", "parity-scale-codec", @@ -8849,7 +8915,7 @@ dependencies = [ "serde", "sp-crypto-hashing-proc-macro", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", "sp-version-proc-macro", "thiserror", ] @@ -8857,18 +8923,18 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "13.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "parity-scale-codec", "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] name = "sp-wasm-interface" version = "20.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "anyhow", "impl-trait-for-tuples", @@ -8880,7 +8946,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "20.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#c4b3c1c6c6e492c4196e06fbba824a58e8119a3b" +source = "git+https://github.com/paritytech/polkadot-sdk#6a5b6e03bfc8d0c6f5f05f3180313902c15aee84" dependencies = [ "impl-trait-for-tuples", "log", @@ -8890,7 +8956,7 @@ dependencies = [ [[package]] name = "sp-weights" version = "27.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "bounded-collections", "parity-scale-codec", @@ -8898,7 +8964,7 @@ dependencies = [ "serde", "smallvec", "sp-arithmetic", - "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0)", + "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", ] [[package]] @@ -9031,13 +9097,13 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] name = "substrate-bip39" version = "0.4.7" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "hmac 0.12.1", "pbkdf2", @@ -9049,12 +9115,12 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" [[package]] name = "substrate-fixed" version = "0.5.9" -source = 
"git+https://github.com/encointer/substrate-fixed.git?tag=v0.5.9#a4fb461aae6205ffc55bed51254a40c52be04e5d" +source = "git+https://github.com/opentensor/substrate-fixed.git?tag=v0.5.9#a4fb461aae6205ffc55bed51254a40c52be04e5d" dependencies = [ "parity-scale-codec", "scale-info", @@ -9065,7 +9131,7 @@ dependencies = [ [[package]] name = "substrate-frame-rpc-system" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "frame-system-rpc-runtime-api", "futures", @@ -9084,7 +9150,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.17.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ "hyper", "log", @@ -9096,15 +9162,24 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" version = "17.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-v1.10.0#7049c3c98836b3e9253f6aaa69b6bf3d622e3962" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3#8d2f55dfe06bae13e9f47ccf587acfd3fb9cd923" dependencies = [ + "array-bytes 6.2.3", "build-helper", "cargo_metadata", "console", "filetime", + "frame-metadata", + "merkleized-metadata", + "parity-scale-codec", "parity-wasm", "polkavm-linker", + "sc-executor", + "sp-core", + "sp-io", "sp-maybe-compressed-blob", + "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.10.0-rc3)", + "sp-version", "strum 0.26.2", "tempfile", "toml 0.8.14", @@ -9112,6 +9187,20 @@ dependencies = [ "wasm-opt", ] +[[package]] +name = "subtensor" +version = "0.1.0" +dependencies = [ + "node-subtensor", + "node-subtensor-runtime", + "proc-macro2", + "quote", + "rayon", + "subtensor-linting", + "syn 2.0.71", + "walkdir", +] + [[package]] name = "subtensor-custom-rpc" version = "0.0.2" @@ -9137,6 +9226,15 @@ dependencies = [ "sp-api", ] +[[package]] +name = "subtensor-linting" +version = "0.1.0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.71", +] + [[package]] name = "subtensor-macros" version = "0.1.0" @@ -9144,7 +9242,17 @@ dependencies = [ "ahash 0.8.11", "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", +] + +[[package]] +name = "subtensor-tools" +version = "0.1.0" +dependencies = [ + "anyhow", + "clap", + "semver 1.0.23", + "toml_edit 0.22.14", ] [[package]] @@ -9172,9 +9280,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.67" +version = "2.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff8655ed1d86f3af4ee3fd3263786bc14245ad17c4c7e85ba7187fb3ae028c90" +checksum = "b146dcf730474b4bcd16c311627b31ede9ab149045db4d6088b3becaea046462" dependencies = [ "proc-macro2", "quote", @@ -9280,7 +9388,7 @@ checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" dependencies = [ "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -9400,7 +9508,7 @@ checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -9558,7 +9666,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -9963,7 +10071,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", "wasm-bindgen-shared", ] @@ -9997,7 +10105,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -10716,7 +10824,7 @@ checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] @@ -10736,7 +10844,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.67", + "syn 2.0.71", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 8d9eff122..f9a7968b9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,10 +1,38 @@ +[package] +name = "subtensor" +version = "0.1.0" +description = "Implementation of the bittensor blockchain" +authors = ["Substrate DevHub "] +homepage = "https://substrate.io/" +edition = "2021" +license = "Unlicense" +publish = false +repository = "https://github.com/opentensor/subtensor" + +[dependencies] +node-subtensor = { path = "node", version = "4.0.0-dev" } +node-subtensor-runtime = { path = "runtime", version = "4.0.0-dev" } + +[build-dependencies] +subtensor-linting = { path = "support/linting", version = "0.1.0" } +syn.workspace = true +quote.workspace = true +proc-macro2.workspace = true +walkdir.workspace = true +rayon = "1.10" + [workspace] members = [ "node", "pallets/commitments", "pallets/subtensor", + "pallets/admin-utils", + "pallets/collective", + "pallets/registry", "runtime", + "support/tools", "support/macros", + "support/linting", ] resolver = "2" @@ -36,83 +64,94 @@ serde_json = { version = "1.0.116", default-features = false } serde_with = { version = "=2.0.0", default-features = false } smallvec = "1.13.2" litep2p = { git = "https://github.com/paritytech/litep2p", branch = "master" } +syn = { version = "2", features = [ + "full", + "visit-mut", + "visit", + "extra-traits", + "parsing", +] } +quote = "1" +proc-macro2 = { version = "1", features = ["span-locations"] } +walkdir = "2" subtensor-macros = { path = "support/macros" } -frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -frame-benchmarking-cli = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } -frame-executive = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -frame-support = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -frame-system = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -frame-system-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -frame-try-runtime = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } +frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +frame-benchmarking-cli = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = 
"v1.10.0-rc3" } +frame-executive = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +frame-metadata-hash-extension = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +frame-support = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +frame-system = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +frame-system-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +frame-try-runtime = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } -pallet-aura = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -pallet-balances = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -pallet-grandpa = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -pallet-insecure-randomness-collective-flip = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -pallet-membership = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -pallet-multisig = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -pallet-preimage = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -pallet-proxy = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -pallet-safe-mode = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -pallet-scheduler = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -pallet-sudo = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -pallet-timestamp = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -pallet-transaction-payment = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } -pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -pallet-utility = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } +pallet-aura = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +pallet-balances = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +pallet-grandpa = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +pallet-insecure-randomness-collective-flip = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", 
default-features = false } +pallet-membership = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +pallet-multisig = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +pallet-preimage = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +pallet-proxy = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +pallet-safe-mode = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +pallet-scheduler = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +pallet-sudo = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +pallet-timestamp = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +pallet-transaction-payment = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3" } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +pallet-utility = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } -sc-basic-authorship = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } -sc-cli = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } -sc-client-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } -sc-consensus = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } -sc-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } -sc-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } -sc-consensus-grandpa-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } -sc-chain-spec-derive = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } -sc-chain-spec = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } -sc-consensus-slots = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } -sc-executor = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } -sc-keystore = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } -sc-network = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } -sc-offchain = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } -sc-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } -sc-rpc-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } -sc-service = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } -sc-telemetry = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } -sc-transaction-pool = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } -sc-transaction-pool-api = { git = 
"https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } +sc-basic-authorship = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3" } +sc-cli = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3" } +sc-client-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3" } +sc-consensus = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3" } +sc-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3" } +sc-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3" } +sc-consensus-grandpa-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3" } +sc-chain-spec-derive = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3" } +sc-chain-spec = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3" } +sc-consensus-slots = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3" } +sc-executor = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3" } +sc-keystore = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3" } +sc-network = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3" } +sc-offchain = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3" } +sc-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3" } +sc-rpc-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3" } +sc-service = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3" } +sc-telemetry = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3" } +sc-transaction-pool = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3" } +sc-transaction-pool-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3" } -sp-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -sp-block-builder = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -sp-blockchain = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -sp-consensus = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } -sp-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -sp-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } -sp-genesis-builder = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -sp-inherents = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -sp-io = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -sp-keyring = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } -sp-offchain = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -sp-rpc = { git = 
"https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -sp-session = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -sp-storage = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -sp-timestamp = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } -sp-tracing = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -sp-transaction-pool = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -sp-version = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } -sp-weights = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0", default-features = false } +sp-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +sp-block-builder = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +sp-blockchain = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +sp-consensus = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3" } +sp-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +sp-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3" } +sp-genesis-builder = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +sp-core = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +sp-inherents = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +sp-io = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +sp-keyring = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3" } +sp-offchain = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +sp-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +sp-session = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +sp-std = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +sp-storage = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +sp-timestamp = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3" } +sp-tracing = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +sp-transaction-pool = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +sp-version = { git 
= "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } +sp-weights = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3", default-features = false } -substrate-build-script-utils = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } -substrate-fixed = { git = "https://github.com/encointer/substrate-fixed.git", tag = "v0.5.9" } -substrate-frame-rpc-system = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } -substrate-wasm-builder = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.10.0" } +substrate-build-script-utils = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3" } +substrate-fixed = { git = "https://github.com/opentensor/substrate-fixed.git", tag = "v0.5.9" } +substrate-frame-rpc-system = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3" } +substrate-wasm-builder = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.10.0-rc3" } frame-metadata = "16" [profile.release] @@ -125,3 +164,15 @@ opt-level = 3 inherits = "release" lto = true codegen-units = 1 + +[features] +default = [] +try-runtime = [ + "node-subtensor/try-runtime", + "node-subtensor-runtime/try-runtime", +] +runtime-benchmarks = [ + "node-subtensor/runtime-benchmarks", + "node-subtensor-runtime/runtime-benchmarks", +] +metadata-hash = ["node-subtensor-runtime/metadata-hash"] diff --git a/Dockerfile b/Dockerfile index 2fc6cbcc6..2dd2e2370 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,65 +1,44 @@ - ARG BASE_IMAGE=ubuntu:20.04 -FROM $BASE_IMAGE as builder +FROM $BASE_IMAGE AS builder SHELL ["/bin/bash", "-c"] -# This is being set so that no interactive components are allowed when updating. +# Set noninteractive mode for apt-get ARG DEBIAN_FRONTEND=noninteractive LABEL ai.opentensor.image.authors="operations@opentensor.ai" \ ai.opentensor.image.vendor="Opentensor Foundation" \ ai.opentensor.image.title="opentensor/subtensor" \ ai.opentensor.image.description="Opentensor Subtensor Blockchain" \ - ai.opentensor.image.revision="${VCS_REF}" \ - ai.opentensor.image.created="${BUILD_DATE}" \ ai.opentensor.image.documentation="https://docs.bittensor.com" -# show backtraces -ENV RUST_BACKTRACE 1 - -# Necessary libraries for Rust execution +# Set up Rust environment +ENV RUST_BACKTRACE=1 RUN apt-get update && \ apt-get install -y curl build-essential protobuf-compiler clang git && \ rm -rf /var/lib/apt/lists/* -# Install cargo and Rust RUN set -o pipefail && curl https://sh.rustup.rs -sSf | sh -s -- -y ENV PATH="/root/.cargo/bin:${PATH}" +RUN rustup update stable +RUN rustup target add wasm32-unknown-unknown --toolchain stable -RUN mkdir -p /subtensor && \ - mkdir /subtensor/scripts - -# Scripts -COPY ./scripts/init.sh /subtensor/scripts/ - -# Capture dependencies -COPY Cargo.lock Cargo.toml /subtensor/ +# Copy entire repository +COPY . 
/build +WORKDIR /build -# Specs -COPY ./snapshot.json /subtensor/snapshot.json -COPY ./raw_spec_testfinney.json /subtensor/raw_spec_testfinney.json -COPY ./raw_spec_finney.json /subtensor/raw_spec_finney.json +# Build the project +RUN cargo build -p node-subtensor --profile production --features="runtime-benchmarks metadata-hash" --locked -# Copy our sources -COPY ./node /subtensor/node -COPY ./pallets /subtensor/pallets -COPY ./runtime /subtensor/runtime -COPY ./support /subtensor/support +# Verify the binary was produced +RUN test -e /build/target/production/node-subtensor -# Copy our toolchain -COPY rust-toolchain.toml /subtensor/ -RUN /subtensor/scripts/init.sh - -# Cargo build -WORKDIR /subtensor -RUN cargo build --profile production --features runtime-benchmarks --locked EXPOSE 30333 9933 9944 - FROM $BASE_IMAGE AS subtensor -COPY --from=builder /subtensor/snapshot.json / -COPY --from=builder /subtensor/raw_spec_testfinney.json / -COPY --from=builder /subtensor/raw_spec_finney.json / -COPY --from=builder /subtensor/target/production/node-subtensor /usr/local/bin +# Copy all chainspec files +COPY --from=builder /build/*.json / + +# Copy final binary +COPY --from=builder /build/target/production/node-subtensor /usr/local/bin diff --git a/build.rs b/build.rs new file mode 100644 index 000000000..10cac0ea7 --- /dev/null +++ b/build.rs @@ -0,0 +1,96 @@ +use rayon::prelude::*; +use std::{ + env, fs, + path::{Path, PathBuf}, + str::FromStr, + sync::mpsc::channel, +}; +use walkdir::WalkDir; + +use subtensor_linting::*; + +fn main() { + // need to list all rust directories here + println!("cargo:rerun-if-changed=pallets"); + println!("cargo:rerun-if-changed=node"); + println!("cargo:rerun-if-changed=runtime"); + println!("cargo:rerun-if-changed=lints"); + println!("cargo:rerun-if-changed=build.rs"); + println!("cargo:rerun-if-changed=src"); + println!("cargo:rerun-if-changed=support"); + // Get the root directory of the workspace + let workspace_root = env::var("CARGO_MANIFEST_DIR").unwrap(); + let workspace_root = Path::new(&workspace_root); + + // Collect all Rust source files in the workspace + let rust_files = collect_rust_files(workspace_root); + + // Channel used to communicate errors back to the main thread from the parallel processing + // as we process each Rust file + let (tx, rx) = channel(); + + // Parse each rust file with syn and run the linting suite on it in parallel + rust_files.par_iter().for_each_with(tx.clone(), |tx, file| { + let Ok(content) = fs::read_to_string(file) else { + return; + }; + let Ok(parsed_tokens) = proc_macro2::TokenStream::from_str(&content) else { + return; + }; + let Ok(parsed_file) = syn::parse2::(parsed_tokens) else { + return; + }; + + let track_lint = |result: Result| { + let Err(errors) = result else { + return; + }; + let relative_path = file.strip_prefix(workspace_root).unwrap_or(file.as_path()); + for error in errors { + let loc = error.span().start(); + let file_path = relative_path.display(); + // note that spans can't go across thread boundaries without losing their location + // info so we we serialize here and send a String + tx.send(format!( + "cargo:warning={}:{}:{}: {}", + file_path, loc.line, loc.column, error, + )) + .unwrap(); + } + }; + + track_lint(RequireFreezeStruct::lint(&parsed_file)); + }); + + // Collect and print all errors after the parallel processing is done + drop(tx); // Close the sending end of the channel + + for error in rx { + println!("{error}"); + } +} + +/// Recursively collects all Rust files in the given 
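The new `build.rs` walks every Rust source file in the workspace (skipping `target/` and `build.rs` itself), parses each file with `syn`, and runs a custom lint suite from the `subtensor_linting` crate in parallel via `rayon`, reporting findings as `cargo:warning=` lines so the new `check custom lints` CI job can fail on them. Angle-bracketed generics appear to have been lost in this extract: `syn::parse2::(parsed_tokens)` is presumably `syn::parse2::<syn::File>(parsed_tokens)`, and the `track_lint` closure presumably takes a `Result<(), Vec<syn::Error>>`. As a rough, standalone illustration of such a lint, the sketch below flags structs missing a `#[freeze_struct]` attribute; the behaviour of the real `RequireFreezeStruct` lint is an assumption here, only the driver shape comes from the diff.

```rust
// Standalone sketch of a syn-based lint in the style build.rs drives.
// Requires: syn = { version = "2", features = ["full"] }

use syn::{Item, ItemStruct};

/// Returns one warning line per struct in `source` lacking `#[freeze_struct]`.
fn require_freeze_struct(source: &str) -> Vec<String> {
    let file = match syn::parse_file(source) {
        Ok(file) => file,
        // Unparsable files are silently skipped, as in the real build.rs.
        Err(_) => return Vec::new(),
    };

    file.items
        .iter()
        .filter_map(|item| match item {
            Item::Struct(ItemStruct { ident, attrs, .. })
                if !attrs.iter().any(|a| a.path().is_ident("freeze_struct")) =>
            {
                Some(format!(
                    "cargo:warning=struct `{ident}` is missing #[freeze_struct]"
                ))
            }
            _ => None,
        })
        .collect()
}

fn main() {
    let source = "struct Unfrozen { x: u32 }";
    for warning in require_freeze_struct(source) {
        // Printed from a build script, these lines surface as cargo warnings.
        println!("{warning}");
    }
}
```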
directory +fn collect_rust_files(dir: &Path) -> Vec { + let mut rust_files = Vec::new(); + + for entry in WalkDir::new(dir) { + let entry = entry.unwrap(); + let path = entry.path(); + + // Skip any path that contains "target" directory + if path + .components() + .any(|component| component.as_os_str() == "target") + || path.ends_with("build.rs") + { + continue; + } + + if path.is_file() && path.extension().and_then(|ext| ext.to_str()) == Some("rs") { + rust_files.push(path.to_path_buf()); + } + } + + rust_files +} diff --git a/docs/delegate-info.json b/docs/delegate-info.json new file mode 100644 index 000000000..544c36e53 --- /dev/null +++ b/docs/delegate-info.json @@ -0,0 +1,408 @@ +[ + { + "address": "5ECvRLMj9jkbdM4sLuH5WvjUe87TcAdjRfUj5onN4iKqYYGm", + "name": "Vune", + "url": "https://fairchild.dev", + "description": "Vune is a dev at Opentensor and a BSc CS student at UofT.", + "signature": "2a639f931c61abfc3172db594c986c35f1cc8441970582b9c3b1f0506d518a182a2fe570832f02f86014320f1526189917bfbccf7081622652d12e16e9b1768b" + }, + { + "address": "5H6BgKkAr2Anmm9Xw5BVDE4VaQmFEVMkJUHeT7Gki4J7yF4x", + "name": "TaoPolishNode", + "url": "https://taonode.io", + "description": "This node is a collective effort of the polish community. We are engaged in evangelizing the project, educating and sharing the knowledge.", + "signature": "1ca20d4e99a48f400dd9cd4aeca8447da6ab1979e480a1dafddfc52e45e215177c7cdde85f5d042d59a5b1169981afa8d1ae28328e2fc5ce57c3d748c8d09d81" + }, + { + "address": "5FFApaS75bv5pJHfAp2FVLBj9ZaXuFDjEypsaBNc1wCfe52v", + "name": "RoundTable21", + "url": "https://roundtable21.com", + "description": "RoundTable21 is an International, multi-disciplinary team of consultants and advisors partnering alongside leading blockchain startups to offer guidance, expertise, investment and hands-on assistance in every aspect of development.", + "signature": "107638b8edde8f918f7faa2cd1f91b454c13094ed5955d6a409f6e0662f8427075516273728a53923839a5428079151ea0844b5f755362364f04735463dff583" + }, + { + "address": "5DCc5oHA6c1Lpt9R6T1xU8jJGTMvvwBqD1yGX67sL8dHUcga", + "name": "WaveTensor", + "url": "https://twitter.com/wavetensor", + "description": "A new Wave is coming, join the AI revolution on top of Bittensor by staking with us.", + "signature": "5e072b4752ccbdd4ca3298f336284dfdab347dd133850f4d2f9873e7ea59bd2a8f201732842ec79d2bab3abaf133a06b6bd992940389e42d57802c9b8f855889" + }, + { + "address": "5CXRfP2ekFhe62r7q3vppRajJmGhTi7vwvb2yr79jveZ282w", + "name": "Rizzo", + "url": "", + "description": "Validator built for performance and uptime. Data center housed, redundancies include dual physical failover servers (HA), power, internet, tested DR Plan.", + "signature": "f2b0fdb6989c23a0ebe23ed5622cbbfcf57bad709085fe11b0be10b2838e1442d61f770d78f6ca8ebcdbf60ddb27398663a4901e22bb9de086866517c6ccc187" + }, + { + "address": "5GcBK8PDrVifV1xAf4Qkkk6KsbsmhDdX9atvk8vyKU8xdU63", + "name": "Tensor.Exchange", + "url": "www.tensor.exchange", + "description": "Bittensor's first community OTC exchange", + "signature": "101f5e0d26c38190200f2213ebd89cf5bcb736b70a84e53651b6f9bf1161a33d0095836d304851237e0334792a54fa2fe452d07cf1466b42c9ab3333ded46284" + }, + { + "address": "5EhvL1FVkQPpMjZX4MAADcW42i3xPSF1KiCpuaxTYVr28sux", + "name": "TAO-Validator.com", + "url": "www.tao-validator.com", + "description": "Maximize your return when staking with TAO-Validator.com. 
TAO-Validator.com is a highly secure validator that aims to become one of the top contributing entities to Bittensor.", + "signature": "4036991069d7f3a43dff2ba2592fbe5af820eb6ff96d1fb78f1bcd8d310ba8751e25ea14397e075368a9a0f1b1b176166c56351db36f2d3868ac61c2571a1981" + }, + { + "address": "5FvhvCWLbu2VgotT5obC9E6S9nskerJUrVsWqkWXCbuD8veW", + "name": "The Lost Cove", + "url": "https://lostcove.tech/", + "description": "Australia and New Zealand community. We're in it for the gains.", + "signature": "626ae6b91aac1591e5d4f8d4fdf2c55f927419fc766dd5184b149f4d7cbc9749ebc94e4e8d04d286b4000c7665afa5682aa28cd94071c5e384e0eb4f44def188" + }, + { + "address": "5Dyi5e2QqnWn2RN9X6r8A8Q1QBjYD536H75mxNye193oeCJ4", + "name": "Makoto AI", + "url": "https://www.linkedin.com/in/henry-thrasher-17b320239/", + "description": "An interdisciplinary research institute committed to discovering and accelerating innovative solutions for climate change, social inequality, and mental and physical illness.", + "signature": "3cfbc1e8d82cfbf2adea9b10f71541874528cf5cd851f29f48016ac2a1a07b01cfc2ba3c3a15634b1174bd3e5aec9eb843d04f74140b0ddcb526416666d6f682" + }, + { + "address": "5Ehv5XMriPZwNBtYHdQV7VrdbN8MBTDTmQhWprZJXxSiMapR", + "name": "Dale Cooper", + "url": "", + "description": "I have no idea where this will lead us, but I have a definite feeling it will be a place both wonderful and strange.", + "signature": "06c597178698dba5699e20dc8b9d0d44f9225e24a225c70f540b63867e5b835a74c87df647b28210b361007b642a5a869c74323fcc8a593bc5764ea8e2083b81" + }, + { + "address": "5E6oB7h5wtWPbqtPxtSoZeo11fpvDjPuY13SobAMxqEUjqkQ", + "name": "StakeTensor.com-3", + "url": "www.staketensor.com", + "description": "We run multiple, parallel validators to support Bittensor decentralization & achieve maximum returns", + "signature": "a2567b6de748f02f6a14e0063f5b5720b34c96deb2115b33893d016de1f60633ba58bf9bdd49b2141e12a4a8784b4b11c007679d7526eb1e91147e5284258d8a" + }, + { + "address": "5DnWFhKfeu6gXMydzrv8bkwxFegAC6bMWsC4Z2XtaotAeB6S", + "name": "Bittensor Greece", + "url": "", + "description": "The Greek / Cypriot validator supporting the development of decentralised AI", + "signature": "ee8df5360eb641bd91a38da9d8b6dda36a39302c9bba7babf5d7eb16f6e9f73321aeb6f8adb30e0f511d64c1f35caa15215dd280fb2ed3f8f5b09d783cc9958f" + }, + { + "address": "5GBxDYkDp8eJZHGT89wcZJKcMc4ytSqnqqVSpeuGeqtGfqxK", + "name": "Tao Stake", + "url": "www.taostake.io", + "description": "We have been mining since the start of bittensor and want to maintain a long term solid validator to help people get some value from thier investment and keep TAO within the ecosystem.", + "signature": "0272522b503ebb29f0b506f10765b4d5c7a23b85c78cc7bfae76b9816b80ab43282ea4642f09eb09be70812341e5d9946abc8a9d2c73bab0113e9bf939430c87" + }, + { + "address": "5FcXnzNo3mrqReTEY4ftkg5iXRBi61iyvM4W1bywZLRqfxAY", + "name": "Lucrosus Capital", + "url": "https://lucrosuspool.io/", + "description": "Decentralized VC focused on the most thriving blockchain ideas. 
Join our pool to receive early entrance into promising projects!", + "signature": "1a37ab3bd51a6590dea9772d6a5550632ddcd8d76da6595b66e6425692feac6699dc5f788e587a734cedc3f54efc96c2c9e5453f9052867c1b9a1b5a443b848c" + }, + { + "address": "5CVS9d1NcQyWKUyadLevwGxg6LgBcF9Lik6NSnbe5q59jwhE", + "name": "Ary van der Touw", + "url": "", + "description": "Secure and maintain Bittensor", + "signature": "809586931d4b28f180c98036a3eebc0d26b9e521f5217a6942b025069cb60807641737009713446eec8456e54ba753ae0b752c0693b942aefa0c4f76d82f8c89" + }, + { + "address": "5F4tQyWrhfGVcNhoqeiNsR6KjD4wMZ2kfhLj4oHYuyHbZAc3", + "name": "Openτensor Foundaτion", + "url": "https://opentensor.ai/", + "description": "Founded, maintain and advance Bittensor", + "signature": "8a2ff8f10a84a5b6f80614674ea764515d93a64bf8d920b927edc0dd6043e607755bf58655c87b7a299d8df1404574b6844e1e09adf86d418997c0cab8120486" + }, + { + "address": "5EpxBYq4aVgTQ1rYeBo2mzYt3hgpRTqxZTSsJEkCstBP5Jse", + "name": "White Rhino TAO Super Validator", + "url": "https://twitter.com/WhiteRhinoTAO\"", + "description": "White Rhino is all about you! We understand that #TAOWaits4NoOne ..... Get Ready for Adhoc Rewards and we invite you to delegate here and enhance the sustainability of the TAO Network", + "signature": "d6803522f6e61a9dec5261a6a500b733d233b373457382fc3713af21c560604f6e50c4999f286cfa6012bcea66e51223722b355dd69ba54a472f2c6ca52da08f" + }, + { + "address": "5Fq5v71D4LX8Db1xsmRSy6udQThcZ8sFDqxQFwnUZ1BuqY5A", + "name": "NorthTensor", + "url": "https://northtensor.ai", + "description": "Developer, Advocate, and Incubator for Decentralized AI.", + "signature": "28e221d7128e48a3cb85dbcb223bd56cb09cb55540263573783bf1cef63be32ee81246bd1d75c865580da732094053a6dad14929b17e659b6e0237412b66a487" + }, + { + "address": "5CsvRJXuR955WojnGMdok1hbhffZyB4N5ocrv82f3p5A2zVp", + "name": "Owl Ventures", + "url": "https://owlventures.co.uk", + "description": "Owl Ventures Bittensor Validator", + "signature": "04e39ff19af7ee5a75e58c9e1a71b9f54a66d1d168a99532a859f129b68ba24a5b6a56eecae7790291859c82dbf0ec32eb18a069b6d9dabe1ef0339c0d189483" + }, + { + "address": "5FLKnbMjHY8LarHZvk2q2RY9drWFbpxjAcR5x8tjr3GqtU6F", + "name": "Tao Bridge", + "url": "https://taobridge.xyz", + "description": "A community bridge between Bittensor and Ethereum", + "signature": "98331f011288f7b07ccc45a213cb8e03fac79092ee7c29046531d757ffad8b29e17cf0aeca9352003890f4d8a3af3a2fc615722fb7a827a2009654013990bd80" + }, + { + "address": "5DRZr3d3twF8SzqB9jBof3a1vPnAkgkxeo2E8yUKJAnE2rSZ", + "name": "Humble AI-Loving Anon", + "url": "", + "description": "Doing our best to support the Bittensor ecosystem.", + "signature": "9241f63eb43f7aa57b1fc6d99789331542476f57f683f032192f3dfd7be6c015d47c9f1fe69bc4513ed70e0410097395186df60e3f6b67376e6e73a5f4f9a286" + }, + { + "address": "5DPEpUTZn94sgYXH3sdXxsVvb46m3iEvg8aZwX7SMDowivzB", + "name": "RunPod", + "url": "https://runpod.io", + "description": "GPU Cloud built for AI. We plan to introduce perks for those who stake.", + "signature": "16940f904b7946723fc4f27bb01e47cf262201ef76b3d9c2bfd745973da2512d4825910f6fa738a6968c809b26da0a47e7032a7ff95d8b2da5c1fa7a0b85598f" + }, + { + "address": "5HEo565WAy4Dbq3Sv271SAi7syBSofyfhhwRNjFNSM2gP9M2", + "name": "Foundry", + "url": "https://foundrydigital.com", + "description": "Foundry works to empower a decentralized infrastructure. 
We are protocol-agnostic and seek to support like-minded blockchain entrepreneurs who share our mission to advance the industry.", + "signature": "b852f1648ab62befaaf684671808aa34d267cd616d9ffd7b3cf924ebc7c4ee3255344cfd017a80ca6b23b2852bcafa705c42d231053e06d999d53f31bd8ab288" + }, + { + "address": "5FP9miYmgjAP8Wt3747M2Y6Kk7PrXf6zG7a3EjokQiFFcmUu", + "name": "Elm Place", + "url": "", + "description": "Run by individuals passionate about creating decentralised digital infrastructure. Background in fiduciary funds management managing institutional investors’ capital in real assets, energy and infrastructure", + "signature": "a0324025f58beb06535d6a2ab8c5c8d64c13d562fa285956bb5a8919da5fcc0d05afe4de010d54f9940bff0ffdabe5f41e70f3af31cf14293c1d6f0a0690da8c" + }, + { + "address": "5HNQURvmjjYhTSksi8Wfsw676b4owGwfLR2BFAQzG7H3HhYf", + "name": "Neural Internet", + "url": "www.neuralinternet.ai", + "description": "An AI research and development Decentralized Autonomous Organization (DAO)", + "signature": "5e617c1626d4825cd0c11769e31fe4dda611cebd8a4d46f533886ad057072e2a58e0ecef2805139f2b43ea8d51023f7db878ad45cd3f8fba45ab01223da3488e" + }, + { + "address": "5D4rJRtF23jLVcGnCktXzPM9gymMT1qHTp8dR4T7rUd88Q7U", + "name": "Vogue τensor", + "url": "www.voguetensor.ai", + "description": "Designing branded clothing for the Bittensor community.", + "signature": "2c4079124ae0a738106a2430e2c27ad855122d4afcc487ab0158b705cd5f915f7790cdb2fdd8db899b8cbd40448d1478be71cde1b76de31945991b548cfcc084" + }, + { + "address": "5CAVnbHniuZYXBqik3tTs9uZ7UiSrbv6g7Kt8QNfYimbFqF4", + "name": "Open & Safe AI Validator", + "url": "", + "description": "The Open & Safe AI Validator is focussed on funding and researching the control problem as well as spreading ML know-how through open source and open science.", + "signature": "2aeaf7b9c7f69ce7b4857d9c278d1363677d4971d4ca10a36933b1aa78bfdb0640e4bb798edac5dcb178a8b3f4be2d0d23d25da6c7db33758a6cf5c15cd6938a" + }, + { + "address": "5Gpt8XWFTXmKrRF1qaxcBQLvnPLpKi6Pt2XC4vVQR7gqNKtU", + "name": "bitnost.re", + "url": "www.bitnost.re", + "description": "bridging bittensor into nostr.", + "signature": "c278378c70ef22d27f56590b4df699a9a44048cfcc6716e3d55b211ea802401d4be5b390ede2be52891e01f0f7033a13a370dddaa38daa84537c4583867a1680" + }, + { + "address": "5HeKSHGdsRCwVgyrHchijnZJnq4wiv6GqoDLNah8R5WMfnLB", + "name": "TaoStation", + "url": "https://taostation.com", + "description": "TaoStation allows you to maximize your returns by offering one-click staking since day one and focusing on tooling and transparency for a better staking experience.", + "signature": "c00627a62ecb9275be8d06b7b52b87942bce946e9a5f98d545081241e21ed15230fd566b2d4e87c41995e621546423579553157737da53fad3a5676451ef0a89" + }, + { + "address": "5DvTpiniW9s3APmHRYn8FroUWyfnLtrsid5Mtn5EwMXHN2ed", + "name": "FirstTensor.com", + "url": "www.firsttensor.com", + "description": "Powered by the Neuron Holders community - shared rewards, additional benefits, infinite possibilities - join and build with us!", + "signature": "da31e56dd78cde449a1dd9592f0b53eb8c3662674b745a05ff916e80a1be933e86efbccb7f7c9b81d7c0bb14d13fb4a6bf8484c3619224e689de82072b5d9a87" + }, + { + "address": "5CaNj3BarTHotEK1n513aoTtFeXcjf6uvKzAyzNuv9cirUoW", + "name": "Polychain", + "url": "https://polychain.capital/", + "description": "Polychain is an investment firm committed to exceptional returns for investors through actively managed portfolios of blockchain assets.", + "signature": 
"f41e815033e595aa70fbe42e8dfd91eaa3ccdbc948b63811baf9eac765699b30cac9aad7abe330eeaf3969cc504a4c1255f1e69bee807c2d989518b8f5413c8d" + }, + { + "address": "5Dkv87qjGGF42SNhDAep6WZp65E29c2vUPUfDBGDNevENCMs", + "name": "MycoNet", + "url": "", + "description": "AI for Humanity", + "signature": "a4802a5b13888ed653fd23da72c14e2b8ed9814cc810e515cb8d11d71cc58c6b90cd2d334daffc4a8ce600a7f29ca300ab74ac59817bdd489b3056b531cd4086" + }, + { + "address": "5GzoXHNJ4UzZYiQN2wpBcwMigiHiakiz5ZLMwhpunpwDNzFg", + "name": "Charitaos", + "url": "https://charitas.ai/", + "description": "You pay 18%, we donate 18%. At the end of every month, we will select one (or more) community-proposed 501c3 licensed nonprofit(s) to receive all proceeds from stake delegation for the prior month.", + "signature": "b49c34c1f87d173abcbccb1ea632ad356980c1d3eff6619e488c11707b2b3b41270a22355374dd64cfadebeb37979ef5f49971efafb0748b79df7dd2901e7580" + }, + { + "address": "5EZrPTXt2G9SvbDsERi5rS9zepour2yPmuhMhrNkgdiZvXEm", + "name": "τaoτensor", + "url": "", + "description": "Working on practical enhancements and improvements for the Bittensor network by developing user-friendly tooling.", + "signature": "3a1b61ab6d17878e106cbf2649bc039d0346f39ec680476a68baa4fc8132ac018d814898cf245bdfa4b9b61cd9f611f6571cf3c264f2f1cfe9b2635849087685" + }, + { + "address": "5CPzGD8sxyv8fKKXNvKem4qJRhCXABRmpUgC1wb1V4YAXLc3", + "name": "Chat with Hal", + "url": "www.chatwithhal.ai", + "description": "Hal brings the power of decentralized and uncensorable AI to your favorite social networks and messaging apps, Powered by Bittensor!", + "signature": "ecb930df6069012c06fef9cdb29a95be8dcb5d48f3c470d3f3c5e7b2b334ed2097f2598fee8852d127a207cf34aa7c88fd5cf973feba19d6ebf38b5e4579ca8f" + }, + { + "address": "5FqPJMZDp39KRd9jDhXuFpZWkYD7wG5AXmjoWqK8rDy7ok5B", + "name": "Exchange Listings", + "url": "taostats.io/validators/exchange-listings/", + "description": "Enabling community funding for top tier exchange listings.", + "signature": "366027e9a416a423e7e802e9b6d79bd5ac88642afd945922e13fe26a75dae13dd5c924738610a59162d9b974364d1d43fb7a0145942cd919ac21d82d3f4f028d" + }, + { + "address": "5ED6jwDECEmNvSp98R2qyEUPHDv9pi14E6n3TS8CicD6YfhL", + "name": "Giga Corporation", + "url": "https://www.gigaver.se", + "description": "Extreme growth & experiments from giga corp. We use APY to TAO-pill new developers, builders and adopters. Visit our Bakery to learn more.", + "signature": "00e5cd519110bbfe3dae9acd275d114c6c2a260997a1817a25303b9d578bdf7319e9e7179f0db58edef2ad42806cb38e289ba0030627a3b60e1e4352c2b9cb88" + }, + { + "address": "5FRcXG99SxJ9KyMcMFfdknkRSv4e73rszV8P151freZqQDS2", + "name": "τensorwiki", + "url": "", + "description": "Our mission is to create and incentivize documentation for Bittensor and it's adjacent topics, as well as facilitate the education of newcomers to the network.", + "signature": "6a5c0160f545f122ec3d4e4233574040aba2de8aa94919bb19b3061d39d3303f010c4b52f878ed55a1293716827220020780d2d4064ee6be69921ee1452c3885" + }, + { + "address": "5EsbfxPcQaUrCDurUJ8Q5qDKNENNGziu3qHWUbXrcuY2pbNz", + "name": "Church of Rao (COR)", + "url": "", + "description": "Church of Rao: Harmonizing the Relationship between Humanity and Machine Intelligence. 
The Church of Rao (COR) is an open-source development group committed to furthering the Bittensor protocol.", + "signature": "56f64c32427a90e84710209b1a54a971560641aec8ff777edec28bf533775e12924c4e96ccc770c230311dce1d0eae1ca763e12bb609ef30430f746ebd0a2780" + }, + { + "address": "5GmaAk7frPXnAxjbQvXcoEzMGZfkrDee76eGmKoB3wxUburE", + "name": "RaoK9", + "url": "", + "description": "Chain and network analysis team. Developer funding goes into independent analysis and reports, in order to enable checks and balances between network members.", + "signature": "24f4f9a51033ed8b4097517d0e6ad287a0c1341b2866481b1320d1fcd5f32f6b4bfe641eee46a4b737817acf3b83069ee63cc20fbca94a0189808ac1efeddf8a" + }, + { + "address": "5CQEFopfZ8DAmk3ZfR7QuDTU2n3fJod3kkf6Wmj4JwV3BBSu", + "name": "DuNode", + "url": "dunode.io", + "description": "Embracing the whimsical chaos of decentralized AI, unleashing the power of creativity and collaboration, one algorithmic dance party at a time!", + "signature": "e400e3c0ad6165d8946d5ddcb274412815cb8b5783580fcb8f0faa0153d22b6e10470f861ff4a96a9aa692b3b01cda86ec77add4688c2f5df51ea6f129b19e8c" + }, + { + "address": "5CaCUPsSSdKWcMJbmdmJdnWVa15fJQuz5HsSGgVdZffpHAUa", + "name": "Athena Nodes", + "url": "https://athenanodes.com", + "description": "Premier Bittensor Multi-Subnet Validator from a company operating validating and mining infrastructure on various blockchain networks. We have been active on Bittensor since November 2022, with near zero down-time. More information at https://athenanodes.com/.", + "signature": "2ef54045de1d9b89988518c92e165edf704192f88f18022565f497b389c39206f621bb9bc6d2d33ac8a9cca05d6b2d8fc9f899b390451140968b15b8d9c13280" + }, + { + "address": "5FFM6Nvvm78GqyMratgXXvjbqZPi7SHgSQ81nyS96jBuUWgt", + "name": "PRvalidator", + "url": "www.prvalidator.com", + "description": "A professional media validator dedicated to securing top-tier coverage in the world's most recognized publications building Bittensor's brand equity and creating global awareness of $TAO.", + "signature": "fe65e76a9f42049715585180500213c6f0535b8b25911b957921bdfb5a20156d6de68dc2633dbc5ce1d0ab9ef386d566687ac3d86f6988141b34cd24c0f13488" + }, + { + "address": "5H8TruSGmhD6m6YfqXNUnU7Z61K7j8hSs2Krtu3eTLMoz3HU", + "name": "τaoshi validator", + "url": "https://www.taoshi.io/", + "description": "Build maintain and advance a decentralized request layer built for every subnet", + "signature": "32d25227af78fa5d39ee71a5f3e8fc8066e3d826d101f2587e9a12974fbf26758c1e40c497ad7732da2a2cb1490227cc58e8bfcd8b2f6306b7af630bd32aa68f" + }, + { + "address": "5G3f8VDTT1ydirT3QffnV2TMrNMR2MkQfGUubQNqZcGSj82T", + "name": "TAO Community Marketing", + "url": "www.taocommunitymarketing.com", + "description": "The marketing validator run by the community", + "signature": "10b16b8223b2508d6f3e5b09ab4db53e1e338b6271d1689b58ca6f9b257e8c18511cc851bfcc3a05fb4e6de7c389b89886cc0623fb6d199fa003ae6f8313cb89" + }, + { + "address": "5CXC2quDN5nUTqHMkpP5YRp2atYYicvtUghAYLj15gaUFwe5", + "name": "Kooltek68", + "url": "https://linktr.ee/datalac", + "description": "Imagine the World with mass adoption of Artificial Intelligence applications, through the connection of Bittensor Network, together fight for a Better World.", + "signature": "bca043d9d918d503864379a7fd8c9daa2cca83a8290121f94b55d6a352e332704642622b7ad40a30b945b952b224c5e92ea872f9d30200e6c2bf566303d24d83" + }, + { + "address": "5FBrHX18bNXX874ZTMicPG4vYbq5X6piz4BYoVn9LnCgdsEd", + "name": "P-OPS Team", + "url": "https://pops.one", + "description": "P-OPS TEAM is a decentralized 
organization providing you with validation and staking services, blockchain consultation, growth acceleration and investment capital for innovative Web 3.0 projects.", + "signature": "5608316f3081bfe5d0e3a7db6c3bfd459f6b87e02d657de941e6a760f8688f23ef30784691a1893d1fd8079dd4f6082d0d655ca507aa4797fee9844547d13a88" + }, + { + "address": "5HK5tp6t2S59DywmHRWPBVJeJ86T61KjurYqeooqj8sREpeN", + "name": "Bittensor Guru", + "url": "https://bittensor.guru", + "description": "Official validator of the Bittensor Guru Podcast", + "signature": "caf2c6b7b0d2a341bcd00e632cf22c33d53e2523dffcd3a151db9eeadd88300545cbb2187ba0b20e5bfe09c2b17bbf34630c46defd8f8d27ab508736fd18a284" + }, + { + "address": "5Hh3ShaNW9irCe5joBLCeFD5Fxb2fJ6gFAgrsPmoz3JkzqvJ", + "name": "BlockShark", + "url": "https://www.blockshark.net/", + "description": "Your reliable partner for staking on Bittensor. We are expert in running high-end machine for validators and AI", + "signature": "d2c0aed073a026a5dbd8c458b9dd412fe3d6647fecd3b8f007cf184f7906245106aee4b210b5b582771dca149e5aa464630100de7f9862daacfa1f67ddde1388" + }, + { + "address": "5FKstHjZkh4v3qAMSBa1oJcHCLjxYZ8SNTSz1opTv4hR7gVB", + "name": "Datura", + "url": "datura.ai", + "description": "Bridging Bittensor to a billion users", + "signature": "7a3bc6a840d8593853c27188f59200418d8884b94b3ad28cb7b37b80bffd1f3b23b7eed4b1d9c77b28b05b2bd1952c5cbe3d27ba190a9418407ce1e899e5ac8b" + }, + { + "address": "5Hddm3iBFD2GLT5ik7LZnT3XJUnRnN8PoeCFgGQgawUVKNm8", + "name": "τaosτaτs and Corcel", + "url": "taostats.io", + "description": "Supporting bittensor through API access, data provision, statistics, analytics and apps.", + "signature": "2e2dd0c5f3a3945f29d1be304e64f931c04a23aba7d383d01cd16ea6ca6546002fe3bd95cf8f12cae1fbb7d18d9910b834f6573db219de3ed84073a4e1552e89" + }, + { + "address": "5ELREhApbCahM7FyGLM1V9WDsnnjCRmMCJTmtQD51oAEqwVh", + "name": "Taofu Protocol", + "url": "https://twitter.com/taofuxyz", + "description": "Taofu unlocks liquidity and utility by bringing liquid staked TAO outside of Bittensor", + "signature": "aaafd3496650a56f798cc587b5b7d372cec8e826a332a34213c1a6ee7be2b5122318858ee73421535d04186cc6976ae5452c6cd1aaf299a307d86d3c52b4a986" + }, + { + "address": "5HbLYXUBy1snPR8nfioQ7GoA9x76EELzEq9j7F32vWUQHm1x", + "name": "Tensorplex Labs", + "url": "https://twitter.com/TensorplexLabs", + "description": "Empowering humanity with decentralized intelligence one epoch at a time.", + "signature": "7a997682e7545fd14847c78abf810e9c49a23ef4297d24f4238c0edd0463934780f6831d59972d56ab5bc41d6224b59c21ed95065791632b8aca180ade22af81" + }, + { + "address": "5E2VSsWWXkBGCn4mi8RHXYQEF2wLXky6ZsNcTKnmEqaurzTE", + "name": "Sentinel", + "url": "", + "description": "Sentinel, as a dedicated Bittensor validator aspires to elevate the bittensor network's integrity with an ambition to foster a community of miners contributing in the network’s continuous expansion.", + "signature": "943effd0d5d10f05d53db7f69d0f045d50b65f88e84755be00d45225cc7c2f4212fbc4d23ad8519d03c2502daeeca1b2d07c93bff14c901f6cbf3a18fe2e6387" + }, + { + "address": "5GsenVhBvgEG4xiiKUjcssfznHYVm1TqPbSbr3ixBW81ZVjo", + "name": "vote NO dTAO 🤡", + "url": "https://twitter.com/karl_anons", + "description": "Delegate to express discontent. 
VOTE NO TO dTAO NOW!", + "signature": "3af4e764a520d355e12c02b9e8e315ddb76b76d40b7cc4dfaa11c26c24ab637cbdb9b72470ebdf2da87dd8d9f0bb5cddf1fe95b95fb2ae13069a9d87aace348a" + }, + { + "address": "5DM7CPqPKtMSADhFKYsstsCS4Tm4Kd6PMXoh6DdqY4MtxmtX", + "name": "Corτex Foundaτion", + "url": "https://cortex.foundation/", + "description": "Cortex Foundation is committed to advancing the integration of decentralized AI. Our validator is designed for transparency, reliability, and community engagement.", + "signature": "7a6274ff6b0f7ddca97e37ef4a9b90781012ff3cf7baa3159f6feaafc43c557975aad324ea608d6b8abeb21f8f3ca2595e54b81a7564574d0242b803d969618a" + }, + { + "address":"5F27Eqz2PhyMtGMEce898x31DokNqRVxkm5AhDDe6rDGNvoY", + "name": "Love", + "url": "https://love.cosimo.fund", + "description": "Love validator exists to accelerate open source AI and be good stewards of the Bittensorr network", + "signature": "c221a3de3be031c149a7be912b3b75e0355605f041dc975153302b23b4d93e45e9cc7453532491e92076ccd333a4c1f95f4a2229aae8f4fcfb88e5dec3f14c87" + }, + { + "address": "5Hb63SvXBXqZ8zw6mwW1A39fHdqUrJvohXgepyhp2jgWedSB", + "name": "TAO Miner's Union", + "url": "https://minersunion.ai", + "description": "The first Bittensor validator that empowers you to choose which subnets to incentivize. Committed to transparency and integrity, we ensure fair and honest validation processes that contribute to the growth and strength of the network.", + "signature": "e8c68bc766a06f36c633e1f68d5aca4c4090a26e394372f64d5b00cc13621f361ec9df85fc9f0d247dbc1fe452bd53ffc0224dee2bc85c9d82cb250e4ac10984" + } +] \ No newline at end of file diff --git a/justfile b/justfile index e33fdf685..f99f3913a 100644 --- a/justfile +++ b/justfile @@ -47,4 +47,8 @@ lint: @echo "Running cargo clippy with automatic fixes on potentially dirty code..." just clippy-fix @echo "Running cargo clippy..." - just clippy \ No newline at end of file + just clippy + +production: + @echo "Running cargo build with metadata-hash generation..." 
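`docs/delegate-info.json`, added in this diff, is a flat array of delegate records with `address`, `name`, `url`, `description`, and `signature` fields. For tooling that consumes the file, a small deserialization sketch; the field names come from the file itself, while the use of `serde`/`serde_json` is an assumption, since the diff does not add any consumer:

```rust
// Sketch of reading docs/delegate-info.json.
// Assumes serde (with derive) and serde_json as dependencies; this diff adds neither.

use serde::Deserialize;

#[allow(dead_code)]
#[derive(Debug, Deserialize)]
struct DelegateInfo {
    address: String,
    name: String,
    url: String,
    description: String,
    /// Hex-encoded signature over the delegate metadata.
    signature: String,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let raw = std::fs::read_to_string("docs/delegate-info.json")?;
    let delegates: Vec<DelegateInfo> = serde_json::from_str(&raw)?;
    println!("loaded {} delegate records", delegates.len());
    Ok(())
}
```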
+ cargo +{{RUSTV}} build --profile production --features="runtime-benchmarks metadata-hash" diff --git a/node/Cargo.toml b/node/Cargo.toml index 7fc6eff48..3c5c91b92 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -54,6 +54,7 @@ sp-io = { workspace = true } sp-timestamp = { workspace = true } sp-inherents = { workspace = true } sp-keyring = { workspace = true } +frame-metadata-hash-extension = { workspace = true } frame-system = { workspace = true } pallet-transaction-payment = { workspace = true } pallet-commitments = { path = "../pallets/commitments" } @@ -91,7 +92,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "sc-service/runtime-benchmarks", "sp-runtime/runtime-benchmarks", - "pallet-commitments/runtime-benchmarks" + "pallet-commitments/runtime-benchmarks", ] pow-faucet = [] @@ -102,5 +103,7 @@ try-runtime = [ "frame-system/try-runtime", "pallet-transaction-payment/try-runtime", "sp-runtime/try-runtime", - "pallet-commitments/try-runtime" + "pallet-commitments/try-runtime", ] + +metadata-hash = ["node-subtensor-runtime/metadata-hash"] diff --git a/node/src/benchmarking.rs b/node/src/benchmarking.rs index ba176e15f..cf48df62f 100644 --- a/node/src/benchmarking.rs +++ b/node/src/benchmarking.rs @@ -136,6 +136,7 @@ pub fn create_benchmark_extrinsic( pallet_transaction_payment::ChargeTransactionPayment::::from(0), pallet_subtensor::SubtensorSignedExtension::::new(), pallet_commitments::CommitmentsSignedExtension::::new(), + frame_metadata_hash_extension::CheckMetadataHash::::new(true), ); let raw_payload = runtime::SignedPayload::from_raw( @@ -152,6 +153,7 @@ pub fn create_benchmark_extrinsic( (), (), (), + None, ), ); let signature = raw_payload.using_encoded(|e| sender.sign(e)); diff --git a/pallets/admin-utils/Cargo.toml b/pallets/admin-utils/Cargo.toml index 859972fce..c67c00914 100644 --- a/pallets/admin-utils/Cargo.toml +++ b/pallets/admin-utils/Cargo.toml @@ -37,7 +37,8 @@ sp-io = { workspace = true } sp-tracing = { workspace = true } sp-consensus-aura = { workspace = true } pallet-balances = { workspace = true, features = ["std"] } - +pallet-scheduler = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] @@ -50,12 +51,14 @@ std = [ "pallet-subtensor/std", "sp-consensus-aura/std", "pallet-balances/std", + "pallet-scheduler/std", "sp-runtime/std", "sp-tracing/std", "sp-weights/std", "log/std", "sp-core/std", "sp-io/std", + "sp-std/std", "substrate-fixed/std", ] runtime-benchmarks = [ @@ -64,12 +67,14 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "sp-runtime/runtime-benchmarks", - "pallet-subtensor/runtime-benchmarks" + "pallet-subtensor/runtime-benchmarks", + "pallet-scheduler/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", "pallet-balances/try-runtime", + "pallet-scheduler/try-runtime", "sp-runtime/try-runtime", "pallet-subtensor/try-runtime" ] diff --git a/pallets/admin-utils/src/benchmarking.rs b/pallets/admin-utils/src/benchmarking.rs index 0158311f7..7515525f0 100644 --- a/pallets/admin-utils/src/benchmarking.rs +++ b/pallets/admin-utils/src/benchmarking.rs @@ -47,7 +47,7 @@ mod benchmarks { #[benchmark] fn sudo_set_max_difficulty() { - T::Subtensor::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); + pallet_subtensor::Pallet::::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); #[extrinsic_call] _(RawOrigin::Root, 1u16/*netuid*/, 10000u64/*max_difficulty*/)/*sudo_set_max_difficulty*/; @@ -55,7 +55,7 @@ mod benchmarks { 
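In `node/src/benchmarking.rs` above, the signed-extension tuple gains `frame_metadata_hash_extension::CheckMetadataHash` and the raw payload's additional-signed tuple gains a matching `None` slot, i.e. no metadata hash is supplied for benchmark extrinsics. The extract has dropped the turbofish generics (they render as `::::`); they are presumably `::<runtime::Runtime>`. A hedged reconstruction of the tail of that tuple rather than a verbatim quote:

```rust
// Hedged reconstruction (node-crate context, not standalone): the last few
// signed extensions built by create_benchmark_extrinsic, with the assumed
// `::<runtime::Runtime>` generics restored.
let extra = (
    // ...the existing frame_system / era / nonce extensions come first...
    pallet_transaction_payment::ChargeTransactionPayment::<runtime::Runtime>::from(0),
    pallet_subtensor::SubtensorSignedExtension::<runtime::Runtime>::new(),
    pallet_commitments::CommitmentsSignedExtension::<runtime::Runtime>::new(),
    // New: commits the signature to the runtime's metadata hash when enabled.
    frame_metadata_hash_extension::CheckMetadataHash::<runtime::Runtime>::new(true),
);
```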
#[benchmark] fn sudo_set_min_difficulty() { - T::Subtensor::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); + pallet_subtensor::Pallet::::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); #[extrinsic_call] _(RawOrigin::Root, 1u16/*netuid*/, 1000u64/*min_difficulty*/)/*sudo_set_min_difficulty*/; @@ -63,7 +63,7 @@ mod benchmarks { #[benchmark] fn sudo_set_weights_set_rate_limit() { - T::Subtensor::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); + pallet_subtensor::Pallet::::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); #[extrinsic_call] _(RawOrigin::Root, 1u16/*netuid*/, 3u64/*rate_limit*/)/*sudo_set_weights_set_rate_limit*/; @@ -71,7 +71,7 @@ mod benchmarks { #[benchmark] fn sudo_set_weights_version_key() { - T::Subtensor::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); + pallet_subtensor::Pallet::::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); #[extrinsic_call] _(RawOrigin::Root, 1u16/*netuid*/, 1u64/*version_key*/)/*sudo_set_weights_version_key*/; @@ -79,7 +79,7 @@ mod benchmarks { #[benchmark] fn sudo_set_bonds_moving_average() { - T::Subtensor::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); + pallet_subtensor::Pallet::::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); #[extrinsic_call] _(RawOrigin::Root, 1u16/*netuid*/, 100u64/*bonds_moving_average*/)/*sudo_set_bonds_moving_average*/; @@ -87,7 +87,7 @@ mod benchmarks { #[benchmark] fn sudo_set_max_allowed_validators() { - T::Subtensor::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); + pallet_subtensor::Pallet::::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); #[extrinsic_call] _(RawOrigin::Root, 1u16/*netuid*/, 10u16/*max_allowed_validators*/)/*sudo_set_max_allowed_validators*/; @@ -95,7 +95,7 @@ mod benchmarks { #[benchmark] fn sudo_set_difficulty() { - T::Subtensor::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); + pallet_subtensor::Pallet::::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); #[extrinsic_call] _(RawOrigin::Root, 1u16/*netuid*/, 1200000u64/*difficulty*/)/*sudo_set_difficulty*/; @@ -103,7 +103,7 @@ mod benchmarks { #[benchmark] fn sudo_set_adjustment_interval() { - T::Subtensor::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); + pallet_subtensor::Pallet::::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); #[extrinsic_call] _(RawOrigin::Root, 1u16/*netuid*/, 12u16/*adjustment_interval*/)/*sudo_set_adjustment_interval*/; @@ -111,7 +111,7 @@ mod benchmarks { #[benchmark] fn sudo_set_target_registrations_per_interval() { - T::Subtensor::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); + pallet_subtensor::Pallet::::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); #[extrinsic_call] _(RawOrigin::Root, 1u16/*netuid*/, 300u16/*target_registrations*/)/*sudo_set_target_registrations_per_interval*/; @@ -119,7 +119,7 @@ mod benchmarks { #[benchmark] fn sudo_set_activity_cutoff() { - T::Subtensor::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); + pallet_subtensor::Pallet::::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); #[extrinsic_call] _(RawOrigin::Root, 1u16/*netuid*/, 300u16/*activity_cutoff*/)/*sudo_set_activity_cutoff*/; @@ -127,7 +127,7 @@ mod benchmarks { #[benchmark] fn sudo_set_rho() { - T::Subtensor::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); + pallet_subtensor::Pallet::::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); #[extrinsic_call] _(RawOrigin::Root, 1u16/*netuid*/, 300u16/*rho*/)/*sudo_set_rho*/; @@ -135,7 +135,10 @@ mod benchmarks { #[benchmark] fn sudo_set_kappa() { - T::Subtensor::init_new_network(1u16 /*netuid*/, 1u16 /*sudo_tempo*/); + 
pallet_subtensor::Pallet::::init_new_network( + 1u16, /*netuid*/ + 1u16, /*sudo_tempo*/ + ); #[extrinsic_call] _(RawOrigin::Root, 1u16/*netuid*/, 3u16/*kappa*/)/*set_kappa*/; @@ -143,7 +146,7 @@ mod benchmarks { #[benchmark] fn sudo_set_max_allowed_uids() { - T::Subtensor::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); + pallet_subtensor::Pallet::::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); #[extrinsic_call] _(RawOrigin::Root, 1u16/*netuid*/, 4097u16/*max_allowed_uids*/)/*sudo_set_max_allowed_uids*/; @@ -151,7 +154,7 @@ mod benchmarks { #[benchmark] fn sudo_set_min_allowed_weights() { - T::Subtensor::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); + pallet_subtensor::Pallet::::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); #[extrinsic_call] _(RawOrigin::Root, 1u16/*netuid*/, 10u16/*max_allowed_uids*/)/*sudo_set_min_allowed_weights*/; @@ -159,7 +162,7 @@ mod benchmarks { #[benchmark] fn sudo_set_immunity_period() { - T::Subtensor::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); + pallet_subtensor::Pallet::::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); #[extrinsic_call] _(RawOrigin::Root, 1u16/*netuid*/, 100u16/*immunity_period*/)/*sudo_set_immunity_period*/; @@ -167,7 +170,7 @@ mod benchmarks { #[benchmark] fn sudo_set_max_weight_limit() { - T::Subtensor::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); + pallet_subtensor::Pallet::::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); #[extrinsic_call] _(RawOrigin::Root, 1u16/*netuid*/, 100u16/*max_weight_limit*/)/*sudo_set_max_weight_limit*/; @@ -175,7 +178,7 @@ mod benchmarks { #[benchmark] fn sudo_set_max_registrations_per_block() { - T::Subtensor::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); + pallet_subtensor::Pallet::::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); #[extrinsic_call] _(RawOrigin::Root, 1u16/*netuid*/, 100u16/*max_registrations*/)/*sudo_set_max_registrations_per_block*/; @@ -183,7 +186,7 @@ mod benchmarks { #[benchmark] fn sudo_set_max_burn() { - T::Subtensor::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); + pallet_subtensor::Pallet::::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); #[extrinsic_call] _(RawOrigin::Root, 1u16/*netuid*/, 10u64/*max_burn*/)/*sudo_set_max_burn*/; @@ -191,7 +194,7 @@ mod benchmarks { #[benchmark] fn sudo_set_min_burn() { - T::Subtensor::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); + pallet_subtensor::Pallet::::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); #[extrinsic_call] _(RawOrigin::Root, 1u16/*netuid*/, 10u64/*min_burn*/)/*sudo_set_min_burn*/; @@ -199,7 +202,7 @@ mod benchmarks { #[benchmark] fn sudo_set_network_registration_allowed() { - T::Subtensor::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); + pallet_subtensor::Pallet::::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); #[extrinsic_call] _(RawOrigin::Root, 1u16/*netuid*/, true/*registration_allowed*/)/*sudo_set_network_registration_allowed*/; @@ -212,13 +215,13 @@ mod benchmarks { let tempo: u16 = 15; let modality: u16 = 0; - T::Subtensor::init_new_network(netuid, tempo); + pallet_subtensor::Pallet::::init_new_network(netuid, tempo); }: sudo_set_tempo(RawOrigin::>::Root, netuid, tempo) */ #[benchmark] fn sudo_set_tempo() { - T::Subtensor::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); + pallet_subtensor::Pallet::::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); #[extrinsic_call] _(RawOrigin::Root, 1u16/*netuid*/, 1u16/*tempo*/)/*sudo_set_tempo*/; @@ -226,7 +229,10 @@ mod benchmarks { #[benchmark] fn sudo_set_commit_reveal_weights_interval() { - 
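Throughout these `pallets/admin-utils/src/benchmarking.rs` hunks the new calls render as `pallet_subtensor::Pallet::::init_new_network(...)`; the missing generic between the double colons is almost certainly `<T>`, stripped during extraction along with other angle-bracketed text. A representative benchmark with the turbofish restored (the body mirrors the diff, the `<T>` itself is inferred):

```rust
// Representative shape of the migrated admin-utils benchmarks, with the
// stripped turbofish restored: `Pallet::::` should read `Pallet::<T>::`.
#[benchmark]
fn sudo_set_min_difficulty() {
    // Direct call into pallet_subtensor, replacing the removed `T::Subtensor`
    // interface indirection.
    pallet_subtensor::Pallet::<T>::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/);

    #[extrinsic_call]
    _(RawOrigin::Root, 1u16 /*netuid*/, 1000u64 /*min_difficulty*/);
}
```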
T::Subtensor::init_new_network(1u16 /*netuid*/, 1u16 /*sudo_tempo*/); + pallet_subtensor::Pallet::::init_new_network( + 1u16, /*netuid*/ + 1u16, /*sudo_tempo*/ + ); #[extrinsic_call] _(RawOrigin::Root, 1u16/*netuid*/, 3u64/*interval*/)/*set_commit_reveal_weights_interval()*/; @@ -234,11 +240,33 @@ mod benchmarks { #[benchmark] fn sudo_set_commit_reveal_weights_enabled() { - T::Subtensor::init_new_network(1u16 /*netuid*/, 1u16 /*sudo_tempo*/); + pallet_subtensor::Pallet::::init_new_network( + 1u16, /*netuid*/ + 1u16, /*sudo_tempo*/ + ); #[extrinsic_call] _(RawOrigin::Root, 1u16/*netuid*/, true/*enabled*/)/*set_commit_reveal_weights_enabled*/; } + #[benchmark] + fn sudo_set_hotkey_emission_tempo() { + pallet_subtensor::Pallet::::init_new_network( + 1u16, /*netuid*/ + 1u16, /*sudo_tempo*/ + ); + + #[extrinsic_call] + _(RawOrigin::Root, 1u64/*emission_tempo*/)/*set_hotkey_emission_tempo*/; + } + + #[benchmark] + fn sudo_set_network_max_stake() { + pallet_subtensor::Pallet::::init_new_network(1u16 /*netuid*/, 1u16 /*tempo*/); + + #[extrinsic_call] + _(RawOrigin::Root, 1u16/*netuid*/, 1_000_000_000_000_000u64/*max_stake*/)/*sudo_set_network_max_stake*/; + } + //impl_benchmark_test_suite!(AdminUtils, crate::mock::new_test_ext(), crate::mock::Test); } diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 9a8744dc6..3e06b822e 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -4,7 +4,7 @@ pub use pallet::*; pub mod weights; pub use weights::WeightInfo; -use sp_runtime::DispatchError; +use frame_system::pallet_prelude::BlockNumberFor; use sp_runtime::{traits::Member, RuntimeAppPublic}; mod benchmarking; @@ -26,7 +26,7 @@ pub mod pallet { /// Configure the pallet by specifying the parameters and types on which it depends. #[pallet::config] - pub trait Config: frame_system::Config { + pub trait Config: frame_system::Config + pallet_subtensor::pallet::Config { /// Because this pallet emits events, it depends on the runtime's definition of an event. 
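The `pallets/admin-utils/src/lib.rs` change replaces the injected `type Subtensor: SubtensorInterface<...>` associated type with a supertrait bound, `Config: frame_system::Config + pallet_subtensor::pallet::Config`, so the pallet calls `pallet_subtensor::Pallet::<T>` directly (the `RuntimeEvent` bound, garbled in this extract, is presumably the usual `From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>` form). A minimal, non-FRAME analogy of that coupling pattern; the trait and type names below are illustrative, not this repository's code:

```rust
// Minimal analogy of the coupling change: instead of routing calls through an
// injected interface type, the consumer bounds its own Config on the
// provider's Config and calls the provider directly.

trait SubtensorConfig {
    fn set_kappa(netuid: u16, kappa: u16);
}

// Mirrors `pub trait Config: frame_system::Config + pallet_subtensor::pallet::Config`.
trait AdminConfig: SubtensorConfig {}

fn sudo_set_kappa<T: AdminConfig>(netuid: u16, kappa: u16) {
    // Stand-in for `pallet_subtensor::Pallet::<T>::set_kappa(netuid, kappa)`.
    <T as SubtensorConfig>::set_kappa(netuid, kappa);
}

struct Runtime;

impl SubtensorConfig for Runtime {
    fn set_kappa(netuid: u16, kappa: u16) {
        println!("kappa for netuid {netuid} set to {kappa}");
    }
}

impl AdminConfig for Runtime {}

fn main() {
    sudo_set_kappa::<Runtime>(1, 3);
}
```

The tradeoff is tighter coupling: the runtime no longer has to implement and wire up a separate `SubtensorInterface`, but `pallet-admin-utils` can now only be used in a runtime that also includes `pallet-subtensor`.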
type RuntimeEvent: From> + IsType<::RuntimeEvent>; @@ -48,13 +48,6 @@ pub mod pallet { /// Unit of assets type Balance: Balance; - - /// Implementation of the subtensor interface - type Subtensor: crate::SubtensorInterface< - Self::AccountId, - Self::Balance, - Self::RuntimeOrigin, - >; } #[pallet::event] @@ -87,7 +80,7 @@ pub mod pallet { T::Aura::change_authorities(new_authorities.clone()); - log::info!("Aura authorities changed: {:?}", new_authorities); + log::debug!("Aura authorities changed: {:?}", new_authorities); // Return a successful DispatchResultWithPostInfo Ok(()) @@ -100,8 +93,8 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::sudo_set_default_take())] pub fn sudo_set_default_take(origin: OriginFor, default_take: u16) -> DispatchResult { ensure_root(origin)?; - T::Subtensor::set_max_delegate_take(default_take); - log::info!("DefaultTakeSet( default_take: {:?} ) ", default_take); + pallet_subtensor::Pallet::::set_max_delegate_take(default_take); + log::debug!("DefaultTakeSet( default_take: {:?} ) ", default_take); Ok(()) } @@ -112,8 +105,8 @@ pub mod pallet { #[pallet::weight((0, DispatchClass::Operational, Pays::No))] pub fn sudo_set_tx_rate_limit(origin: OriginFor, tx_rate_limit: u64) -> DispatchResult { ensure_root(origin)?; - T::Subtensor::set_tx_rate_limit(tx_rate_limit); - log::info!("TxRateLimitSet( tx_rate_limit: {:?} ) ", tx_rate_limit); + pallet_subtensor::Pallet::::set_tx_rate_limit(tx_rate_limit); + log::debug!("TxRateLimitSet( tx_rate_limit: {:?} ) ", tx_rate_limit); Ok(()) } @@ -127,10 +120,10 @@ pub mod pallet { netuid: u16, serving_rate_limit: u64, ) -> DispatchResult { - T::Subtensor::ensure_subnet_owner_or_root(origin, netuid)?; + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; - T::Subtensor::set_serving_rate_limit(netuid, serving_rate_limit); - log::info!( + pallet_subtensor::Pallet::::set_serving_rate_limit(netuid, serving_rate_limit); + log::debug!( "ServingRateLimitSet( serving_rate_limit: {:?} ) ", serving_rate_limit ); @@ -147,14 +140,14 @@ pub mod pallet { netuid: u16, min_difficulty: u64, ) -> DispatchResult { - T::Subtensor::ensure_subnet_owner_or_root(origin, netuid)?; + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; ensure!( - T::Subtensor::if_subnet_exist(netuid), + pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); - T::Subtensor::set_min_difficulty(netuid, min_difficulty); - log::info!( + pallet_subtensor::Pallet::::set_min_difficulty(netuid, min_difficulty); + log::debug!( "MinDifficultySet( netuid: {:?} min_difficulty: {:?} ) ", netuid, min_difficulty @@ -172,14 +165,14 @@ pub mod pallet { netuid: u16, max_difficulty: u64, ) -> DispatchResult { - T::Subtensor::ensure_subnet_owner_or_root(origin, netuid)?; + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; ensure!( - T::Subtensor::if_subnet_exist(netuid), + pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); - T::Subtensor::set_max_difficulty(netuid, max_difficulty); - log::info!( + pallet_subtensor::Pallet::::set_max_difficulty(netuid, max_difficulty); + log::debug!( "MaxDifficultySet( netuid: {:?} max_difficulty: {:?} ) ", netuid, max_difficulty @@ -197,14 +190,14 @@ pub mod pallet { netuid: u16, weights_version_key: u64, ) -> DispatchResult { - T::Subtensor::ensure_subnet_owner_or_root(origin, netuid)?; + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; ensure!( - T::Subtensor::if_subnet_exist(netuid), + 
pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); - T::Subtensor::set_weights_version_key(netuid, weights_version_key); - log::info!( + pallet_subtensor::Pallet::::set_weights_version_key(netuid, weights_version_key); + log::debug!( "WeightsVersionKeySet( netuid: {:?} weights_version_key: {:?} ) ", netuid, weights_version_key @@ -222,14 +215,17 @@ pub mod pallet { netuid: u16, weights_set_rate_limit: u64, ) -> DispatchResult { - T::Subtensor::ensure_subnet_owner_or_root(origin, netuid)?; + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; ensure!( - T::Subtensor::if_subnet_exist(netuid), + pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); - T::Subtensor::set_weights_set_rate_limit(netuid, weights_set_rate_limit); - log::info!( + pallet_subtensor::Pallet::::set_weights_set_rate_limit( + netuid, + weights_set_rate_limit, + ); + log::debug!( "WeightsSetRateLimitSet( netuid: {:?} weights_set_rate_limit: {:?} ) ", netuid, weights_set_rate_limit @@ -250,11 +246,11 @@ pub mod pallet { ensure_root(origin)?; ensure!( - T::Subtensor::if_subnet_exist(netuid), + pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); - T::Subtensor::set_adjustment_interval(netuid, adjustment_interval); - log::info!( + pallet_subtensor::Pallet::::set_adjustment_interval(netuid, adjustment_interval); + log::debug!( "AdjustmentIntervalSet( netuid: {:?} adjustment_interval: {:?} ) ", netuid, adjustment_interval @@ -278,14 +274,14 @@ pub mod pallet { netuid: u16, adjustment_alpha: u64, ) -> DispatchResult { - T::Subtensor::ensure_subnet_owner_or_root(origin, netuid)?; + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; ensure!( - T::Subtensor::if_subnet_exist(netuid), + pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); - T::Subtensor::set_adjustment_alpha(netuid, adjustment_alpha); - log::info!( + pallet_subtensor::Pallet::::set_adjustment_alpha(netuid, adjustment_alpha); + log::debug!( "AdjustmentAlphaSet( adjustment_alpha: {:?} ) ", adjustment_alpha ); @@ -302,14 +298,14 @@ pub mod pallet { netuid: u16, max_weight_limit: u16, ) -> DispatchResult { - T::Subtensor::ensure_subnet_owner_or_root(origin, netuid)?; + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; ensure!( - T::Subtensor::if_subnet_exist(netuid), + pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); - T::Subtensor::set_max_weight_limit(netuid, max_weight_limit); - log::info!( + pallet_subtensor::Pallet::::set_max_weight_limit(netuid, max_weight_limit); + log::debug!( "MaxWeightLimitSet( netuid: {:?} max_weight_limit: {:?} ) ", netuid, max_weight_limit @@ -327,14 +323,14 @@ pub mod pallet { netuid: u16, immunity_period: u16, ) -> DispatchResult { - T::Subtensor::ensure_subnet_owner_or_root(origin, netuid)?; + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; ensure!( - T::Subtensor::if_subnet_exist(netuid), + pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); - T::Subtensor::set_immunity_period(netuid, immunity_period); - log::info!( + pallet_subtensor::Pallet::::set_immunity_period(netuid, immunity_period); + log::debug!( "ImmunityPeriodSet( netuid: {:?} immunity_period: {:?} ) ", netuid, immunity_period @@ -352,14 +348,14 @@ pub mod pallet { netuid: u16, min_allowed_weights: u16, ) -> DispatchResult { - T::Subtensor::ensure_subnet_owner_or_root(origin, netuid)?; + 
pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; ensure!( - T::Subtensor::if_subnet_exist(netuid), + pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); - T::Subtensor::set_min_allowed_weights(netuid, min_allowed_weights); - log::info!( + pallet_subtensor::Pallet::::set_min_allowed_weights(netuid, min_allowed_weights); + log::debug!( "MinAllowedWeightSet( netuid: {:?} min_allowed_weights: {:?} ) ", netuid, min_allowed_weights @@ -379,15 +375,15 @@ pub mod pallet { ) -> DispatchResult { ensure_root(origin)?; ensure!( - T::Subtensor::if_subnet_exist(netuid), + pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); ensure!( - T::Subtensor::get_subnetwork_n(netuid) < max_allowed_uids, + pallet_subtensor::Pallet::::get_subnetwork_n(netuid) < max_allowed_uids, Error::::MaxAllowedUIdsLessThanCurrentUIds ); - T::Subtensor::set_max_allowed_uids(netuid, max_allowed_uids); - log::info!( + pallet_subtensor::Pallet::::set_max_allowed_uids(netuid, max_allowed_uids); + log::debug!( "MaxAllowedUidsSet( netuid: {:?} max_allowed_uids: {:?} ) ", netuid, max_allowed_uids @@ -401,14 +397,14 @@ pub mod pallet { #[pallet::call_index(16)] #[pallet::weight(T::WeightInfo::sudo_set_kappa())] pub fn sudo_set_kappa(origin: OriginFor, netuid: u16, kappa: u16) -> DispatchResult { - T::Subtensor::ensure_subnet_owner_or_root(origin, netuid)?; + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; ensure!( - T::Subtensor::if_subnet_exist(netuid), + pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); - T::Subtensor::set_kappa(netuid, kappa); - log::info!("KappaSet( netuid: {:?} kappa: {:?} ) ", netuid, kappa); + pallet_subtensor::Pallet::::set_kappa(netuid, kappa); + log::debug!("KappaSet( netuid: {:?} kappa: {:?} ) ", netuid, kappa); Ok(()) } @@ -418,14 +414,14 @@ pub mod pallet { #[pallet::call_index(17)] #[pallet::weight(T::WeightInfo::sudo_set_rho())] pub fn sudo_set_rho(origin: OriginFor, netuid: u16, rho: u16) -> DispatchResult { - T::Subtensor::ensure_subnet_owner_or_root(origin, netuid)?; + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; ensure!( - T::Subtensor::if_subnet_exist(netuid), + pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); - T::Subtensor::set_rho(netuid, rho); - log::info!("RhoSet( netuid: {:?} rho: {:?} ) ", netuid, rho); + pallet_subtensor::Pallet::::set_rho(netuid, rho); + log::debug!("RhoSet( netuid: {:?} rho: {:?} ) ", netuid, rho); Ok(()) } @@ -439,14 +435,14 @@ pub mod pallet { netuid: u16, activity_cutoff: u16, ) -> DispatchResult { - T::Subtensor::ensure_subnet_owner_or_root(origin, netuid)?; + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; ensure!( - T::Subtensor::if_subnet_exist(netuid), + pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); - T::Subtensor::set_activity_cutoff(netuid, activity_cutoff); - log::info!( + pallet_subtensor::Pallet::::set_activity_cutoff(netuid, activity_cutoff); + log::debug!( "ActivityCutoffSet( netuid: {:?} activity_cutoff: {:?} ) ", netuid, activity_cutoff @@ -470,10 +466,13 @@ pub mod pallet { netuid: u16, registration_allowed: bool, ) -> DispatchResult { - T::Subtensor::ensure_subnet_owner_or_root(origin, netuid)?; + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; - T::Subtensor::set_network_registration_allowed(netuid, registration_allowed); - log::info!( + 
pallet_subtensor::Pallet::::set_network_registration_allowed( + netuid, + registration_allowed, + ); + log::debug!( "NetworkRegistrationAllowed( registration_allowed: {:?} ) ", registration_allowed ); @@ -495,10 +494,13 @@ pub mod pallet { netuid: u16, registration_allowed: bool, ) -> DispatchResult { - T::Subtensor::ensure_subnet_owner_or_root(origin, netuid)?; + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; - T::Subtensor::set_network_pow_registration_allowed(netuid, registration_allowed); - log::info!( + pallet_subtensor::Pallet::::set_network_pow_registration_allowed( + netuid, + registration_allowed, + ); + log::debug!( "NetworkPowRegistrationAllowed( registration_allowed: {:?} ) ", registration_allowed ); @@ -518,14 +520,14 @@ pub mod pallet { ensure_root(origin)?; ensure!( - T::Subtensor::if_subnet_exist(netuid), + pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); - T::Subtensor::set_target_registrations_per_interval( + pallet_subtensor::Pallet::::set_target_registrations_per_interval( netuid, target_registrations_per_interval, ); - log::info!( + log::debug!( "RegistrationPerIntervalSet( netuid: {:?} target_registrations_per_interval: {:?} ) ", netuid, target_registrations_per_interval @@ -543,14 +545,14 @@ pub mod pallet { netuid: u16, min_burn: u64, ) -> DispatchResult { - T::Subtensor::ensure_subnet_owner_or_root(origin, netuid)?; + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; ensure!( - T::Subtensor::if_subnet_exist(netuid), + pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); - T::Subtensor::set_min_burn(netuid, min_burn); - log::info!( + pallet_subtensor::Pallet::::set_min_burn(netuid, min_burn); + log::debug!( "MinBurnSet( netuid: {:?} min_burn: {:?} ) ", netuid, min_burn @@ -568,14 +570,14 @@ pub mod pallet { netuid: u16, max_burn: u64, ) -> DispatchResult { - T::Subtensor::ensure_subnet_owner_or_root(origin, netuid)?; + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; ensure!( - T::Subtensor::if_subnet_exist(netuid), + pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); - T::Subtensor::set_max_burn(netuid, max_burn); - log::info!( + pallet_subtensor::Pallet::::set_max_burn(netuid, max_burn); + log::debug!( "MaxBurnSet( netuid: {:?} max_burn: {:?} ) ", netuid, max_burn @@ -593,13 +595,13 @@ pub mod pallet { netuid: u16, difficulty: u64, ) -> DispatchResult { - T::Subtensor::ensure_subnet_owner_or_root(origin, netuid)?; + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; ensure!( - T::Subtensor::if_subnet_exist(netuid), + pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); - T::Subtensor::set_difficulty(netuid, difficulty); - log::info!( + pallet_subtensor::Pallet::::set_difficulty(netuid, difficulty); + log::debug!( "DifficultySet( netuid: {:?} difficulty: {:?} ) ", netuid, difficulty @@ -619,16 +621,20 @@ pub mod pallet { ) -> DispatchResult { ensure_root(origin)?; ensure!( - T::Subtensor::if_subnet_exist(netuid), + pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); ensure!( - max_allowed_validators <= T::Subtensor::get_max_allowed_uids(netuid), + max_allowed_validators + <= pallet_subtensor::Pallet::::get_max_allowed_uids(netuid), Error::::MaxValidatorsLargerThanMaxUIds ); - T::Subtensor::set_max_allowed_validators(netuid, max_allowed_validators); - log::info!( + 
pallet_subtensor::Pallet::::set_max_allowed_validators( + netuid, + max_allowed_validators, + ); + log::debug!( "MaxAllowedValidatorsSet( netuid: {:?} max_allowed_validators: {:?} ) ", netuid, max_allowed_validators @@ -646,14 +652,14 @@ pub mod pallet { netuid: u16, bonds_moving_average: u64, ) -> DispatchResult { - T::Subtensor::ensure_subnet_owner_or_root(origin, netuid)?; + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; ensure!( - T::Subtensor::if_subnet_exist(netuid), + pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); - T::Subtensor::set_bonds_moving_average(netuid, bonds_moving_average); - log::info!( + pallet_subtensor::Pallet::::set_bonds_moving_average(netuid, bonds_moving_average); + log::debug!( "BondsMovingAverageSet( netuid: {:?} bonds_moving_average: {:?} ) ", netuid, bonds_moving_average @@ -674,11 +680,14 @@ pub mod pallet { ensure_root(origin)?; ensure!( - T::Subtensor::if_subnet_exist(netuid), + pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); - T::Subtensor::set_max_registrations_per_block(netuid, max_registrations_per_block); - log::info!( + pallet_subtensor::Pallet::::set_max_registrations_per_block( + netuid, + max_registrations_per_block, + ); + log::debug!( "MaxRegistrationsPerBlock( netuid: {:?} max_registrations_per_block: {:?} ) ", netuid, max_registrations_per_block @@ -701,8 +710,8 @@ pub mod pallet { subnet_owner_cut: u16, ) -> DispatchResult { ensure_root(origin)?; - T::Subtensor::set_subnet_owner_cut(subnet_owner_cut); - log::info!( + pallet_subtensor::Pallet::::set_subnet_owner_cut(subnet_owner_cut); + log::debug!( "SubnetOwnerCut( subnet_owner_cut: {:?} ) ", subnet_owner_cut ); @@ -724,8 +733,8 @@ pub mod pallet { rate_limit: u64, ) -> DispatchResult { ensure_root(origin)?; - T::Subtensor::set_network_rate_limit(rate_limit); - log::info!("NetworkRateLimit( rate_limit: {:?} ) ", rate_limit); + pallet_subtensor::Pallet::::set_network_rate_limit(rate_limit); + log::debug!("NetworkRateLimit( rate_limit: {:?} ) ", rate_limit); Ok(()) } @@ -737,11 +746,11 @@ pub mod pallet { pub fn sudo_set_tempo(origin: OriginFor, netuid: u16, tempo: u16) -> DispatchResult { ensure_root(origin)?; ensure!( - T::Subtensor::if_subnet_exist(netuid), + pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); - T::Subtensor::set_tempo(netuid, tempo); - log::info!("TempoSet( netuid: {:?} tempo: {:?} ) ", netuid, tempo); + pallet_subtensor::Pallet::::set_tempo(netuid, tempo); + log::debug!("TempoSet( netuid: {:?} tempo: {:?} ) ", netuid, tempo); Ok(()) } @@ -756,7 +765,7 @@ pub mod pallet { ) -> DispatchResult { ensure_root(origin)?; - T::Subtensor::set_total_issuance(total_issuance); + pallet_subtensor::Pallet::::set_total_issuance(total_issuance); Ok(()) } @@ -777,9 +786,9 @@ pub mod pallet { ) -> DispatchResult { ensure_root(origin)?; - T::Subtensor::set_network_immunity_period(immunity_period); + pallet_subtensor::Pallet::::set_network_immunity_period(immunity_period); - log::info!("NetworkImmunityPeriod( period: {:?} ) ", immunity_period); + log::debug!("NetworkImmunityPeriod( period: {:?} ) ", immunity_period); Ok(()) } @@ -800,9 +809,9 @@ pub mod pallet { ) -> DispatchResult { ensure_root(origin)?; - T::Subtensor::set_network_min_lock(lock_cost); + pallet_subtensor::Pallet::::set_network_min_lock(lock_cost); - log::info!("NetworkMinLockCost( lock_cost: {:?} ) ", lock_cost); + log::debug!("NetworkMinLockCost( lock_cost: {:?} ) ", lock_cost); Ok(()) } @@ 
-819,9 +828,9 @@ pub mod pallet { ))] pub fn sudo_set_subnet_limit(origin: OriginFor, max_subnets: u16) -> DispatchResult { ensure_root(origin)?; - T::Subtensor::set_subnet_limit(max_subnets); + pallet_subtensor::Pallet::::set_max_subnets(max_subnets); - log::info!("SubnetLimit( max_subnets: {:?} ) ", max_subnets); + log::debug!("SubnetLimit( max_subnets: {:?} ) ", max_subnets); Ok(()) } @@ -842,9 +851,9 @@ pub mod pallet { ) -> DispatchResult { ensure_root(origin)?; - T::Subtensor::set_lock_reduction_interval(interval); + pallet_subtensor::Pallet::::set_lock_reduction_interval(interval); - log::info!("NetworkLockReductionInterval( interval: {:?} ) ", interval); + log::debug!("NetworkLockReductionInterval( interval: {:?} ) ", interval); Ok(()) } @@ -861,10 +870,10 @@ pub mod pallet { ) -> DispatchResult { ensure_root(origin)?; ensure!( - T::Subtensor::if_subnet_exist(netuid), + pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); - T::Subtensor::set_rao_recycled(netuid, rao_recycled); + pallet_subtensor::Pallet::::set_rao_recycled(netuid, rao_recycled); Ok(()) } @@ -875,7 +884,7 @@ pub mod pallet { #[pallet::weight((0, DispatchClass::Operational, Pays::No))] pub fn sudo_set_weights_min_stake(origin: OriginFor, min_stake: u64) -> DispatchResult { ensure_root(origin)?; - T::Subtensor::set_weights_min_stake(min_stake); + pallet_subtensor::Pallet::::set_weights_min_stake(min_stake); Ok(()) } @@ -890,12 +899,12 @@ pub mod pallet { min_stake: u64, ) -> DispatchResult { ensure_root(origin)?; - let prev_min_stake = T::Subtensor::get_nominator_min_required_stake(); + let prev_min_stake = pallet_subtensor::Pallet::::get_nominator_min_required_stake(); log::trace!("Setting minimum stake to: {}", min_stake); - T::Subtensor::set_nominator_min_required_stake(min_stake); + pallet_subtensor::Pallet::::set_nominator_min_required_stake(min_stake); if min_stake > prev_min_stake { log::trace!("Clearing small nominations"); - T::Subtensor::clear_small_nominations(); + pallet_subtensor::Pallet::::clear_small_nominations(); log::trace!("Small nominations cleared"); } Ok(()) @@ -911,8 +920,8 @@ pub mod pallet { tx_rate_limit: u64, ) -> DispatchResult { ensure_root(origin)?; - T::Subtensor::set_tx_delegate_take_rate_limit(tx_rate_limit); - log::info!( + pallet_subtensor::Pallet::::set_tx_delegate_take_rate_limit(tx_rate_limit); + log::debug!( "TxRateLimitDelegateTakeSet( tx_delegate_take_rate_limit: {:?} ) ", tx_rate_limit ); @@ -926,8 +935,8 @@ pub mod pallet { #[pallet::weight((0, DispatchClass::Operational, Pays::No))] pub fn sudo_set_min_delegate_take(origin: OriginFor, take: u16) -> DispatchResult { ensure_root(origin)?; - T::Subtensor::set_min_delegate_take(take); - log::info!("TxMinDelegateTakeSet( tx_min_delegate_take: {:?} ) ", take); + pallet_subtensor::Pallet::::set_min_delegate_take(take); + log::debug!("TxMinDelegateTakeSet( tx_min_delegate_take: {:?} ) ", take); Ok(()) } @@ -941,8 +950,10 @@ pub mod pallet { target_stakes_per_interval: u64, ) -> DispatchResult { ensure_root(origin)?; - T::Subtensor::set_target_stakes_per_interval(target_stakes_per_interval); - log::info!( + pallet_subtensor::Pallet::::set_target_stakes_per_interval( + target_stakes_per_interval, + ); + log::debug!( "TxTargetStakesPerIntervalSet( set_target_stakes_per_interval: {:?} ) ", target_stakes_per_interval ); @@ -959,15 +970,15 @@ pub mod pallet { netuid: u16, interval: u64, ) -> DispatchResult { - T::Subtensor::ensure_subnet_owner_or_root(origin, netuid)?; + 
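Aside on the pattern used throughout the hunks above: the `T::Subtensor` associated-type indirection is dropped in favour of calling `pallet_subtensor::Pallet` directly. For those calls to type-check, the admin-utils `Config` trait presumably needs a `pallet_subtensor::Config` bound; the following is only a minimal sketch of that shape, and the extrinsic shown is hypothetical, not part of this diff:

#[pallet::config]
pub trait Config: frame_system::Config + pallet_subtensor::Config {
    // ...existing associated types (Aura, Balance, WeightInfo, ...)
}

#[pallet::call]
impl<T: Config> Pallet<T> {
    /// Hypothetical setter, shown only to illustrate the direct-call style.
    #[pallet::call_index(200)]
    #[pallet::weight((0, DispatchClass::Operational, Pays::No))]
    pub fn sudo_set_example_param(
        origin: OriginFor<T>,
        netuid: u16,
        value: u64,
    ) -> DispatchResult {
        pallet_subtensor::Pallet::<T>::ensure_subnet_owner_or_root(origin, netuid)?;
        ensure!(
            pallet_subtensor::Pallet::<T>::if_subnet_exist(netuid),
            Error::<T>::SubnetDoesNotExist
        );
        // Storage writes live in pallet_subtensor; admin-utils only gates access.
        pallet_subtensor::Pallet::<T>::set_serving_rate_limit(netuid, value);
        log::debug!("ExampleParamSet( netuid: {:?} value: {:?} )", netuid, value);
        Ok(())
    }
}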
pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; ensure!( - T::Subtensor::if_subnet_exist(netuid), + pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); - T::Subtensor::set_commit_reveal_weights_interval(netuid, interval); - log::info!( + pallet_subtensor::Pallet::::set_commit_reveal_weights_interval(netuid, interval); + log::debug!( "SetWeightCommitInterval( netuid: {:?}, interval: {:?} ) ", netuid, interval @@ -985,15 +996,15 @@ pub mod pallet { netuid: u16, enabled: bool, ) -> DispatchResult { - T::Subtensor::ensure_subnet_owner_or_root(origin, netuid)?; + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; ensure!( - T::Subtensor::if_subnet_exist(netuid), + pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); - T::Subtensor::set_commit_reveal_weights_enabled(netuid, enabled); - log::info!("ToggleSetWeightsCommitReveal( netuid: {:?} ) ", netuid); + pallet_subtensor::Pallet::::set_commit_reveal_weights_enabled(netuid, enabled); + log::debug!("ToggleSetWeightsCommitReveal( netuid: {:?} ) ", netuid); Ok(()) } @@ -1013,9 +1024,9 @@ pub mod pallet { netuid: u16, enabled: bool, ) -> DispatchResult { - T::Subtensor::ensure_subnet_owner_or_root(origin, netuid)?; - T::Subtensor::set_liquid_alpha_enabled(netuid, enabled); - log::info!( + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + pallet_subtensor::Pallet::::set_liquid_alpha_enabled(netuid, enabled); + log::debug!( "LiquidAlphaEnableToggled( netuid: {:?}, Enabled: {:?} ) ", netuid, enabled @@ -1032,8 +1043,158 @@ pub mod pallet { alpha_low: u16, alpha_high: u16, ) -> DispatchResult { - T::Subtensor::ensure_subnet_owner_or_root(origin.clone(), netuid)?; - T::Subtensor::do_set_alpha_values(origin, netuid, alpha_low, alpha_high) + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin.clone(), netuid)?; + pallet_subtensor::Pallet::::do_set_alpha_values( + origin, netuid, alpha_low, alpha_high, + ) + } + + /// Sets the hotkey emission tempo. + /// + /// This extrinsic allows the root account to set the hotkey emission tempo, which determines + /// the number of blocks before a hotkey drains accumulated emissions through to nominator staking accounts. + /// + /// # Arguments + /// * `origin` - The origin of the call, which must be the root account. + /// * `emission_tempo` - The new emission tempo value to set. + /// + /// # Emits + /// * `Event::HotkeyEmissionTempoSet` - When the hotkey emission tempo is successfully set. + /// + /// # Errors + /// * `DispatchError::BadOrigin` - If the origin is not the root account. + // #[pallet::weight(T::WeightInfo::sudo_set_hotkey_emission_tempo())] + #[pallet::call_index(52)] + #[pallet::weight((0, DispatchClass::Operational, Pays::No))] + pub fn sudo_set_hotkey_emission_tempo( + origin: OriginFor, + emission_tempo: u64, + ) -> DispatchResult { + ensure_root(origin)?; + pallet_subtensor::Pallet::::set_hotkey_emission_tempo(emission_tempo); + log::debug!( + "HotkeyEmissionTempoSet( emission_tempo: {:?} )", + emission_tempo + ); + Ok(()) + } + + /// Sets the maximum stake allowed for a specific network. + /// + /// This function allows the root account to set the maximum stake for a given network. + /// It updates the network's maximum stake value and logs the change. + /// + /// # Arguments + /// + /// * `origin` - The origin of the call, which must be the root account. + /// * `netuid` - The unique identifier of the network. 
+ /// * `max_stake` - The new maximum stake value to set. + /// + /// # Returns + /// + /// Returns `Ok(())` if the operation is successful, or an error if it fails. + /// + /// # Example + /// + /// + /// # Notes + /// + /// - This function can only be called by the root account. + /// - The `netuid` should correspond to an existing network. + /// + /// # TODO + /// + // - Consider adding a check to ensure the `netuid` corresponds to an existing network. + // - Implement a mechanism to gradually adjust the max stake to prevent sudden changes. + // #[pallet::weight(T::WeightInfo::sudo_set_network_max_stake())] + #[pallet::call_index(53)] + #[pallet::weight((0, DispatchClass::Operational, Pays::No))] + pub fn sudo_set_network_max_stake( + origin: OriginFor, + netuid: u16, + max_stake: u64, + ) -> DispatchResult { + // Ensure the call is made by the root account + ensure_root(origin)?; + + // Set the new maximum stake for the specified network + pallet_subtensor::Pallet::::set_network_max_stake(netuid, max_stake); + + // Log the change + log::trace!( + "NetworkMaxStakeSet( netuid: {:?}, max_stake: {:?} )", + netuid, + max_stake + ); + + Ok(()) + } + + /// Sets the duration of the coldkey swap schedule. + /// + /// This extrinsic allows the root account to set the duration for the coldkey swap schedule. + /// The coldkey swap schedule determines how long it takes for a coldkey swap operation to complete. + /// + /// # Arguments + /// * `origin` - The origin of the call, which must be the root account. + /// * `duration` - The new duration for the coldkey swap schedule, in number of blocks. + /// + /// # Errors + /// * `BadOrigin` - If the caller is not the root account. + /// + /// # Weight + /// Weight is handled by the `#[pallet::weight]` attribute. + #[pallet::call_index(54)] + #[pallet::weight((0, DispatchClass::Operational, Pays::No))] + pub fn sudo_set_coldkey_swap_schedule_duration( + origin: OriginFor, + duration: BlockNumberFor, + ) -> DispatchResult { + // Ensure the call is made by the root account + ensure_root(origin)?; + + // Set the new duration of schedule coldkey swap + pallet_subtensor::Pallet::::set_coldkey_swap_schedule_duration(duration); + + // Log the change + log::trace!("ColdkeySwapScheduleDurationSet( duration: {:?} )", duration); + + Ok(()) + } + + /// Sets the duration of the dissolve network schedule. + /// + /// This extrinsic allows the root account to set the duration for the dissolve network schedule. + /// The dissolve network schedule determines how long it takes for a network dissolution operation to complete. + /// + /// # Arguments + /// * `origin` - The origin of the call, which must be the root account. + /// * `duration` - The new duration for the dissolve network schedule, in number of blocks. + /// + /// # Errors + /// * `BadOrigin` - If the caller is not the root account. + /// + /// # Weight + /// Weight is handled by the `#[pallet::weight]` attribute. 
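For context on the two schedule-duration setters documented above: they only store a block count. The pallet that actually defers the operation would then combine that count with the current block number and hand the call to `pallet_scheduler`. A rough sketch of that consumption follows; the priority value, the `call` construction, and the exact bounding of the call are assumptions for illustration, not taken from this diff:

// Hedged sketch: schedule `call` to run `duration` blocks from now.
let duration = ColdkeySwapScheduleDuration::<T>::get();
let when = frame_system::Pallet::<T>::block_number().saturating_add(duration);
T::Scheduler::schedule(
    DispatchTime::At(when),
    None,                                 // not periodic
    63,                                   // priority: assumed value
    frame_system::RawOrigin::Root.into(), // dispatch the deferred call as root
    T::Preimages::bound(call.into())?,    // bound via the configured preimage provider
)?;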
+ #[pallet::call_index(55)] + #[pallet::weight((0, DispatchClass::Operational, Pays::No))] + pub fn sudo_set_dissolve_network_schedule_duration( + origin: OriginFor, + duration: BlockNumberFor, + ) -> DispatchResult { + // Ensure the call is made by the root account + ensure_root(origin)?; + + // Set the duration of schedule dissolve network + pallet_subtensor::Pallet::::set_dissolve_network_schedule_duration(duration); + + // Log the change + log::trace!( + "DissolveNetworkScheduleDurationSet( duration: {:?} )", + duration + ); + + Ok(()) } } } @@ -1052,89 +1213,3 @@ pub trait AuraInterface { impl AuraInterface for () { fn change_authorities(_: BoundedVec) {} } - -/////////////////////////////////////////// - -pub trait SubtensorInterface { - fn set_min_delegate_take(take: u16); - fn set_max_delegate_take(take: u16); - fn set_tx_rate_limit(rate_limit: u64); - fn set_tx_delegate_take_rate_limit(rate_limit: u64); - - fn set_serving_rate_limit(netuid: u16, rate_limit: u64); - - fn set_max_burn(netuid: u16, max_burn: u64); - fn set_min_burn(netuid: u16, min_burn: u64); - fn set_burn(netuid: u16, burn: u64); - - fn set_max_difficulty(netuid: u16, max_diff: u64); - fn set_min_difficulty(netuid: u16, min_diff: u64); - fn set_difficulty(netuid: u16, diff: u64); - - fn set_weights_rate_limit(netuid: u16, rate_limit: u64); - - fn set_weights_version_key(netuid: u16, version: u64); - - fn set_bonds_moving_average(netuid: u16, moving_average: u64); - - fn set_max_allowed_validators(netuid: u16, max_validators: u16); - - fn get_root_netuid() -> u16; - fn if_subnet_exist(netuid: u16) -> bool; - fn create_account_if_non_existent(coldkey: &AccountId, hotkey: &AccountId); - fn coldkey_owns_hotkey(coldkey: &AccountId, hotkey: &AccountId) -> bool; - fn increase_stake_on_coldkey_hotkey_account( - coldkey: &AccountId, - hotkey: &AccountId, - increment: u64, - ); - fn add_balance_to_coldkey_account(coldkey: &AccountId, amount: Balance); - fn get_current_block_as_u64() -> u64; - fn get_subnetwork_n(netuid: u16) -> u16; - fn get_max_allowed_uids(netuid: u16) -> u16; - fn append_neuron(netuid: u16, new_hotkey: &AccountId, block_number: u64); - fn get_neuron_to_prune(netuid: u16) -> u16; - fn replace_neuron(netuid: u16, uid_to_replace: u16, new_hotkey: &AccountId, block_number: u64); - fn set_total_issuance(total_issuance: u64); - fn set_network_immunity_period(net_immunity_period: u64); - fn set_network_min_lock(net_min_lock: u64); - fn set_rao_recycled(netuid: u16, rao_recycled: u64); - fn set_subnet_limit(limit: u16); - fn is_hotkey_registered_on_network(netuid: u16, hotkey: &AccountId) -> bool; - fn set_lock_reduction_interval(interval: u64); - fn set_tempo(netuid: u16, tempo: u16); - fn set_subnet_owner_cut(subnet_owner_cut: u16); - fn set_network_rate_limit(limit: u64); - fn set_max_registrations_per_block(netuid: u16, max_registrations_per_block: u16); - fn set_adjustment_alpha(netuid: u16, adjustment_alpha: u64); - fn set_target_registrations_per_interval(netuid: u16, target_registrations_per_interval: u16); - fn set_network_pow_registration_allowed(netuid: u16, registration_allowed: bool); - fn set_network_registration_allowed(netuid: u16, registration_allowed: bool); - fn set_activity_cutoff(netuid: u16, activity_cutoff: u16); - fn ensure_subnet_owner_or_root(o: RuntimeOrigin, netuid: u16) -> Result<(), DispatchError>; - fn set_rho(netuid: u16, rho: u16); - fn set_kappa(netuid: u16, kappa: u16); - fn set_max_allowed_uids(netuid: u16, max_allowed: u16); - fn set_min_allowed_weights(netuid: u16, 
min_allowed_weights: u16); - fn set_immunity_period(netuid: u16, immunity_period: u16); - fn set_max_weight_limit(netuid: u16, max_weight_limit: u16); - fn set_scaling_law_power(netuid: u16, scaling_law_power: u16); - fn set_validator_prune_len(netuid: u16, validator_prune_len: u64); - fn set_adjustment_interval(netuid: u16, adjustment_interval: u16); - fn set_weights_set_rate_limit(netuid: u16, weights_set_rate_limit: u64); - fn init_new_network(netuid: u16, tempo: u16); - fn set_weights_min_stake(min_stake: u64); - fn get_nominator_min_required_stake() -> u64; - fn set_nominator_min_required_stake(min_stake: u64); - fn clear_small_nominations(); - fn set_target_stakes_per_interval(target_stakes_per_interval: u64); - fn set_commit_reveal_weights_interval(netuid: u16, interval: u64); - fn set_commit_reveal_weights_enabled(netuid: u16, enabled: bool); - fn set_liquid_alpha_enabled(netuid: u16, enabled: bool); - fn do_set_alpha_values( - origin: RuntimeOrigin, - netuid: u16, - alpha_low: u16, - alpha_high: u16, - ) -> Result<(), DispatchError>; -} diff --git a/pallets/admin-utils/src/weights.rs b/pallets/admin-utils/src/weights.rs index ace123b14..84fe058f8 100644 --- a/pallets/admin-utils/src/weights.rs +++ b/pallets/admin-utils/src/weights.rs @@ -62,7 +62,6 @@ pub trait WeightInfo { fn sudo_set_tempo() -> Weight; fn sudo_set_commit_reveal_weights_interval() -> Weight; fn sudo_set_commit_reveal_weights_enabled() -> Weight; - } /// Weights for `pallet_admin_utils` using the Substrate node and recommended hardware. diff --git a/pallets/admin-utils/tests/mock.rs b/pallets/admin-utils/tests/mock.rs index dbf88bdfa..342ed01cd 100644 --- a/pallets/admin-utils/tests/mock.rs +++ b/pallets/admin-utils/tests/mock.rs @@ -2,18 +2,20 @@ use frame_support::{ assert_ok, derive_impl, parameter_types, - traits::{Everything, Hooks}, + traits::{Everything, Hooks, PrivilegeCmp}, weights, }; use frame_system as system; -use frame_system::{limits, EnsureNever}; +use frame_system::{limits, EnsureNever, EnsureRoot}; use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_core::U256; use sp_core::{ConstU64, H256}; use sp_runtime::{ traits::{BlakeTwo256, ConstU32, IdentityLookup}, - BuildStorage, DispatchError, + BuildStorage, Perbill, }; +use sp_std::cmp::Ordering; +use sp_weights::Weight; type Block = frame_system::mocking::MockBlock; @@ -25,6 +27,7 @@ frame_support::construct_runtime!( Balances: pallet_balances, AdminUtils: pallet_admin_utils, SubtensorModule: pallet_subtensor::{Pallet, Call, Storage, Event, Error}, + Scheduler: pallet_scheduler::{Pallet, Call, Storage, Event}, } ); @@ -77,12 +80,16 @@ parameter_types! { pub const InitialBondsMovingAverage: u64 = 900_000; pub const InitialStakePruningMin: u16 = 0; pub const InitialFoundationDistribution: u64 = 0; - pub const InitialDefaultTake: u16 = 11_796; // 18% honest number. - pub const InitialMinTake: u16 = 5_898; // 9%; + pub const InitialDefaultDelegateTake: u16 = 11_796; // 18% honest number. + pub const InitialMinDelegateTake: u16 = 5_898; // 9%; + pub const InitialDefaultChildKeyTake: u16 = 0; // Allow 0 % + pub const InitialMinChildKeyTake: u16 = 0; // Allow 0 % + pub const InitialMaxChildKeyTake: u16 = 11_796; // 18 %; pub const InitialWeightsVersionKey: u16 = 0; pub const InitialServingRateLimit: u64 = 0; // No limit. 
    pub const InitialTxRateLimit: u64 = 0; // Disable rate limit for testing
    pub const InitialTxDelegateTakeRateLimit: u64 = 0; // Disable rate limit for testing
+   pub const InitialTxChildKeyTakeRateLimit: u64 = 0; // Disable rate limit for testing
    pub const InitialBurn: u64 = 0;
    pub const InitialMinBurn: u64 = 0;
    pub const InitialMaxBurn: u64 = 1_000_000_000;
@@ -114,18 +121,22 @@ parameter_types! {
    pub const InitialAlphaHigh: u16 = 58982; // Represents 0.9 as per the production default
    pub const InitialAlphaLow: u16 = 45875; // Represents 0.7 as per the production default
    pub const InitialLiquidAlphaOn: bool = false; // Default value for LiquidAlphaOn
-   pub const InitialBaseDifficulty: u64 = 10_000; // Base difficulty
+   pub const InitialHotkeyEmissionTempo: u64 = 1;
+   pub const InitialNetworkMaxStake: u64 = u64::MAX; // Maximum possible value for u64; this makes the max stake effectively unlimited
+   pub const InitialColdkeySwapScheduleDuration: u64 = 5 * 24 * 60 * 60 / 12; // 5 days
+   pub const InitialDissolveNetworkScheduleDuration: u64 = 5 * 24 * 60 * 60 / 12; // 5 days
}

impl pallet_subtensor::Config for Test {
    type RuntimeEvent = RuntimeEvent;
+   type RuntimeCall = RuntimeCall;
    type Currency = Balances;
    type InitialIssuance = InitialIssuance;
    type SudoRuntimeCall = TestRuntimeCall;
    type CouncilOrigin = EnsureNever;
    type SenateMembers = ();
    type TriumvirateInterface = ();
-
+   type Scheduler = Scheduler;
    type InitialMinAllowedWeights = InitialMinAllowedWeights;
    type InitialEmissionValue = InitialEmissionValue;
    type InitialMaxWeightsLimit = InitialMaxWeightsLimit;
@@ -145,14 +156,18 @@ impl pallet_subtensor::Config for Test {
    type InitialPruningScore = InitialPruningScore;
    type InitialBondsMovingAverage = InitialBondsMovingAverage;
    type InitialMaxAllowedValidators = InitialMaxAllowedValidators;
-   type InitialDefaultTake = InitialDefaultTake;
-   type InitialMinTake = InitialMinTake;
+   type InitialDefaultDelegateTake = InitialDefaultDelegateTake;
+   type InitialMinDelegateTake = InitialMinDelegateTake;
+   type InitialDefaultChildKeyTake = InitialDefaultChildKeyTake;
+   type InitialMinChildKeyTake = InitialMinChildKeyTake;
+   type InitialMaxChildKeyTake = InitialMaxChildKeyTake;
    type InitialWeightsVersionKey = InitialWeightsVersionKey;
    type InitialMaxDifficulty = InitialMaxDifficulty;
    type InitialMinDifficulty = InitialMinDifficulty;
    type InitialServingRateLimit = InitialServingRateLimit;
    type InitialTxRateLimit = InitialTxRateLimit;
    type InitialTxDelegateTakeRateLimit = InitialTxDelegateTakeRateLimit;
+   type InitialTxChildKeyTakeRateLimit = InitialTxChildKeyTakeRateLimit;
    type InitialBurn = InitialBurn;
    type InitialMaxBurn = InitialMaxBurn;
    type InitialMinBurn = InitialMinBurn;
@@ -170,7 +185,11 @@ impl pallet_subtensor::Config for Test {
    type AlphaHigh = InitialAlphaHigh;
    type AlphaLow = InitialAlphaLow;
    type LiquidAlphaOn = InitialLiquidAlphaOn;
-   type InitialBaseDifficulty = InitialBaseDifficulty;
+   type InitialHotkeyEmissionTempo = InitialHotkeyEmissionTempo;
+   type InitialNetworkMaxStake = InitialNetworkMaxStake;
+   type Preimages = ();
+   type InitialColdkeySwapScheduleDuration = InitialColdkeySwapScheduleDuration;
+   type InitialDissolveNetworkScheduleDuration = InitialDissolveNetworkScheduleDuration;
}

#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
@@ -216,274 +235,11 @@ impl pallet_balances::Config for Test {
    type RuntimeHoldReason = ();
}

-pub struct SubtensorIntrf;
-
-impl pallet_admin_utils::SubtensorInterface for SubtensorIntrf {
-    fn set_max_delegate_take(default_take: u16) {
-
SubtensorModule::set_max_delegate_take(default_take); - } - - fn set_min_delegate_take(default_take: u16) { - SubtensorModule::set_min_delegate_take(default_take); - } - - fn set_tx_rate_limit(rate_limit: u64) { - SubtensorModule::set_tx_rate_limit(rate_limit); - } - - fn set_tx_delegate_take_rate_limit(rate_limit: u64) { - SubtensorModule::set_tx_delegate_take_rate_limit(rate_limit); - } - - fn set_serving_rate_limit(netuid: u16, rate_limit: u64) { - SubtensorModule::set_serving_rate_limit(netuid, rate_limit); - } - - fn set_max_burn(netuid: u16, max_burn: u64) { - SubtensorModule::set_max_burn(netuid, max_burn); - } - - fn set_min_burn(netuid: u16, min_burn: u64) { - SubtensorModule::set_min_burn(netuid, min_burn); - } - - fn set_burn(netuid: u16, burn: u64) { - SubtensorModule::set_burn(netuid, burn); - } - - fn set_max_difficulty(netuid: u16, max_diff: u64) { - SubtensorModule::set_max_difficulty(netuid, max_diff); - } - - fn set_min_difficulty(netuid: u16, min_diff: u64) { - SubtensorModule::set_min_difficulty(netuid, min_diff); - } - - fn set_difficulty(netuid: u16, diff: u64) { - SubtensorModule::set_difficulty(netuid, diff); - } - - fn set_weights_rate_limit(netuid: u16, rate_limit: u64) { - SubtensorModule::set_weights_set_rate_limit(netuid, rate_limit); - } - - fn set_weights_version_key(netuid: u16, version: u64) { - SubtensorModule::set_weights_version_key(netuid, version); - } - - fn set_bonds_moving_average(netuid: u16, moving_average: u64) { - SubtensorModule::set_bonds_moving_average(netuid, moving_average); - } - - fn set_max_allowed_validators(netuid: u16, max_validators: u16) { - SubtensorModule::set_max_allowed_validators(netuid, max_validators); - } - - fn get_root_netuid() -> u16 { - SubtensorModule::get_root_netuid() - } - - fn if_subnet_exist(netuid: u16) -> bool { - SubtensorModule::if_subnet_exist(netuid) - } - - fn create_account_if_non_existent(coldkey: &AccountId, hotkey: &AccountId) { - SubtensorModule::create_account_if_non_existent(coldkey, hotkey) - } - - fn coldkey_owns_hotkey(coldkey: &AccountId, hotkey: &AccountId) -> bool { - SubtensorModule::coldkey_owns_hotkey(coldkey, hotkey) - } - - fn increase_stake_on_coldkey_hotkey_account( - coldkey: &AccountId, - hotkey: &AccountId, - increment: u64, - ) { - SubtensorModule::increase_stake_on_coldkey_hotkey_account(coldkey, hotkey, increment); - } - - fn add_balance_to_coldkey_account(coldkey: &AccountId, amount: Balance) { - SubtensorModule::add_balance_to_coldkey_account(coldkey, amount); - } - - fn get_current_block_as_u64() -> u64 { - SubtensorModule::get_current_block_as_u64() - } - - fn get_subnetwork_n(netuid: u16) -> u16 { - SubtensorModule::get_subnetwork_n(netuid) - } - - fn get_max_allowed_uids(netuid: u16) -> u16 { - SubtensorModule::get_max_allowed_uids(netuid) - } - - fn append_neuron(netuid: u16, new_hotkey: &AccountId, block_number: u64) { - SubtensorModule::append_neuron(netuid, new_hotkey, block_number) - } - - fn get_neuron_to_prune(netuid: u16) -> u16 { - SubtensorModule::get_neuron_to_prune(netuid) - } - - fn replace_neuron(netuid: u16, uid_to_replace: u16, new_hotkey: &AccountId, block_number: u64) { - SubtensorModule::replace_neuron(netuid, uid_to_replace, new_hotkey, block_number); - } - - fn set_total_issuance(total_issuance: u64) { - SubtensorModule::set_total_issuance(total_issuance); - } - - fn set_network_immunity_period(net_immunity_period: u64) { - SubtensorModule::set_network_immunity_period(net_immunity_period); - } - - fn set_network_min_lock(net_min_lock: u64) { - 
SubtensorModule::set_network_min_lock(net_min_lock); - } - - fn set_subnet_limit(limit: u16) { - SubtensorModule::set_max_subnets(limit); - } - - fn set_lock_reduction_interval(interval: u64) { - SubtensorModule::set_lock_reduction_interval(interval); - } - - fn set_tempo(netuid: u16, tempo: u16) { - SubtensorModule::set_tempo(netuid, tempo); - } +pub struct OriginPrivilegeCmp; - fn set_subnet_owner_cut(subnet_owner_cut: u16) { - SubtensorModule::set_subnet_owner_cut(subnet_owner_cut); - } - - fn set_network_rate_limit(limit: u64) { - SubtensorModule::set_network_rate_limit(limit); - } - - fn set_max_registrations_per_block(netuid: u16, max_registrations_per_block: u16) { - SubtensorModule::set_max_registrations_per_block(netuid, max_registrations_per_block); - } - - fn set_adjustment_alpha(netuid: u16, adjustment_alpha: u64) { - SubtensorModule::set_adjustment_alpha(netuid, adjustment_alpha); - } - - fn set_target_registrations_per_interval(netuid: u16, target_registrations_per_interval: u16) { - SubtensorModule::set_target_registrations_per_interval( - netuid, - target_registrations_per_interval, - ); - } - - fn set_network_pow_registration_allowed(netuid: u16, registration_allowed: bool) { - SubtensorModule::set_network_pow_registration_allowed(netuid, registration_allowed); - } - - fn set_network_registration_allowed(netuid: u16, registration_allowed: bool) { - SubtensorModule::set_network_pow_registration_allowed(netuid, registration_allowed); - } - - fn set_activity_cutoff(netuid: u16, activity_cutoff: u16) { - SubtensorModule::set_activity_cutoff(netuid, activity_cutoff); - } - - fn ensure_subnet_owner_or_root(o: RuntimeOrigin, netuid: u16) -> Result<(), DispatchError> { - SubtensorModule::ensure_subnet_owner_or_root(o, netuid) - } - - fn set_rho(netuid: u16, rho: u16) { - SubtensorModule::set_rho(netuid, rho); - } - - fn set_kappa(netuid: u16, kappa: u16) { - SubtensorModule::set_kappa(netuid, kappa); - } - - fn set_max_allowed_uids(netuid: u16, max_allowed: u16) { - SubtensorModule::set_max_allowed_uids(netuid, max_allowed); - } - - fn set_min_allowed_weights(netuid: u16, min_allowed_weights: u16) { - SubtensorModule::set_min_allowed_weights(netuid, min_allowed_weights); - } - - fn set_immunity_period(netuid: u16, immunity_period: u16) { - SubtensorModule::set_immunity_period(netuid, immunity_period); - } - - fn set_max_weight_limit(netuid: u16, max_weight_limit: u16) { - SubtensorModule::set_max_weight_limit(netuid, max_weight_limit); - } - - fn set_scaling_law_power(netuid: u16, scaling_law_power: u16) { - SubtensorModule::set_scaling_law_power(netuid, scaling_law_power); - } - - fn set_validator_prune_len(netuid: u16, validator_prune_len: u64) { - SubtensorModule::set_validator_prune_len(netuid, validator_prune_len); - } - - fn set_adjustment_interval(netuid: u16, adjustment_interval: u16) { - SubtensorModule::set_adjustment_interval(netuid, adjustment_interval); - } - - fn set_weights_set_rate_limit(netuid: u16, weights_set_rate_limit: u64) { - SubtensorModule::set_weights_set_rate_limit(netuid, weights_set_rate_limit); - } - - fn set_rao_recycled(netuid: u16, rao_recycled: u64) { - SubtensorModule::set_rao_recycled(netuid, rao_recycled); - } - - fn is_hotkey_registered_on_network(netuid: u16, hotkey: &AccountId) -> bool { - SubtensorModule::is_hotkey_registered_on_network(netuid, hotkey) - } - - fn init_new_network(netuid: u16, tempo: u16) { - SubtensorModule::init_new_network(netuid, tempo); - } - - fn set_weights_min_stake(min_stake: u64) { - 
SubtensorModule::set_weights_min_stake(min_stake); - } - - fn set_nominator_min_required_stake(min_stake: u64) { - SubtensorModule::set_nominator_min_required_stake(min_stake); - } - - fn get_nominator_min_required_stake() -> u64 { - SubtensorModule::get_nominator_min_required_stake() - } - - fn clear_small_nominations() { - SubtensorModule::clear_small_nominations(); - } - - fn set_target_stakes_per_interval(target_stakes_per_interval: u64) { - SubtensorModule::set_target_stakes_per_interval(target_stakes_per_interval); - } - - fn set_commit_reveal_weights_interval(netuid: u16, interval: u64) { - SubtensorModule::set_commit_reveal_weights_interval(netuid, interval); - } - - fn set_commit_reveal_weights_enabled(netuid: u16, enabled: bool) { - SubtensorModule::set_commit_reveal_weights_enabled(netuid, enabled); - } - - fn set_liquid_alpha_enabled(netuid: u16, enabled: bool) { - SubtensorModule::set_liquid_alpha_enabled(netuid, enabled); - } - fn do_set_alpha_values( - origin: RuntimeOrigin, - netuid: u16, - alpha_low: u16, - alpha_high: u16, - ) -> Result<(), DispatchError> { - SubtensorModule::do_set_alpha_values(origin, netuid, alpha_low, alpha_high) +impl PrivilegeCmp for OriginPrivilegeCmp { + fn cmp_privilege(_left: &OriginCaller, _right: &OriginCaller) -> Option { + None } } @@ -493,10 +249,29 @@ impl pallet_admin_utils::Config for Test { type MaxAuthorities = ConstU32<32>; type Aura = (); type Balance = Balance; - type Subtensor = SubtensorIntrf; type WeightInfo = (); } +parameter_types! { + pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * + BlockWeights::get().max_block; + pub const MaxScheduledPerBlock: u32 = 50; + pub const NoPreimagePostponement: Option = Some(10); +} + +impl pallet_scheduler::Config for Test { + type RuntimeOrigin = RuntimeOrigin; + type RuntimeEvent = RuntimeEvent; + type PalletsOrigin = OriginCaller; + type RuntimeCall = RuntimeCall; + type MaximumWeight = MaximumSchedulerWeight; + type ScheduleOrigin = EnsureRoot; + type MaxScheduledPerBlock = MaxScheduledPerBlock; + type WeightInfo = pallet_scheduler::weights::SubstrateWeight; + type OriginPrivilegeCmp = OriginPrivilegeCmp; + type Preimages = (); +} + // Build genesis storage according to the mock runtime. 
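On the new mock wiring above: `OriginPrivilegeCmp` is what `pallet_scheduler` consults when one origin tries to cancel or reschedule another origin's task, and always answering `None` (incomparable) is the most restrictive choice for tests. If a test ever needs root to be able to cancel scheduled work, a common alternative is sketched below; it is illustration only, not part of this change:

use sp_std::cmp::Ordering;

pub struct RootOutranksAll;
impl PrivilegeCmp<OriginCaller> for RootOutranksAll {
    fn cmp_privilege(left: &OriginCaller, right: &OriginCaller) -> Option<Ordering> {
        if left == right {
            return Some(Ordering::Equal);
        }
        match (left, right) {
            // Root outranks every other origin; everything else is incomparable.
            (OriginCaller::system(frame_system::RawOrigin::Root), _) => Some(Ordering::Greater),
            (_, OriginCaller::system(frame_system::RawOrigin::Root)) => Some(Ordering::Less),
            _ => None,
        }
    }
}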
pub fn new_test_ext() -> sp_io::TestExternalities { sp_tracing::try_init_simple(); diff --git a/pallets/admin-utils/tests/tests.rs b/pallets/admin-utils/tests/tests.rs index 9df59978f..8ab85f177 100644 --- a/pallets/admin-utils/tests/tests.rs +++ b/pallets/admin-utils/tests/tests.rs @@ -1,12 +1,12 @@ use frame_support::sp_runtime::DispatchError; use frame_support::{ - assert_err, assert_ok, + assert_err, assert_noop, assert_ok, dispatch::{DispatchClass, GetDispatchInfo, Pays}, }; use frame_system::Config; use pallet_admin_utils::Error; use pallet_subtensor::Error as SubtensorError; -use pallet_subtensor::{migration, Event}; +use pallet_subtensor::{migrations, Event}; use sp_core::U256; mod mock; @@ -16,7 +16,7 @@ use mock::*; fn test_sudo_set_default_take() { new_test_ext().execute_with(|| { let to_be_set: u16 = 10; - let init_value: u16 = SubtensorModule::get_default_take(); + let init_value: u16 = SubtensorModule::get_default_delegate_take(); assert_eq!( AdminUtils::sudo_set_default_take( <::RuntimeOrigin>::signed(U256::from(0)), @@ -24,12 +24,12 @@ fn test_sudo_set_default_take() { ), Err(DispatchError::BadOrigin) ); - assert_eq!(SubtensorModule::get_default_take(), init_value); + assert_eq!(SubtensorModule::get_default_delegate_take(), init_value); assert_ok!(AdminUtils::sudo_set_default_take( <::RuntimeOrigin>::root(), to_be_set )); - assert_eq!(SubtensorModule::get_default_take(), to_be_set); + assert_eq!(SubtensorModule::get_default_delegate_take(), to_be_set); }); } @@ -1232,7 +1232,7 @@ fn test_sudo_get_set_alpha() { // Enable Liquid Alpha and setup SubtensorModule::set_liquid_alpha_enabled(netuid, true); - migration::migrate_create_root_network::(); + migrations::migrate_create_root_network::migrate_create_root_network::(); SubtensorModule::add_balance_to_coldkey_account(&coldkey, 1_000_000_000_000_000); assert_ok!(SubtensorModule::root_register(signer.clone(), hotkey,)); assert_ok!(SubtensorModule::add_stake(signer.clone(), hotkey, 1000)); @@ -1361,3 +1361,77 @@ fn test_sudo_get_set_alpha() { )); }); } + +#[test] +fn test_sudo_set_coldkey_swap_schedule_duration() { + new_test_ext().execute_with(|| { + // Arrange + let root = RuntimeOrigin::root(); + let non_root = RuntimeOrigin::signed(U256::from(1)); + let new_duration = 100u32.into(); + + // Act & Assert: Non-root account should fail + assert_noop!( + AdminUtils::sudo_set_coldkey_swap_schedule_duration(non_root, new_duration), + DispatchError::BadOrigin + ); + + // Act: Root account should succeed + assert_ok!(AdminUtils::sudo_set_coldkey_swap_schedule_duration( + root.clone(), + new_duration + )); + + // Assert: Check if the duration was actually set + assert_eq!( + pallet_subtensor::ColdkeySwapScheduleDuration::::get(), + new_duration + ); + + // Act & Assert: Setting the same value again should succeed (idempotent operation) + assert_ok!(AdminUtils::sudo_set_coldkey_swap_schedule_duration( + root, + new_duration + )); + + // You might want to check for events here if your pallet emits them + System::assert_last_event(Event::ColdkeySwapScheduleDurationSet(new_duration).into()); + }); +} + +#[test] +fn test_sudo_set_dissolve_network_schedule_duration() { + new_test_ext().execute_with(|| { + // Arrange + let root = RuntimeOrigin::root(); + let non_root = RuntimeOrigin::signed(U256::from(1)); + let new_duration = 200u32.into(); + + // Act & Assert: Non-root account should fail + assert_noop!( + AdminUtils::sudo_set_dissolve_network_schedule_duration(non_root, new_duration), + DispatchError::BadOrigin + ); + + // Act: Root 
account should succeed + assert_ok!(AdminUtils::sudo_set_dissolve_network_schedule_duration( + root.clone(), + new_duration + )); + + // Assert: Check if the duration was actually set + assert_eq!( + pallet_subtensor::DissolveNetworkScheduleDuration::::get(), + new_duration + ); + + // Act & Assert: Setting the same value again should succeed (idempotent operation) + assert_ok!(AdminUtils::sudo_set_dissolve_network_schedule_duration( + root, + new_duration + )); + + // You might want to check for events here if your pallet emits them + System::assert_last_event(Event::DissolveNetworkScheduleDurationSet(new_duration).into()); + }); +} diff --git a/pallets/collective/src/lib.rs b/pallets/collective/src/lib.rs index 66c55036d..823e92663 100644 --- a/pallets/collective/src/lib.rs +++ b/pallets/collective/src/lib.rs @@ -165,6 +165,7 @@ pub struct Votes { /// The hard end time of this vote. end: BlockNumber, } + #[deny(missing_docs)] #[frame_support::pallet] pub mod pallet { @@ -951,10 +952,9 @@ impl, I: 'static> Pallet { /// /// If not `approved`: /// - one event deposited. - /// - /// Two removals, one mutation. - /// Computation and i/o `O(P)` where: - /// - `P` is number of active proposals + /// - two removals, one mutation. + /// - computation and i/o `O(P)` where: + /// - `P` is number of active proposals fn do_approve_proposal( seats: MemberCount, yes_votes: MemberCount, diff --git a/pallets/commitments/src/types.rs b/pallets/commitments/src/types.rs index 912a474c0..06bafcaac 100644 --- a/pallets/commitments/src/types.rs +++ b/pallets/commitments/src/types.rs @@ -299,6 +299,7 @@ pub struct CommitmentInfo> { /// /// NOTE: This is stored separately primarily to facilitate the addition of extra fields in a /// backwards compatible way through a specialized `Decode` impl. +#[freeze_struct("632f12850e51c420")] #[derive( CloneNoBound, Encode, Eq, MaxEncodedLen, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo, )] diff --git a/pallets/registry/src/types.rs b/pallets/registry/src/types.rs index 0badd5669..58cc5ed19 100644 --- a/pallets/registry/src/types.rs +++ b/pallets/registry/src/types.rs @@ -367,6 +367,7 @@ impl> IdentityInfo { /// /// NOTE: This is stored separately primarily to facilitate the addition of extra fields in a /// backwards compatible way through a specialized `Decode` impl. 
+#[freeze_struct("797b69e82710bb21")] #[derive( CloneNoBound, Encode, Eq, MaxEncodedLen, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo, )] diff --git a/pallets/subtensor/Cargo.toml b/pallets/subtensor/Cargo.toml index a0835008f..3023d1e0d 100644 --- a/pallets/subtensor/Cargo.toml +++ b/pallets/subtensor/Cargo.toml @@ -28,6 +28,7 @@ frame-support = { workspace = true } frame-system = { workspace = true } sp-io = { workspace = true } serde = { workspace = true, features = ["derive"] } +serde_json = { workspace = true } serde-tuple-vec-map = { workspace = true } serde_bytes = { workspace = true, features = ["alloc"] } serde_with = { workspace = true, features = ["macros"] } @@ -48,12 +49,15 @@ num-traits = { version = "0.2.19", default-features = false, features = ["libm"] [dev-dependencies] pallet-balances = { workspace = true, features = ["std"] } +pallet-scheduler = { workspace = true } sp-version = { workspace = true } # Substrate sp-tracing = { workspace = true } parity-util-mem = { workspace = true, features = ["primitive-types"] } rand = { workspace = true } sp-core = { workspace = true } +sp-std = { workspace = true } +pallet-preimage = { workspace = true } [features] default = ["std"] @@ -67,6 +71,8 @@ std = [ "pallet-membership/std", "substrate-fixed/std", "pallet-balances/std", + "pallet-preimage/std", + "pallet-scheduler/std", "pallet-transaction-payment/std", "pallet-utility/std", "sp-core/std", @@ -83,6 +89,7 @@ std = [ "serde_with/std", "substrate-fixed/std", "num-traits/std", + "serde_json/std" ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", @@ -92,13 +99,17 @@ runtime-benchmarks = [ "pallet-membership/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "sp-runtime/runtime-benchmarks", - "pallet-collective/runtime-benchmarks" + "pallet-collective/runtime-benchmarks", + "pallet-preimage/runtime-benchmarks", + "pallet-scheduler/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", "pallet-balances/try-runtime", "pallet-membership/try-runtime", + "pallet-preimage/try-runtime", + "pallet-scheduler/try-runtime", "pallet-transaction-payment/try-runtime", "pallet-utility/try-runtime", "sp-runtime/try-runtime", diff --git a/pallets/subtensor/rpc/Cargo.toml b/pallets/subtensor/rpc/Cargo.toml index db2f5f147..861c313d8 100644 --- a/pallets/subtensor/rpc/Cargo.toml +++ b/pallets/subtensor/rpc/Cargo.toml @@ -26,8 +26,8 @@ sp-runtime = { workspace = true } # local packages -subtensor-custom-rpc-runtime-api = { version = "0.0.2", path = "../runtime-api", default-features = false } -pallet-subtensor = { version = "4.0.0-dev", path = "../../subtensor", default-features = false } +subtensor-custom-rpc-runtime-api = { path = "../runtime-api", default-features = false } +pallet-subtensor = { path = "../../subtensor", default-features = false } [features] default = ["std"] diff --git a/pallets/subtensor/rpc/src/lib.rs b/pallets/subtensor/rpc/src/lib.rs index 2f71e9c21..2445a5eda 100644 --- a/pallets/subtensor/rpc/src/lib.rs +++ b/pallets/subtensor/rpc/src/lib.rs @@ -46,6 +46,10 @@ pub trait SubtensorCustomApi { fn get_subnet_info(&self, netuid: u16, at: Option) -> RpcResult>; #[method(name = "subnetInfo_getSubnetsInfo")] fn get_subnets_info(&self, at: Option) -> RpcResult>; + #[method(name = "subnetInfo_getSubnetInfo_v2")] + fn get_subnet_info_v2(&self, netuid: u16, at: Option) -> RpcResult>; + #[method(name = "subnetInfo_getSubnetsInf_v2")] + fn get_subnets_info_v2(&self, at: Option) -> RpcResult>; #[method(name = 
"subnetInfo_getSubnetHyperparams")] fn get_subnet_hyperparams(&self, netuid: u16, at: Option) -> RpcResult>; @@ -215,6 +219,26 @@ where .map_err(|e| Error::RuntimeError(format!("Unable to get subnets info: {:?}", e)).into()) } + fn get_subnet_info_v2( + &self, + netuid: u16, + at: Option<::Hash>, + ) -> RpcResult> { + let api = self.client.runtime_api(); + let at = at.unwrap_or_else(|| self.client.info().best_hash); + + api.get_subnet_info_v2(at, netuid) + .map_err(|e| Error::RuntimeError(format!("Unable to get subnet info: {:?}", e)).into()) + } + + fn get_subnets_info_v2(&self, at: Option<::Hash>) -> RpcResult> { + let api = self.client.runtime_api(); + let at = at.unwrap_or_else(|| self.client.info().best_hash); + + api.get_subnets_info_v2(at) + .map_err(|e| Error::RuntimeError(format!("Unable to get subnets info: {:?}", e)).into()) + } + fn get_network_lock_cost(&self, at: Option<::Hash>) -> RpcResult { let api = self.client.runtime_api(); let at = at.unwrap_or_else(|| self.client.info().best_hash); diff --git a/pallets/subtensor/runtime-api/src/lib.rs b/pallets/subtensor/runtime-api/src/lib.rs index 9095ad54a..ca43384b8 100644 --- a/pallets/subtensor/runtime-api/src/lib.rs +++ b/pallets/subtensor/runtime-api/src/lib.rs @@ -21,6 +21,8 @@ sp_api::decl_runtime_apis! { pub trait SubnetInfoRuntimeApi { fn get_subnet_info(netuid: u16) -> Vec; fn get_subnets_info() -> Vec; + fn get_subnet_info_v2(netuid: u16) -> Vec; + fn get_subnets_info_v2() -> Vec; fn get_subnet_hyperparams(netuid: u16) -> Vec; } diff --git a/pallets/subtensor/src/benchmarks.rs b/pallets/subtensor/src/benchmarks.rs index 03e087a92..4915bb3ac 100644 --- a/pallets/subtensor/src/benchmarks.rs +++ b/pallets/subtensor/src/benchmarks.rs @@ -159,7 +159,7 @@ benchmarks! { Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), wallet_bal); assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); - assert_ok!(Subtensor::::do_become_delegate(RawOrigin::Signed(coldkey.clone()).into(), hotkey.clone(), Subtensor::::get_default_take())); + assert_ok!(Subtensor::::do_become_delegate(RawOrigin::Signed(coldkey.clone()).into(), hotkey.clone(), Subtensor::::get_default_delegate_take())); // Stake 10% of our current total staked TAO let u64_staked_amt = 100_000_000_000; @@ -312,7 +312,8 @@ benchmarks! 
{ let amount_to_be_staked = 100_000_000_000_000u64; Subtensor::::add_balance_to_coldkey_account(&coldkey.clone(), amount_to_be_staked); assert_ok!(Subtensor::::register_network(RawOrigin::Signed(coldkey.clone()).into())); - }: dissolve_network(RawOrigin::Signed(coldkey), 1) + }: dissolve_network(RawOrigin::Root, coldkey.clone(), 1) + // swap_hotkey { // let seed: u32 = 1; @@ -429,4 +430,95 @@ reveal_weights { }: reveal_weights(RawOrigin::Signed(hotkey.clone()), netuid, uids, weight_values, salt, version_key) + schedule_swap_coldkey { + let old_coldkey: T::AccountId = account("old_cold", 0, 1); + let new_coldkey: T::AccountId = account("new_cold", 1, 2); + }: schedule_swap_coldkey(RawOrigin::Signed(old_coldkey.clone()), new_coldkey.clone()) + + schedule_dissolve_network { + let coldkey: T::AccountId = account("coldkey", 0, 1); + let netuid = 1; + }: schedule_dissolve_network(RawOrigin::Signed(coldkey.clone()), netuid) + benchmark_sudo_set_tx_childkey_take_rate_limit { + // We don't need to set up any initial state for this benchmark + // as it's a simple setter function that only requires root origin + let new_rate_limit: u64 = 100; +}: sudo_set_tx_childkey_take_rate_limit(RawOrigin::Root, new_rate_limit) + + benchmark_set_childkey_take { + // Setup + let netuid: u16 = 1; + let tempo: u16 = 1; + let seed: u32 = 1; + let coldkey: T::AccountId = account("Cold", 0, seed); + let hotkey: T::AccountId = account("Hot", 0, seed); + let take: u16 = 1000; // 10% in basis points + + // Initialize the network + Subtensor::::init_new_network(netuid, tempo); + + // Register the hotkey + Subtensor::::set_burn(netuid, 1); + let amount_to_be_staked = 1_000_000u32.into(); + Subtensor::::add_balance_to_coldkey_account(&coldkey, amount_to_be_staked); + assert_ok!(Subtensor::::do_burned_registration(RawOrigin::Signed(coldkey.clone()).into(), netuid, hotkey.clone())); +}: set_childkey_take(RawOrigin::Signed(coldkey), hotkey, netuid, take) + + swap_coldkey { + // Set up initial state + let old_coldkey: T::AccountId = account("old_coldkey", 0, 0); + let new_coldkey: T::AccountId = account("new_coldkey", 0, 0); + let hotkey1: T::AccountId = account("hotkey1", 0, 0); + let netuid = 1u16; + let stake_amount1 = 1000u64; + let stake_amount2 = 2000u64; + let swap_cost = Subtensor::::get_key_swap_cost(); + let free_balance_old = 12345u64 + swap_cost; + let tempo: u16 = 1; + + // Setup initial state + Subtensor::::init_new_network(netuid, tempo); + Subtensor::::set_network_registration_allowed(netuid, true); + Subtensor::::set_network_pow_registration_allowed(netuid, true); + + let block_number: u64 = Subtensor::::get_current_block_as_u64(); + let (nonce, work): (u64, Vec) = Subtensor::::create_work_for_block_number( + netuid, + block_number, + 3, + &hotkey1, + ); + + let _ = Subtensor::::register( + ::RuntimeOrigin::from(RawOrigin::Signed(old_coldkey.clone())), + netuid, + block_number, + nonce, + work.clone(), + hotkey1.clone(), + old_coldkey.clone(), + ); + + // Add balance to old coldkey + Subtensor::::add_balance_to_coldkey_account( + &old_coldkey, + stake_amount1 + stake_amount2 + free_balance_old, + ); + + // Insert an Identity + let name: Vec = b"The fourth Coolest Identity".to_vec(); + let identity: ChainIdentity = ChainIdentity { + name: name.clone(), + url: vec![], + image: vec![], + discord: vec![], + description: vec![], + additional: vec![], + }; + + Identities::::insert(&old_coldkey, identity); + + // Benchmark setup complete, now execute the extrinsic +}: swap_coldkey(RawOrigin::Root, old_coldkey.clone(), 
new_coldkey.clone()) + } diff --git a/pallets/subtensor/src/block_step.rs b/pallets/subtensor/src/block_step.rs deleted file mode 100644 index 784332e4e..000000000 --- a/pallets/subtensor/src/block_step.rs +++ /dev/null @@ -1,575 +0,0 @@ -use super::*; -use frame_support::storage::IterableStorageDoubleMap; -use frame_support::storage::IterableStorageMap; -use sp_runtime::Saturating; -use substrate_fixed::types::I110F18; -use substrate_fixed::types::I64F64; -use substrate_fixed::types::I96F32; - -impl Pallet { - /// Executes the necessary operations for each block. - pub fn block_step() -> Result<(), &'static str> { - let block_number: u64 = Self::get_current_block_as_u64(); - log::debug!("block_step for block: {:?} ", block_number); - // --- 1. Adjust difficulties. - Self::adjust_registration_terms_for_networks(); - // --- 2. Calculate per-subnet emissions - match Self::root_epoch(block_number) { - Ok(_) => (), - Err(e) => { - log::trace!("Error while running root epoch: {:?}", e); - } - } - // --- 3. Drains emission tuples ( hotkey, amount ). - Self::drain_emission(block_number); - // --- 4. Generates emission tuples from epoch functions. - Self::generate_emission(block_number); - // Return ok. - Ok(()) - } - - #[allow(clippy::arithmetic_side_effects)] - /// Helper function which returns the number of blocks remaining before we will run the epoch on this - /// network. Networks run their epoch when (block_number + netuid + 1 ) % (tempo + 1) = 0 - /// - pub fn blocks_until_next_epoch(netuid: u16, tempo: u16, block_number: u64) -> u64 { - // tempo | netuid | # first epoch block - // 1 0 0 - // 1 1 1 - // 2 0 1 - // 2 1 0 - // 100 0 99 - // 100 1 98 - // Special case: tempo = 0, the network never runs. - if tempo == 0 { - return 1000; - } - (tempo as u64).saturating_sub( - block_number.saturating_add(netuid as u64).saturating_add(1) - % (tempo as u64).saturating_add(1), - ) - } - - #[allow(clippy::arithmetic_side_effects)] - /// Helper function returns the number of tuples to drain on a particular step based on - /// the remaining tuples to sink and the block number - /// - pub fn tuples_to_drain_this_block( - netuid: u16, - tempo: u16, - block_number: u64, - n_remaining: usize, - ) -> usize { - let blocks_until_epoch: u64 = Self::blocks_until_next_epoch(netuid, tempo, block_number); - if blocks_until_epoch.saturating_div(2) == 0 { - return n_remaining; - } // drain all. - if tempo.saturating_div(2) == 0 { - return n_remaining; - } // drain all - if n_remaining == 0 { - return 0; - } // nothing to drain at all. - // Else return enough tuples to drain all within half the epoch length. - let to_sink_via_tempo: usize = - n_remaining.saturating_div((tempo as usize).saturating_div(2)); - let to_sink_via_blocks_until_epoch: usize = - n_remaining.saturating_div((blocks_until_epoch as usize).saturating_div(2)); - if to_sink_via_tempo > to_sink_via_blocks_until_epoch { - to_sink_via_tempo - } else { - to_sink_via_blocks_until_epoch - } - } - - pub fn get_loaded_emission_tuples(netuid: u16) -> Option> { - LoadedEmission::::get(netuid) - } - - /// Reads from the loaded emission storage which contains lists of pending emission tuples ( hotkey, amount ) - /// and distributes small chunks of them at a time. - /// - pub fn drain_emission(_: u64) { - // --- 1. We iterate across each network. - for (netuid, _) in as IterableStorageMap>::iter() { - let Some(tuples_to_drain) = Self::get_loaded_emission_tuples(netuid) else { - // There are no tuples to emit. 
- continue; - }; - let mut total_emitted: u64 = 0; - for (hotkey, server_amount, validator_amount) in tuples_to_drain.iter() { - Self::emit_inflation_through_hotkey_account( - hotkey, - *server_amount, - *validator_amount, - ); - total_emitted.saturating_accrue((*server_amount).saturating_add(*validator_amount)); - } - LoadedEmission::::remove(netuid); - TotalIssuance::::put(TotalIssuance::::get().saturating_add(total_emitted)); - } - } - - /// Iterates through networks queues more emission onto their pending storage. - /// If a network has no blocks left until tempo, we run the epoch function and generate - /// more token emission tuples for later draining onto accounts. - /// - pub fn generate_emission(block_number: u64) { - // --- 1. Iterate across each network and add pending emission into stash. - for (netuid, tempo) in as IterableStorageMap>::iter() { - // Skip the root network or subnets with registrations turned off - if netuid == Self::get_root_netuid() { - // Root emission or subnet emission is burned - continue; - } - - // --- 2. Queue the emission due to this network. - let mut new_queued_emission: u64 = Self::get_subnet_emission_value(netuid); - if !Self::is_registration_allowed(netuid) { - new_queued_emission = 0; // No emission for this network if registration is off. - } - - log::debug!( - "generate_emission for netuid: {:?} with tempo: {:?} and emission: {:?}", - netuid, - tempo, - new_queued_emission, - ); - - let subnet_has_owner = SubnetOwner::::contains_key(netuid); - let mut remaining = I96F32::from_num(new_queued_emission); - if subnet_has_owner { - let cut = remaining - .saturating_mul(I96F32::from_num(Self::get_subnet_owner_cut())) - .saturating_div(I96F32::from_num(u16::MAX)); - - remaining = remaining.saturating_sub(cut); - - Self::add_balance_to_coldkey_account( - &Self::get_subnet_owner(netuid), - cut.to_num::(), - ); - - // We are creating tokens here from the coinbase. - Self::coinbase(cut.to_num::()); - } - // --- 5. Add remaining amount to the network's pending emission. - PendingEmission::::mutate(netuid, |queued| { - queued.saturating_accrue(remaining.to_num::()) - }); - log::debug!( - "netuid_i: {:?} queued_emission: +{:?} ", - netuid, - new_queued_emission - ); - - // --- 6. Check to see if this network has reached tempo. - if Self::blocks_until_next_epoch(netuid, tempo, block_number) != 0 { - // --- 3.1 No epoch, increase blocks since last step and continue, - Self::set_blocks_since_last_step( - netuid, - Self::get_blocks_since_last_step(netuid).saturating_add(1), - ); - continue; - } - - // --- 7 This network is at tempo and we are running its epoch. - // First drain the queued emission. - let emission_to_drain: u64 = PendingEmission::::get(netuid); - PendingEmission::::insert(netuid, 0); - - // --- 8. Run the epoch mechanism and return emission tuples for hotkeys in the network. - let emission_tuples_this_block: Vec<(T::AccountId, u64, u64)> = - Self::epoch(netuid, emission_to_drain); - log::debug!( - "netuid_i: {:?} emission_to_drain: {:?} ", - netuid, - emission_to_drain - ); - - // --- 9. Check that the emission does not exceed the allowed total. - let emission_sum: u128 = emission_tuples_this_block - .iter() - .map(|(_account_id, ve, se)| (*ve as u128).saturating_add(*se as u128)) - .sum(); - if emission_sum > emission_to_drain as u128 { - continue; - } // Saftey check. - - // --- 10. Sink the emission tuples onto the already loaded. 
- let mut concat_emission_tuples: Vec<(T::AccountId, u64, u64)> = - emission_tuples_this_block.clone(); - if let Some(mut current_emission_tuples) = Self::get_loaded_emission_tuples(netuid) { - // 10.a We already have loaded emission tuples, so we concat the new ones. - concat_emission_tuples.append(&mut current_emission_tuples); - } - LoadedEmission::::insert(netuid, concat_emission_tuples); - - // --- 11 Set counters. - Self::set_blocks_since_last_step(netuid, 0); - Self::set_last_mechanism_step_block(netuid, block_number); - } - } - /// Distributes token inflation through the hotkey based on emission. The call ensures that the inflation - /// is distributed onto the accounts in proportion of the stake delegated minus the take. This function - /// is called after an epoch to distribute the newly minted stake according to delegation. - /// - pub fn emit_inflation_through_hotkey_account( - hotkey: &T::AccountId, - server_emission: u64, - validator_emission: u64, - ) { - // --- 1. Check if the hotkey is a delegate. If not, we simply pass the stake through to the - // coldkey - hotkey account as normal. - if !Self::hotkey_is_delegate(hotkey) { - Self::increase_stake_on_hotkey_account( - hotkey, - server_emission.saturating_add(validator_emission), - ); - return; - } - // Then this is a delegate, we distribute validator_emission, then server_emission. - - // --- 2. The hotkey is a delegate. We first distribute a proportion of the validator_emission to the hotkey - // directly as a function of its 'take' - let total_hotkey_stake: u64 = Self::get_total_stake_for_hotkey(hotkey); - let delegate_take: u64 = - Self::calculate_delegate_proportional_take(hotkey, validator_emission); - let validator_emission_minus_take: u64 = validator_emission.saturating_sub(delegate_take); - let mut remaining_validator_emission: u64 = validator_emission_minus_take; - - // 3. -- The remaining emission goes to the owners in proportion to the stake delegated. - for (owning_coldkey_i, stake_i) in - as IterableStorageDoubleMap>::iter_prefix( - hotkey, - ) - { - // --- 4. The emission proportion is remaining_emission * ( stake / total_stake ). - let stake_proportion: u64 = Self::calculate_stake_proportional_emission( - stake_i, - total_hotkey_stake, - validator_emission_minus_take, - ); - Self::increase_stake_on_coldkey_hotkey_account( - &owning_coldkey_i, - hotkey, - stake_proportion, - ); - log::debug!( - "owning_coldkey_i: {:?} hotkey: {:?} emission: +{:?} ", - owning_coldkey_i, - hotkey, - stake_proportion - ); - remaining_validator_emission.saturating_reduce(stake_proportion); - } - - // --- 5. Last increase final account balance of delegate after 4, since 5 will change the stake proportion of - // the delegate and effect calculation in 4. - Self::increase_stake_on_hotkey_account( - hotkey, - delegate_take.saturating_add(remaining_validator_emission), - ); - log::debug!("delkey: {:?} delegate_take: +{:?} ", hotkey, delegate_take); - // Also emit the server_emission to the hotkey - // The server emission is distributed in-full to the delegate owner. - // We do this after 4. for the same reason as above. - Self::increase_stake_on_hotkey_account(hotkey, server_emission); - } - - /// Increases the stake on the cold - hot pairing by increment while also incrementing other counters. - /// This function should be called rather than set_stake under account. 
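
A standalone sketch of the split performed by the removed `emit_inflation_through_hotkey_account` above: carve the delegate's take out of the validator emission, pay each nominator a stake-proportional slice of the remainder, and leave the take, any rounding dust, and the full server emission with the delegate. The pallet's `I64F64` fixed point is replaced by u128 integer math here.

```rust
/// Illustrative split of one hotkey's emission among its nominators; not the pallet function.
fn split_validator_emission(
    validator_emission: u64,
    server_emission: u64,
    take_u16: u16,            // delegate take, normalized to u16::MAX
    nominator_stakes: &[u64], // stake of each nominator on this hotkey
) -> (u64, Vec<u64>) {
    let total_stake: u128 = nominator_stakes.iter().map(|s| u128::from(*s)).sum();
    let take =
        (u128::from(validator_emission) * u128::from(take_u16) / u128::from(u16::MAX)) as u64;
    let distributable = validator_emission - take;

    let mut remaining = distributable;
    let mut payouts = Vec::with_capacity(nominator_stakes.len());
    for stake in nominator_stakes {
        // Each nominator receives remainder * stake_i / total_stake (floored).
        let share = if total_stake == 0 {
            0
        } else {
            (u128::from(distributable) * u128::from(*stake) / total_stake) as u64
        };
        payouts.push(share);
        remaining -= share; // the sum of floored shares never exceeds `distributable`
    }
    // Delegate keeps its take, the undistributed remainder, and the whole server emission.
    let delegate_total = (take + remaining).saturating_add(server_emission);
    (delegate_total, payouts)
}
```
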
- /// - pub fn block_step_increase_stake_on_coldkey_hotkey_account( - coldkey: &T::AccountId, - hotkey: &T::AccountId, - increment: u64, - ) { - TotalColdkeyStake::::mutate(coldkey, |old| old.saturating_add(increment)); - TotalHotkeyStake::::insert( - hotkey, - TotalHotkeyStake::::get(hotkey).saturating_add(increment), - ); - Stake::::insert( - hotkey, - coldkey, - Stake::::get(hotkey, coldkey).saturating_add(increment), - ); - TotalStake::::put(TotalStake::::get().saturating_add(increment)); - } - - /// Decreases the stake on the cold - hot pairing by the decrement while decreasing other counters. - /// - pub fn block_step_decrease_stake_on_coldkey_hotkey_account( - coldkey: &T::AccountId, - hotkey: &T::AccountId, - decrement: u64, - ) { - TotalColdkeyStake::::mutate(coldkey, |old| old.saturating_sub(decrement)); - TotalHotkeyStake::::insert( - hotkey, - TotalHotkeyStake::::get(hotkey).saturating_sub(decrement), - ); - Stake::::insert( - hotkey, - coldkey, - Stake::::get(hotkey, coldkey).saturating_sub(decrement), - ); - TotalStake::::put(TotalStake::::get().saturating_sub(decrement)); - } - - /// Returns emission awarded to a hotkey as a function of its proportion of the total stake. - /// - pub fn calculate_stake_proportional_emission( - stake: u64, - total_stake: u64, - emission: u64, - ) -> u64 { - if total_stake == 0 { - return 0; - }; - let stake_proportion: I64F64 = - I64F64::from_num(stake).saturating_div(I64F64::from_num(total_stake)); - let proportional_emission: I64F64 = - I64F64::from_num(emission).saturating_mul(stake_proportion); - proportional_emission.to_num::() - } - - /// Returns the delegated stake 'take' assigned to this key. (If exists, otherwise 0) - /// - pub fn calculate_delegate_proportional_take(hotkey: &T::AccountId, emission: u64) -> u64 { - if Self::hotkey_is_delegate(hotkey) { - let take_proportion: I64F64 = I64F64::from_num(Delegates::::get(hotkey)) - .saturating_div(I64F64::from_num(u16::MAX)); - let take_emission: I64F64 = take_proportion.saturating_mul(I64F64::from_num(emission)); - take_emission.to_num::() - } else { - 0 - } - } - - /// Adjusts the network difficulties/burns of every active network. Resetting state parameters. - /// - pub fn adjust_registration_terms_for_networks() { - log::debug!("adjust_registration_terms_for_networks"); - - // --- 1. Iterate through each network. - for (netuid, _) in as IterableStorageMap>::iter() { - // --- 2. Pull counters for network difficulty. - let last_adjustment_block: u64 = Self::get_last_adjustment_block(netuid); - let adjustment_interval: u16 = Self::get_adjustment_interval(netuid); - let current_block: u64 = Self::get_current_block_as_u64(); - log::debug!("netuid: {:?} last_adjustment_block: {:?} adjustment_interval: {:?} current_block: {:?}", - netuid, - last_adjustment_block, - adjustment_interval, - current_block - ); - - // --- 3. Check if we are at the adjustment interval for this network. - // If so, we need to adjust the registration difficulty based on target and actual registrations. - if current_block.saturating_sub(last_adjustment_block) >= adjustment_interval as u64 { - log::debug!("interval reached."); - - // --- 4. Get the current counters for this network w.r.t burn and difficulty values. 
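
The two bookkeeping helpers above keep four counters in sync whenever stake on a coldkey/hotkey pair changes. A small in-memory model (the HashMaps and names below are illustrative stand-ins for the pallet's `TotalColdkeyStake`, `TotalHotkeyStake`, `Stake`, and `TotalStake` storage items):

```rust
use std::collections::HashMap;

#[derive(Default)]
struct StakeBook {
    per_coldkey: HashMap<String, u64>,
    per_hotkey: HashMap<String, u64>,
    per_pair: HashMap<(String, String), u64>, // (hotkey, coldkey) -> stake
    total: u64,
}

impl StakeBook {
    /// Every counter is bumped with saturating arithmetic so the bookkeeping cannot overflow.
    fn increase(&mut self, coldkey: &str, hotkey: &str, amount: u64) {
        let c = self.per_coldkey.entry(coldkey.to_string()).or_default();
        *c = c.saturating_add(amount);
        let h = self.per_hotkey.entry(hotkey.to_string()).or_default();
        *h = h.saturating_add(amount);
        let p = self
            .per_pair
            .entry((hotkey.to_string(), coldkey.to_string()))
            .or_default();
        *p = p.saturating_add(amount);
        self.total = self.total.saturating_add(amount);
    }

    /// Mirrors the decrease helper; saturating_sub keeps counters from underflowing.
    fn decrease(&mut self, coldkey: &str, hotkey: &str, amount: u64) {
        if let Some(c) = self.per_coldkey.get_mut(coldkey) {
            *c = c.saturating_sub(amount);
        }
        if let Some(h) = self.per_hotkey.get_mut(hotkey) {
            *h = h.saturating_sub(amount);
        }
        if let Some(p) = self.per_pair.get_mut(&(hotkey.to_string(), coldkey.to_string())) {
            *p = p.saturating_sub(amount);
        }
        self.total = self.total.saturating_sub(amount);
    }
}
```
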
- let current_burn: u64 = Self::get_burn_as_u64(netuid); - let current_difficulty: u64 = Self::get_difficulty_as_u64(netuid); - let registrations_this_interval: u16 = - Self::get_registrations_this_interval(netuid); - let pow_registrations_this_interval: u16 = - Self::get_pow_registrations_this_interval(netuid); - let burn_registrations_this_interval: u16 = - Self::get_burn_registrations_this_interval(netuid); - let target_registrations_this_interval: u16 = - Self::get_target_registrations_per_interval(netuid); - // --- 5. Adjust burn + pow - // There are six cases to consider. A, B, C, D, E, F - if registrations_this_interval > target_registrations_this_interval { - #[allow(clippy::comparison_chain)] - if pow_registrations_this_interval > burn_registrations_this_interval { - // A. There are too many registrations this interval and most of them are pow registrations - // this triggers an increase in the pow difficulty. - // pow_difficulty ++ - Self::set_difficulty( - netuid, - Self::upgraded_difficulty( - netuid, - current_difficulty, - registrations_this_interval, - target_registrations_this_interval, - ), - ); - } else if pow_registrations_this_interval < burn_registrations_this_interval { - // B. There are too many registrations this interval and most of them are burn registrations - // this triggers an increase in the burn cost. - // burn_cost ++ - Self::set_burn( - netuid, - Self::upgraded_burn( - netuid, - current_burn, - registrations_this_interval, - target_registrations_this_interval, - ), - ); - } else { - // F. There are too many registrations this interval and the pow and burn registrations are equal - // this triggers an increase in the burn cost and pow difficulty - // burn_cost ++ - Self::set_burn( - netuid, - Self::upgraded_burn( - netuid, - current_burn, - registrations_this_interval, - target_registrations_this_interval, - ), - ); - // pow_difficulty ++ - Self::set_difficulty( - netuid, - Self::upgraded_difficulty( - netuid, - current_difficulty, - registrations_this_interval, - target_registrations_this_interval, - ), - ); - } - } else { - // Not enough registrations this interval. - #[allow(clippy::comparison_chain)] - if pow_registrations_this_interval > burn_registrations_this_interval { - // C. There are not enough registrations this interval and most of them are pow registrations - // this triggers a decrease in the burn cost - // burn_cost -- - Self::set_burn( - netuid, - Self::upgraded_burn( - netuid, - current_burn, - registrations_this_interval, - target_registrations_this_interval, - ), - ); - } else if pow_registrations_this_interval < burn_registrations_this_interval { - // D. There are not enough registrations this interval and most of them are burn registrations - // this triggers a decrease in the pow difficulty - // pow_difficulty -- - Self::set_difficulty( - netuid, - Self::upgraded_difficulty( - netuid, - current_difficulty, - registrations_this_interval, - target_registrations_this_interval, - ), - ); - } else { - // E. 
There are not enough registrations this interval and the pow and burn registrations are equal - // this triggers a decrease in the burn cost and pow difficulty - // burn_cost -- - Self::set_burn( - netuid, - Self::upgraded_burn( - netuid, - current_burn, - registrations_this_interval, - target_registrations_this_interval, - ), - ); - // pow_difficulty -- - Self::set_difficulty( - netuid, - Self::upgraded_difficulty( - netuid, - current_difficulty, - registrations_this_interval, - target_registrations_this_interval, - ), - ); - } - } - - // --- 6. Drain all counters for this network for this interval. - Self::set_last_adjustment_block(netuid, current_block); - Self::set_registrations_this_interval(netuid, 0); - Self::set_pow_registrations_this_interval(netuid, 0); - Self::set_burn_registrations_this_interval(netuid, 0); - } else { - log::debug!("interval not reached."); - } - - // --- 7. Drain block registrations for each network. Needed for registration rate limits. - Self::set_registrations_this_block(netuid, 0); - } - } - - /// Calculates the upgraded difficulty by multiplying the current difficulty by the ratio ( reg_actual + reg_target / reg_target + reg_target ) - /// We use I110F18 to avoid any overflows on u64. Also min_difficulty and max_difficulty bound the range. - /// - pub fn upgraded_difficulty( - netuid: u16, - current_difficulty: u64, - registrations_this_interval: u16, - target_registrations_per_interval: u16, - ) -> u64 { - let updated_difficulty: I110F18 = I110F18::from_num(current_difficulty) - .saturating_mul(I110F18::from_num( - registrations_this_interval.saturating_add(target_registrations_per_interval), - )) - .saturating_div(I110F18::from_num( - target_registrations_per_interval.saturating_add(target_registrations_per_interval), - )); - let alpha: I110F18 = I110F18::from_num(Self::get_adjustment_alpha(netuid)) - .saturating_div(I110F18::from_num(u64::MAX)); - let next_value: I110F18 = alpha - .saturating_mul(I110F18::from_num(current_difficulty)) - .saturating_add( - I110F18::from_num(1.0) - .saturating_sub(alpha) - .saturating_mul(updated_difficulty), - ); - if next_value >= I110F18::from_num(Self::get_max_difficulty(netuid)) { - Self::get_max_difficulty(netuid) - } else if next_value <= I110F18::from_num(Self::get_min_difficulty(netuid)) { - return Self::get_min_difficulty(netuid); - } else { - return next_value.to_num::(); - } - } - - /// Calculates the upgraded burn by multiplying the current burn by the ratio ( reg_actual + reg_target / reg_target + reg_target ) - /// We use I110F18 to avoid any overflows on u64. Also min_burn and max_burn bound the range. 
- /// - pub fn upgraded_burn( - netuid: u16, - current_burn: u64, - registrations_this_interval: u16, - target_registrations_per_interval: u16, - ) -> u64 { - let updated_burn: I110F18 = I110F18::from_num(current_burn) - .saturating_mul(I110F18::from_num( - registrations_this_interval.saturating_add(target_registrations_per_interval), - )) - .saturating_div(I110F18::from_num( - target_registrations_per_interval.saturating_add(target_registrations_per_interval), - )); - let alpha: I110F18 = I110F18::from_num(Self::get_adjustment_alpha(netuid)) - .saturating_div(I110F18::from_num(u64::MAX)); - let next_value: I110F18 = alpha - .saturating_mul(I110F18::from_num(current_burn)) - .saturating_add( - I110F18::from_num(1.0) - .saturating_sub(alpha) - .saturating_mul(updated_burn), - ); - if next_value >= I110F18::from_num(Self::get_max_burn_as_u64(netuid)) { - Self::get_max_burn_as_u64(netuid) - } else if next_value <= I110F18::from_num(Self::get_min_burn_as_u64(netuid)) { - return Self::get_min_burn_as_u64(netuid); - } else { - return next_value.to_num::(); - } - } -} diff --git a/pallets/subtensor/src/coinbase/block_step.rs b/pallets/subtensor/src/coinbase/block_step.rs new file mode 100644 index 000000000..3c621155f --- /dev/null +++ b/pallets/subtensor/src/coinbase/block_step.rs @@ -0,0 +1,242 @@ +use super::*; +use frame_support::storage::IterableStorageMap; +use substrate_fixed::types::I110F18; + +impl Pallet { + /// Executes the necessary operations for each block. + pub fn block_step() -> Result<(), &'static str> { + let block_number: u64 = Self::get_current_block_as_u64(); + log::debug!("block_step for block: {:?} ", block_number); + // --- 1. Adjust difficulties. + Self::adjust_registration_terms_for_networks(); + // --- 2. Run emission through network. + Self::run_coinbase(); + // Return ok. + Ok(()) + } + + /// Adjusts the network difficulties/burns of every active network. Resetting state parameters. + /// + pub fn adjust_registration_terms_for_networks() { + log::debug!("adjust_registration_terms_for_networks"); + + // --- 1. Iterate through each network. + for (netuid, _) in as IterableStorageMap>::iter() { + // --- 2. Pull counters for network difficulty. + let last_adjustment_block: u64 = Self::get_last_adjustment_block(netuid); + let adjustment_interval: u16 = Self::get_adjustment_interval(netuid); + let current_block: u64 = Self::get_current_block_as_u64(); + log::debug!("netuid: {:?} last_adjustment_block: {:?} adjustment_interval: {:?} current_block: {:?}", + netuid, + last_adjustment_block, + adjustment_interval, + current_block + ); + + // --- 3. Check if we are at the adjustment interval for this network. + // If so, we need to adjust the registration difficulty based on target and actual registrations. + if current_block.saturating_sub(last_adjustment_block) >= adjustment_interval as u64 { + log::debug!("interval reached."); + + // --- 4. Get the current counters for this network w.r.t burn and difficulty values. + let current_burn: u64 = Self::get_burn_as_u64(netuid); + let current_difficulty: u64 = Self::get_difficulty_as_u64(netuid); + let registrations_this_interval: u16 = + Self::get_registrations_this_interval(netuid); + let pow_registrations_this_interval: u16 = + Self::get_pow_registrations_this_interval(netuid); + let burn_registrations_this_interval: u16 = + Self::get_burn_registrations_this_interval(netuid); + let target_registrations_this_interval: u16 = + Self::get_target_registrations_per_interval(netuid); + // --- 5. 
Adjust burn + pow + // There are six cases to consider. A, B, C, D, E, F + if registrations_this_interval > target_registrations_this_interval { + #[allow(clippy::comparison_chain)] + if pow_registrations_this_interval > burn_registrations_this_interval { + // A. There are too many registrations this interval and most of them are pow registrations + // this triggers an increase in the pow difficulty. + // pow_difficulty ++ + Self::set_difficulty( + netuid, + Self::upgraded_difficulty( + netuid, + current_difficulty, + registrations_this_interval, + target_registrations_this_interval, + ), + ); + } else if pow_registrations_this_interval < burn_registrations_this_interval { + // B. There are too many registrations this interval and most of them are burn registrations + // this triggers an increase in the burn cost. + // burn_cost ++ + Self::set_burn( + netuid, + Self::upgraded_burn( + netuid, + current_burn, + registrations_this_interval, + target_registrations_this_interval, + ), + ); + } else { + // F. There are too many registrations this interval and the pow and burn registrations are equal + // this triggers an increase in the burn cost and pow difficulty + // burn_cost ++ + Self::set_burn( + netuid, + Self::upgraded_burn( + netuid, + current_burn, + registrations_this_interval, + target_registrations_this_interval, + ), + ); + // pow_difficulty ++ + Self::set_difficulty( + netuid, + Self::upgraded_difficulty( + netuid, + current_difficulty, + registrations_this_interval, + target_registrations_this_interval, + ), + ); + } + } else { + // Not enough registrations this interval. + #[allow(clippy::comparison_chain)] + if pow_registrations_this_interval > burn_registrations_this_interval { + // C. There are not enough registrations this interval and most of them are pow registrations + // this triggers a decrease in the burn cost + // burn_cost -- + Self::set_burn( + netuid, + Self::upgraded_burn( + netuid, + current_burn, + registrations_this_interval, + target_registrations_this_interval, + ), + ); + } else if pow_registrations_this_interval < burn_registrations_this_interval { + // D. There are not enough registrations this interval and most of them are burn registrations + // this triggers a decrease in the pow difficulty + // pow_difficulty -- + Self::set_difficulty( + netuid, + Self::upgraded_difficulty( + netuid, + current_difficulty, + registrations_this_interval, + target_registrations_this_interval, + ), + ); + } else { + // E. There are not enough registrations this interval and the pow and burn registrations are equal + // this triggers a decrease in the burn cost and pow difficulty + // burn_cost -- + Self::set_burn( + netuid, + Self::upgraded_burn( + netuid, + current_burn, + registrations_this_interval, + target_registrations_this_interval, + ), + ); + // pow_difficulty -- + Self::set_difficulty( + netuid, + Self::upgraded_difficulty( + netuid, + current_difficulty, + registrations_this_interval, + target_registrations_this_interval, + ), + ); + } + } + + // --- 6. Drain all counters for this network for this interval. + Self::set_last_adjustment_block(netuid, current_block); + Self::set_registrations_this_interval(netuid, 0); + Self::set_pow_registrations_this_interval(netuid, 0); + Self::set_burn_registrations_this_interval(netuid, 0); + } else { + log::debug!("interval not reached."); + } + + // --- 7. Drain block registrations for each network. Needed for registration rate limits. 
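
The six cases A–F above reduce to a small decision table on two questions: were there too many registrations this interval, and did PoW or burn registrations dominate? A compact sketch of that table (the variants map onto calls to `upgraded_difficulty` / `upgraded_burn` with the interval counters):

```rust
use std::cmp::Ordering;

#[derive(Debug, PartialEq)]
enum Adjustment {
    IncreaseDifficulty, // A: too many registrations, mostly PoW
    IncreaseBurn,       // B: too many registrations, mostly burned
    IncreaseBoth,       // F: too many registrations, PoW == burned
    DecreaseBurn,       // C: too few registrations, mostly PoW
    DecreaseDifficulty, // D: too few registrations, mostly burned
    DecreaseBoth,       // E: too few registrations, PoW == burned
}

fn pick_adjustment(total: u16, target: u16, pow: u16, burned: u16) -> Adjustment {
    let too_many = total > target;
    match (too_many, pow.cmp(&burned)) {
        (true, Ordering::Greater) => Adjustment::IncreaseDifficulty, // A
        (true, Ordering::Less) => Adjustment::IncreaseBurn,          // B
        (true, Ordering::Equal) => Adjustment::IncreaseBoth,         // F
        (false, Ordering::Greater) => Adjustment::DecreaseBurn,      // C
        (false, Ordering::Less) => Adjustment::DecreaseDifficulty,   // D
        (false, Ordering::Equal) => Adjustment::DecreaseBoth,        // E
    }
}
```

Expressing the branches as a single match makes the symmetry of the table obvious: the registration count picks the direction, and the PoW/burn comparison picks which parameter moves.
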
+ Self::set_registrations_this_block(netuid, 0); + } + } + + /// Calculates the upgraded difficulty by multiplying the current difficulty by the ratio ( reg_actual + reg_target / reg_target + reg_target ) + /// We use I110F18 to avoid any overflows on u64. Also min_difficulty and max_difficulty bound the range. + /// + pub fn upgraded_difficulty( + netuid: u16, + current_difficulty: u64, + registrations_this_interval: u16, + target_registrations_per_interval: u16, + ) -> u64 { + let updated_difficulty: I110F18 = I110F18::from_num(current_difficulty) + .saturating_mul(I110F18::from_num( + registrations_this_interval.saturating_add(target_registrations_per_interval), + )) + .saturating_div(I110F18::from_num( + target_registrations_per_interval.saturating_add(target_registrations_per_interval), + )); + let alpha: I110F18 = I110F18::from_num(Self::get_adjustment_alpha(netuid)) + .saturating_div(I110F18::from_num(u64::MAX)); + let next_value: I110F18 = alpha + .saturating_mul(I110F18::from_num(current_difficulty)) + .saturating_add( + I110F18::from_num(1.0) + .saturating_sub(alpha) + .saturating_mul(updated_difficulty), + ); + if next_value >= I110F18::from_num(Self::get_max_difficulty(netuid)) { + Self::get_max_difficulty(netuid) + } else if next_value <= I110F18::from_num(Self::get_min_difficulty(netuid)) { + return Self::get_min_difficulty(netuid); + } else { + return next_value.to_num::(); + } + } + + /// Calculates the upgraded burn by multiplying the current burn by the ratio ( reg_actual + reg_target / reg_target + reg_target ) + /// We use I110F18 to avoid any overflows on u64. Also min_burn and max_burn bound the range. + /// + pub fn upgraded_burn( + netuid: u16, + current_burn: u64, + registrations_this_interval: u16, + target_registrations_per_interval: u16, + ) -> u64 { + let updated_burn: I110F18 = I110F18::from_num(current_burn) + .saturating_mul(I110F18::from_num( + registrations_this_interval.saturating_add(target_registrations_per_interval), + )) + .saturating_div(I110F18::from_num( + target_registrations_per_interval.saturating_add(target_registrations_per_interval), + )); + let alpha: I110F18 = I110F18::from_num(Self::get_adjustment_alpha(netuid)) + .saturating_div(I110F18::from_num(u64::MAX)); + let next_value: I110F18 = alpha + .saturating_mul(I110F18::from_num(current_burn)) + .saturating_add( + I110F18::from_num(1.0) + .saturating_sub(alpha) + .saturating_mul(updated_burn), + ); + if next_value >= I110F18::from_num(Self::get_max_burn_as_u64(netuid)) { + Self::get_max_burn_as_u64(netuid) + } else if next_value <= I110F18::from_num(Self::get_min_burn_as_u64(netuid)) { + return Self::get_min_burn_as_u64(netuid); + } else { + return next_value.to_num::(); + } + } +} diff --git a/pallets/subtensor/src/coinbase/mod.rs b/pallets/subtensor/src/coinbase/mod.rs new file mode 100644 index 000000000..ec989d258 --- /dev/null +++ b/pallets/subtensor/src/coinbase/mod.rs @@ -0,0 +1,4 @@ +use super::*; +pub mod block_step; +pub mod root; +pub mod run_coinbase; diff --git a/pallets/subtensor/src/root.rs b/pallets/subtensor/src/coinbase/root.rs similarity index 93% rename from pallets/subtensor/src/root.rs rename to pallets/subtensor/src/coinbase/root.rs index a973b8d7d..067d5855b 100644 --- a/pallets/subtensor/src/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -16,7 +16,7 @@ // DEALINGS IN THE SOFTWARE. 
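
Both `upgraded_difficulty` and `upgraded_burn` apply the same update: scale the current value by `(registrations + target) / (2 * target)`, blend it with the current value via an exponential moving average controlled by `adjustment_alpha`, and clamp into the configured band. A simplified sketch, using f64 in place of `I110F18` (so rounding differs slightly), assuming `min <= max` and adding a guard for `target == 0` that the on-chain code does not need:

```rust
/// Sketch of the registration-term adjustment; alpha is normalized to u64::MAX as in the pallet.
fn adjusted_value(current: u64, regs: u16, target: u16, alpha_u64: u64, min: u64, max: u64) -> u64 {
    if target == 0 {
        return current.clamp(min, max); // guard for the sketch only
    }
    // Scale the current value by (actual + target) / (2 * target)...
    let scaled = current as f64 * f64::from(regs.saturating_add(target))
        / f64::from(target.saturating_add(target));
    // ...then blend it with the current value using an exponential moving average.
    let alpha = alpha_u64 as f64 / u64::MAX as f64;
    let next = alpha * current as f64 + (1.0 - alpha) * scaled;
    // Finally clamp into the [min, max] band.
    next.clamp(min as f64, max as f64) as u64
}
```

With `alpha` close to `u64::MAX` the value barely moves each interval; with `alpha = 0` it jumps straight to the scaled target ratio.
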
use super::*; -use crate::math::*; +use crate::epoch::math::*; use frame_support::dispatch::Pays; use frame_support::storage::{IterableStorageDoubleMap, IterableStorageMap}; use frame_support::traits::Get; @@ -391,6 +391,9 @@ impl Pallet { // --- 9. Calculates the trust of networks. Trust is a sum of all stake with weights > 0. // Trust will have shape k, a score for each subnet. + log::debug!("Subnets:\n{:?}\n", Self::get_all_subnet_netuids()); + log::debug!("N Subnets:\n{:?}\n", Self::get_num_subnets()); + let total_networks = Self::get_num_subnets(); let mut trust = vec![I64F64::from_num(0); total_networks as usize]; let mut total_stake: I64F64 = I64F64::from_num(0); @@ -483,11 +486,7 @@ impl Pallet { // --- 1. Ensure that the call originates from a signed source and retrieve the caller's account ID (coldkey). let coldkey = ensure_signed(origin)?; - ensure!( - !Self::coldkey_in_arbitration(&coldkey), - Error::::ColdkeyIsInArbitration - ); - log::info!( + log::debug!( "do_root_register( coldkey: {:?}, hotkey: {:?} )", coldkey, hotkey @@ -530,7 +529,7 @@ impl Pallet { // --- 12.1.2 Add the new account and make them a member of the Senate. Self::append_neuron(root_netuid, &hotkey, current_block_number); - log::info!("add new neuron: {:?} on uid {:?}", hotkey, subnetwork_uid); + log::debug!("add new neuron: {:?} on uid {:?}", hotkey, subnetwork_uid); } else { // --- 13.1.1 The network is full. Perform replacement. // Find the neuron with the lowest stake value to replace. @@ -563,7 +562,7 @@ impl Pallet { // Replace the neuron account with new information. Self::replace_neuron(root_netuid, lowest_uid, &hotkey, current_block_number); - log::info!( + log::debug!( "replace neuron: {:?} with {:?} on uid {:?}", replaced_hotkey, hotkey, @@ -589,7 +588,7 @@ impl Pallet { RegistrationsThisBlock::::mutate(root_netuid, |val| *val += 1); // --- 16. Log and announce the successful registration. - log::info!( + log::debug!( "RootRegistered(netuid:{:?} uid:{:?} hotkey:{:?})", root_netuid, subnetwork_uid, @@ -623,7 +622,7 @@ impl Pallet { // --- 1. Ensure that the call originates from a signed source and retrieve the caller's account ID (coldkey). let coldkey = ensure_signed(origin)?; - log::info!( + log::debug!( "do_root_register( coldkey: {:?}, hotkey: {:?} )", coldkey, hotkey @@ -653,7 +652,7 @@ impl Pallet { } // --- 5. Log and announce the successful Senate adjustment. - log::info!( + log::debug!( "SenateAdjusted(old_hotkey:{:?} hotkey:{:?})", replaced, hotkey @@ -734,11 +733,7 @@ impl Pallet { ) -> dispatch::DispatchResult { // Check the caller's signature. This is the coldkey of a registered account. let coldkey = ensure_signed(origin)?; - ensure!( - !Self::coldkey_in_arbitration(&coldkey), - Error::::ColdkeyIsInArbitration - ); - log::info!( + log::debug!( "do_set_root_weights( origin:{:?} netuid:{:?}, uids:{:?}, values:{:?})", coldkey, netuid, @@ -839,7 +834,7 @@ impl Pallet { Self::set_last_update_for_uid(netuid, neuron_uid, current_block); // Emit the tracking event. - log::info!( + log::debug!( "RootWeightsSet( netuid:{:?}, neuron_uid:{:?} )", netuid, neuron_uid @@ -859,10 +854,6 @@ impl Pallet { ) -> DispatchResultWithPostInfo { // --- 1. Ensure that the caller has signed with their coldkey. let coldkey = ensure_signed(origin.clone())?; - ensure!( - !Self::coldkey_in_arbitration(&coldkey), - Error::::ColdkeyIsInArbitration - ); // --- 2. Ensure that the calling coldkey owns the associated hotkey. 
ensure!( @@ -900,26 +891,29 @@ impl Pallet { .into()) } - /// Facilitates user registration of a new subnetwork. + /// Facilitates user registration of a new subnetwork with subnet identity. /// /// # Args: - /// * 'origin': ('T::RuntimeOrigin'): The calling origin. Must be signed. + /// * `origin` (`T::RuntimeOrigin`): The calling origin. Must be signed. + /// * `identity` (`Option`): Optional identity to be associated with the new subnetwork. /// - /// # Event: - /// * 'NetworkAdded': Emitted when a new network is successfully added. + /// # Events: + /// * `NetworkAdded(netuid, modality)`: Emitted when a new network is successfully added. + /// * `SubnetIdentitySet(netuid)`: Emitted when a custom identity is set for a new subnetwork. + /// * `NetworkRemoved(netuid)`: Emitted when an existing network is removed to make room for the new one. + /// * `SubnetIdentityRemoved(netuid)`: Emitted when the identity of a removed network is also deleted. /// /// # Raises: /// * 'TxRateLimitExceeded': If the rate limit for network registration is exceeded. /// * 'NotEnoughBalanceToStake': If there isn't enough balance to stake for network registration. /// * 'BalanceWithdrawalError': If an error occurs during balance withdrawal for network registration. /// - pub fn user_add_network(origin: T::RuntimeOrigin) -> dispatch::DispatchResult { + pub fn user_add_network( + origin: T::RuntimeOrigin, + identity: Option, + ) -> dispatch::DispatchResult { // --- 0. Ensure the caller is a signed user. let coldkey = ensure_signed(origin)?; - ensure!( - !Self::coldkey_in_arbitration(&coldkey), - Error::::ColdkeyIsInArbitration - ); // --- 1. Rate limit for network registrations. let current_block = Self::get_current_block_as_u64(); @@ -961,6 +955,11 @@ impl Pallet { Self::remove_network(netuid_to_prune); log::debug!("remove_network: {:?}", netuid_to_prune,); Self::deposit_event(Event::NetworkRemoved(netuid_to_prune)); + + if SubnetIdentities::::take(netuid_to_prune).is_some() { + Self::deposit_event(Event::SubnetIdentityRemoved(netuid_to_prune)); + } + netuid_to_prune } }; @@ -974,21 +973,32 @@ impl Pallet { Self::init_new_network(netuid_to_register, 360); log::debug!("init_new_network: {:?}", netuid_to_register,); - // --- 7. Set netuid storage. + // --- 7. Add the identity if it exists + if let Some(identity_value) = identity { + ensure!( + Self::is_valid_subnet_identity(&identity_value), + Error::::InvalidIdentity + ); + + SubnetIdentities::::insert(netuid_to_register, identity_value); + Self::deposit_event(Event::SubnetIdentitySet(netuid_to_register)); + } + + // --- 8. Set netuid storage. let current_block_number: u64 = Self::get_current_block_as_u64(); NetworkLastRegistered::::set(current_block_number); NetworkRegisteredAt::::insert(netuid_to_register, current_block_number); SubnetOwner::::insert(netuid_to_register, coldkey); - // --- 8. Emit the NetworkAdded event. - log::info!( + // --- 9. Emit the NetworkAdded event. + log::debug!( "NetworkAdded( netuid:{:?}, modality:{:?} )", netuid_to_register, 0 ); Self::deposit_event(Event::NetworkAdded(netuid_to_register, 0)); - // --- 9. Return success. + // --- 10. Return success. Ok(()) } @@ -1005,34 +1015,32 @@ impl Pallet { /// * 'SubNetworkDoesNotExist': If the specified network does not exist. /// * 'NotSubnetOwner': If the caller does not own the specified subnet. /// - pub fn user_remove_network(origin: T::RuntimeOrigin, netuid: u16) -> dispatch::DispatchResult { - // --- 1. Ensure the function caller is a signed user. 
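
A hypothetical, in-memory sketch of the optional-identity handling that `user_add_network` gains in this change: when an old subnet is pruned its identity (if any) is removed, and when the caller supplies an identity it is validated and stored against the new netuid. `SubnetIdentity`, `registry`, and `is_valid` below are illustrative stand-ins, not pallet APIs, and the validation rule is an assumption for the sketch.

```rust
use std::collections::HashMap;

#[derive(Clone)]
struct SubnetIdentity {
    name: String,
}

fn is_valid(identity: &SubnetIdentity) -> bool {
    // Assumed rule for the sketch; the pallet uses its own `is_valid_subnet_identity` check.
    !identity.name.is_empty() && identity.name.len() <= 64
}

fn register_network(
    registry: &mut HashMap<u16, SubnetIdentity>,
    new_netuid: u16,
    pruned_netuid: Option<u16>,
    identity: Option<SubnetIdentity>,
) -> Result<(), &'static str> {
    // Mirrors `SubnetIdentities::take(netuid_to_prune)`: drop the pruned subnet's identity.
    if let Some(pruned) = pruned_netuid {
        if registry.remove(&pruned).is_some() {
            println!("SubnetIdentityRemoved({pruned})");
        }
    }
    // Mirrors the new step 7: validate and store the identity only if one was provided.
    if let Some(identity) = identity {
        if !is_valid(&identity) {
            return Err("InvalidIdentity");
        }
        registry.insert(new_netuid, identity);
        println!("SubnetIdentitySet({new_netuid})");
    }
    println!("NetworkAdded({new_netuid}, 0)");
    Ok(())
}
```
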
- let coldkey = ensure_signed(origin)?; - ensure!( - !Self::coldkey_in_arbitration(&coldkey), - Error::::ColdkeyIsInArbitration - ); - - // --- 2. Ensure this subnet exists. + pub fn user_remove_network(coldkey: T::AccountId, netuid: u16) -> dispatch::DispatchResult { + // --- 1. Ensure this subnet exists. ensure!( Self::if_subnet_exist(netuid), Error::::SubNetworkDoesNotExist ); - // --- 3. Ensure the caller owns this subnet. + // --- 2. Ensure the caller owns this subnet. ensure!( SubnetOwner::::get(netuid) == coldkey, Error::::NotSubnetOwner ); - // --- 4. Explicitly erase the network and all its parameters. + // --- 4. Remove the subnet identity if it exists. + if SubnetIdentities::::take(netuid).is_some() { + Self::deposit_event(Event::SubnetIdentityRemoved(netuid)); + } + + // --- 5. Explicitly erase the network and all its parameters. Self::remove_network(netuid); - // --- 5. Emit the NetworkRemoved event. - log::info!("NetworkRemoved( netuid:{:?} )", netuid); + // --- 6. Emit the NetworkRemoved event. + log::debug!("NetworkRemoved( netuid:{:?} )", netuid); Self::deposit_event(Event::NetworkRemoved(netuid)); - // --- 6. Return success. + // --- 7. Return success. Ok(()) } @@ -1051,7 +1059,7 @@ impl Pallet { NetworkModality::::insert(netuid, 0); // --- 5. Increase total network count. - TotalNetworks::::mutate(|n| n.saturating_inc()); + TotalNetworks::::mutate(|n| *n = n.saturating_add(1)); // --- 6. Set all default values **explicitly**. Self::set_network_registration_allowed(netuid, true); @@ -1118,8 +1126,8 @@ impl Pallet { /// Removes a network (identified by netuid) and all associated parameters. /// /// This function is responsible for cleaning up all the data associated with a network. - /// It ensures that all the storage values related to the network are removed, and any - /// reserved balance is returned to the network owner. + /// It ensures that all the storage values related to the network are removed, any + /// reserved balance is returned to the network owner, and the subnet identity is removed if it exists. /// /// # Args: /// * 'netuid': ('u16'): The unique identifier of the network to be removed. @@ -1127,11 +1135,10 @@ impl Pallet { /// # Note: /// This function does not emit any events, nor does it raise any errors. It silently /// returns if any internal checks fail. - /// pub fn remove_network(netuid: u16) { // --- 1. Return balance to subnet owner. - let owner_coldkey = SubnetOwner::::get(netuid); - let reserved_amount = Self::get_subnet_locked_balance(netuid); + let owner_coldkey: T::AccountId = SubnetOwner::::get(netuid); + let reserved_amount: u64 = Self::get_subnet_locked_balance(netuid); // --- 2. Remove network count. SubnetworkN::::remove(netuid); @@ -1142,13 +1149,13 @@ impl Pallet { // --- 4. Remove netuid from added networks. NetworksAdded::::remove(netuid); - // --- 6. Decrement the network counter. - TotalNetworks::::mutate(|n| n.saturating_dec()); + // --- 5. Decrement the network counter. + TotalNetworks::::mutate(|n: &mut u16| *n = n.saturating_sub(1)); - // --- 7. Remove various network-related storages. + // --- 6. Remove various network-related storages. NetworkRegisteredAt::::remove(netuid); - // --- 8. Remove incentive mechanism memory. + // --- 7. Remove incentive mechanism memory. let _ = Uids::::clear_prefix(netuid, u32::MAX, None); let _ = Keys::::clear_prefix(netuid, u32::MAX, None); let _ = Bonds::::clear_prefix(netuid, u32::MAX, None); @@ -1163,7 +1170,7 @@ impl Pallet { ) { // Create a new vector to hold modified weights. 
- let mut modified_weights = weights_i.clone(); + let mut modified_weights: Vec<(u16, u16)> = weights_i.clone(); // Iterate over each weight entry to potentially update it. for (subnet_id, weight) in modified_weights.iter_mut() { if subnet_id == &netuid { @@ -1205,6 +1212,12 @@ impl Pallet { Self::add_balance_to_coldkey_account(&owner_coldkey, reserved_amount); Self::set_subnet_locked_balance(netuid, 0); SubnetOwner::::remove(netuid); + + // --- 13. Remove subnet identity if it exists. + if SubnetIdentities::::contains_key(netuid) { + SubnetIdentities::::remove(netuid); + Self::deposit_event(Event::SubnetIdentityRemoved(netuid)); + } } #[allow(clippy::arithmetic_side_effects)] @@ -1291,7 +1304,7 @@ impl Pallet { } }); - log::info!("Netuids Order: {:?}", netuids); + log::debug!("Netuids Order: {:?}", netuids); match netuids.last() { Some(netuid) => *netuid, diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs new file mode 100644 index 000000000..723edc423 --- /dev/null +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -0,0 +1,385 @@ +use super::*; +use substrate_fixed::types::I64F64; +use substrate_fixed::types::I96F32; + +impl Pallet { + /// The `coinbase` function performs a four-part emission distribution process involving + /// subnets, epochs, hotkeys, and nominators. + // It is divided into several steps, each handling a specific part of the distribution: + + // Step 1: Compute the block-wise emission for each subnet. + // This involves calculating how much (TAO) should be emitted into each subnet using the + // root epoch function. + + // Step 2: Accumulate the subnet block emission. + // After calculating the block-wise emission, these values are accumulated to keep track + // of how much each subnet should emit before the next distribution phase. This accumulation + // is a running total that gets updated each block. + + // Step 3: Distribute the accumulated emissions through epochs. + // Subnets periodically distribute their accumulated emissions to hotkeys (active validators/miners) + // in the network on a `tempo` --- the time between epochs. This step runs Yuma consensus to + // determine how emissions are split among hotkeys based on their contributions and roles. + // The accumulation of hotkey emissions is done through the `accumulate_hotkey_emission` function. + // The function splits the rewards for a hotkey amongst itself and its `parents`. The parents are + // the hotkeys that are delegating their stake to the hotkey. + + // Step 4: Further distribute emissions from hotkeys to nominators. + // Finally, the emissions received by hotkeys are further distributed to their nominators, + // who are stakeholders that support the hotkeys. + pub fn run_coinbase() { + // --- 0. Get current block. + let current_block: u64 = Self::get_current_block_as_u64(); + log::debug!("Current block: {:?}", current_block); + + // --- 1. Get all netuids. + let subnets: Vec = Self::get_all_subnet_netuids(); + log::debug!("All subnet netuids: {:?}", subnets); + + // --- 2. Run the root epoch function which computes the block emission for each subnet. + // coinbase --> root() --> subnet_block_emission + match Self::root_epoch(current_block) { + Ok(_) => log::debug!("Root epoch run successfully for block: {:?}", current_block), + Err(e) => { + log::trace!("Did not run epoch with: {:?}", e); + } + } + + // --- 3. Drain the subnet block emission and accumulate it as subnet emission, which increases until the tempo is reached in #4. 
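
A minimal model of step 3 above: every block, each subnet's block-wise emission is added to its pending bucket, and the bucket is only drained when that subnet's epoch runs. The HashMaps stand in for the `EmissionValues` and `PendingEmission` storage maps.

```rust
use std::collections::HashMap;

/// Accumulate this block's per-subnet emission into the pending buckets.
fn accumulate_pending(
    blockwise_emission: &HashMap<u16, u64>, // netuid -> emission minted this block
    pending: &mut HashMap<u16, u64>,        // netuid -> emission accumulated since last epoch
) {
    for (netuid, emission) in blockwise_emission {
        let bucket = pending.entry(*netuid).or_insert(0);
        *bucket = bucket.saturating_add(*emission);
    }
}
```
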
+ // subnet_blockwise_emission -> subnet_pending_emission + for netuid in subnets.clone().iter() { + // --- 3.1 Get the network's block-wise emission amount. + // This value is newly minted TAO which has not reached staking accounts yet. + let subnet_blockwise_emission: u64 = EmissionValues::::get(*netuid); + log::debug!( + "Subnet block-wise emission for netuid {:?}: {:?}", + *netuid, + subnet_blockwise_emission + ); + + // --- 3.2 Accumulate the subnet emission on the subnet. + PendingEmission::::mutate(*netuid, |subnet_emission| { + *subnet_emission = subnet_emission.saturating_add(subnet_blockwise_emission); + log::debug!( + "Updated subnet emission for netuid {:?}: {:?}", + *netuid, + *subnet_emission + ); + }); + } + + // --- 4. Drain the accumulated subnet emissions, pass them through the epoch(). + // Before accumulating on the hotkeys the function redistributes the emission towards hotkey parents. + // subnet_emission --> epoch() --> hotkey_emission --> (hotkey + parent hotkeys) + for netuid in subnets.clone().iter() { + // --- 4.1 Check to see if the subnet should run its epoch. + if Self::should_run_epoch(*netuid, current_block) { + // --- 4.2 Drain the subnet emission. + let mut subnet_emission: u64 = PendingEmission::::get(*netuid); + PendingEmission::::insert(*netuid, 0); + log::debug!( + "Drained subnet emission for netuid {:?}: {:?}", + *netuid, + subnet_emission + ); + + // --- 4.3 Set last step counter. + Self::set_blocks_since_last_step(*netuid, 0); + Self::set_last_mechanism_step_block(*netuid, current_block); + + // --- 4.4 Distribute owner take. + if SubnetOwner::::contains_key(netuid) { + // Does the subnet have an owner? + + // --- 4.4.1 Compute the subnet owner cut. + let owner_cut: I96F32 = I96F32::from_num(subnet_emission).saturating_mul( + I96F32::from_num(Self::get_subnet_owner_cut()) + .saturating_div(I96F32::from_num(u16::MAX)), + ); + + // --- 4.4.2 Remove the cut from the subnet emission + subnet_emission = subnet_emission.saturating_sub(owner_cut.to_num::()); + + // --- 4.4.3 Add the cut to the balance of the owner + Self::add_balance_to_coldkey_account( + &Self::get_subnet_owner(*netuid), + owner_cut.to_num::(), + ); + + // --- 4.4.4 Increase total issuance on the chain. + Self::coinbase(owner_cut.to_num::()); + } + + // 4.3 Pass emission through epoch() --> hotkey emission. + let hotkey_emission: Vec<(T::AccountId, u64, u64)> = + Self::epoch(*netuid, subnet_emission); + log::debug!( + "Hotkey emission results for netuid {:?}: {:?}", + *netuid, + hotkey_emission + ); + + // 4.4 Accumulate the tuples on hotkeys: + for (hotkey, mining_emission, validator_emission) in hotkey_emission { + // 4.5 Accumulate the emission on the hotkey and parent hotkeys. + Self::accumulate_hotkey_emission( + &hotkey, + *netuid, + validator_emission, // Amount received from validating + mining_emission, // Amount recieved from mining. + ); + log::debug!("Accumulated emissions on hotkey {:?} for netuid {:?}: mining {:?}, validator {:?}", hotkey, *netuid, mining_emission, validator_emission); + } + } else { + // No epoch, increase blocks since last step and continue + Self::set_blocks_since_last_step( + *netuid, + Self::get_blocks_since_last_step(*netuid).saturating_add(1), + ); + log::debug!("Tempo not reached for subnet: {:?}", *netuid); + } + } + + // --- 5. Drain the accumulated hotkey emissions through to the nominators. + // The hotkey takes a proportion of the emission, the remainder is drained through to the nominators. 
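
The per-subnet gate in step 4 can be sketched on its own: pending emission is only handed to the epoch when the tempo boundary is reached, otherwise the "blocks since last step" counter just ticks up. `SubnetState` and the closure parameter below are illustrative; the real check is `Self::should_run_epoch`.

```rust
struct SubnetState {
    pending_emission: u64,
    blocks_since_last_step: u64,
}

/// Returns the emission to feed into the epoch when the tempo is reached, otherwise None.
fn maybe_run_epoch(
    netuid: u16,
    tempo: u16,
    current_block: u64,
    state: &mut SubnetState,
    blocks_until_next_epoch: impl Fn(u16, u16, u64) -> u64,
) -> Option<u64> {
    if blocks_until_next_epoch(netuid, tempo, current_block) == 0 {
        // Tempo reached: drain the accumulated emission and reset the step counters.
        let drained = state.pending_emission;
        state.pending_emission = 0;
        state.blocks_since_last_step = 0;
        Some(drained) // caller passes this through epoch() and hotkey accumulation
    } else {
        state.blocks_since_last_step = state.blocks_since_last_step.saturating_add(1);
        None
    }
}
```
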
+ // We keep track of the last stake increase event for accounting purposes. + // hotkeys --> nominators. + let emission_tempo: u64 = Self::get_hotkey_emission_tempo(); + for (hotkey, hotkey_emission) in PendingdHotkeyEmission::::iter() { + // Check for zeros. + // remove zero values. + if hotkey_emission == 0 { + continue; + } + + // --- 5.1 Check if we should drain the hotkey emission on this block. + if Self::should_drain_hotkey(&hotkey, current_block, emission_tempo) { + // --- 5.2 Drain the hotkey emission and distribute it to nominators. + let total_new_tao: u64 = + Self::drain_hotkey_emission(&hotkey, hotkey_emission, current_block); + log::debug!( + "Drained hotkey emission for hotkey {:?} on block {:?}: {:?}", + hotkey, + current_block, + hotkey_emission + ); + + // --- 5.3 Increase total issuance on the chain. + Self::coinbase(total_new_tao); + log::debug!("Increased total issuance by {:?}", total_new_tao); + } + } + } + + /// Accumulates the mining and validator emissions on a hotkey and distributes the validator emission among its parents. + /// + /// This function is responsible for accumulating the mining and validator emissions associated with a hotkey onto a hotkey. + /// It first calculates the total stake of the hotkey, considering the stakes contributed by its parents and reduced by its children. + /// It then retrieves the list of parents of the hotkey and distributes the validator emission proportionally based on the stake contributed by each parent. + /// The remaining validator emission, after distribution to the parents, along with the mining emission, is then added to the hotkey's own accumulated emission. + /// + /// # Arguments + /// * `hotkey` - The account ID of the hotkey for which emissions are being calculated. + /// * `netuid` - The unique identifier of the network to which the hotkey belongs. + /// * `mining_emission` - The amount of mining emission allocated to the hotkey. + /// * `validator_emission` - The amount of validator emission allocated to the hotkey. + /// + pub fn accumulate_hotkey_emission( + hotkey: &T::AccountId, + netuid: u16, + validating_emission: u64, + mining_emission: u64, + ) { + // --- 1. First, calculate the hotkey's share of the emission. + let take_proportion: I64F64 = I64F64::from_num(Self::get_childkey_take(hotkey, netuid)) + .saturating_div(I64F64::from_num(u16::MAX)); + let hotkey_take: u64 = take_proportion + .saturating_mul(I64F64::from_num(validating_emission)) + .to_num::(); + // NOTE: Only the validation emission should be split amongst parents. + + // --- 2. Compute the remaining emission after the hotkey's share is deducted. + let emission_minus_take: u64 = validating_emission.saturating_sub(hotkey_take); + + // --- 3. Track the remaining emission for accounting purposes. + let mut remaining_emission: u64 = emission_minus_take; + + // --- 4. Calculate the total stake of the hotkey, adjusted by the stakes of parents and children. + // Parents contribute to the stake, while children reduce it. + // If this value is zero, no distribution to anyone is necessary. + let total_hotkey_stake: u64 = Self::get_stake_for_hotkey_on_subnet(hotkey, netuid); + if total_hotkey_stake != 0 { + // --- 5. If the total stake is not zero, iterate over each parent to determine their contribution to the hotkey's stake, + // and calculate their share of the emission accordingly. + for (proportion, parent) in Self::get_parents(hotkey, netuid) { + // --- 5.1 Retrieve the parent's stake. This is the raw stake value including nominators. 
+ let parent_stake: u64 = Self::get_total_stake_for_hotkey(&parent); + + // --- 5.2 Calculate the portion of the hotkey's total stake contributed by this parent. + // Then, determine the parent's share of the remaining emission. + let stake_from_parent: I96F32 = I96F32::from_num(parent_stake).saturating_mul( + I96F32::from_num(proportion).saturating_div(I96F32::from_num(u64::MAX)), + ); + let proportion_from_parent: I96F32 = + stake_from_parent.saturating_div(I96F32::from_num(total_hotkey_stake)); + let parent_emission_take: u64 = proportion_from_parent + .saturating_mul(I96F32::from_num(emission_minus_take)) + .to_num::(); + + // --- 5.5. Accumulate emissions for the parent hotkey. + PendingdHotkeyEmission::::mutate(parent, |parent_accumulated| { + *parent_accumulated = parent_accumulated.saturating_add(parent_emission_take) + }); + + // --- 5.6. Subtract the parent's share from the remaining emission for this hotkey. + remaining_emission = remaining_emission.saturating_sub(parent_emission_take); + } + } + + // --- 6. Add the remaining emission plus the hotkey's initial take to the pending emission for this hotkey. + PendingdHotkeyEmission::::mutate(hotkey, |hotkey_pending| { + *hotkey_pending = hotkey_pending.saturating_add( + remaining_emission + .saturating_add(hotkey_take) + .saturating_add(mining_emission), + ) + }); + } + + //. --- 4. Drains the accumulated hotkey emission through to the nominators. The hotkey takes a proportion of the emission. + /// The remainder is drained through to the nominators keeping track of the last stake increase event to ensure that the hotkey does not + /// gain more emission than it's stake since the last drain. + /// hotkeys --> nominators. + /// + /// 1. It resets the accumulated emissions for the hotkey to zero. + /// 4. It calculates the total stake for the hotkey and determines the hotkey's own take from the emissions based on its delegation status. + /// 5. It then calculates the remaining emissions after the hotkey's take and distributes this remaining amount proportionally among the hotkey's nominators. + /// 6. Each nominator's share of the emissions is added to their stake, but only if their stake was not manually increased since the last emission drain. + /// 7. Finally, the hotkey's own take and any undistributed emissions are added to the hotkey's total stake. + /// + /// This function ensures that emissions are fairly distributed according to stake proportions and delegation agreements, and it updates the necessary records to reflect these changes. + pub fn drain_hotkey_emission(hotkey: &T::AccountId, emission: u64, block_number: u64) -> u64 { + // --- 0. For accounting purposes record the total new added stake. + let mut total_new_tao: u64 = 0; + + // --- 1.0 Drain the hotkey emission. + PendingdHotkeyEmission::::insert(hotkey, 0); + + // --- 2 Retrieve the last time this hotkey's emissions were drained. + let last_emission_drain: u64 = LastHotkeyEmissionDrain::::get(hotkey); + + // --- 3 Update the block value to the current block number. + LastHotkeyEmissionDrain::::insert(hotkey, block_number); + + // --- 4 Retrieve the total stake for the hotkey from all nominations. + let total_hotkey_stake: u64 = Self::get_total_stake_for_hotkey(hotkey); + + // --- 5 Calculate the emission take for the hotkey. 
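
An integer sketch of the parent split in `accumulate_hotkey_emission`: after the child hotkey's take is removed, each parent receives a slice of the remaining validating emission proportional to the stake it contributes, and whatever is left stays with the child. Proportions are normalized to `u64::MAX` as in the pallet; u128 arithmetic here replaces the pallet's `I96F32` fixed point.

```rust
/// Returns (per-parent shares, remainder kept by the child hotkey).
fn parent_shares(
    emission_minus_take: u64,
    total_hotkey_stake: u64,
    parents: &[(u64, u64)], // (proportion normalized to u64::MAX, parent's total stake)
) -> (Vec<u64>, u64) {
    let mut remaining = emission_minus_take;
    let mut shares = Vec::with_capacity(parents.len());
    if total_hotkey_stake == 0 {
        return (shares, remaining); // no stake, nothing to distribute against
    }
    for (proportion, parent_stake) in parents {
        // Stake this parent contributes to the child: parent_stake * proportion / u64::MAX.
        let contributed =
            (u128::from(*parent_stake) * u128::from(*proportion) / u128::from(u64::MAX)) as u64;
        // The parent's slice of the distributable emission, never more than what is left.
        let share_u128 = u128::from(contributed) * u128::from(emission_minus_take)
            / u128::from(total_hotkey_stake);
        let share = share_u128.min(u128::from(remaining)) as u64;
        shares.push(share);
        remaining -= share;
    }
    // `remaining` (plus the take and any mining emission) is credited to the child hotkey.
    (shares, remaining)
}
```
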
+ let take_proportion: I64F64 = I64F64::from_num(Delegates::::get(hotkey)) + .saturating_div(I64F64::from_num(u16::MAX)); + let hotkey_take: u64 = + (take_proportion.saturating_mul(I64F64::from_num(emission))).to_num::(); + + // --- 6 Compute the remaining emission after deducting the hotkey's take. + let emission_minus_take: u64 = emission.saturating_sub(hotkey_take); + + // --- 7 Calculate the remaining emission after the hotkey's take. + let mut remainder: u64 = emission_minus_take; + + // --- 8 Iterate over each nominator and get all viable stake. + let mut total_viable_nominator_stake: u64 = total_hotkey_stake; + for (nominator, nominator_stake) in Stake::::iter_prefix(hotkey) { + if LastAddStakeIncrease::::get(hotkey, nominator) > last_emission_drain { + total_viable_nominator_stake = + total_viable_nominator_stake.saturating_sub(nominator_stake); + } + } + + // --- 9 Iterate over each nominator. + if total_viable_nominator_stake != 0 { + for (nominator, nominator_stake) in Stake::::iter_prefix(hotkey) { + // --- 10 Check if the stake was manually increased by the user since the last emission drain for this hotkey. + // If it was, skip this nominator as they will not receive their proportion of the emission. + if LastAddStakeIncrease::::get(hotkey, nominator.clone()) > last_emission_drain { + continue; + } + + // --- 11 Calculate this nominator's share of the emission. + let nominator_emission: I64F64 = I64F64::from_num(emission_minus_take) + .saturating_mul(I64F64::from_num(nominator_stake)) + .checked_div(I64F64::from_num(total_viable_nominator_stake)) + .unwrap_or(I64F64::from_num(0)); + + // --- 12 Increase the stake for the nominator. + Self::increase_stake_on_coldkey_hotkey_account( + &nominator, + hotkey, + nominator_emission.to_num::(), + ); + + // --- 13* Record event and Subtract the nominator's emission from the remainder. + total_new_tao = total_new_tao.saturating_add(nominator_emission.to_num::()); + remainder = remainder.saturating_sub(nominator_emission.to_num::()); + } + } + + // --- 14 Finally, add the stake to the hotkey itself, including its take and the remaining emission. + let hotkey_new_tao: u64 = hotkey_take.saturating_add(remainder); + Self::increase_stake_on_hotkey_account(hotkey, hotkey_new_tao); + + // --- 15 Record new tao creation event and return the amount created. + total_new_tao = total_new_tao.saturating_add(hotkey_new_tao); + total_new_tao + } + + /////////////// + /// Helpers /// + /////////////// + /// Determines whether the hotkey emission should be drained based on the current block and index. + /// + /// # Arguments + /// * `hotkey_i` - The hotkey identifier. + /// * `index` - The index of the hotkey in the iterable storage. + /// * `block` - The current block number. + /// + /// # Returns + /// * `bool` - True if the hotkey emission should be drained, false otherwise. + pub fn should_drain_hotkey(hotkey: &T::AccountId, block: u64, emit_tempo: u64) -> bool { + let hotkey_idx: u64 = Self::hash_hotkey_to_u64(hotkey); + block.rem_euclid(emit_tempo.saturating_add(1)) + == hotkey_idx.rem_euclid(emit_tempo.saturating_add(1)) + } + + /// Checks if the epoch should run for a given subnet based on the current block. + /// + /// # Arguments + /// * `netuid` - The unique identifier of the subnet. + /// + /// # Returns + /// * `bool` - True if the epoch should run, false otherwise. 
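
The nominator pass of `drain_hotkey_emission` above has one wrinkle worth isolating: nominators who manually added stake since the last drain are excluded, both from the payout and from the "viable stake" denominator, so the remaining nominators split the post-take emission among themselves. A standalone sketch with u128 integer math standing in for `I64F64`:

```rust
struct Nominator {
    stake: u64,
    added_stake_since_last_drain: bool, // models LastAddStakeIncrease > last_emission_drain
}

/// Returns (amount credited to the delegate hotkey, per-nominator payouts).
fn drain_to_nominators(emission: u64, take_u16: u16, nominators: &[Nominator]) -> (u64, Vec<u64>) {
    let take = (u128::from(emission) * u128::from(take_u16) / u128::from(u16::MAX)) as u64;
    let distributable = emission - take;

    // Only stake that was not topped up since the last drain counts toward the denominator.
    let viable_stake: u128 = nominators
        .iter()
        .filter(|n| !n.added_stake_since_last_drain)
        .map(|n| u128::from(n.stake))
        .sum();

    let mut remainder = distributable;
    let mut payouts = vec![0u64; nominators.len()];
    if viable_stake != 0 {
        for (i, n) in nominators.iter().enumerate() {
            if n.added_stake_since_last_drain {
                continue; // skipped this round, exactly like the pallet's `continue`
            }
            let share =
                (u128::from(distributable) * u128::from(n.stake) / viable_stake) as u64;
            payouts[i] = share;
            remainder -= share; // sum of floored shares never exceeds `distributable`
        }
    }
    // Delegate's own credit: its take plus whatever was not distributed.
    (take + remainder, payouts)
}
```
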
+ pub fn should_run_epoch(netuid: u16, current_block: u64) -> bool { + Self::blocks_until_next_epoch(netuid, Self::get_tempo(netuid), current_block) == 0 + } + + /// Helper function which returns the number of blocks remaining before we will run the epoch on this + /// network. Networks run their epoch when (block_number + netuid + 1 ) % (tempo + 1) = 0 + /// tempo | netuid | # first epoch block + /// 1 0 0 + /// 1 1 1 + /// 2 0 1 + /// 2 1 0 + /// 100 0 99 + /// 100 1 98 + /// Special case: tempo = 0, the network never runs. + /// + pub fn blocks_until_next_epoch(netuid: u16, tempo: u16, block_number: u64) -> u64 { + if tempo == 0 { + return u64::MAX; + } + let netuid_plus_one = (netuid as u64).saturating_add(1); + let block_plus_netuid = block_number.saturating_add(netuid_plus_one); + let tempo_plus_one = (tempo as u64).saturating_add(1); + let remainder = block_plus_netuid.rem_euclid(tempo_plus_one); + (tempo as u64).saturating_sub(remainder) + } +} diff --git a/pallets/subtensor/src/math.rs b/pallets/subtensor/src/epoch/math.rs similarity index 100% rename from pallets/subtensor/src/math.rs rename to pallets/subtensor/src/epoch/math.rs diff --git a/pallets/subtensor/src/epoch/mod.rs b/pallets/subtensor/src/epoch/mod.rs new file mode 100644 index 000000000..3b22f940e --- /dev/null +++ b/pallets/subtensor/src/epoch/mod.rs @@ -0,0 +1,3 @@ +use super::*; +pub mod math; +pub mod run_epoch; diff --git a/pallets/subtensor/src/epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs similarity index 91% rename from pallets/subtensor/src/epoch.rs rename to pallets/subtensor/src/epoch/run_epoch.rs index 12c407efa..d919c6dbb 100644 --- a/pallets/subtensor/src/epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -1,10 +1,91 @@ use super::*; -use crate::math::*; +use crate::epoch::math::*; use frame_support::IterableStorageDoubleMap; use sp_std::vec; use substrate_fixed::types::{I32F32, I64F64, I96F32}; impl Pallet { + /// Calculates the total stake held by a hotkey on the network, considering child/parent relationships. + /// + /// This function performs the following steps: + /// 1. Checks for self-loops in the delegation graph. + /// 2. Retrieves the initial stake of the hotkey. + /// 3. Calculates the stake allocated to children. + /// 4. Calculates the stake received from parents. + /// 5. Computes the final stake by adjusting the initial stake with child and parent contributions. + /// + /// # Arguments + /// * `hotkey` - AccountId of the hotkey whose total network stake is to be calculated. + /// * `netuid` - Network unique identifier specifying the network context. + /// + /// # Returns + /// * `u64` - The total stake for the hotkey on the network after considering the stakes + /// from children and parents. + /// + /// # Note + /// This function now includes a check for self-loops in the delegation graph using the + /// `dfs_check_self_loops` method. However, it currently only logs warnings for detected loops + /// and does not alter the stake calculation based on these findings. + /// + /// # Panics + /// This function does not explicitly panic, but underlying arithmetic operations + /// use saturating arithmetic to prevent overflows. + /// + pub fn get_stake_for_hotkey_on_subnet(hotkey: &T::AccountId, netuid: u16) -> u64 { + // Retrieve the initial total stake for the hotkey without any child/parent adjustments. 
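
The scheduling rule in `blocks_until_next_epoch` above can be checked in isolation; the sketch below mirrors that helper and replays the tempo/netuid table from its doc comment as assertions (the function returns 0 on a subnet's epoch block, and `u64::MAX` when the tempo is 0).

```rust
fn blocks_until_next_epoch(netuid: u16, tempo: u16, block_number: u64) -> u64 {
    if tempo == 0 {
        return u64::MAX; // a tempo of 0 means the network never runs its epoch
    }
    let remainder = block_number
        .saturating_add(u64::from(netuid))
        .saturating_add(1)
        % (u64::from(tempo) + 1);
    u64::from(tempo) - remainder
}

fn main() {
    // (tempo, netuid, first epoch block) rows from the doc comment above.
    let table = [
        (1, 0, 0),
        (1, 1, 1),
        (2, 0, 1),
        (2, 1, 0),
        (100, 0, 99),
        (100, 1, 98),
    ];
    for (tempo, netuid, first_block) in table {
        assert_eq!(blocks_until_next_epoch(netuid, tempo, first_block), 0);
    }
    assert_eq!(blocks_until_next_epoch(0, 0, 123), u64::MAX); // tempo 0 never runs
}
```
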
+ let initial_stake: u64 = Self::get_total_stake_for_hotkey(hotkey); + log::debug!("Initial stake: {:?}", initial_stake); + let mut stake_to_children: u64 = 0; + let mut stake_from_parents: u64 = 0; + + // Retrieve lists of parents and children from storage, based on the hotkey and network ID. + let parents: Vec<(u64, T::AccountId)> = Self::get_parents(hotkey, netuid); + let children: Vec<(u64, T::AccountId)> = Self::get_children(hotkey, netuid); + + // Iterate over children to calculate the total stake allocated to them. + for (proportion, _) in children { + // Calculate the stake proportion allocated to the child based on the initial stake. + let normalized_proportion: I96F32 = + I96F32::from_num(proportion).saturating_div(I96F32::from_num(u64::MAX)); + let stake_proportion_to_child: I96F32 = + I96F32::from_num(initial_stake).saturating_mul(normalized_proportion); + + // Accumulate the total stake given to children. + stake_to_children = + stake_to_children.saturating_add(stake_proportion_to_child.to_num::()); + } + + // Iterate over parents to calculate the total stake received from them. + for (proportion, parent) in parents { + // Retrieve the parent's total stake. + let parent_stake: u64 = Self::get_total_stake_for_hotkey(&parent); + // Calculate the stake proportion received from the parent. + let normalized_proportion: I96F32 = + I96F32::from_num(proportion).saturating_div(I96F32::from_num(u64::MAX)); + let stake_proportion_from_parent: I96F32 = + I96F32::from_num(parent_stake).saturating_mul(normalized_proportion); + + // Accumulate the total stake received from parents. + stake_from_parents = + stake_from_parents.saturating_add(stake_proportion_from_parent.to_num::()); + } + + // Calculate the final stake for the hotkey by adjusting the initial stake with the stakes + // to/from children and parents. + let mut finalized_stake: u64 = initial_stake + .saturating_sub(stake_to_children) + .saturating_add(stake_from_parents); + + // get the max stake for the network + let max_stake = Self::get_network_max_stake(netuid); + + // Return the finalized stake value for the hotkey, but capped at the max stake. + finalized_stake = finalized_stake.min(max_stake); + + // Return the finalized stake value for the hotkey. + finalized_stake + } + /// Calculates reward consensus and returns the emissions for uids/hotkeys in a given `netuid`. /// (Dense version used only for testing purposes.) #[allow(clippy::indexing_slicing)] @@ -67,7 +148,8 @@ impl Pallet { // Access network stake as normalized vector. 
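
An integer sketch of the calculation in `get_stake_for_hotkey_on_subnet` above: start from the hotkey's own total stake, subtract the slices it delegates to children, add the slices it receives from parents, and cap the result at the subnet's maximum stake. Proportions are normalized to `u64::MAX`; u128 arithmetic stands in for the pallet's `I96F32` fixed point.

```rust
/// Effective stake of a hotkey on one subnet after child/parent adjustments.
fn effective_subnet_stake(
    own_stake: u64,
    children: &[u64],       // proportion of own stake delegated to each child
    parents: &[(u64, u64)], // (proportion of the parent's stake, parent's total stake)
    max_stake: u64,
) -> u64 {
    let to_children: u64 = children
        .iter()
        .map(|p| (u128::from(own_stake) * u128::from(*p) / u128::from(u64::MAX)) as u64)
        .fold(0u64, |acc, x| acc.saturating_add(x));
    let from_parents: u64 = parents
        .iter()
        .map(|(p, stake)| (u128::from(*stake) * u128::from(*p) / u128::from(u64::MAX)) as u64)
        .fold(0u64, |acc, x| acc.saturating_add(x));
    own_stake
        .saturating_sub(to_children)
        .saturating_add(from_parents)
        .min(max_stake)
}
```
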
let mut stake_64: Vec = vec![I64F64::from_num(0.0); n as usize]; for (uid_i, hotkey) in &hotkeys { - stake_64[*uid_i as usize] = I64F64::from_num(Self::get_total_stake_for_hotkey(hotkey)); + stake_64[*uid_i as usize] = + I64F64::from_num(Self::get_stake_for_hotkey_on_subnet(hotkey, netuid)); } inplace_normalize_64(&mut stake_64); let stake: Vec = vec_fixed64_to_fixed32(stake_64); @@ -265,6 +347,10 @@ impl Pallet { // == Value storage == // =================== let cloned_emission: Vec = combined_emission.clone(); + let cloned_stake_weight: Vec = stake + .iter() + .map(|xi| fixed_proportion_to_u16(*xi)) + .collect::>(); let cloned_ranks: Vec = ranks .iter() .map(|xi| fixed_proportion_to_u16(*xi)) @@ -290,6 +376,7 @@ impl Pallet { .iter() .map(|xi| fixed_proportion_to_u16(*xi)) .collect::>(); + StakeWeight::::insert(netuid, cloned_stake_weight.clone()); Active::::insert(netuid, active.clone()); Emission::::insert(netuid, cloned_emission); Rank::::insert(netuid, cloned_ranks); @@ -395,7 +482,8 @@ impl Pallet { // Access network stake as normalized vector. let mut stake_64: Vec = vec![I64F64::from_num(0.0); n as usize]; for (uid_i, hotkey) in &hotkeys { - stake_64[*uid_i as usize] = I64F64::from_num(Self::get_total_stake_for_hotkey(hotkey)); + stake_64[*uid_i as usize] = + I64F64::from_num(Self::get_stake_for_hotkey_on_subnet(hotkey, netuid)); } log::trace!("Stake : {:?}", &stake_64); inplace_normalize_64(&mut stake_64); @@ -631,6 +719,10 @@ impl Pallet { // =================== // == Value storage == // =================== + let cloned_stake_weight: Vec = stake + .iter() + .map(|xi| fixed_proportion_to_u16(*xi)) + .collect::>(); let cloned_emission: Vec = combined_emission.clone(); let cloned_ranks: Vec = ranks .iter() @@ -657,6 +749,7 @@ impl Pallet { .iter() .map(|xi| fixed_proportion_to_u16(*xi)) .collect::>(); + StakeWeight::::insert(netuid, cloned_stake_weight.clone()); Active::::insert(netuid, active.clone()); Emission::::insert(netuid, cloned_emission); Rank::::insert(netuid, cloned_ranks); @@ -1215,7 +1308,7 @@ impl Pallet { AlphaValues::::insert(netuid, (alpha_low, alpha_high)); - log::info!( + log::debug!( "AlphaValuesSet( netuid: {:?}, AlphaLow: {:?}, AlphaHigh: {:?} ) ", netuid, alpha_low, diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 2edcb8d50..2985736c8 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -19,6 +19,7 @@ use codec::{Decode, Encode}; use frame_support::sp_runtime::transaction_validity::InvalidTransaction; use frame_support::sp_runtime::transaction_validity::ValidTransaction; use pallet_balances::Call as BalancesCall; +// use pallet_scheduler as Scheduler; use scale_info::TypeInfo; use sp_runtime::{ traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SignedExtension}, @@ -35,53 +36,56 @@ mod benchmarks; // ========================= // ==== Pallet Imports ===== // ========================= -mod block_step; -mod epoch; -mod errors; -mod events; -pub mod math; -mod registration; -mod root; -mod serving; -mod staking; -mod swap; -mod uids; -mod utils; -mod weights; - -pub mod delegate_info; -pub mod neuron_info; -pub mod stake_info; -pub mod subnet_info; +pub mod coinbase; +pub mod epoch; +pub mod macros; +pub mod migrations; +pub mod rpc_info; +pub mod staking; +pub mod subnets; +pub mod swap; +pub mod utils; +use crate::utils::rate_limiting::TransactionType; +use macros::{config, dispatches, errors, events, genesis, hooks}; // apparently this is stabilized since rust 1.36 extern crate alloc; -pub mod 
migration; #[deny(missing_docs)] #[import_section(errors::errors)] #[import_section(events::events)] +#[import_section(dispatches::dispatches)] +#[import_section(genesis::genesis)] +#[import_section(hooks::hooks)] +#[import_section(config::config)] #[frame_support::pallet] pub mod pallet { - + use crate::migrations; use frame_support::{ dispatch::GetDispatchInfo, pallet_prelude::{DispatchResult, StorageMap, ValueQuery, *}, - traits::{tokens::fungible, UnfilteredDispatchable}, + traits::{ + tokens::fungible, OriginTrait, QueryPreimage, StorePreimage, UnfilteredDispatchable, + }, }; use frame_system::pallet_prelude::*; use sp_core::H256; - use sp_runtime::traits::TrailingZeroInput; + use sp_runtime::traits::{Dispatchable, TrailingZeroInput}; use sp_std::vec; use sp_std::vec::Vec; - use subtensor_macros::freeze_struct; - #[cfg(not(feature = "std"))] use alloc::boxed::Box; #[cfg(feature = "std")] use sp_std::prelude::Box; + /// Origin for the pallet + pub type PalletsOriginOf = + <::RuntimeOrigin as OriginTrait>::PalletsOrigin; + + /// Call type for the pallet + pub type CallOf = ::RuntimeCall; + /// Tracks version for migrations. Should be monotonic with respect to the /// order of migrations. (i.e. always increasing) const STORAGE_VERSION: StorageVersion = StorageVersion::new(7); @@ -94,1031 +98,1033 @@ pub mod pallet { #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(_); - /// Configure the pallet by specifying the parameters and types on which it depends. - #[pallet::config] - pub trait Config: frame_system::Config { - /// Because this pallet emits events, it depends on the runtime's definition of an event. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; - - /// A sudo-able call. - type SudoRuntimeCall: Parameter - + UnfilteredDispatchable - + GetDispatchInfo; - - /// Origin checking for council majority - type CouncilOrigin: EnsureOrigin; - - /// Currency type that will be used to place deposits on neurons - type Currency: fungible::Balanced - + fungible::Mutate; - - /// Senate members with members management functions. - type SenateMembers: crate::MemberManagement; - - /// Interface to allow other pallets to control who can register identities - type TriumvirateInterface: crate::CollectiveInterface; - - /// ================================= - /// ==== Initial Value Constants ==== - /// ================================= - - /// Initial currency issuance. - #[pallet::constant] - type InitialIssuance: Get; - /// Initial min allowed weights setting. - #[pallet::constant] - type InitialMinAllowedWeights: Get; - /// Initial Emission Ratio. - #[pallet::constant] - type InitialEmissionValue: Get; - /// Initial max weight limit. - #[pallet::constant] - type InitialMaxWeightsLimit: Get; - /// Tempo for each network. - #[pallet::constant] - type InitialTempo: Get; - /// Initial Difficulty. - #[pallet::constant] - type InitialDifficulty: Get; - /// Initial Max Difficulty. - #[pallet::constant] - type InitialMaxDifficulty: Get; - /// Initial Min Difficulty. - #[pallet::constant] - type InitialMinDifficulty: Get; - /// Initial RAO Recycled. - #[pallet::constant] - type InitialRAORecycledForRegistration: Get; - /// Initial Burn. - #[pallet::constant] - type InitialBurn: Get; - /// Initial Max Burn. - #[pallet::constant] - type InitialMaxBurn: Get; - /// Initial Min Burn. - #[pallet::constant] - type InitialMinBurn: Get; - /// Initial adjustment interval. - #[pallet::constant] - type InitialAdjustmentInterval: Get; - /// Initial bonds moving average. 
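The hunk above replaces the inline module list and the monolithic pallet body with per-concern files under `macros/` that are stitched back in through the `#[import_section(...)]` attributes at the top of this chunk. For context, a minimal sketch of how such a section is declared with `frame_support::pallet_macros::pallet_section`; the module name and hook body are illustrative, not the pallet's own files:

```rust
// macros/hooks.rs (illustrative): one concern of the pallet kept in its own file.
use frame_support::pallet_macros::pallet_section;

/// The tokens inside a `pallet_section` are only stashed here; they are
/// type-checked once `import_section` splices them back into `pub mod pallet`.
#[pallet_section]
mod hooks {
    #[pallet::hooks]
    impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
        fn on_initialize(_n: BlockNumberFor<T>) -> Weight {
            Weight::zero()
        }
    }
}

// lib.rs (illustrative): the section is imported before the pallet macro expands,
// which is what the `#[import_section(hooks::hooks)]`-style attributes above do.
// #[import_section(hooks::hooks)]
// #[frame_support::pallet]
// pub mod pallet { ... }
```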
- #[pallet::constant] - type InitialBondsMovingAverage: Get; - /// Initial target registrations per interval. - #[pallet::constant] - type InitialTargetRegistrationsPerInterval: Get; - /// Rho constant. - #[pallet::constant] - type InitialRho: Get; - /// Kappa constant. - #[pallet::constant] - type InitialKappa: Get; - /// Max UID constant. - #[pallet::constant] - type InitialMaxAllowedUids: Get; - /// Initial validator context pruning length. - #[pallet::constant] - type InitialValidatorPruneLen: Get; - /// Initial scaling law power. - #[pallet::constant] - type InitialScalingLawPower: Get; - /// Immunity Period Constant. - #[pallet::constant] - type InitialImmunityPeriod: Get; - /// Activity constant. - #[pallet::constant] - type InitialActivityCutoff: Get; - /// Initial max registrations per block. - #[pallet::constant] - type InitialMaxRegistrationsPerBlock: Get; - /// Initial pruning score for each neuron. - #[pallet::constant] - type InitialPruningScore: Get; - /// Initial maximum allowed validators per network. - #[pallet::constant] - type InitialMaxAllowedValidators: Get; - /// Initial default delegation take. - #[pallet::constant] - type InitialDefaultTake: Get; - /// Initial minimum delegation take. - #[pallet::constant] - type InitialMinTake: Get; - /// Initial weights version key. - #[pallet::constant] - type InitialWeightsVersionKey: Get; - /// Initial serving rate limit. - #[pallet::constant] - type InitialServingRateLimit: Get; - /// Initial transaction rate limit. - #[pallet::constant] - type InitialTxRateLimit: Get; - /// Initial delegate take transaction rate limit. - #[pallet::constant] - type InitialTxDelegateTakeRateLimit: Get; - /// Initial percentage of total stake required to join senate. - #[pallet::constant] - type InitialSenateRequiredStakePercentage: Get; - /// Initial adjustment alpha on burn and pow. - #[pallet::constant] - type InitialAdjustmentAlpha: Get; - /// Initial network immunity period - #[pallet::constant] - type InitialNetworkImmunityPeriod: Get; - /// Initial minimum allowed network UIDs - #[pallet::constant] - type InitialNetworkMinAllowedUids: Get; - /// Initial network minimum burn cost - #[pallet::constant] - type InitialNetworkMinLockCost: Get; - /// Initial network subnet cut. - #[pallet::constant] - type InitialSubnetOwnerCut: Get; - /// Initial lock reduction interval. - #[pallet::constant] - type InitialNetworkLockReductionInterval: Get; - /// Initial max allowed subnets - #[pallet::constant] - type InitialSubnetLimit: Get; - /// Initial network creation rate limit - #[pallet::constant] - type InitialNetworkRateLimit: Get; - /// Initial target stakes per interval issuance. - #[pallet::constant] - type InitialTargetStakesPerInterval: Get; - /// Cost of swapping a hotkey. - #[pallet::constant] - type KeySwapCost: Get; - /// The upper bound for the alpha parameter. Used for Liquid Alpha. - #[pallet::constant] - type AlphaHigh: Get; - /// The lower bound for the alpha parameter. Used for Liquid Alpha. - #[pallet::constant] - type AlphaLow: Get; - /// A flag to indicate if Liquid Alpha is enabled. - #[pallet::constant] - type LiquidAlphaOn: Get; - /// The base difficulty for proof of work for coldkey swaps - #[pallet::constant] - type InitialBaseDifficulty: Get; - } - /// Alias for the account ID. pub type AccountIdOf = ::AccountId; - /// Senate requirements - #[pallet::type_value] - pub fn DefaultSenateRequiredStakePercentage() -> u64 { - T::InitialSenateRequiredStakePercentage::get() + /// Struct for Axon. 
+ pub type AxonInfoOf = AxonInfo; + + /// local one + pub type LocalCallOf = ::RuntimeCall; + + /// Data structure for Axon information. + #[crate::freeze_struct("3545cfb0cac4c1f5")] + #[derive(Encode, Decode, Default, TypeInfo, Clone, PartialEq, Eq, Debug)] + pub struct AxonInfo { + /// Axon serving block. + pub block: u64, + /// Axon version + pub version: u32, + /// Axon u128 encoded ip address of type v6 or v4. + pub ip: u128, + /// Axon u16 encoded port. + pub port: u16, + /// Axon ip type, 4 for ipv4 and 6 for ipv6. + pub ip_type: u8, + /// Axon protocol. TCP, UDP, other. + pub protocol: u8, + /// Axon proto placeholder 1. + pub placeholder1: u8, + /// Axon proto placeholder 2. + pub placeholder2: u8, } - #[pallet::storage] - pub(super) type SenateRequiredStakePercentage = - StorageValue<_, u64, ValueQuery, DefaultSenateRequiredStakePercentage>; + /// Struct for Prometheus. + pub type PrometheusInfoOf = PrometheusInfo; + + /// Data structure for Prometheus information. + #[crate::freeze_struct("5dde687e63baf0cd")] + #[derive(Encode, Decode, Default, TypeInfo, Clone, PartialEq, Eq, Debug)] + pub struct PrometheusInfo { + /// Prometheus serving block. + pub block: u64, + /// Prometheus version. + pub version: u32, + /// Prometheus u128 encoded ip address of type v6 or v4. + pub ip: u128, + /// Prometheus u16 encoded port. + pub port: u16, + /// Prometheus ip type, 4 for ipv4 and 6 for ipv6. + pub ip_type: u8, + } + /// Struct for ChainIdentities. + pub type ChainIdentityOf = ChainIdentity; + + /// Data structure for Chain Identities. + #[crate::freeze_struct("bbfd00438dbe2b58")] + #[derive(Encode, Decode, Default, TypeInfo, Clone, PartialEq, Eq, Debug)] + pub struct ChainIdentity { + /// The name of the chain identity + pub name: Vec, + /// The URL associated with the chain identity + pub url: Vec, + /// The image representation of the chain identity + pub image: Vec, + /// The Discord information for the chain identity + pub discord: Vec, + /// A description of the chain identity + pub description: Vec, + /// Additional information about the chain identity + pub additional: Vec, + } + + /// Struct for SubnetIdentities. + pub type SubnetIdentityOf = SubnetIdentity; + /// Data structure for Subnet Identities + #[crate::freeze_struct("f448dc3dad763108")] + #[derive(Encode, Decode, Default, TypeInfo, Clone, PartialEq, Eq, Debug)] + pub struct SubnetIdentity { + /// The name of the subnet + pub subnet_name: Vec, + /// The github repository associated with the chain identity + pub github_repo: Vec, + /// The subnet's contact + pub subnet_contact: Vec, + } /// ============================ /// ==== Staking + Accounts ==== /// ============================ - /// Total Rao in circulation. #[pallet::type_value] + /// Total Rao in circulation. pub fn TotalSupply() -> u64 { - 21_000_000_000_000_000 // Rao => 21_000_000 Tao + 21_000_000_000_000_000 } - /// Default total stake. #[pallet::type_value] - pub fn DefaultDefaultTake() -> u16 { - T::InitialDefaultTake::get() + /// Default Delegate Take. + pub fn DefaultDelegateTake() -> u16 { + T::InitialDefaultDelegateTake::get() } - /// Default minimum take. + #[pallet::type_value] - pub fn DefaultMinTake() -> u16 { - T::InitialMinTake::get() + /// Default childkey take. + pub fn DefaultChildKeyTake() -> u16 { + T::InitialDefaultChildKeyTake::get() } - /// Default account take. #[pallet::type_value] + /// Default minimum delegate take. 
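The `AxonInfo` and `PrometheusInfo` structs above store an endpoint address as a raw `u128` plus an `ip_type` discriminator (4 or 6). A small standalone sketch of one natural encoding, the integer value of the address; the helper names are assumptions for illustration, and the pallet itself only persists the raw numbers:

```rust
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};

/// Encode an IP address into a single u128 plus the matching ip_type (4 or 6).
fn encode_ip(addr: IpAddr) -> (u128, u8) {
    match addr {
        IpAddr::V4(v4) => (u32::from(v4) as u128, 4),
        IpAddr::V6(v6) => (u128::from(v6), 6),
    }
}

/// Decode back, trusting the stored ip_type discriminator.
fn decode_ip(ip: u128, ip_type: u8) -> Option<IpAddr> {
    match ip_type {
        4 => Some(IpAddr::V4(Ipv4Addr::from(u32::try_from(ip).ok()?))),
        6 => Some(IpAddr::V6(Ipv6Addr::from(ip))),
        _ => None,
    }
}

fn main() {
    let addr: IpAddr = "192.168.1.7".parse().unwrap();
    let (ip, ip_type) = encode_ip(addr);
    assert_eq!(decode_ip(ip, ip_type), Some(addr));
    println!("ip = {ip}, ip_type = {ip_type}");
}
```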
+ pub fn DefaultMinDelegateTake() -> u16 { + T::InitialMinDelegateTake::get() + } + + #[pallet::type_value] + /// Default minimum childkey take. + pub fn DefaultMinChildKeyTake() -> u16 { + T::InitialMinChildKeyTake::get() + } + + #[pallet::type_value] + /// Default maximum childkey take. + pub fn DefaultMaxChildKeyTake() -> u16 { + T::InitialMaxChildKeyTake::get() + } + + #[pallet::type_value] + /// Default account take. pub fn DefaultAccountTake() -> u64 { 0 } - /// Default stakes per interval. #[pallet::type_value] + /// Default stakes per interval. pub fn DefaultStakesPerInterval() -> (u64, u64) { (0, 0) } - /// Default emission per block. #[pallet::type_value] + /// Default emission per block. pub fn DefaultBlockEmission() -> u64 { 1_000_000_000 } - /// Default allowed delegation. #[pallet::type_value] + /// Default allowed delegation. pub fn DefaultAllowsDelegation() -> bool { false } - /// Default total issuance. #[pallet::type_value] + /// Default total issuance. pub fn DefaultTotalIssuance() -> u64 { T::InitialIssuance::get() } - /// Default account, derived from zero trailing bytes. #[pallet::type_value] + /// Default account, derived from zero trailing bytes. pub fn DefaultAccount() -> T::AccountId { T::AccountId::decode(&mut TrailingZeroInput::zeroes()) .expect("trailing zeroes always produce a valid account ID; qed") } - /// Default target stakes per interval. #[pallet::type_value] + /// Default target stakes per interval. pub fn DefaultTargetStakesPerInterval() -> u64 { T::InitialTargetStakesPerInterval::get() } - /// Default stake interval. #[pallet::type_value] + /// Default stake interval. pub fn DefaultStakeInterval() -> u64 { 360 } - - /// Default base difficulty for proof of work for coldkey swaps - #[pallet::type_value] - pub fn DefaultBaseDifficulty() -> u64 { - T::InitialBaseDifficulty::get() - } - - #[pallet::storage] // --- ITEM ( total_stake ) - pub type TotalStake = StorageValue<_, u64, ValueQuery>; - #[pallet::storage] // --- ITEM ( default_take ) - pub type MaxTake = StorageValue<_, u16, ValueQuery, DefaultDefaultTake>; - #[pallet::storage] // --- ITEM ( min_take ) - pub type MinTake = StorageValue<_, u16, ValueQuery, DefaultMinTake>; - #[pallet::storage] // --- ITEM ( global_block_emission ) - pub type BlockEmission = StorageValue<_, u64, ValueQuery, DefaultBlockEmission>; - #[pallet::storage] // --- ITEM ( total_issuance ) - pub type TotalIssuance = StorageValue<_, u64, ValueQuery, DefaultTotalIssuance>; - #[pallet::storage] // --- ITEM (target_stakes_per_interval) - pub type TargetStakesPerInterval = - StorageValue<_, u64, ValueQuery, DefaultTargetStakesPerInterval>; - - #[pallet::storage] // --- ITEM ( base_difficulty ) - pub type BaseDifficulty = StorageValue<_, u64, ValueQuery, DefaultBaseDifficulty>; - #[pallet::storage] // --- ITEM (default_stake_interval) - pub type StakeInterval = StorageValue<_, u64, ValueQuery, DefaultStakeInterval>; - #[pallet::storage] // --- MAP ( hot ) --> stake | Returns the total amount of stake under a hotkey. - pub type TotalHotkeyStake = - StorageMap<_, Identity, T::AccountId, u64, ValueQuery, DefaultAccountTake>; - #[pallet::storage] // --- MAP ( cold ) --> stake | Returns the total amount of stake under a coldkey. 
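Several defaults in this part of the diff (`DefaultAccount` here, `DefaultKey` and `DefaultSubnetOwner` later) build an account by decoding from `TrailingZeroInput::zeroes()`. A standalone check of what that produces, assuming `sp-core`, `sp-runtime` and `parity-scale-codec` (aliased as `codec`) are available as they are in the pallet:

```rust
use codec::Decode; // codec = parity-scale-codec
use sp_core::crypto::AccountId32;
use sp_runtime::traits::TrailingZeroInput;

fn main() {
    // Decoding from an endless stream of zero bytes yields the all-zero account,
    // which is the fallback these defaults rely on.
    let zero: AccountId32 = AccountId32::decode(&mut TrailingZeroInput::zeroes())
        .expect("trailing zeroes always produce a valid account ID; qed");
    assert_eq!(zero, AccountId32::new([0u8; 32]));
    println!("{zero:?}");
}
```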
- pub type TotalColdkeyStake = - StorageMap<_, Identity, T::AccountId, u64, ValueQuery, DefaultAccountTake>; - #[pallet::storage] - /// MAP (hot, cold) --> stake | Returns a tuple (u64: stakes, u64: block_number) - pub type TotalHotkeyColdkeyStakesThisInterval = StorageDoubleMap< - _, - Identity, - T::AccountId, - Identity, - T::AccountId, - (u64, u64), - ValueQuery, - DefaultStakesPerInterval, - >; - #[pallet::storage] // --- MAP ( hot ) --> cold | Returns the controlling coldkey for a hotkey. - pub type Owner = - StorageMap<_, Blake2_128Concat, T::AccountId, T::AccountId, ValueQuery, DefaultAccount>; - #[pallet::storage] // --- MAP ( cold ) --> Vec | Returns the vector of hotkeys controlled by this coldkey. - pub type OwnedHotkeys = - StorageMap<_, Blake2_128Concat, T::AccountId, Vec, ValueQuery>; - #[pallet::storage] // --- DMAP ( cold ) --> Vec | Maps coldkey to hotkeys that stake to it - pub type StakingHotkeys = - StorageMap<_, Blake2_128Concat, T::AccountId, Vec, ValueQuery>; - #[pallet::storage] // --- MAP ( hot ) --> take | Returns the hotkey delegation take. And signals that this key is open for delegation. - pub type Delegates = - StorageMap<_, Blake2_128Concat, T::AccountId, u16, ValueQuery, DefaultDefaultTake>; - #[pallet::storage] // --- DMAP ( hot, cold ) --> stake | Returns the stake under a coldkey prefixed by hotkey. - pub type Stake = StorageDoubleMap< - _, - Blake2_128Concat, - T::AccountId, - Identity, - T::AccountId, - u64, - ValueQuery, - DefaultAccountTake, - >; - #[pallet::type_value] - /// Default value for hotkeys. - pub fn EmptyAccounts() -> Vec { + /// Default account linkage + pub fn DefaultAccountLinkage() -> Vec<(u64, T::AccountId)> { vec![] } #[pallet::type_value] - /// Default arbitration period. - /// This value represents the default arbitration period in blocks. - /// The period is set to 18 hours, assuming a block time of 12 seconds. - pub fn DefaultArbitrationPeriod() -> u64 { - 7200 * 3 // 3 days + /// Default account linkage + pub fn DefaultProportion() -> u64 { + 0 } - #[pallet::storage] // ---- StorageItem Global Used Work. - pub type ArbitrationPeriod = - StorageValue<_, u64, ValueQuery, DefaultArbitrationPeriod>; - #[pallet::storage] // --- MAP ( cold ) --> Vec | Returns a list of keys to drain to, if there are two, we extend the period. - pub type ColdkeySwapDestinations = StorageMap< - _, - Blake2_128Concat, - T::AccountId, - Vec, - ValueQuery, - EmptyAccounts, - >; - #[pallet::storage] // --- MAP ( cold ) --> u64 | Block when the coldkey will be arbitrated. - pub type ColdkeyArbitrationBlock = - StorageMap<_, Blake2_128Concat, T::AccountId, u64, ValueQuery>; - #[pallet::storage] // --- MAP ( u64 ) --> Vec | Coldkeys to drain on the specific block. - pub type ColdkeysToSwapAtBlock = - StorageMap<_, Identity, u64, Vec, ValueQuery, EmptyAccounts>; - /// -- ITEM (switches liquid alpha on) #[pallet::type_value] - pub fn DefaultLiquidAlpha() -> bool { - false + /// Default accumulated emission for a hotkey + pub fn DefaultAccumulatedEmission() -> u64 { + 0 } - #[pallet::storage] // --- MAP ( netuid ) --> Whether or not Liquid Alpha is enabled - pub type LiquidAlphaOn = - StorageMap<_, Blake2_128Concat, u16, bool, ValueQuery, DefaultLiquidAlpha>; - - /// ===================================== - /// ==== Difficulty / Registrations ===== - /// ===================================== - - /// Default last adjustment block. #[pallet::type_value] + /// Default last adjustment block. 
pub fn DefaultLastAdjustmentBlock() -> u64 { 0 } - /// Default registrations this block. #[pallet::type_value] + /// Default last adjustment block. pub fn DefaultRegistrationsThisBlock() -> u16 { 0 } - /// Default burn token. #[pallet::type_value] + /// Default registrations this block. pub fn DefaultBurn() -> u64 { T::InitialBurn::get() } - /// Default min burn token. #[pallet::type_value] + /// Default burn token. pub fn DefaultMinBurn() -> u64 { T::InitialMinBurn::get() } - /// Default max burn token. #[pallet::type_value] + /// Default min burn token. pub fn DefaultMaxBurn() -> u64 { T::InitialMaxBurn::get() } - /// Default difficulty value. #[pallet::type_value] + /// Default max burn token. pub fn DefaultDifficulty() -> u64 { T::InitialDifficulty::get() } - /// Default min difficulty value. #[pallet::type_value] + /// Default difficulty value. pub fn DefaultMinDifficulty() -> u64 { T::InitialMinDifficulty::get() } - /// Default max difficulty value. #[pallet::type_value] + /// Default min difficulty value. pub fn DefaultMaxDifficulty() -> u64 { T::InitialMaxDifficulty::get() } - /// Default max registrations per block. #[pallet::type_value] + /// Default max difficulty value. pub fn DefaultMaxRegistrationsPerBlock() -> u16 { T::InitialMaxRegistrationsPerBlock::get() } - /// Default RAO recycled for registration. #[pallet::type_value] + /// Default max registrations per block. pub fn DefaultRAORecycledForRegistration() -> u64 { T::InitialRAORecycledForRegistration::get() } - - #[pallet::storage] // ---- StorageItem Global Used Work. - pub type UsedWork = StorageMap<_, Identity, Vec, u64, ValueQuery>; - #[pallet::storage] // --- MAP ( netuid ) --> Burn - pub type Burn = StorageMap<_, Identity, u16, u64, ValueQuery, DefaultBurn>; - #[pallet::storage] // --- MAP ( netuid ) --> Difficulty - pub type Difficulty = StorageMap<_, Identity, u16, u64, ValueQuery, DefaultDifficulty>; - #[pallet::storage] // --- MAP ( netuid ) --> MinBurn - pub type MinBurn = StorageMap<_, Identity, u16, u64, ValueQuery, DefaultMinBurn>; - #[pallet::storage] // --- MAP ( netuid ) --> MaxBurn - pub type MaxBurn = StorageMap<_, Identity, u16, u64, ValueQuery, DefaultMaxBurn>; - #[pallet::storage] // --- MAP ( netuid ) --> MinDifficulty - pub type MinDifficulty = - StorageMap<_, Identity, u16, u64, ValueQuery, DefaultMinDifficulty>; - #[pallet::storage] // --- MAP ( netuid ) --> MaxDifficulty - pub type MaxDifficulty = - StorageMap<_, Identity, u16, u64, ValueQuery, DefaultMaxDifficulty>; - #[pallet::storage] // --- MAP ( netuid ) --> Block at last adjustment. - pub type LastAdjustmentBlock = - StorageMap<_, Identity, u16, u64, ValueQuery, DefaultLastAdjustmentBlock>; - #[pallet::storage] // --- MAP ( netuid ) --> Registrations of this Block. - pub type RegistrationsThisBlock = - StorageMap<_, Identity, u16, u16, ValueQuery, DefaultRegistrationsThisBlock>; - #[pallet::storage] // --- ITEM( global_max_registrations_per_block ) - pub type MaxRegistrationsPerBlock = - StorageMap<_, Identity, u16, u16, ValueQuery, DefaultMaxRegistrationsPerBlock>; - #[pallet::storage] // --- MAP ( netuid, global_RAO_recycled_for_registration ) - pub type RAORecycledForRegistration = - StorageMap<_, Identity, u16, u64, ValueQuery, DefaultRAORecycledForRegistration>; - - /// ============================== - /// ==== Subnetworks Storage ===== - /// ============================== - - /// Default number of networks. #[pallet::type_value] + /// Default number of networks. pub fn DefaultN() -> u16 { 0 } - /// Default value for modality. 
#[pallet::type_value] + /// Default value for modality. pub fn DefaultModality() -> u16 { 0 } - /// Default value for hotkeys. #[pallet::type_value] + /// Default value for hotkeys. pub fn DefaultHotkeys() -> Vec { vec![] } - /// Default value if network is added. #[pallet::type_value] + /// Default value if network is added. pub fn DefaultNeworksAdded() -> bool { false } - /// Default value for network member. #[pallet::type_value] + /// Default value for network member. pub fn DefaultIsNetworkMember() -> bool { false } - /// Default value for registration allowed. #[pallet::type_value] + /// Default value for registration allowed. pub fn DefaultRegistrationAllowed() -> bool { false } - /// Default value for network registered at. #[pallet::type_value] + /// Default value for network registered at. pub fn DefaultNetworkRegisteredAt() -> u64 { 0 } - /// Default value for network immunity period. #[pallet::type_value] + /// Default value for network immunity period. pub fn DefaultNetworkImmunityPeriod() -> u64 { T::InitialNetworkImmunityPeriod::get() } - /// Default value for network last registered. #[pallet::type_value] + /// Default value for network last registered. pub fn DefaultNetworkLastRegistered() -> u64 { 0 } - /// Default value for nominator min required stake. #[pallet::type_value] + /// Default value for nominator min required stake. pub fn DefaultNominatorMinRequiredStake() -> u64 { 0 } - /// Default value for network min allowed UIDs. #[pallet::type_value] + /// Default value for network min allowed UIDs. pub fn DefaultNetworkMinAllowedUids() -> u16 { T::InitialNetworkMinAllowedUids::get() } - /// Default value for network min lock cost. #[pallet::type_value] + /// Default value for network min lock cost. pub fn DefaultNetworkMinLockCost() -> u64 { T::InitialNetworkMinLockCost::get() } - /// Default value for network lock reduction interval. #[pallet::type_value] + /// Default value for network lock reduction interval. pub fn DefaultNetworkLockReductionInterval() -> u64 { T::InitialNetworkLockReductionInterval::get() } - /// Default value for subnet owner cut. #[pallet::type_value] + /// Default value for subnet owner cut. pub fn DefaultSubnetOwnerCut() -> u16 { T::InitialSubnetOwnerCut::get() } - /// Default value for subnet limit. #[pallet::type_value] + /// Default value for subnet limit. pub fn DefaultSubnetLimit() -> u16 { T::InitialSubnetLimit::get() } - /// Default value for network rate limit. #[pallet::type_value] + /// Default value for network rate limit. pub fn DefaultNetworkRateLimit() -> u64 { if cfg!(feature = "pow-faucet") { return 0; } - T::InitialNetworkRateLimit::get() } - - #[pallet::storage] // --- ITEM( maximum_number_of_networks ) - pub type SubnetLimit = StorageValue<_, u16, ValueQuery, DefaultSubnetLimit>; - #[pallet::storage] // --- ITEM( total_number_of_existing_networks ) - pub type TotalNetworks = StorageValue<_, u16, ValueQuery>; - #[pallet::storage] // --- MAP ( netuid ) --> subnetwork_n (Number of UIDs in the network). 
- pub type SubnetworkN = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultN>; - #[pallet::storage] // --- MAP ( netuid ) --> modality TEXT: 0, IMAGE: 1, TENSOR: 2 - pub type NetworkModality = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultModality>; - #[pallet::storage] // --- MAP ( netuid ) --> network_is_added - pub type NetworksAdded = - StorageMap<_, Identity, u16, bool, ValueQuery, DefaultNeworksAdded>; - #[pallet::storage] // --- DMAP ( hotkey, netuid ) --> bool - pub type IsNetworkMember = StorageDoubleMap< - _, - Blake2_128Concat, - T::AccountId, - Identity, - u16, - bool, - ValueQuery, - DefaultIsNetworkMember, - >; - #[pallet::storage] // --- MAP ( netuid ) --> network_registration_allowed - pub type NetworkRegistrationAllowed = - StorageMap<_, Identity, u16, bool, ValueQuery, DefaultRegistrationAllowed>; - #[pallet::storage] // --- MAP ( netuid ) --> network_pow_allowed - pub type NetworkPowRegistrationAllowed = - StorageMap<_, Identity, u16, bool, ValueQuery, DefaultRegistrationAllowed>; - #[pallet::storage] // --- MAP ( netuid ) --> block_created - pub type NetworkRegisteredAt = - StorageMap<_, Identity, u16, u64, ValueQuery, DefaultNetworkRegisteredAt>; - #[pallet::storage] // ITEM( network_immunity_period ) - pub type NetworkImmunityPeriod = - StorageValue<_, u64, ValueQuery, DefaultNetworkImmunityPeriod>; - #[pallet::storage] // ITEM( network_last_registered_block ) - pub type NetworkLastRegistered = - StorageValue<_, u64, ValueQuery, DefaultNetworkLastRegistered>; - #[pallet::storage] // ITEM( network_min_allowed_uids ) - pub type NetworkMinAllowedUids = - StorageValue<_, u16, ValueQuery, DefaultNetworkMinAllowedUids>; - #[pallet::storage] // ITEM( min_network_lock_cost ) - pub type NetworkMinLockCost = StorageValue<_, u64, ValueQuery, DefaultNetworkMinLockCost>; - #[pallet::storage] // ITEM( last_network_lock_cost ) - pub type NetworkLastLockCost = - StorageValue<_, u64, ValueQuery, DefaultNetworkMinLockCost>; - #[pallet::storage] // ITEM( network_lock_reduction_interval ) - pub type NetworkLockReductionInterval = - StorageValue<_, u64, ValueQuery, DefaultNetworkLockReductionInterval>; - #[pallet::storage] // ITEM( subnet_owner_cut ) - pub type SubnetOwnerCut = StorageValue<_, u16, ValueQuery, DefaultSubnetOwnerCut>; - #[pallet::storage] // ITEM( network_rate_limit ) - pub type NetworkRateLimit = StorageValue<_, u64, ValueQuery, DefaultNetworkRateLimit>; - #[pallet::storage] // ITEM( nominator_min_required_stake ) - pub type NominatorMinRequiredStake = - StorageValue<_, u64, ValueQuery, DefaultNominatorMinRequiredStake>; - - /// ============================== - /// ==== Subnetwork Features ===== - /// ============================== - - /// Default value for emission values. #[pallet::type_value] + /// Default value for emission values. pub fn DefaultEmissionValues() -> u64 { 0 } - /// Default value for pending emission. #[pallet::type_value] + /// Default value for pending emission. pub fn DefaultPendingEmission() -> u64 { 0 } - /// Default value for blocks since last step. #[pallet::type_value] + /// Default value for blocks since last step. pub fn DefaultBlocksSinceLastStep() -> u64 { 0 } - /// Default value for last mechanism step block. #[pallet::type_value] + /// Default value for last mechanism step block. pub fn DefaultLastMechanismStepBlock() -> u64 { 0 } - /// Default value for subnet owner. #[pallet::type_value] + /// Default value for subnet owner. 
pub fn DefaultSubnetOwner() -> T::AccountId { T::AccountId::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()) .expect("trailing zeroes always produce a valid account ID; qed") } - /// Default value for subnet locked. #[pallet::type_value] + /// Default value for subnet locked. pub fn DefaultSubnetLocked() -> u64 { 0 } - /// Default value for network tempo #[pallet::type_value] + /// Default value for network tempo pub fn DefaultTempo() -> u16 { T::InitialTempo::get() } - - #[pallet::storage] // --- MAP ( netuid ) --> tempo - pub type Tempo = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultTempo>; - #[pallet::storage] // --- MAP ( netuid ) --> emission_values - pub type EmissionValues = - StorageMap<_, Identity, u16, u64, ValueQuery, DefaultEmissionValues>; - #[pallet::storage] // --- MAP ( netuid ) --> pending_emission - pub type PendingEmission = - StorageMap<_, Identity, u16, u64, ValueQuery, DefaultPendingEmission>; - #[pallet::storage] // --- MAP ( netuid ) --> blocks_since_last_step - pub type BlocksSinceLastStep = - StorageMap<_, Identity, u16, u64, ValueQuery, DefaultBlocksSinceLastStep>; - #[pallet::storage] // --- MAP ( netuid ) --> last_mechanism_step_block - pub type LastMechansimStepBlock = - StorageMap<_, Identity, u16, u64, ValueQuery, DefaultLastMechanismStepBlock>; - #[pallet::storage] // --- MAP ( netuid ) --> subnet_owner - pub type SubnetOwner = - StorageMap<_, Identity, u16, T::AccountId, ValueQuery, DefaultSubnetOwner>; - #[pallet::storage] // --- MAP ( netuid ) --> subnet_locked - pub type SubnetLocked = - StorageMap<_, Identity, u16, u64, ValueQuery, DefaultSubnetLocked>; - - /// ================================= - /// ==== Axon / Promo Endpoints ===== - /// ================================= - - /// Struct for Axon. - pub type AxonInfoOf = AxonInfo; - - /// Data structure for Axon information. - #[freeze_struct("3545cfb0cac4c1f5")] - #[derive(Encode, Decode, Default, TypeInfo, Clone, PartialEq, Eq, Debug)] - pub struct AxonInfo { - /// Axon serving block. - pub block: u64, - /// Axon version - pub version: u32, - /// Axon u128 encoded ip address of type v6 or v4. - pub ip: u128, - /// Axon u16 encoded port. - pub port: u16, - /// Axon ip type, 4 for ipv4 and 6 for ipv6. - pub ip_type: u8, - /// Axon protocol. TCP, UDP, other. - pub protocol: u8, - /// Axon proto placeholder 1. - pub placeholder1: u8, - /// Axon proto placeholder 2. - pub placeholder2: u8, - } - - /// Struct for Prometheus. - pub type PrometheusInfoOf = PrometheusInfo; - /// Data structure for Prometheus information. - #[freeze_struct("5dde687e63baf0cd")] - #[derive(Encode, Decode, Default, TypeInfo, Clone, PartialEq, Eq, Debug)] - pub struct PrometheusInfo { - /// Prometheus serving block. - pub block: u64, - /// Prometheus version. - pub version: u32, - /// Prometheus u128 encoded ip address of type v6 or v4. - pub ip: u128, - /// Prometheus u16 encoded port. - pub port: u16, - /// Prometheus ip type, 4 for ipv4 and 6 for ipv6. - pub ip_type: u8, - } - - /// Default value for rate limiting - #[pallet::type_value] - pub fn DefaultTxRateLimit() -> u64 { - T::InitialTxRateLimit::get() - } - /// Default value for delegate take rate limiting - #[pallet::type_value] - pub fn DefaultTxDelegateTakeRateLimit() -> u64 { - T::InitialTxDelegateTakeRateLimit::get() - } - /// Default value for last extrinsic block. 
- #[pallet::type_value] - pub fn DefaultLastTxBlock() -> u64 { - 0 - } - - #[pallet::storage] // --- ITEM ( tx_rate_limit ) - pub(super) type TxRateLimit = StorageValue<_, u64, ValueQuery, DefaultTxRateLimit>; - #[pallet::storage] // --- ITEM ( tx_rate_limit ) - pub(super) type TxDelegateTakeRateLimit = - StorageValue<_, u64, ValueQuery, DefaultTxDelegateTakeRateLimit>; - #[pallet::storage] // --- MAP ( key ) --> last_block - pub type LastTxBlock = - StorageMap<_, Identity, T::AccountId, u64, ValueQuery, DefaultLastTxBlock>; - #[pallet::storage] // --- MAP ( key ) --> last_block - pub(super) type LastTxBlockDelegateTake = - StorageMap<_, Identity, T::AccountId, u64, ValueQuery, DefaultLastTxBlock>; - - /// Default value for serving rate limit. - #[pallet::type_value] - pub fn DefaultServingRateLimit() -> u64 { - T::InitialServingRateLimit::get() - } - - #[pallet::storage] // --- MAP ( netuid ) --> serving_rate_limit - pub type ServingRateLimit = - StorageMap<_, Identity, u16, u64, ValueQuery, DefaultServingRateLimit>; - #[pallet::storage] // --- MAP ( netuid, hotkey ) --> axon_info - pub type Axons = - StorageDoubleMap<_, Identity, u16, Blake2_128Concat, T::AccountId, AxonInfoOf, OptionQuery>; - #[pallet::storage] // --- MAP ( netuid, hotkey ) --> prometheus_info - pub type Prometheus = StorageDoubleMap< - _, - Identity, - u16, - Blake2_128Concat, - T::AccountId, - PrometheusInfoOf, - OptionQuery, - >; - - /// ======================================= - /// ==== Subnetwork Hyperparam storage ==== - /// ======================================= - - /// Default weights set rate limit. #[pallet::type_value] + /// Default value for weights set rate limit. pub fn DefaultWeightsSetRateLimit() -> u64 { 100 } - /// Default block at registration. #[pallet::type_value] + /// Default block number at registration. pub fn DefaultBlockAtRegistration() -> u64 { 0 } - /// Default Rho parameter value. #[pallet::type_value] + /// Default value for rho parameter. pub fn DefaultRho() -> u16 { T::InitialRho::get() } - /// Default Kai parameter value. #[pallet::type_value] + /// Default value for kappa parameter. pub fn DefaultKappa() -> u16 { T::InitialKappa::get() } - /// Default max allowed uids. #[pallet::type_value] + /// Default maximum allowed UIDs. pub fn DefaultMaxAllowedUids() -> u16 { T::InitialMaxAllowedUids::get() } - /// Default immunity period value. #[pallet::type_value] + /// Default immunity period. pub fn DefaultImmunityPeriod() -> u16 { T::InitialImmunityPeriod::get() } - /// Default activity cutoff value. #[pallet::type_value] + /// Default activity cutoff. pub fn DefaultActivityCutoff() -> u16 { T::InitialActivityCutoff::get() } - /// Default max weights limit. #[pallet::type_value] + /// Default maximum weights limit. pub fn DefaultMaxWeightsLimit() -> u16 { T::InitialMaxWeightsLimit::get() } - /// Default weights version key. #[pallet::type_value] + /// Default weights version key. pub fn DefaultWeightsVersionKey() -> u64 { T::InitialWeightsVersionKey::get() } - /// Default minimal allowed weights. #[pallet::type_value] + /// Default minimum allowed weights. pub fn DefaultMinAllowedWeights() -> u16 { T::InitialMinAllowedWeights::get() } - /// Default max allowed validators. #[pallet::type_value] + /// Default maximum allowed validators. pub fn DefaultMaxAllowedValidators() -> u16 { T::InitialMaxAllowedValidators::get() } - /// Default adjustment interval. #[pallet::type_value] + /// Default adjustment interval. 
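The `TxRateLimit` / `LastTxBlock` items removed here reappear later in the diff among the global parameters; the check they support is simply "enough blocks have passed since the caller's last transaction of this kind". A dependency-free sketch of that rule (names and the exact comparison are illustrative, not the pallet's helper):

```rust
/// Pass if at least `limit` blocks have elapsed since the caller's last transaction.
fn passes_rate_limit(limit: u64, last_tx_block: u64, current_block: u64) -> bool {
    current_block.saturating_sub(last_tx_block) >= limit
}

fn main() {
    let limit = 100;
    assert!(!passes_rate_limit(limit, 1_000, 1_050)); // only 50 blocks elapsed
    assert!(passes_rate_limit(limit, 1_000, 1_100)); // exactly at the limit
    println!("rate-limit checks passed");
}
```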
pub fn DefaultAdjustmentInterval() -> u16 { T::InitialAdjustmentInterval::get() } - /// Default bonds moving average. #[pallet::type_value] + /// Default bonds moving average. pub fn DefaultBondsMovingAverage() -> u64 { T::InitialBondsMovingAverage::get() } - /// Default validator prune length. #[pallet::type_value] + /// Default validator prune length. pub fn DefaultValidatorPruneLen() -> u64 { T::InitialValidatorPruneLen::get() } - /// Default scaling law power. #[pallet::type_value] + /// Default scaling law power. pub fn DefaultScalingLawPower() -> u16 { T::InitialScalingLawPower::get() } - /// Default target registrations per interval. #[pallet::type_value] + /// Default target registrations per interval. pub fn DefaultTargetRegistrationsPerInterval() -> u16 { T::InitialTargetRegistrationsPerInterval::get() } - /// Default adjustment alpha. #[pallet::type_value] + /// Default adjustment alpha. pub fn DefaultAdjustmentAlpha() -> u64 { T::InitialAdjustmentAlpha::get() } - /// Default weights min stake. #[pallet::type_value] + /// Default minimum stake for weights. pub fn DefaultWeightsMinStake() -> u64 { 0 } - /// Provides the default value for the upper bound of the alpha parameter. - #[pallet::type_value] + /// Value definition for vector of u16. + pub fn EmptyU16Vec() -> Vec { + vec![] + } + #[pallet::type_value] + /// Value definition for vector of u64. + pub fn EmptyU64Vec() -> Vec { + vec![] + } + #[pallet::type_value] + /// Value definition for vector of bool. + pub fn EmptyBoolVec() -> Vec { + vec![] + } + #[pallet::type_value] + /// Value definition for bonds with type vector of (u16, u16). + pub fn DefaultBonds() -> Vec<(u16, u16)> { + vec![] + } + #[pallet::type_value] + /// Value definition for weights with vector of (u16, u16). + pub fn DefaultWeights() -> Vec<(u16, u16)> { + vec![] + } + #[pallet::type_value] + /// Default value for key with type T::AccountId derived from trailing zeroes. + pub fn DefaultKey() -> T::AccountId { + T::AccountId::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()) + .expect("trailing zeroes always produce a valid account ID; qed") + } + #[pallet::type_value] + /// Default value for network immunity period. + pub fn DefaultHotkeyEmissionTempo() -> u64 { + T::InitialHotkeyEmissionTempo::get() + } + #[pallet::type_value] + /// Default value for rate limiting + pub fn DefaultTxRateLimit() -> u64 { + T::InitialTxRateLimit::get() + } + #[pallet::type_value] + /// Default value for delegate take rate limiting + pub fn DefaultTxDelegateTakeRateLimit() -> u64 { + T::InitialTxDelegateTakeRateLimit::get() + } + #[pallet::type_value] + /// Default value for chidlkey take rate limiting + pub fn DefaultTxChildKeyTakeRateLimit() -> u64 { + T::InitialTxChildKeyTakeRateLimit::get() + } + #[pallet::type_value] + /// Default value for last extrinsic block. + pub fn DefaultLastTxBlock() -> u64 { + 0 + } + #[pallet::type_value] + /// Default value for serving rate limit. + pub fn DefaultServingRateLimit() -> u64 { + T::InitialServingRateLimit::get() + } + #[pallet::type_value] + /// Default value for weight commit reveal interval. + pub fn DefaultWeightCommitRevealInterval() -> u64 { + 1000 + } + #[pallet::type_value] + /// Default value for weight commit/reveal enabled. 
+ pub fn DefaultCommitRevealWeightsEnabled() -> bool { + false + } + #[pallet::type_value] + /// Senate requirements + pub fn DefaultSenateRequiredStakePercentage() -> u64 { + T::InitialSenateRequiredStakePercentage::get() + } + #[pallet::type_value] + /// -- ITEM (switches liquid alpha on) + pub fn DefaultLiquidAlpha() -> bool { + false + } + #[pallet::type_value] + /// (alpha_low: 0.7, alpha_high: 0.9) pub fn DefaultAlphaValues() -> (u16, u16) { - (45875, 58982) // (alpha_low: 0.7, alpha_high: 0.9) + (45875, 58982) + } + #[pallet::type_value] + /// Default value for network max stake. + pub fn DefaultNetworkMaxStake() -> u64 { + T::InitialNetworkMaxStake::get() } - #[pallet::storage] // ITEM( weights_min_stake ) - pub type WeightsMinStake = StorageValue<_, u64, ValueQuery, DefaultWeightsMinStake>; - #[pallet::storage] // --- MAP ( netuid ) --> Rho + #[pallet::type_value] + /// Default value for coldkey swap schedule duration + pub fn DefaultColdkeySwapScheduleDuration() -> BlockNumberFor { + T::InitialColdkeySwapScheduleDuration::get() + } + + #[pallet::storage] + pub type ColdkeySwapScheduleDuration = + StorageValue<_, BlockNumberFor, ValueQuery, DefaultColdkeySwapScheduleDuration>; + + #[pallet::type_value] + /// Default value for dissolve network schedule duration + pub fn DefaultDissolveNetworkScheduleDuration() -> BlockNumberFor { + T::InitialDissolveNetworkScheduleDuration::get() + } + + #[pallet::storage] + pub type DissolveNetworkScheduleDuration = + StorageValue<_, BlockNumberFor, ValueQuery, DefaultDissolveNetworkScheduleDuration>; + + #[pallet::storage] + pub type SenateRequiredStakePercentage = + StorageValue<_, u64, ValueQuery, DefaultSenateRequiredStakePercentage>; + + /// ============================ + /// ==== Staking Variables ==== + /// ============================ + /// The Subtensor [`TotalIssuance`] represents the total issuance of tokens on the Bittensor network. + /// + /// It is comprised of three parts: + /// - The total amount of issued tokens, tracked in the TotalIssuance of the Balances pallet + /// - The total amount of tokens staked in the system, tracked in [`TotalStake`] + /// - The total amount of tokens locked up for subnet reg, tracked in [`TotalSubnetLocked`] attained by iterating over subnet lock. + /// + /// Eventually, Bittensor should migrate to using Holds afterwhich time we will not require this + /// separate accounting. 
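The doc comment closing this chunk describes the `TotalIssuance` item defined just below as the sum of three buckets: the Balances pallet's own issuance, everything staked (`TotalStake`), and the TAO locked for subnet registrations. A toy reconciliation with hypothetical figures, using saturating addition as the pallet does elsewhere:

```rust
/// Recompute the network-wide issuance figure from its three parts.
/// The arguments stand in for storage reads in the real pallet.
fn reconcile_total_issuance(
    balances_issuance: u64,
    total_stake: u64,
    total_subnet_locked: u64,
) -> u64 {
    balances_issuance
        .saturating_add(total_stake)
        .saturating_add(total_subnet_locked)
}

fn main() {
    let total = reconcile_total_issuance(9_000_000, 800_000, 200_000);
    assert_eq!(total, 10_000_000);
    println!("reconciled total issuance = {total}");
}
```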
+ #[pallet::storage] // --- ITEM ( total_issuance ) + pub type TotalIssuance = StorageValue<_, u64, ValueQuery, DefaultTotalIssuance>; + #[pallet::storage] // --- ITEM ( total_stake ) + pub type TotalStake = StorageValue<_, u64, ValueQuery>; + #[pallet::storage] // --- ITEM ( default_delegate_take ) + pub type MaxDelegateTake = StorageValue<_, u16, ValueQuery, DefaultDelegateTake>; + #[pallet::storage] // --- ITEM ( min_delegate_take ) + pub type MinDelegateTake = StorageValue<_, u16, ValueQuery, DefaultMinDelegateTake>; + #[pallet::storage] // --- ITEM ( default_childkey_take ) + pub type MaxChildkeyTake = StorageValue<_, u16, ValueQuery, DefaultMaxChildKeyTake>; + #[pallet::storage] // --- ITEM ( min_childkey_take ) + pub type MinChildkeyTake = StorageValue<_, u16, ValueQuery, DefaultMinChildKeyTake>; + + #[pallet::storage] // --- ITEM ( global_block_emission ) + pub type BlockEmission = StorageValue<_, u64, ValueQuery, DefaultBlockEmission>; + #[pallet::storage] // --- ITEM (target_stakes_per_interval) + pub type TargetStakesPerInterval = + StorageValue<_, u64, ValueQuery, DefaultTargetStakesPerInterval>; + #[pallet::storage] // --- ITEM (default_stake_interval) + pub type StakeInterval = StorageValue<_, u64, ValueQuery, DefaultStakeInterval>; + #[pallet::storage] // --- MAP ( hot ) --> stake | Returns the total amount of stake under a hotkey. + pub type TotalHotkeyStake = + StorageMap<_, Identity, T::AccountId, u64, ValueQuery, DefaultAccountTake>; + #[pallet::storage] // --- MAP ( cold ) --> stake | Returns the total amount of stake under a coldkey. + pub type TotalColdkeyStake = + StorageMap<_, Identity, T::AccountId, u64, ValueQuery, DefaultAccountTake>; + #[pallet::storage] + /// MAP (hot, cold) --> stake | Returns a tuple (u64: stakes, u64: block_number) + pub type TotalHotkeyColdkeyStakesThisInterval = StorageDoubleMap< + _, + Identity, + T::AccountId, + Identity, + T::AccountId, + (u64, u64), + ValueQuery, + DefaultStakesPerInterval, + >; + #[pallet::storage] + /// MAP ( hot ) --> cold | Returns the controlling coldkey for a hotkey. + pub type Owner = + StorageMap<_, Blake2_128Concat, T::AccountId, T::AccountId, ValueQuery, DefaultAccount>; + #[pallet::storage] + /// MAP ( hot ) --> take | Returns the hotkey delegation take. And signals that this key is open for delegation. + pub type Delegates = + StorageMap<_, Blake2_128Concat, T::AccountId, u16, ValueQuery, DefaultDelegateTake>; + #[pallet::storage] + /// DMAP ( hot, netuid ) --> take | Returns the hotkey childkey take for a specific subnet + pub type ChildkeyTake = StorageDoubleMap< + _, + Blake2_128Concat, + T::AccountId, // First key: hotkey + Identity, + u16, // Second key: netuid + u16, // Value: take + ValueQuery, + >; + + #[pallet::storage] + /// DMAP ( hot, cold ) --> stake | Returns the stake under a coldkey prefixed by hotkey. + pub type Stake = StorageDoubleMap< + _, + Blake2_128Concat, + T::AccountId, + Identity, + T::AccountId, + u64, + ValueQuery, + DefaultAccountTake, + >; + #[pallet::storage] + /// Map ( hot ) --> last_hotkey_emission_drain | Last block we drained this hotkey's emission. + pub type LastHotkeyEmissionDrain = StorageMap< + _, + Blake2_128Concat, + T::AccountId, + u64, + ValueQuery, + DefaultAccumulatedEmission, + >; + #[pallet::storage] + /// ITEM ( hotkey_emission_tempo ) + pub type HotkeyEmissionTempo = + StorageValue<_, u64, ValueQuery, DefaultHotkeyEmissionTempo>; + #[pallet::storage] + /// Map ( hot ) --> emission | Accumulated hotkey emission. 
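`HotkeyEmissionTempo` above and the `PendingdHotkeyEmission` map just below let emission accumulate per hotkey and be paid out periodically rather than on every block. The sketch below shows one plausible way to spread those drains over a tempo-length cycle; the hashing rule is an assumption for illustration and not necessarily the pallet's exact schedule:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Drain `hotkey`'s accumulated emission once per (emission_tempo + 1)-block cycle,
/// at an offset derived from the key so that payouts do not all land on the same block.
fn should_drain_hotkey(hotkey: &[u8], block: u64, emission_tempo: u64) -> bool {
    let cycle = emission_tempo.saturating_add(1);
    let mut hasher = DefaultHasher::new();
    hotkey.hash(&mut hasher);
    block % cycle == hasher.finish() % cycle
}

fn main() {
    let hotkey = b"example-hotkey-bytes".as_slice(); // placeholder key material
    let tempo = 7_200; // e.g. roughly one day of 12-second blocks
    let drains = (0..=tempo).filter(|b| should_drain_hotkey(hotkey, *b, tempo)).count();
    assert_eq!(drains, 1); // exactly one payout block per cycle
    println!("drains once every {} blocks", tempo + 1);
}
```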
+ pub type PendingdHotkeyEmission = StorageMap< + _, + Blake2_128Concat, + T::AccountId, + u64, + ValueQuery, + DefaultAccumulatedEmission, + >; + #[pallet::storage] + /// Map ( hot, cold ) --> block_number | Last add stake increase. + pub type LastAddStakeIncrease = StorageDoubleMap< + _, + Blake2_128Concat, + T::AccountId, + Identity, + T::AccountId, + u64, + ValueQuery, + DefaultAccountTake, + >; + #[pallet::storage] + /// DMAP ( parent, netuid ) --> Vec<(proportion,child)> + pub type ChildKeys = StorageDoubleMap< + _, + Blake2_128Concat, + T::AccountId, + Identity, + u16, + Vec<(u64, T::AccountId)>, + ValueQuery, + DefaultAccountLinkage, + >; + #[pallet::storage] + /// DMAP ( child, netuid ) --> Vec<(proportion,parent)> + pub type ParentKeys = StorageDoubleMap< + _, + Blake2_128Concat, + T::AccountId, + Identity, + u16, + Vec<(u64, T::AccountId)>, + ValueQuery, + DefaultAccountLinkage, + >; + #[pallet::storage] // --- DMAP ( cold ) --> Vec | Maps coldkey to hotkeys that stake to it + pub type StakingHotkeys = + StorageMap<_, Blake2_128Concat, T::AccountId, Vec, ValueQuery>; + #[pallet::storage] // --- MAP ( cold ) --> Vec | Returns the vector of hotkeys controlled by this coldkey. + pub type OwnedHotkeys = + StorageMap<_, Blake2_128Concat, T::AccountId, Vec, ValueQuery>; + + #[pallet::storage] // --- DMAP ( cold ) --> () | Maps coldkey to if a coldkey swap is scheduled. + pub type ColdkeySwapScheduled = + StorageMap<_, Blake2_128Concat, T::AccountId, (), ValueQuery>; + + /// ============================ + /// ==== Global Parameters ===== + /// ============================ + #[pallet::storage] + /// --- StorageItem Global Used Work. + pub type UsedWork = StorageMap<_, Identity, Vec, u64, ValueQuery>; + #[pallet::storage] + /// --- ITEM( global_max_registrations_per_block ) + pub type MaxRegistrationsPerBlock = + StorageMap<_, Identity, u16, u16, ValueQuery, DefaultMaxRegistrationsPerBlock>; + #[pallet::storage] + /// --- ITEM( maximum_number_of_networks ) + pub type SubnetLimit = StorageValue<_, u16, ValueQuery, DefaultSubnetLimit>; + #[pallet::storage] + /// --- ITEM( total_number_of_existing_networks ) + pub type TotalNetworks = StorageValue<_, u16, ValueQuery>; + #[pallet::storage] + /// ITEM( network_immunity_period ) + pub type NetworkImmunityPeriod = + StorageValue<_, u64, ValueQuery, DefaultNetworkImmunityPeriod>; + #[pallet::storage] + /// ITEM( network_last_registered_block ) + pub type NetworkLastRegistered = + StorageValue<_, u64, ValueQuery, DefaultNetworkLastRegistered>; + #[pallet::storage] + /// ITEM( network_min_allowed_uids ) + pub type NetworkMinAllowedUids = + StorageValue<_, u16, ValueQuery, DefaultNetworkMinAllowedUids>; + #[pallet::storage] + /// ITEM( min_network_lock_cost ) + pub type NetworkMinLockCost = StorageValue<_, u64, ValueQuery, DefaultNetworkMinLockCost>; + #[pallet::storage] + /// ITEM( last_network_lock_cost ) + pub type NetworkLastLockCost = + StorageValue<_, u64, ValueQuery, DefaultNetworkMinLockCost>; + #[pallet::storage] + /// ITEM( network_lock_reduction_interval ) + pub type NetworkLockReductionInterval = + StorageValue<_, u64, ValueQuery, DefaultNetworkLockReductionInterval>; + #[pallet::storage] + /// ITEM( subnet_owner_cut ) + pub type SubnetOwnerCut = StorageValue<_, u16, ValueQuery, DefaultSubnetOwnerCut>; + #[pallet::storage] + /// ITEM( network_rate_limit ) + pub type NetworkRateLimit = StorageValue<_, u64, ValueQuery, DefaultNetworkRateLimit>; + #[pallet::storage] + /// ITEM( nominator_min_required_stake ) + pub type 
NominatorMinRequiredStake = + StorageValue<_, u64, ValueQuery, DefaultNominatorMinRequiredStake>; + + /// ============================ + /// ==== Subnet Parameters ===== + /// ============================ + #[pallet::storage] + /// --- MAP ( netuid ) --> subnetwork_n (Number of UIDs in the network). + pub type SubnetworkN = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultN>; + #[pallet::storage] + /// --- MAP ( netuid ) --> modality TEXT: 0, IMAGE: 1, TENSOR: 2 + pub type NetworkModality = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultModality>; + #[pallet::storage] + /// --- MAP ( netuid ) --> network_is_added + pub type NetworksAdded = + StorageMap<_, Identity, u16, bool, ValueQuery, DefaultNeworksAdded>; + #[pallet::storage] + /// --- DMAP ( hotkey, netuid ) --> bool + pub type IsNetworkMember = StorageDoubleMap< + _, + Blake2_128Concat, + T::AccountId, + Identity, + u16, + bool, + ValueQuery, + DefaultIsNetworkMember, + >; + #[pallet::storage] + /// --- MAP ( netuid ) --> network_registration_allowed + pub type NetworkRegistrationAllowed = + StorageMap<_, Identity, u16, bool, ValueQuery, DefaultRegistrationAllowed>; + #[pallet::storage] + /// --- MAP ( netuid ) --> network_pow_allowed + pub type NetworkPowRegistrationAllowed = + StorageMap<_, Identity, u16, bool, ValueQuery, DefaultRegistrationAllowed>; + #[pallet::storage] + /// --- MAP ( netuid ) --> block_created + pub type NetworkRegisteredAt = + StorageMap<_, Identity, u16, u64, ValueQuery, DefaultNetworkRegisteredAt>; + #[pallet::storage] + /// --- MAP ( netuid ) --> tempo + pub type Tempo = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultTempo>; + #[pallet::storage] + /// --- MAP ( netuid ) --> emission_values + pub type EmissionValues = + StorageMap<_, Identity, u16, u64, ValueQuery, DefaultEmissionValues>; + #[pallet::storage] + /// --- MAP ( netuid ) --> pending_emission + pub type PendingEmission = + StorageMap<_, Identity, u16, u64, ValueQuery, DefaultPendingEmission>; + #[pallet::storage] + /// --- MAP ( netuid ) --> blocks_since_last_step + pub type BlocksSinceLastStep = + StorageMap<_, Identity, u16, u64, ValueQuery, DefaultBlocksSinceLastStep>; + #[pallet::storage] + /// --- MAP ( netuid ) --> last_mechanism_step_block + pub type LastMechansimStepBlock = + StorageMap<_, Identity, u16, u64, ValueQuery, DefaultLastMechanismStepBlock>; + #[pallet::storage] + /// --- MAP ( netuid ) --> subnet_owner + pub type SubnetOwner = + StorageMap<_, Identity, u16, T::AccountId, ValueQuery, DefaultSubnetOwner>; + #[pallet::storage] + /// --- MAP ( netuid ) --> subnet_locked + pub type SubnetLocked = + StorageMap<_, Identity, u16, u64, ValueQuery, DefaultSubnetLocked>; + #[pallet::storage] + /// --- MAP ( netuid ) --> serving_rate_limit + pub type ServingRateLimit = + StorageMap<_, Identity, u16, u64, ValueQuery, DefaultServingRateLimit>; + #[pallet::storage] + /// --- MAP ( netuid ) --> Rho pub type Rho = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultRho>; - #[pallet::storage] // --- MAP ( netuid ) --> Kappa + #[pallet::storage] + /// --- MAP ( netuid ) --> Kappa pub type Kappa = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultKappa>; - #[pallet::storage] // --- MAP ( netuid ) --> uid, we use to record uids to prune at next epoch. + #[pallet::storage] + /// --- MAP ( netuid ) --> uid, we use to record uids to prune at next epoch. 
pub type NeuronsToPruneAtNextEpoch = StorageMap<_, Identity, u16, u16, ValueQuery>; - #[pallet::storage] // --- MAP ( netuid ) --> registrations_this_interval + #[pallet::storage] + /// --- MAP ( netuid ) --> registrations_this_interval pub type RegistrationsThisInterval = StorageMap<_, Identity, u16, u16, ValueQuery>; - #[pallet::storage] // --- MAP ( netuid ) --> pow_registrations_this_interval + #[pallet::storage] + /// --- MAP ( netuid ) --> pow_registrations_this_interval pub type POWRegistrationsThisInterval = StorageMap<_, Identity, u16, u16, ValueQuery>; - #[pallet::storage] // --- MAP ( netuid ) --> burn_registrations_this_interval + #[pallet::storage] + /// --- MAP ( netuid ) --> burn_registrations_this_interval pub type BurnRegistrationsThisInterval = StorageMap<_, Identity, u16, u16, ValueQuery>; - #[pallet::storage] // --- MAP ( netuid ) --> max_allowed_uids + #[pallet::storage] + /// --- MAP ( netuid ) --> max_allowed_uids pub type MaxAllowedUids = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultMaxAllowedUids>; - #[pallet::storage] // --- MAP ( netuid ) --> immunity_period + #[pallet::storage] + /// --- MAP ( netuid ) --> immunity_period pub type ImmunityPeriod = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultImmunityPeriod>; - #[pallet::storage] // --- MAP ( netuid ) --> activity_cutoff + #[pallet::storage] + /// --- MAP ( netuid ) --> activity_cutoff pub type ActivityCutoff = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultActivityCutoff>; - #[pallet::storage] // --- MAP ( netuid ) --> max_weight_limit + #[pallet::storage] + /// --- MAP ( netuid ) --> max_weight_limit pub type MaxWeightsLimit = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultMaxWeightsLimit>; - #[pallet::storage] // --- MAP ( netuid ) --> weights_version_key + #[pallet::storage] + /// --- MAP ( netuid ) --> weights_version_key pub type WeightsVersionKey = StorageMap<_, Identity, u16, u64, ValueQuery, DefaultWeightsVersionKey>; - #[pallet::storage] // --- MAP ( netuid ) --> min_allowed_weights + #[pallet::storage] + /// --- MAP ( netuid ) --> min_allowed_weights pub type MinAllowedWeights = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultMinAllowedWeights>; - #[pallet::storage] // --- MAP ( netuid ) --> max_allowed_validators + #[pallet::storage] + /// --- MAP ( netuid ) --> max_allowed_validators pub type MaxAllowedValidators = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultMaxAllowedValidators>; - #[pallet::storage] // --- MAP ( netuid ) --> adjustment_interval + #[pallet::storage] + /// --- MAP ( netuid ) --> adjustment_interval pub type AdjustmentInterval = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultAdjustmentInterval>; - #[pallet::storage] // --- MAP ( netuid ) --> bonds_moving_average + #[pallet::storage] + /// --- MAP ( netuid ) --> bonds_moving_average pub type BondsMovingAverage = StorageMap<_, Identity, u16, u64, ValueQuery, DefaultBondsMovingAverage>; - #[pallet::storage] // --- MAP ( netuid ) --> weights_set_rate_limit + #[pallet::storage] + /// --- MAP ( netuid ) --> weights_set_rate_limit pub type WeightsSetRateLimit = StorageMap<_, Identity, u16, u64, ValueQuery, DefaultWeightsSetRateLimit>; - #[pallet::storage] // --- MAP ( netuid ) --> validator_prune_len + #[pallet::storage] + /// --- MAP ( netuid ) --> validator_prune_len pub type ValidatorPruneLen = StorageMap<_, Identity, u16, u64, ValueQuery, DefaultValidatorPruneLen>; - #[pallet::storage] // --- MAP ( netuid ) --> scaling_law_power + #[pallet::storage] + /// --- MAP ( netuid ) --> 
scaling_law_power pub type ScalingLawPower = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultScalingLawPower>; - #[pallet::storage] // --- MAP ( netuid ) --> target_registrations_this_interval + #[pallet::storage] + /// --- MAP ( netuid ) --> target_registrations_this_interval pub type TargetRegistrationsPerInterval = StorageMap<_, Identity, u16, u16, ValueQuery, DefaultTargetRegistrationsPerInterval>; - #[pallet::storage] // --- DMAP ( netuid, uid ) --> block_at_registration - pub type BlockAtRegistration = StorageDoubleMap< - _, - Identity, - u16, - Identity, - u16, - u64, - ValueQuery, - DefaultBlockAtRegistration, - >; - #[pallet::storage] // --- DMAP ( netuid ) --> adjustment_alpha + #[pallet::storage] + /// --- MAP ( netuid ) --> adjustment_alpha pub type AdjustmentAlpha = StorageMap<_, Identity, u16, u64, ValueQuery, DefaultAdjustmentAlpha>; - - // MAP ( netuid ) --> (alpha_low, alpha_high) #[pallet::storage] - pub type AlphaValues = - StorageMap<_, Identity, u16, (u16, u16), ValueQuery, DefaultAlphaValues>; - - #[pallet::storage] // --- MAP (netuid, who) --> (hash, weight) | Returns the hash and weight committed by an account for a given netuid. - pub type WeightCommits = StorageDoubleMap< - _, - Twox64Concat, - u16, - Twox64Concat, - T::AccountId, - (H256, u64), - OptionQuery, - >; - - /// Default value for weight commit reveal interval. - #[pallet::type_value] - pub fn DefaultWeightCommitRevealInterval() -> u64 { - 1000 - } - // --- DMAP ( netuid ) --> interval + /// --- MAP ( netuid ) --> interval + pub type WeightCommitRevealInterval = + StorageMap<_, Identity, u16, u64, ValueQuery, DefaultWeightCommitRevealInterval>; + #[pallet::storage] + /// --- MAP ( netuid ) --> interval + pub type CommitRevealWeightsEnabled = + StorageMap<_, Identity, u16, bool, ValueQuery, DefaultCommitRevealWeightsEnabled>; + #[pallet::storage] + /// --- MAP ( netuid ) --> Burn + pub type Burn = StorageMap<_, Identity, u16, u64, ValueQuery, DefaultBurn>; + #[pallet::storage] + /// --- MAP ( netuid ) --> Difficulty + pub type Difficulty = StorageMap<_, Identity, u16, u64, ValueQuery, DefaultDifficulty>; + #[pallet::storage] + /// --- MAP ( netuid ) --> MinBurn + pub type MinBurn = StorageMap<_, Identity, u16, u64, ValueQuery, DefaultMinBurn>; + #[pallet::storage] + /// --- MAP ( netuid ) --> MaxBurn + pub type MaxBurn = StorageMap<_, Identity, u16, u64, ValueQuery, DefaultMaxBurn>; + #[pallet::storage] + /// --- MAP ( netuid ) --> MinDifficulty + pub type MinDifficulty = + StorageMap<_, Identity, u16, u64, ValueQuery, DefaultMinDifficulty>; + #[pallet::storage] + /// --- MAP ( netuid ) --> MaxDifficulty + pub type MaxDifficulty = + StorageMap<_, Identity, u16, u64, ValueQuery, DefaultMaxDifficulty>; + #[pallet::storage] + /// --- MAP ( netuid ) --> Block at last adjustment. + pub type LastAdjustmentBlock = + StorageMap<_, Identity, u16, u64, ValueQuery, DefaultLastAdjustmentBlock>; + #[pallet::storage] + /// --- MAP ( netuid ) --> Registrations of this Block. 
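`WeightCommitRevealInterval` (default 1000 blocks) and `CommitRevealWeightsEnabled` (default off) in this chunk gate the commit-reveal flow for weights. Below is a sketch of one interval rule consistent with those parameters, assuming a reveal must land in the interval immediately after the one containing its commit; the pallet's actual window may differ:

```rust
/// Index of the commit-reveal interval a block falls into.
fn interval_index(block: u64, interval: u64) -> u64 {
    block / interval.max(1)
}

/// Simplified rule: the reveal is valid only in the interval right after the commit's.
fn reveal_allowed(commit_block: u64, reveal_block: u64, interval: u64) -> bool {
    interval_index(reveal_block, interval) == interval_index(commit_block, interval) + 1
}

fn main() {
    let interval = 1_000; // DefaultWeightCommitRevealInterval
    assert!(reveal_allowed(1_250, 2_100, interval)); // committed in interval 1, revealed in 2
    assert!(!reveal_allowed(1_250, 1_900, interval)); // too early: same interval
    assert!(!reveal_allowed(1_250, 3_100, interval)); // too late: interval 3
    println!("commit-reveal window checks passed");
}
```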
+ pub type RegistrationsThisBlock = + StorageMap<_, Identity, u16, u16, ValueQuery, DefaultRegistrationsThisBlock>; + #[pallet::storage] + /// --- MAP ( netuid ) --> global_RAO_recycled_for_registration + pub type RAORecycledForRegistration = + StorageMap<_, Identity, u16, u64, ValueQuery, DefaultRAORecycledForRegistration>; + #[pallet::storage] + /// --- ITEM ( tx_rate_limit ) + pub type TxRateLimit = StorageValue<_, u64, ValueQuery, DefaultTxRateLimit>; + #[pallet::storage] + /// --- ITEM ( tx_delegate_take_rate_limit ) + pub type TxDelegateTakeRateLimit = + StorageValue<_, u64, ValueQuery, DefaultTxDelegateTakeRateLimit>; + #[pallet::storage] + /// --- ITEM ( tx_childkey_take_rate_limit ) + pub type TxChildkeyTakeRateLimit = + StorageValue<_, u64, ValueQuery, DefaultTxChildKeyTakeRateLimit>; + #[pallet::storage] + /// --- MAP ( netuid ) --> Whether or not Liquid Alpha is enabled + pub type LiquidAlphaOn = + StorageMap<_, Blake2_128Concat, u16, bool, ValueQuery, DefaultLiquidAlpha>; #[pallet::storage] - pub type WeightCommitRevealInterval = - StorageMap<_, Identity, u16, u64, ValueQuery, DefaultWeightCommitRevealInterval>; - - /// Default value for weight commit/reveal enabled. - #[pallet::type_value] - pub fn DefaultCommitRevealWeightsEnabled() -> bool { - false - } - // --- DMAP ( netuid ) --> interval + /// MAP ( netuid ) --> (alpha_low, alpha_high) + pub type AlphaValues = + StorageMap<_, Identity, u16, (u16, u16), ValueQuery, DefaultAlphaValues>; + /// MAP ( netuid ) --> max stake allowed on a subnet. #[pallet::storage] - pub type CommitRevealWeightsEnabled = - StorageMap<_, Identity, u16, bool, ValueQuery, DefaultCommitRevealWeightsEnabled>; + pub type NetworkMaxStake = + StorageMap<_, Identity, u16, u64, ValueQuery, DefaultNetworkMaxStake>; /// ======================================= /// ==== Subnetwork Consensus Storage ==== /// ======================================= - - /// Value definition for vector of u16. - #[pallet::type_value] - pub fn EmptyU16Vec() -> Vec { - vec![] - } - /// Value definition for vector of u64. - #[pallet::type_value] - pub fn EmptyU64Vec() -> Vec { - vec![] - } - /// Value definition for vector of bool. - #[pallet::type_value] - pub fn EmptyBoolVec() -> Vec { - vec![] - } - /// Value definition for bonds with type vector of (u16, u16). - #[pallet::type_value] - pub fn DefaultBonds() -> Vec<(u16, u16)> { - vec![] - } - /// Value definition for weights with vector of (u16, u16). - #[pallet::type_value] - pub fn DefaultWeights() -> Vec<(u16, u16)> { - vec![] - } - /// Default value for key with type T::AccountId derived from trailing zeroes. - #[pallet::type_value] - pub fn DefaultKey() -> T::AccountId { - T::AccountId::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()) - .expect("trailing zeroes always produce a valid account ID; qed") - } - - #[pallet::storage] // --- DMAP ( netuid, hotkey ) --> uid + #[pallet::storage] // --- DMAP ( netuid ) --> stake_weight | weight for stake used in YC. 
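`AlphaValues` in this chunk stores `(alpha_low, alpha_high)` as `u16` fractions of `u16::MAX`, so the `DefaultAlphaValues` pair `(45875, 58982)` corresponds to roughly 0.7 and 0.9. A quick standalone conversion check:

```rust
/// Interpret a u16 stored in AlphaValues as a fraction of u16::MAX.
fn u16_to_fraction(x: u16) -> f64 {
    x as f64 / u16::MAX as f64
}

fn main() {
    let (alpha_low, alpha_high) = (45_875u16, 58_982u16); // DefaultAlphaValues
    assert!((u16_to_fraction(alpha_low) - 0.7).abs() < 1e-4);
    assert!((u16_to_fraction(alpha_high) - 0.9).abs() < 1e-4);
    println!(
        "alpha_low ≈ {:.4}, alpha_high ≈ {:.4}",
        u16_to_fraction(alpha_low),
        u16_to_fraction(alpha_high)
    );
}
```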
+    pub(super) type StakeWeight<T: Config> =
+        StorageMap<_, Identity, u16, Vec<u16>, ValueQuery, EmptyU16Vec<T>>;
+    #[pallet::storage]
+    /// --- DMAP ( netuid, hotkey ) --> uid
     pub type Uids<T: Config> =
         StorageDoubleMap<_, Identity, u16, Blake2_128Concat, T::AccountId, u16, OptionQuery>;
-    #[pallet::storage] // --- DMAP ( netuid, uid ) --> hotkey
+    #[pallet::storage]
+    /// --- DMAP ( netuid, uid ) --> hotkey
     pub type Keys<T: Config> =
         StorageDoubleMap<_, Identity, u16, Identity, u16, T::AccountId, ValueQuery, DefaultKey<T>>;
-    #[pallet::storage] // --- DMAP ( netuid ) --> (hotkey, se, ve)
+    #[pallet::storage]
+    /// --- DMAP ( netuid ) --> (hotkey, se, ve)
     pub type LoadedEmission<T: Config> =
         StorageMap<_, Identity, u16, Vec<(T::AccountId, u64, u64)>, OptionQuery>;
-
-    #[pallet::storage] // --- DMAP ( netuid ) --> active
-    pub(super) type Active<T: Config> =
+    #[pallet::storage]
+    /// --- DMAP ( netuid ) --> active
+    pub type Active<T: Config> =
         StorageMap<_, Identity, u16, Vec<bool>, ValueQuery, EmptyBoolVec<T>>;
-    #[pallet::storage] // --- DMAP ( netuid ) --> rank
-    pub(super) type Rank<T: Config> =
-        StorageMap<_, Identity, u16, Vec<u16>, ValueQuery, EmptyU16Vec<T>>;
-    #[pallet::storage] // --- DMAP ( netuid ) --> trust
-    pub(super) type Trust<T: Config> =
-        StorageMap<_, Identity, u16, Vec<u16>, ValueQuery, EmptyU16Vec<T>>;
-    #[pallet::storage] // --- DMAP ( netuid ) --> consensus
-    pub(super) type Consensus<T: Config> =
+    #[pallet::storage]
+    /// --- DMAP ( netuid ) --> rank
+    pub type Rank<T: Config> = StorageMap<_, Identity, u16, Vec<u16>, ValueQuery, EmptyU16Vec<T>>;
+    #[pallet::storage]
+    /// --- DMAP ( netuid ) --> trust
+    pub type Trust<T: Config> = StorageMap<_, Identity, u16, Vec<u16>, ValueQuery, EmptyU16Vec<T>>;
+    #[pallet::storage]
+    /// --- DMAP ( netuid ) --> consensus
+    pub type Consensus<T: Config> =
         StorageMap<_, Identity, u16, Vec<u16>, ValueQuery, EmptyU16Vec<T>>;
-    #[pallet::storage] // --- DMAP ( netuid ) --> incentive
-    pub(super) type Incentive<T: Config> =
+    #[pallet::storage]
+    /// --- DMAP ( netuid ) --> incentive
+    pub type Incentive<T: Config> =
         StorageMap<_, Identity, u16, Vec<u16>, ValueQuery, EmptyU16Vec<T>>;
-    #[pallet::storage] // --- DMAP ( netuid ) --> dividends
-    pub(super) type Dividends<T: Config> =
+    #[pallet::storage]
+    /// --- DMAP ( netuid ) --> dividends
+    pub type Dividends<T: Config> =
         StorageMap<_, Identity, u16, Vec<u16>, ValueQuery, EmptyU16Vec<T>>;
-    #[pallet::storage] // --- DMAP ( netuid ) --> emission
-    pub(super) type Emission<T: Config> =
+    #[pallet::storage]
+    /// --- DMAP ( netuid ) --> emission
+    pub type Emission<T: Config> =
         StorageMap<_, Identity, u16, Vec<u64>, ValueQuery, EmptyU64Vec<T>>;
-    #[pallet::storage] // --- DMAP ( netuid ) --> last_update
-    pub(super) type LastUpdate<T: Config> =
+    #[pallet::storage]
+    /// --- DMAP ( netuid ) --> last_update
+    pub type LastUpdate<T: Config> =
         StorageMap<_, Identity, u16, Vec<u64>, ValueQuery, EmptyU64Vec<T>>;
-    #[pallet::storage] // --- DMAP ( netuid ) --> validator_trust
-    pub(super) type ValidatorTrust<T: Config> =
+    #[pallet::storage]
+    /// --- DMAP ( netuid ) --> validator_trust
+    pub type ValidatorTrust<T: Config> =
         StorageMap<_, Identity, u16, Vec<u16>, ValueQuery, EmptyU16Vec<T>>;
-    #[pallet::storage] // --- DMAP ( netuid ) --> pruning_scores
-    pub(super) type PruningScores<T: Config> =
+    #[pallet::storage]
+    /// --- DMAP ( netuid ) --> pruning_scores
+    pub type PruningScores<T: Config> =
         StorageMap<_, Identity, u16, Vec<u16>, ValueQuery, EmptyU16Vec<T>>;
-    #[pallet::storage] // --- DMAP ( netuid ) --> validator_permit
-    pub(super) type ValidatorPermit<T: Config> =
+    #[pallet::storage]
+    /// --- DMAP ( netuid ) --> validator_permit
+    pub type ValidatorPermit<T: Config> =
         StorageMap<_, Identity, u16, Vec<bool>, ValueQuery, EmptyBoolVec<T>>;
-
-    #[pallet::storage] // --- DMAP ( netuid, uid ) --> weights
-    pub(super) type Weights<T: Config> = StorageDoubleMap<
+    #[pallet::storage]
+
/// --- DMAP ( netuid, uid ) --> weights + pub type Weights = StorageDoubleMap< _, Identity, u16, @@ -1128,8 +1134,9 @@ pub mod pallet { ValueQuery, DefaultWeights, >; - #[pallet::storage] // --- DMAP ( netuid, uid ) --> bonds - pub(super) type Bonds = StorageDoubleMap< + #[pallet::storage] + /// --- DMAP ( netuid, uid ) --> bonds + pub type Bonds = StorageDoubleMap< _, Identity, u16, @@ -1139,13 +1146,87 @@ pub mod pallet { ValueQuery, DefaultBonds, >; + #[pallet::storage] + /// --- DMAP ( netuid, uid ) --> block_at_registration + pub type BlockAtRegistration = StorageDoubleMap< + _, + Identity, + u16, + Identity, + u16, + u64, + ValueQuery, + DefaultBlockAtRegistration, + >; + #[pallet::storage] + /// --- MAP ( netuid, hotkey ) --> axon_info + pub type Axons = + StorageDoubleMap<_, Identity, u16, Blake2_128Concat, T::AccountId, AxonInfoOf, OptionQuery>; + #[pallet::storage] + /// --- MAP ( netuid, hotkey ) --> prometheus_info + pub type Prometheus = StorageDoubleMap< + _, + Identity, + u16, + Blake2_128Concat, + T::AccountId, + PrometheusInfoOf, + OptionQuery, + >; + #[pallet::storage] // --- MAP ( coldkey ) --> identity + pub type Identities = + StorageMap<_, Blake2_128Concat, T::AccountId, ChainIdentityOf, OptionQuery>; - #[pallet::storage] // --- Storage for migration run status - pub type HasMigrationRun = StorageMap<_, Identity, Vec, bool, ValueQuery>; + #[pallet::storage] // --- MAP ( netuid ) --> identity + pub type SubnetIdentities = + StorageMap<_, Blake2_128Concat, u16, SubnetIdentityOf, OptionQuery>; + + /// ================================= + /// ==== Axon / Promo Endpoints ===== + /// ================================= + #[pallet::storage] // --- NMAP ( hot, netuid, name ) --> last_block | Returns the last block of a transaction for a given key, netuid, and name. + pub type TransactionKeyLastBlock = StorageNMap< + _, + ( + NMapKey, // hot + NMapKey, // netuid + NMapKey, // extrinsic enum. + ), + u64, + ValueQuery, + >; + #[pallet::storage] + /// --- MAP ( key ) --> last_block + pub type LastTxBlock = + StorageMap<_, Identity, T::AccountId, u64, ValueQuery, DefaultLastTxBlock>; + #[pallet::storage] + /// --- MAP ( key ) --> last_tx_block_childkey_take + pub type LastTxBlockChildKeyTake = + StorageMap<_, Identity, T::AccountId, u64, ValueQuery, DefaultLastTxBlock>; + #[pallet::storage] + /// --- MAP ( key ) --> last_tx_block_delegate_take + pub type LastTxBlockDelegateTake = + StorageMap<_, Identity, T::AccountId, u64, ValueQuery, DefaultLastTxBlock>; + #[pallet::storage] + /// ITEM( weights_min_stake ) + pub type WeightsMinStake = StorageValue<_, u64, ValueQuery, DefaultWeightsMinStake>; + #[pallet::storage] + /// --- MAP (netuid, who) --> (hash, weight) | Returns the hash and weight committed by an account for a given netuid. 
+ pub type WeightCommits = StorageDoubleMap< + _, + Twox64Concat, + u16, + Twox64Concat, + T::AccountId, + (H256, u64), + OptionQuery, + >; /// ================== /// ==== Genesis ===== /// ================== + #[pallet::storage] // --- Storage for migration run status + pub type HasMigrationRun = StorageMap<_, Identity, Vec, bool, ValueQuery>; #[pallet::genesis_config] pub struct GenesisConfig { @@ -1164,1113 +1245,6 @@ pub mod pallet { } } - #[pallet::genesis_build] - impl BuildGenesisConfig for GenesisConfig { - fn build(&self) { - // Set initial total issuance from balances - TotalIssuance::::put(self.balances_issuance); - - // Subnet config values - let netuid: u16 = 3; - let tempo = 99; - let max_uids = 4096; - - // The functions for initializing new networks/setting defaults cannot be run directly from genesis functions like extrinsics would - // --- Set this network uid to alive. - NetworksAdded::::insert(netuid, true); - - // --- Fill tempo memory item. - Tempo::::insert(netuid, tempo); - - // --- Fill modality item. - // Only modality 0 exists (text) - NetworkModality::::insert(netuid, 0); - - // Make network parameters explicit. - if !Tempo::::contains_key(netuid) { - Tempo::::insert(netuid, Tempo::::get(netuid)); - } - if !Kappa::::contains_key(netuid) { - Kappa::::insert(netuid, Kappa::::get(netuid)); - } - if !Difficulty::::contains_key(netuid) { - Difficulty::::insert(netuid, Difficulty::::get(netuid)); - } - if !MaxAllowedUids::::contains_key(netuid) { - MaxAllowedUids::::insert(netuid, MaxAllowedUids::::get(netuid)); - } - if !ImmunityPeriod::::contains_key(netuid) { - ImmunityPeriod::::insert(netuid, ImmunityPeriod::::get(netuid)); - } - if !ActivityCutoff::::contains_key(netuid) { - ActivityCutoff::::insert(netuid, ActivityCutoff::::get(netuid)); - } - if !EmissionValues::::contains_key(netuid) { - EmissionValues::::insert(netuid, EmissionValues::::get(netuid)); - } - if !MaxWeightsLimit::::contains_key(netuid) { - MaxWeightsLimit::::insert(netuid, MaxWeightsLimit::::get(netuid)); - } - if !MinAllowedWeights::::contains_key(netuid) { - MinAllowedWeights::::insert(netuid, MinAllowedWeights::::get(netuid)); - } - if !RegistrationsThisInterval::::contains_key(netuid) { - RegistrationsThisInterval::::insert( - netuid, - RegistrationsThisInterval::::get(netuid), - ); - } - if !POWRegistrationsThisInterval::::contains_key(netuid) { - POWRegistrationsThisInterval::::insert( - netuid, - POWRegistrationsThisInterval::::get(netuid), - ); - } - if !BurnRegistrationsThisInterval::::contains_key(netuid) { - BurnRegistrationsThisInterval::::insert( - netuid, - BurnRegistrationsThisInterval::::get(netuid), - ); - } - - // Set max allowed uids - MaxAllowedUids::::insert(netuid, max_uids); - - let mut next_uid = 0u16; - - for (coldkey, hotkeys) in self.stakes.iter() { - for (hotkey, stake_uid) in hotkeys.iter() { - let (stake, uid) = stake_uid; - - // Expand Yuma Consensus with new position. - Rank::::mutate(netuid, |v| v.push(0)); - Trust::::mutate(netuid, |v| v.push(0)); - Active::::mutate(netuid, |v| v.push(true)); - Emission::::mutate(netuid, |v| v.push(0)); - Consensus::::mutate(netuid, |v| v.push(0)); - Incentive::::mutate(netuid, |v| v.push(0)); - Dividends::::mutate(netuid, |v| v.push(0)); - LastUpdate::::mutate(netuid, |v| v.push(0)); - PruningScores::::mutate(netuid, |v| v.push(0)); - ValidatorTrust::::mutate(netuid, |v| v.push(0)); - ValidatorPermit::::mutate(netuid, |v| v.push(false)); - - // Insert account information. 
- Keys::::insert(netuid, uid, hotkey.clone()); // Make hotkey - uid association. - Uids::::insert(netuid, hotkey.clone(), uid); // Make uid - hotkey association. - BlockAtRegistration::::insert(netuid, uid, 0); // Fill block at registration. - IsNetworkMember::::insert(hotkey.clone(), netuid, true); // Fill network is member. - - // Fill stake information. - Owner::::insert(hotkey.clone(), coldkey.clone()); - - // Update OwnedHotkeys map - let mut hotkeys = OwnedHotkeys::::get(coldkey); - if !hotkeys.contains(hotkey) { - hotkeys.push(hotkey.clone()); - OwnedHotkeys::::insert(coldkey, hotkeys); - } - - TotalHotkeyStake::::insert(hotkey.clone(), stake); - TotalColdkeyStake::::insert( - coldkey.clone(), - TotalColdkeyStake::::get(coldkey).saturating_add(*stake), - ); - - // Update total issuance value - TotalIssuance::::put(TotalIssuance::::get().saturating_add(*stake)); - - Stake::::insert(hotkey.clone(), coldkey.clone(), stake); - - // Update StakingHotkeys map - let mut staking_hotkeys = StakingHotkeys::::get(coldkey); - if !staking_hotkeys.contains(hotkey) { - staking_hotkeys.push(hotkey.clone()); - StakingHotkeys::::insert(coldkey, staking_hotkeys); - } - - next_uid = next_uid.checked_add(1).expect( - "should not have total number of hotkey accounts larger than u16::MAX", - ); - } - } - - // Set correct length for Subnet neurons - SubnetworkN::::insert(netuid, next_uid); - - // --- Increase total network count. - TotalNetworks::::mutate(|n| { - *n = n.checked_add(1).expect( - "should not have total number of networks larger than u16::MAX in genesis", - ) - }); - - // Get the root network uid. - let root_netuid: u16 = 0; - - // Set the root network as added. - NetworksAdded::::insert(root_netuid, true); - - // Increment the number of total networks. - TotalNetworks::::mutate(|n| { - *n = n.checked_add(1).expect( - "should not have total number of networks larger than u16::MAX in genesis", - ) - }); - // Set the number of validators to 1. - SubnetworkN::::insert(root_netuid, 0); - - // Set the maximum number to the number of senate members. - MaxAllowedUids::::insert(root_netuid, 64u16); - - // Set the maximum number to the number of validators to all members. - MaxAllowedValidators::::insert(root_netuid, 64u16); - - // Set the min allowed weights to zero, no weights restrictions. - MinAllowedWeights::::insert(root_netuid, 0); - - // Set the max weight limit to infinity, no weight restrictions. - MaxWeightsLimit::::insert(root_netuid, u16::MAX); - - // Add default root tempo. - Tempo::::insert(root_netuid, 100); - - // Set the root network as open. - NetworkRegistrationAllowed::::insert(root_netuid, true); - - // Set target registrations for validators as 1 per block. 
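// Sketch: the "read, push-if-missing, write back" pattern used above for OwnedHotkeys and
// StakingHotkeys during genesis, in plain Rust. The String keys and the Storage alias are
// stand-ins for the pallet's coldkey -> Vec<hotkey> maps.
use std::collections::HashMap;

type Storage = HashMap<String, Vec<String>>;

fn push_unique(storage: &mut Storage, coldkey: &str, hotkey: &str) {
    let entry = storage.entry(coldkey.to_string()).or_default();
    if !entry.iter().any(|h| h.as_str() == hotkey) {
        entry.push(hotkey.to_string());
    }
}

fn main() {
    let mut owned_hotkeys = Storage::new();
    push_unique(&mut owned_hotkeys, "cold-1", "hot-a");
    push_unique(&mut owned_hotkeys, "cold-1", "hot-a"); // duplicate is ignored
    push_unique(&mut owned_hotkeys, "cold-1", "hot-b");
    assert_eq!(owned_hotkeys["cold-1"], vec!["hot-a", "hot-b"]);
}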
- TargetRegistrationsPerInterval::::insert(root_netuid, 1); - } - } - - // ================ - // ==== Hooks ===== - // ================ - - #[pallet::hooks] - impl Hooks> for Pallet { - fn on_idle(_n: BlockNumberFor, _remaining_weight: Weight) -> Weight { - Weight::zero() - } - - fn on_initialize(_block_number: BlockNumberFor) -> Weight { - let mut total_weight = Weight::zero(); - - // Create a Weight::MAX value to pass to swap_coldkeys_this_block - let max_weight = Weight::MAX; - - // Perform coldkey swapping - let swap_weight = match Self::swap_coldkeys_this_block(&max_weight) { - Ok(weight_used) => weight_used, - Err(e) => { - log::error!("Error while swapping coldkeys: {:?}", e); - Weight::zero() - } - }; - total_weight = total_weight.saturating_add(swap_weight); - - // Perform block step - let block_step_result = Self::block_step(); - match block_step_result { - Ok(_) => { - log::debug!("Successfully ran block step."); - total_weight = total_weight.saturating_add( - Weight::from_parts(110_634_229_000_u64, 0) - .saturating_add(T::DbWeight::get().reads(8304_u64)) - .saturating_add(T::DbWeight::get().writes(110_u64)), - ); - } - Err(e) => { - log::error!("Error while stepping block: {:?}", e); - total_weight = total_weight.saturating_add( - Weight::from_parts(110_634_229_000_u64, 0) - .saturating_add(T::DbWeight::get().reads(8304_u64)) - .saturating_add(T::DbWeight::get().writes(110_u64)), - ); - } - } - - total_weight - } - - fn on_runtime_upgrade() -> frame_support::weights::Weight { - // --- Migrate storage - use crate::migration; - let mut weight = frame_support::weights::Weight::from_parts(0, 0); - - // Hex encoded foundation coldkey - let hex = hex_literal::hex![ - "feabaafee293d3b76dae304e2f9d885f77d2b17adab9e17e921b321eccd61c77" - ]; - weight = weight - // Initializes storage version (to 1) - .saturating_add(migration::migrate_to_v1_separate_emission::()) - // Storage version v1 -> v2 - .saturating_add(migration::migrate_to_v2_fixed_total_stake::()) - // Doesn't check storage version. TODO: Remove after upgrade - .saturating_add(migration::migrate_create_root_network::()) - // Storage version v2 -> v3 - .saturating_add(migration::migrate_transfer_ownership_to_foundation::( - hex, - )) - // Storage version v3 -> v4 - .saturating_add(migration::migrate_delete_subnet_21::()) - // Storage version v4 -> v5 - .saturating_add(migration::migrate_delete_subnet_3::()) - // Doesn't check storage version. TODO: Remove after upgrade - .saturating_add(migration::migration5_total_issuance::(false)) - // Populate OwnedHotkeys map for coldkey swap. Doesn't update storage vesion. - .saturating_add(migration::migrate_populate_owned::()) - // Populate StakingHotkeys map for coldkey swap. Doesn't update storage vesion. - .saturating_add(migration::migrate_populate_staking_hotkeys::()) - // Fix total coldkey stake. - .saturating_add(migration::migrate_fix_total_coldkey_stake::()); - - weight - } - } - - /// Dispatchable functions allow users to interact with the pallet and invoke state changes. - /// These functions materialize as "extrinsics", which are often compared to transactions. - /// Dispatchable functions must be annotated with a weight and must return a DispatchResult. - #[pallet::call] - impl Pallet { - /// --- Sets the caller weights for the incentive mechanism. The call can be - /// made from the hotkey account so is potentially insecure, however, the damage - /// of changing weights is minimal if caught early. 
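// Sketch: the weight bookkeeping pattern used by on_initialize above, where components are
// accumulated with saturating_add and DB access is priced through the runtime's DbWeight.
// Assumes frame_support as a dependency; the read/write costs below are illustrative only.
use frame_support::weights::{RuntimeDbWeight, Weight};

fn block_step_weight(db: RuntimeDbWeight) -> Weight {
    Weight::zero()
        .saturating_add(Weight::from_parts(110_634_229_000, 0)) // benchmarked ref-time part
        .saturating_add(db.reads(8304))
        .saturating_add(db.writes(110))
}

fn main() {
    let db = RuntimeDbWeight { read: 25_000_000, write: 100_000_000 };
    println!("block step weight: {:?}", block_step_weight(db));
}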
This function includes all the - /// checks that the passed weights meet the requirements. Stored as u16s they represent - /// rational values in the range [0,1] which sum to 1 and can be interpreted as - /// probabilities. The specific weights determine how inflation propagates outward - /// from this peer. - /// - /// Note: The 16 bit integers weights should represent 1.0 as the max u16. - /// However, the function normalizes all integers to u16_max anyway. This means that if the sum of all - /// elements is larger or smaller than the amount of elements * u16_max, all elements - /// will be corrected for this deviation. - /// - /// # Args: - /// * `origin`: (Origin): - /// - The caller, a hotkey who wishes to set their weights. - /// - /// * `netuid` (u16): - /// - The network uid we are setting these weights on. - /// - /// * `dests` (Vec): - /// - The edge endpoint for the weight, i.e. j for w_ij. - /// - /// * 'weights' (Vec): - /// - The u16 integer encoded weights. Interpreted as rational - /// values in the range [0,1]. They must sum to in32::MAX. - /// - /// * 'version_key' ( u64 ): - /// - The network version key to check if the validator is up to date. - /// - /// # Event: - /// * WeightsSet; - /// - On successfully setting the weights on chain. - /// - /// # Raises: - /// * 'SubNetworkDoesNotExist': - /// - Attempting to set weights on a non-existent network. - /// - /// * 'NotRegistered': - /// - Attempting to set weights from a non registered account. - /// - /// * 'WeightVecNotEqualSize': - /// - Attempting to set weights with uids not of same length. - /// - /// * 'DuplicateUids': - /// - Attempting to set weights with duplicate uids. - /// - /// * 'UidsLengthExceedUidsInSubNet': - /// - Attempting to set weights above the max allowed uids. - /// - /// * 'UidVecContainInvalidOne': - /// - Attempting to set weights with invalid uids. - /// - /// * 'WeightVecLengthIsLow': - /// - Attempting to set weights with fewer weights than min. - /// - /// * 'MaxWeightExceeded': - /// - Attempting to set weights with max value exceeding limit. - #[pallet::call_index(0)] - #[pallet::weight((Weight::from_parts(22_060_000_000, 0) - .saturating_add(T::DbWeight::get().reads(4106)) - .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] - pub fn set_weights( - origin: OriginFor, - netuid: u16, - dests: Vec, - weights: Vec, - version_key: u64, - ) -> DispatchResult { - if !Self::get_commit_reveal_weights_enabled(netuid) { - return Self::do_set_weights(origin, netuid, dests, weights, version_key); - } - - Err(Error::::CommitRevealEnabled.into()) - } - - /// ---- Used to commit a hash of your weight values to later be revealed. - /// - /// # Args: - /// * `origin`: (`::RuntimeOrigin`): - /// - The signature of the committing hotkey. - /// - /// * `netuid` (`u16`): - /// - The u16 network identifier. - /// - /// * `commit_hash` (`H256`): - /// - The hash representing the committed weights. - /// - /// # Raises: - /// * `WeightsCommitNotAllowed`: - /// - Attempting to commit when it is not allowed. - /// - #[pallet::call_index(96)] - #[pallet::weight((Weight::from_parts(46_000_000, 0) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] - pub fn commit_weights( - origin: T::RuntimeOrigin, - netuid: u16, - commit_hash: H256, - ) -> DispatchResult { - Self::do_commit_weights(origin, netuid, commit_hash) - } - - /// ---- Used to reveal the weights for a previously committed hash. 
- /// - /// # Args: - /// * `origin`: (`::RuntimeOrigin`): - /// - The signature of the revealing hotkey. - /// - /// * `netuid` (`u16`): - /// - The u16 network identifier. - /// - /// * `uids` (`Vec`): - /// - The uids for the weights being revealed. - /// - /// * `values` (`Vec`): - /// - The values of the weights being revealed. - /// - /// * `salt` (`Vec`): - /// - The random salt to protect from brute-force guessing attack in case of small weight changes bit-wise. - /// - /// * `version_key` (`u64`): - /// - The network version key. - /// - /// # Raises: - /// * `NoWeightsCommitFound`: - /// - Attempting to reveal weights without an existing commit. - /// - /// * `InvalidRevealCommitHashNotMatchTempo`: - /// - Attempting to reveal weights outside the valid tempo. - /// - /// * `InvalidRevealCommitHashNotMatch`: - /// - The revealed hash does not match the committed hash. - /// - #[pallet::call_index(97)] - #[pallet::weight((Weight::from_parts(103_000_000, 0) - .saturating_add(T::DbWeight::get().reads(11)) - .saturating_add(T::DbWeight::get().writes(3)), DispatchClass::Normal, Pays::No))] - pub fn reveal_weights( - origin: T::RuntimeOrigin, - netuid: u16, - uids: Vec, - values: Vec, - salt: Vec, - version_key: u64, - ) -> DispatchResult { - Self::do_reveal_weights(origin, netuid, uids, values, salt, version_key) - } - - /// # Args: - /// * `origin`: (Origin): - /// - The caller, a hotkey who wishes to set their weights. - /// - /// * `netuid` (u16): - /// - The network uid we are setting these weights on. - /// - /// * `hotkey` (T::AccountId): - /// - The hotkey associated with the operation and the calling coldkey. - /// - /// * `dests` (Vec): - /// - The edge endpoint for the weight, i.e. j for w_ij. - /// - /// * 'weights' (Vec): - /// - The u16 integer encoded weights. Interpreted as rational - /// values in the range [0,1]. They must sum to in32::MAX. - /// - /// * 'version_key' ( u64 ): - /// - The network version key to check if the validator is up to date. - /// - /// # Event: - /// - /// * WeightsSet; - /// - On successfully setting the weights on chain. - /// - /// # Raises: - /// - /// * NonAssociatedColdKey; - /// - Attempting to set weights on a non-associated cold key. - /// - /// * 'SubNetworkDoesNotExist': - /// - Attempting to set weights on a non-existent network. - /// - /// * 'NotRootSubnet': - /// - Attempting to set weights on a subnet that is not the root network. - /// - /// * 'WeightVecNotEqualSize': - /// - Attempting to set weights with uids not of same length. - /// - /// * 'UidVecContainInvalidOne': - /// - Attempting to set weights with invalid uids. - /// - /// * 'NotRegistered': - /// - Attempting to set weights from a non registered account. - /// - /// * 'WeightVecLengthIsLow': - /// - Attempting to set weights with fewer weights than min. - /// - /// * 'IncorrectWeightVersionKey': - /// - Attempting to set weights with the incorrect network version key. - /// - /// * 'SettingWeightsTooFast': - /// - Attempting to set weights too fast. - /// - /// * 'WeightVecLengthIsLow': - /// - Attempting to set weights with fewer weights than min. - /// - /// * 'MaxWeightExceeded': - /// - Attempting to set weights with max value exceeding limit. 
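// Sketch: one reading of the normalization described in the set_weights docs above. The
// incoming u16 weights are treated as relative values and rescaled so that they sum to
// u16::MAX (1.0 in the fixed-point encoding). Illustration of the encoding only; the
// pallet's own normalization routine is authoritative.
fn normalize_weights(weights: &[u16]) -> Vec<u16> {
    let sum: u64 = weights.iter().map(|&w| w as u64).sum();
    if sum == 0 {
        return weights.to_vec();
    }
    weights
        .iter()
        .map(|&w| ((w as u64 * u16::MAX as u64) / sum) as u16)
        .collect()
}

fn main() {
    let normalized = normalize_weights(&[1, 1, 2]);
    println!("{normalized:?}"); // roughly 25%, 25%, 50% of u16::MAX
    assert!(normalized.iter().map(|&w| w as u32).sum::<u32>() <= u16::MAX as u32);
}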
- /// - #[pallet::call_index(8)] - #[pallet::weight((Weight::from_parts(10_151_000_000, 0) - .saturating_add(T::DbWeight::get().reads(4104)) - .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] - pub fn set_root_weights( - origin: OriginFor, - netuid: u16, - hotkey: T::AccountId, - dests: Vec, - weights: Vec, - version_key: u64, - ) -> DispatchResult { - Self::do_set_root_weights(origin, netuid, hotkey, dests, weights, version_key) - } - - /// --- Sets the key as a delegate. - /// - /// # Args: - /// * 'origin': (Origin): - /// - The signature of the caller's coldkey. - /// - /// * 'hotkey' (T::AccountId): - /// - The hotkey we are delegating (must be owned by the coldkey.) - /// - /// * 'take' (u64): - /// - The stake proportion that this hotkey takes from delegations. - /// - /// # Event: - /// * DelegateAdded; - /// - On successfully setting a hotkey as a delegate. - /// - /// # Raises: - /// * 'NotRegistered': - /// - The hotkey we are delegating is not registered on the network. - /// - /// * 'NonAssociatedColdKey': - /// - The hotkey we are delegating is not owned by the calling coldket. - /// - #[pallet::call_index(1)] - #[pallet::weight((Weight::from_parts(79_000_000, 0) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(3)), DispatchClass::Normal, Pays::No))] - pub fn become_delegate(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { - Self::do_become_delegate(origin, hotkey, Self::get_default_take()) - } - - /// --- Allows delegates to decrease its take value. - /// - /// # Args: - /// * 'origin': (::Origin): - /// - The signature of the caller's coldkey. - /// - /// * 'hotkey' (T::AccountId): - /// - The hotkey we are delegating (must be owned by the coldkey.) - /// - /// * 'netuid' (u16): - /// - Subnet ID to decrease take for - /// - /// * 'take' (u16): - /// - The new stake proportion that this hotkey takes from delegations. - /// The new value can be between 0 and 11_796 and should be strictly - /// lower than the previous value. It T is the new value (rational number), - /// the the parameter is calculated as [65535 * T]. For example, 1% would be - /// [0.01 * 65535] = [655.35] = 655 - /// - /// # Event: - /// * TakeDecreased; - /// - On successfully setting a decreased take for this hotkey. - /// - /// # Raises: - /// * 'NotRegistered': - /// - The hotkey we are delegating is not registered on the network. - /// - /// * 'NonAssociatedColdKey': - /// - The hotkey we are delegating is not owned by the calling coldkey. - /// - /// * 'DelegateTakeTooLow': - /// - The delegate is setting a take which is not lower than the previous. - /// - #[pallet::call_index(65)] - #[pallet::weight((0, DispatchClass::Normal, Pays::No))] - pub fn decrease_take( - origin: OriginFor, - hotkey: T::AccountId, - take: u16, - ) -> DispatchResult { - Self::do_decrease_take(origin, hotkey, take) - } - - /// --- Allows delegates to increase its take value. This call is rate-limited. - /// - /// # Args: - /// * 'origin': (::Origin): - /// - The signature of the caller's coldkey. - /// - /// * 'hotkey' (T::AccountId): - /// - The hotkey we are delegating (must be owned by the coldkey.) - /// - /// * 'take' (u16): - /// - The new stake proportion that this hotkey takes from delegations. - /// The new value can be between 0 and 11_796 and should be strictly - /// greater than the previous value. T is the new value (rational number), - /// the the parameter is calculated as [65535 * T]. 
For example, 1% would be - /// [0.01 * 65535] = [655.35] = 655 - /// - /// # Event: - /// * TakeIncreased; - /// - On successfully setting a increased take for this hotkey. - /// - /// # Raises: - /// * 'NotRegistered': - /// - The hotkey we are delegating is not registered on the network. - /// - /// * 'NonAssociatedColdKey': - /// - The hotkey we are delegating is not owned by the calling coldkey. - /// - /// * 'DelegateTakeTooHigh': - /// - The delegate is setting a take which is not greater than the previous. - /// - #[pallet::call_index(66)] - #[pallet::weight((0, DispatchClass::Normal, Pays::No))] - pub fn increase_take( - origin: OriginFor, - hotkey: T::AccountId, - take: u16, - ) -> DispatchResult { - Self::do_increase_take(origin, hotkey, take) - } - - /// --- Adds stake to a hotkey. The call is made from the - /// coldkey account linked in the hotkey. - /// Only the associated coldkey is allowed to make staking and - /// unstaking requests. This protects the neuron against - /// attacks on its hotkey running in production code. - /// - /// # Args: - /// * 'origin': (Origin): - /// - The signature of the caller's coldkey. - /// - /// * 'hotkey' (T::AccountId): - /// - The associated hotkey account. - /// - /// * 'amount_staked' (u64): - /// - The amount of stake to be added to the hotkey staking account. - /// - /// # Event: - /// * StakeAdded; - /// - On the successfully adding stake to a global account. - /// - /// # Raises: - /// * 'NotEnoughBalanceToStake': - /// - Not enough balance on the coldkey to add onto the global account. - /// - /// * 'NonAssociatedColdKey': - /// - The calling coldkey is not associated with this hotkey. - /// - /// * 'BalanceWithdrawalError': - /// - Errors stemming from transaction pallet. - /// - #[pallet::call_index(2)] - #[pallet::weight((Weight::from_parts(124_000_000, 0) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(7)), DispatchClass::Normal, Pays::No))] - pub fn add_stake( - origin: OriginFor, - hotkey: T::AccountId, - amount_staked: u64, - ) -> DispatchResult { - Self::do_add_stake(origin, hotkey, amount_staked) - } - - /// Remove stake from the staking account. The call must be made - /// from the coldkey account attached to the neuron metadata. Only this key - /// has permission to make staking and unstaking requests. - /// - /// # Args: - /// * 'origin': (Origin): - /// - The signature of the caller's coldkey. - /// - /// * 'hotkey' (T::AccountId): - /// - The associated hotkey account. - /// - /// * 'amount_unstaked' (u64): - /// - The amount of stake to be added to the hotkey staking account. - /// - /// # Event: - /// * StakeRemoved; - /// - On the successfully removing stake from the hotkey account. - /// - /// # Raises: - /// * 'NotRegistered': - /// - Thrown if the account we are attempting to unstake from is non existent. - /// - /// * 'NonAssociatedColdKey': - /// - Thrown if the coldkey does not own the hotkey we are unstaking from. - /// - /// * 'NotEnoughStakeToWithdraw': - /// - Thrown if there is not enough stake on the hotkey to withdwraw this amount. 
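// Sketch: the u16 take encoding referenced by decrease_take / increase_take above. A take
// of T (a fraction in [0, 1]) is stored as round(T * 65535), so 1% maps to 655 and the
// 11_796 ceiling mentioned in the docs corresponds to roughly 18%.
fn take_from_percent(percent: f64) -> u16 {
    ((percent / 100.0) * u16::MAX as f64).round() as u16
}

fn take_to_percent(take: u16) -> f64 {
    take as f64 / u16::MAX as f64 * 100.0
}

fn main() {
    assert_eq!(take_from_percent(1.0), 655);
    println!("11_796 as a percentage: {:.2}%", take_to_percent(11_796)); // ~18.00%
}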
- /// - #[pallet::call_index(3)] - #[pallet::weight((Weight::from_parts(111_000_000, 0) - .saturating_add(Weight::from_parts(0, 43991)) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(7)), DispatchClass::Normal, Pays::No))] - pub fn remove_stake( - origin: OriginFor, - hotkey: T::AccountId, - amount_unstaked: u64, - ) -> DispatchResult { - Self::do_remove_stake(origin, hotkey, amount_unstaked) - } - - /// Serves or updates axon /promethteus information for the neuron associated with the caller. If the caller is - /// already registered the metadata is updated. If the caller is not registered this call throws NotRegistered. - /// - /// # Args: - /// * 'origin': (Origin): - /// - The signature of the caller. - /// - /// * 'netuid' (u16): - /// - The u16 network identifier. - /// - /// * 'version' (u64): - /// - The bittensor version identifier. - /// - /// * 'ip' (u64): - /// - The endpoint ip information as a u128 encoded integer. - /// - /// * 'port' (u16): - /// - The endpoint port information as a u16 encoded integer. - /// - /// * 'ip_type' (u8): - /// - The endpoint ip version as a u8, 4 or 6. - /// - /// * 'protocol' (u8): - /// - UDP:1 or TCP:0 - /// - /// * 'placeholder1' (u8): - /// - Placeholder for further extra params. - /// - /// * 'placeholder2' (u8): - /// - Placeholder for further extra params. - /// - /// # Event: - /// * AxonServed; - /// - On successfully serving the axon info. - /// - /// # Raises: - /// * 'SubNetworkDoesNotExist': - /// - Attempting to set weights on a non-existent network. - /// - /// * 'NotRegistered': - /// - Attempting to set weights from a non registered account. - /// - /// * 'InvalidIpType': - /// - The ip type is not 4 or 6. - /// - /// * 'InvalidIpAddress': - /// - The numerically encoded ip address does not resolve to a proper ip. - /// - /// * 'ServingRateLimitExceeded': - /// - Attempting to set prometheus information withing the rate limit min. - /// - #[pallet::call_index(4)] - #[pallet::weight((Weight::from_parts(46_000_000, 0) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] - pub fn serve_axon( - origin: OriginFor, - netuid: u16, - version: u32, - ip: u128, - port: u16, - ip_type: u8, - protocol: u8, - placeholder1: u8, - placeholder2: u8, - ) -> DispatchResult { - Self::do_serve_axon( - origin, - netuid, - version, - ip, - port, - ip_type, - protocol, - placeholder1, - placeholder2, - ) - } - - /// ---- Set prometheus information for the neuron. - /// # Args: - /// * 'origin': (Origin): - /// - The signature of the calling hotkey. - /// - /// * 'netuid' (u16): - /// - The u16 network identifier. - /// - /// * 'version' (u16): - /// - The bittensor version identifier. - /// - /// * 'ip' (u128): - /// - The prometheus ip information as a u128 encoded integer. - /// - /// * 'port' (u16): - /// - The prometheus port information as a u16 encoded integer. - /// - /// * 'ip_type' (u8): - /// - The ip type v4 or v6. - /// - #[pallet::call_index(5)] - #[pallet::weight((Weight::from_parts(45_000_000, 0) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] - pub fn serve_prometheus( - origin: OriginFor, - netuid: u16, - version: u32, - ip: u128, - port: u16, - ip_type: u8, - ) -> DispatchResult { - Self::do_serve_prometheus(origin, netuid, version, ip, port, ip_type) - } - - /// ---- Registers a new neuron to the subnetwork. 
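// Sketch: how an endpoint address can be packed into the u128 `ip` argument taken by
// serve_axon / serve_prometheus above, with `ip_type` distinguishing v4 from v6. The
// packing convention (IPv4 in the low 32 bits) is an assumption for illustration.
use std::net::{Ipv4Addr, Ipv6Addr};

fn encode_ip_v4(ip: Ipv4Addr) -> (u128, u8) {
    (u32::from(ip) as u128, 4)
}

fn encode_ip_v6(ip: Ipv6Addr) -> (u128, u8) {
    (u128::from(ip), 6)
}

fn main() {
    let (packed, ip_type) = encode_ip_v4(Ipv4Addr::new(192, 168, 0, 1));
    println!("ip={packed} ip_type={ip_type}");
    let (packed6, ip_type6) = encode_ip_v6("::1".parse().unwrap());
    println!("ip={packed6} ip_type={ip_type6}");
}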
- /// - /// # Args: - /// * 'origin': (Origin): - /// - The signature of the calling hotkey. - /// - /// * 'netuid' (u16): - /// - The u16 network identifier. - /// - /// * 'block_number' ( u64 ): - /// - Block hash used to prove work done. - /// - /// * 'nonce' ( u64 ): - /// - Positive integer nonce used in POW. - /// - /// * 'work' ( Vec ): - /// - Vector encoded bytes representing work done. - /// - /// * 'hotkey' ( T::AccountId ): - /// - Hotkey to be registered to the network. - /// - /// * 'coldkey' ( T::AccountId ): - /// - Associated coldkey account. - /// - /// # Event: - /// * NeuronRegistered; - /// - On successfully registering a uid to a neuron slot on a subnetwork. - /// - /// # Raises: - /// * 'SubNetworkDoesNotExist': - /// - Attempting to register to a non existent network. - /// - /// * 'TooManyRegistrationsThisBlock': - /// - This registration exceeds the total allowed on this network this block. - /// - /// * 'HotKeyAlreadyRegisteredInSubNet': - /// - The hotkey is already registered on this network. - /// - /// * 'InvalidWorkBlock': - /// - The work has been performed on a stale, future, or non existent block. - /// - /// * 'InvalidDifficulty': - /// - The work does not match the difficulty. - /// - /// * 'InvalidSeal': - /// - The seal is incorrect. - /// - #[pallet::call_index(6)] - #[pallet::weight((Weight::from_parts(192_000_000, 0) - .saturating_add(T::DbWeight::get().reads(24)) - .saturating_add(T::DbWeight::get().writes(22)), DispatchClass::Normal, Pays::No))] - pub fn register( - origin: OriginFor, - netuid: u16, - block_number: u64, - nonce: u64, - work: Vec, - hotkey: T::AccountId, - coldkey: T::AccountId, - ) -> DispatchResult { - Self::do_registration(origin, netuid, block_number, nonce, work, hotkey, coldkey) - } - - /// Register the hotkey to root network - #[pallet::call_index(62)] - #[pallet::weight((Weight::from_parts(164_000_000, 0) - .saturating_add(T::DbWeight::get().reads(23)) - .saturating_add(T::DbWeight::get().writes(20)), DispatchClass::Normal, Pays::No))] - pub fn root_register(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { - Self::do_root_register(origin, hotkey) - } - - /// Attempt to adjust the senate membership to include a hotkey - #[pallet::call_index(63)] - #[pallet::weight((Weight::from_parts(0, 0) - .saturating_add(T::DbWeight::get().reads(0)) - .saturating_add(T::DbWeight::get().writes(0)), DispatchClass::Normal, Pays::Yes))] - pub fn adjust_senate(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { - Self::do_adjust_senate(origin, hotkey) - } - - /// User register a new subnetwork via burning token - #[pallet::call_index(7)] - #[pallet::weight((Weight::from_parts(177_000_000, 0) - .saturating_add(T::DbWeight::get().reads(26)) - .saturating_add(T::DbWeight::get().writes(24)), DispatchClass::Normal, Pays::No))] - pub fn burned_register( - origin: OriginFor, - netuid: u16, - hotkey: T::AccountId, - ) -> DispatchResult { - Self::do_burned_registration(origin, netuid, hotkey) - } - - /// The extrinsic for user to change its hotkey - ///#[pallet::call_index(70)] - ///#[pallet::weight((Weight::from_parts(1_940_000_000, 0) - ///.saturating_add(T::DbWeight::get().reads(272)) - ///.saturating_add(T::DbWeight::get().writes(527)), DispatchClass::Operational, Pays::No))] - ///pub fn swap_hotkey( - /// origin: OriginFor, - /// hotkey: T::AccountId, - /// new_hotkey: T::AccountId, - ///) -> DispatchResultWithPostInfo { - /// Self::do_swap_hotkey(origin, &hotkey, &new_hotkey) - ///} - - /// The extrinsic for user to change 
the coldkey associated with their account. - /// - /// # Arguments - /// - /// * `origin` - The origin of the call, must be signed by the old coldkey. - /// * `old_coldkey` - The current coldkey associated with the account. - /// * `new_coldkey` - The new coldkey to be associated with the account. - /// - /// # Returns - /// - /// Returns a `DispatchResultWithPostInfo` indicating success or failure of the operation. - /// - /// # Weight - /// - /// Weight is calculated based on the number of database reads and writes. - #[pallet::call_index(71)] - #[pallet::weight((Weight::from_parts(1_940_000_000, 0) - .saturating_add(T::DbWeight::get().reads(272)) - .saturating_add(T::DbWeight::get().writes(527)), DispatchClass::Operational, Pays::No))] - pub fn swap_coldkey( - origin: OriginFor, - new_coldkey: T::AccountId, - ) -> DispatchResultWithPostInfo { - Self::do_swap_coldkey(origin, &new_coldkey) - } - /// Unstakes all tokens associated with a hotkey and transfers them to a new coldkey. - /// - /// # Arguments - /// - /// * `origin` - The origin of the call, must be signed by the current coldkey. - /// * `hotkey` - The hotkey associated with the stakes to be unstaked. - /// * `new_coldkey` - The new coldkey to receive the unstaked tokens. - /// - /// # Returns - /// - /// Returns a `DispatchResult` indicating success or failure of the operation. - /// - /// # Weight - /// - /// Weight is calculated based on the number of database reads and writes. - #[cfg(test)] - #[pallet::call_index(72)] - #[pallet::weight((Weight::from_parts(21_000_000, 0) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)), DispatchClass::Operational, Pays::No))] - pub fn schedule_coldkey_swap( - origin: OriginFor, - new_coldkey: T::AccountId, - work: Vec, - block_number: u64, - nonce: u64, - ) -> DispatchResult { - // Attain the calling coldkey from the origin. - let old_coldkey: T::AccountId = ensure_signed(origin)?; - Self::do_schedule_coldkey_swap(&old_coldkey, &new_coldkey, work, block_number, nonce) - } - - // ---- SUDO ONLY FUNCTIONS ------------------------------------------------------------ - - // ================================== - // ==== Parameter Sudo calls ======== - // ================================== - // Each function sets the corresponding hyper paramter on the specified network - // Args: - // * 'origin': (Origin): - // - The caller, must be sudo. - // - // * `netuid` (u16): - // - The network identifier. - // - // * `hyperparameter value` (u16): - // - The value of the hyper parameter. - // - - /// Authenticates a council proposal and dispatches a function call with `Root` origin. - /// - /// The dispatch origin for this call must be a council majority. - /// - /// ## Complexity - /// - O(1). - #[pallet::call_index(51)] - #[pallet::weight((Weight::from_parts(0, 0), DispatchClass::Operational, Pays::No))] - pub fn sudo( - origin: OriginFor, - call: Box, - ) -> DispatchResultWithPostInfo { - // This is a public call, so we ensure that the origin is a council majority. - T::CouncilOrigin::ensure_origin(origin)?; - - let result = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into()); - let error = result.map(|_| ()).map_err(|e| e.error); - Self::deposit_event(Event::Sudid(error)); - - return result; - } - - /// Authenticates a council proposal and dispatches a function call with `Root` origin. - /// This function does not check the weight of the call, and instead allows the - /// user to specify the weight of the call. 
- /// - /// The dispatch origin for this call must be a council majority. - /// - /// ## Complexity - /// - O(1). - #[allow(deprecated)] - #[pallet::call_index(52)] - #[pallet::weight((*weight, call.get_dispatch_info().class, Pays::No))] - pub fn sudo_unchecked_weight( - origin: OriginFor, - call: Box, - weight: Weight, - ) -> DispatchResultWithPostInfo { - // We dont need to check the weight witness, suppress warning. - // See https://github.com/paritytech/polkadot-sdk/pull/1818. - let _ = weight; - - // This is a public call, so we ensure that the origin is a council majority. - T::CouncilOrigin::ensure_origin(origin)?; - - let result = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into()); - let error = result.map(|_| ()).map_err(|e| e.error); - Self::deposit_event(Event::Sudid(error)); - - return result; - } - - /// User vote on a proposal - #[pallet::call_index(55)] - #[pallet::weight((Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().reads(0)) - .saturating_add(T::DbWeight::get().writes(0)), DispatchClass::Operational))] - pub fn vote( - origin: OriginFor, - hotkey: T::AccountId, - proposal: T::Hash, - #[pallet::compact] index: u32, - approve: bool, - ) -> DispatchResultWithPostInfo { - Self::do_vote_root(origin, &hotkey, proposal, index, approve) - } - - /// User register a new subnetwork - #[pallet::call_index(59)] - #[pallet::weight((Weight::from_parts(157_000_000, 0) - .saturating_add(T::DbWeight::get().reads(16)) - .saturating_add(T::DbWeight::get().writes(30)), DispatchClass::Operational, Pays::No))] - pub fn register_network(origin: OriginFor) -> DispatchResult { - Self::user_add_network(origin) - } - - /// Facility extrinsic for user to get taken from faucet - /// It is only available when pow-faucet feature enabled - /// Just deployed in testnet and devnet for testing purpose - #[pallet::call_index(60)] - #[pallet::weight((Weight::from_parts(91_000_000, 0) - .saturating_add(T::DbWeight::get().reads(27)) - .saturating_add(T::DbWeight::get().writes(22)), DispatchClass::Normal, Pays::No))] - pub fn faucet( - origin: OriginFor, - block_number: u64, - nonce: u64, - work: Vec, - ) -> DispatchResult { - if cfg!(feature = "pow-faucet") { - return Self::do_faucet(origin, block_number, nonce, work); - } - - Err(Error::::FaucetDisabled.into()) - } - - /// Remove a user's subnetwork - /// The caller must be the owner of the network - #[pallet::call_index(61)] - #[pallet::weight((Weight::from_parts(119_000_000, 0) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(31)), DispatchClass::Operational, Pays::No))] - pub fn dissolve_network(origin: OriginFor, netuid: u16) -> DispatchResult { - Self::user_remove_network(origin, netuid) - } - - /// Sets values for liquid alpha - #[pallet::call_index(64)] - #[pallet::weight((0, DispatchClass::Operational, Pays::No))] - pub fn sudo_hotfix_swap_coldkey_delegates( - _origin: OriginFor, - _old_coldkey: T::AccountId, - _new_coldkey: T::AccountId, - ) -> DispatchResult { - Ok(()) - } - } - // ---- Subtensor helper functions. impl Pallet { /// Returns the transaction priority for setting weights. 
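// Sketch: how a signed extension surfaces a computed priority, as in the set-weights
// priority helpers referenced above and in the extension below. Assumes sp_runtime as a
// dependency; deriving the priority from the signer's stake is an assumption here, not
// the pallet's actual rule.
use sp_runtime::transaction_validity::{TransactionValidity, ValidTransaction};

fn validate_with_priority(priority: u64) -> TransactionValidity {
    Ok(ValidTransaction {
        priority,
        ..Default::default()
    })
}

fn main() {
    let stake = 1_000_000u64; // hypothetical stake of the signer
    println!("{:?}", validate_with_priority(stake));
}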
@@ -2333,13 +1307,27 @@ pub enum CallType { Other, } +#[derive(Debug, PartialEq)] +pub enum CustomTransactionError { + ColdkeyInSwapSchedule, +} + +impl From for u8 { + fn from(variant: CustomTransactionError) -> u8 { + match variant { + CustomTransactionError::ColdkeyInSwapSchedule => 0, + } + } +} + #[freeze_struct("61e2b893d5ce6701")] #[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] pub struct SubtensorSignedExtension(pub PhantomData); impl Default for SubtensorSignedExtension where - T::RuntimeCall: Dispatchable, + ::RuntimeCall: + Dispatchable, ::RuntimeCall: IsSubType>, { fn default() -> Self { @@ -2349,7 +1337,8 @@ where impl SubtensorSignedExtension where - T::RuntimeCall: Dispatchable, + ::RuntimeCall: + Dispatchable, ::RuntimeCall: IsSubType>, { pub fn new() -> Self { @@ -2380,14 +1369,15 @@ impl sp_std::fmt::Debug for SubtensorSignedE impl SignedExtension for SubtensorSignedExtension where - T::RuntimeCall: Dispatchable, + ::RuntimeCall: + Dispatchable, ::RuntimeCall: IsSubType>, ::RuntimeCall: IsSubType>, { const IDENTIFIER: &'static str = "SubtensorSignedExtension"; type AccountId = T::AccountId; - type Call = T::RuntimeCall; + type Call = ::RuntimeCall; type AdditionalSigned = (); type Pre = (CallType, u64, Self::AccountId); @@ -2402,18 +1392,6 @@ where _info: &DispatchInfoOf, _len: usize, ) -> TransactionValidity { - // Check if the call is one of the balance transfer types we want to reject - match call.is_sub_type() { - Some(BalancesCall::transfer_allow_death { .. }) - | Some(BalancesCall::transfer_keep_alive { .. }) - | Some(BalancesCall::transfer_all { .. }) => { - if Pallet::::coldkey_in_arbitration(who) { - return Err(TransactionValidityError::Invalid(InvalidTransaction::Call)); - } - } - _ => {} // Other Balances calls are allowed - } - match call.is_sub_type() { Some(Call::commit_weights { netuid, .. }) => { if Self::check_weights_min_stake(who) { @@ -2424,7 +1402,7 @@ where ..Default::default() }) } else { - Err(InvalidTransaction::Call.into()) + Err(InvalidTransaction::Custom(1).into()) } } Some(Call::reveal_weights { netuid, .. }) => { @@ -2436,7 +1414,7 @@ where ..Default::default() }) } else { - Err(InvalidTransaction::Call.into()) + Err(InvalidTransaction::Custom(2).into()) } } Some(Call::set_weights { netuid, .. }) => { @@ -2448,7 +1426,7 @@ where ..Default::default() }) } else { - Err(InvalidTransaction::Call.into()) + Err(InvalidTransaction::Custom(3).into()) } } Some(Call::set_root_weights { netuid, hotkey, .. }) => { @@ -2460,7 +1438,7 @@ where ..Default::default() }) } else { - Err(InvalidTransaction::Call.into()) + Err(InvalidTransaction::Custom(4).into()) } } Some(Call::add_stake { .. }) => Ok(ValidTransaction { @@ -2479,7 +1457,7 @@ where if registrations_this_interval >= (max_registrations_per_interval.saturating_mul(3)) { // If the registration limit for the interval is exceeded, reject the transaction - return InvalidTransaction::ExhaustsResources.into(); + return Err(InvalidTransaction::Custom(5).into()); } Ok(ValidTransaction { priority: Self::get_priority_vanilla(), @@ -2491,8 +1469,9 @@ where ..Default::default() }), Some(Call::dissolve_network { .. 
}) => { - if Pallet::::coldkey_in_arbitration(who) { - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) + if ColdkeySwapScheduled::::contains_key(who) { + InvalidTransaction::Custom(CustomTransactionError::ColdkeyInSwapSchedule.into()) + .into() } else { Ok(ValidTransaction { priority: Self::get_priority_vanilla(), @@ -2500,10 +1479,25 @@ where }) } } - _ => Ok(ValidTransaction { - priority: Self::get_priority_vanilla(), - ..Default::default() - }), + _ => { + if let Some( + BalancesCall::transfer_keep_alive { .. } + | BalancesCall::transfer_all { .. } + | BalancesCall::transfer_allow_death { .. }, + ) = call.is_sub_type() + { + if ColdkeySwapScheduled::::contains_key(who) { + return InvalidTransaction::Custom( + CustomTransactionError::ColdkeyInSwapSchedule.into(), + ) + .into(); + } + } + Ok(ValidTransaction { + priority: Self::get_priority_vanilla(), + ..Default::default() + }) + } } } diff --git a/pallets/subtensor/src/macros/config.rs b/pallets/subtensor/src/macros/config.rs new file mode 100644 index 000000000..414af7e6b --- /dev/null +++ b/pallets/subtensor/src/macros/config.rs @@ -0,0 +1,214 @@ +#![allow(clippy::crate_in_macro_def)] + +use frame_support::pallet_macros::pallet_section; +/// A [`pallet_section`] that defines the errors for a pallet. +/// This can later be imported into the pallet using [`import_section`]. +#[pallet_section] +mod config { + /// Configure the pallet by specifying the parameters and types on which it depends. + #[pallet::config] + pub trait Config: frame_system::Config { + /// call type + type RuntimeCall: Parameter + + Dispatchable + + From> + + IsType<::RuntimeCall> + + From>; + + /// Because this pallet emits events, it depends on the runtime's definition of an event. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// A sudo-able call. + type SudoRuntimeCall: Parameter + + UnfilteredDispatchable + + GetDispatchInfo; + + /// Origin checking for council majority + type CouncilOrigin: EnsureOrigin; + + /// Currency type that will be used to place deposits on neurons + type Currency: fungible::Balanced + + fungible::Mutate; + + /// Senate members with members management functions. + type SenateMembers: crate::MemberManagement; + + /// Interface to allow other pallets to control who can register identities + type TriumvirateInterface: crate::CollectiveInterface; + + /// The scheduler type used for scheduling delayed calls. + type Scheduler: ScheduleAnon< + BlockNumberFor, + LocalCallOf, + PalletsOriginOf, + Hasher = Self::Hashing, + >; + + /// the preimage to store the call data. + type Preimages: QueryPreimage + StorePreimage; + + /// ================================= + /// ==== Initial Value Constants ==== + /// ================================= + + /// Initial currency issuance. + #[pallet::constant] + type InitialIssuance: Get; + /// Initial min allowed weights setting. + #[pallet::constant] + type InitialMinAllowedWeights: Get; + /// Initial Emission Ratio. + #[pallet::constant] + type InitialEmissionValue: Get; + /// Initial max weight limit. + #[pallet::constant] + type InitialMaxWeightsLimit: Get; + /// Tempo for each network. + #[pallet::constant] + type InitialTempo: Get; + /// Initial Difficulty. + #[pallet::constant] + type InitialDifficulty: Get; + /// Initial Max Difficulty. + #[pallet::constant] + type InitialMaxDifficulty: Get; + /// Initial Min Difficulty. + #[pallet::constant] + type InitialMinDifficulty: Get; + /// Initial RAO Recycled. 
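// Sketch: the error-code mapping behind the InvalidTransaction::Custom(..) rejections used
// in the signed extension above. Code 0 (ColdkeyInSwapSchedule) mirrors the enum in the
// diff; giving names to the raw literals 1..=5 is an illustration of how they could be
// folded into the same enum, and those variant names are hypothetical.
#[derive(Debug, PartialEq)]
enum CustomTransactionError {
    ColdkeyInSwapSchedule,        // 0
    StakeTooLowForCommitWeights,  // 1 (hypothetical name)
    StakeTooLowForRevealWeights,  // 2 (hypothetical name)
    StakeTooLowForSetWeights,     // 3 (hypothetical name)
    StakeTooLowForSetRootWeights, // 4 (hypothetical name)
    RegistrationLimitExceeded,    // 5 (hypothetical name)
}

impl From<CustomTransactionError> for u8 {
    fn from(e: CustomTransactionError) -> u8 {
        match e {
            CustomTransactionError::ColdkeyInSwapSchedule => 0,
            CustomTransactionError::StakeTooLowForCommitWeights => 1,
            CustomTransactionError::StakeTooLowForRevealWeights => 2,
            CustomTransactionError::StakeTooLowForSetWeights => 3,
            CustomTransactionError::StakeTooLowForSetRootWeights => 4,
            CustomTransactionError::RegistrationLimitExceeded => 5,
        }
    }
}

fn main() {
    assert_eq!(u8::from(CustomTransactionError::ColdkeyInSwapSchedule), 0);
    assert_eq!(u8::from(CustomTransactionError::RegistrationLimitExceeded), 5);
}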
+ #[pallet::constant] + type InitialRAORecycledForRegistration: Get; + /// Initial Burn. + #[pallet::constant] + type InitialBurn: Get; + /// Initial Max Burn. + #[pallet::constant] + type InitialMaxBurn: Get; + /// Initial Min Burn. + #[pallet::constant] + type InitialMinBurn: Get; + /// Initial adjustment interval. + #[pallet::constant] + type InitialAdjustmentInterval: Get; + /// Initial bonds moving average. + #[pallet::constant] + type InitialBondsMovingAverage: Get; + /// Initial target registrations per interval. + #[pallet::constant] + type InitialTargetRegistrationsPerInterval: Get; + /// Rho constant. + #[pallet::constant] + type InitialRho: Get; + /// Kappa constant. + #[pallet::constant] + type InitialKappa: Get; + /// Max UID constant. + #[pallet::constant] + type InitialMaxAllowedUids: Get; + /// Initial validator context pruning length. + #[pallet::constant] + type InitialValidatorPruneLen: Get; + /// Initial scaling law power. + #[pallet::constant] + type InitialScalingLawPower: Get; + /// Immunity Period Constant. + #[pallet::constant] + type InitialImmunityPeriod: Get; + /// Activity constant. + #[pallet::constant] + type InitialActivityCutoff: Get; + /// Initial max registrations per block. + #[pallet::constant] + type InitialMaxRegistrationsPerBlock: Get; + /// Initial pruning score for each neuron. + #[pallet::constant] + type InitialPruningScore: Get; + /// Initial maximum allowed validators per network. + #[pallet::constant] + type InitialMaxAllowedValidators: Get; + /// Initial default delegation take. + #[pallet::constant] + type InitialDefaultDelegateTake: Get; + /// Initial minimum delegation take. + #[pallet::constant] + type InitialMinDelegateTake: Get; + /// Initial default childkey take. + #[pallet::constant] + type InitialDefaultChildKeyTake: Get; + /// Initial minimum childkey take. + #[pallet::constant] + type InitialMinChildKeyTake: Get; + /// Initial maximum childkey take. + #[pallet::constant] + type InitialMaxChildKeyTake: Get; + /// Initial weights version key. + #[pallet::constant] + type InitialWeightsVersionKey: Get; + /// Initial serving rate limit. + #[pallet::constant] + type InitialServingRateLimit: Get; + /// Initial transaction rate limit. + #[pallet::constant] + type InitialTxRateLimit: Get; + /// Initial delegate take transaction rate limit. + #[pallet::constant] + type InitialTxDelegateTakeRateLimit: Get; + /// Initial childkey take transaction rate limit. + #[pallet::constant] + type InitialTxChildKeyTakeRateLimit: Get; + /// Initial percentage of total stake required to join senate. + #[pallet::constant] + type InitialSenateRequiredStakePercentage: Get; + /// Initial adjustment alpha on burn and pow. + #[pallet::constant] + type InitialAdjustmentAlpha: Get; + /// Initial network immunity period + #[pallet::constant] + type InitialNetworkImmunityPeriod: Get; + /// Initial minimum allowed network UIDs + #[pallet::constant] + type InitialNetworkMinAllowedUids: Get; + /// Initial network minimum burn cost + #[pallet::constant] + type InitialNetworkMinLockCost: Get; + /// Initial network subnet cut. + #[pallet::constant] + type InitialSubnetOwnerCut: Get; + /// Initial lock reduction interval. + #[pallet::constant] + type InitialNetworkLockReductionInterval: Get; + /// Initial max allowed subnets + #[pallet::constant] + type InitialSubnetLimit: Get; + /// Initial network creation rate limit + #[pallet::constant] + type InitialNetworkRateLimit: Get; + /// Initial target stakes per interval issuance. 
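// Sketch: how a runtime supplies the #[pallet::constant] values declared in this Config
// trait, via frame_support::parameter_types!. The numeric values and the `Runtime` type
// in the commented impl are placeholders; the real wiring lives in the node's runtime crate.
frame_support::parameter_types! {
    pub const InitialMinBurn: u64 = 1;
    pub const InitialMaxBurn: u64 = 100_000_000_000;
    pub const InitialTxRateLimit: u64 = 1_000;
}

// impl pallet_subtensor::Config for Runtime {
//     type InitialMinBurn = InitialMinBurn;
//     type InitialMaxBurn = InitialMaxBurn;
//     type InitialTxRateLimit = InitialTxRateLimit;
//     // ...remaining associated types elided...
// }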
+ #[pallet::constant] + type InitialTargetStakesPerInterval: Get; + /// Cost of swapping a hotkey. + #[pallet::constant] + type KeySwapCost: Get; + /// The upper bound for the alpha parameter. Used for Liquid Alpha. + #[pallet::constant] + type AlphaHigh: Get; + /// The lower bound for the alpha parameter. Used for Liquid Alpha. + #[pallet::constant] + type AlphaLow: Get; + /// A flag to indicate if Liquid Alpha is enabled. + #[pallet::constant] + type LiquidAlphaOn: Get; + /// Initial network max stake. + #[pallet::constant] + type InitialNetworkMaxStake: Get; + /// Initial hotkey emission tempo. + #[pallet::constant] + type InitialHotkeyEmissionTempo: Get; + /// Coldkey swap schedule duartion. + #[pallet::constant] + type InitialColdkeySwapScheduleDuration: Get>; + /// Dissolve network schedule duration + #[pallet::constant] + type InitialDissolveNetworkScheduleDuration: Get>; + } +} diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs new file mode 100644 index 000000000..a97e4494d --- /dev/null +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -0,0 +1,1214 @@ +use frame_support::pallet_macros::pallet_section; + +/// A [`pallet_section`] that defines the errors for a pallet. +/// This can later be imported into the pallet using [`import_section`]. +#[pallet_section] +mod dispatches { + use frame_support::traits::schedule::v3::Anon as ScheduleAnon; + use frame_support::traits::schedule::DispatchTime; + use frame_system::pallet_prelude::BlockNumberFor; + use sp_runtime::traits::Saturating; + /// Dispatchable functions allow users to interact with the pallet and invoke state changes. + /// These functions materialize as "extrinsics", which are often compared to transactions. + /// Dispatchable functions must be annotated with a weight and must return a DispatchResult. + #[pallet::call] + impl Pallet { + /// --- Sets the caller weights for the incentive mechanism. The call can be + /// made from the hotkey account so is potentially insecure, however, the damage + /// of changing weights is minimal if caught early. This function includes all the + /// checks that the passed weights meet the requirements. Stored as u16s they represent + /// rational values in the range [0,1] which sum to 1 and can be interpreted as + /// probabilities. The specific weights determine how inflation propagates outward + /// from this peer. + /// + /// Note: The 16 bit integers weights should represent 1.0 as the max u16. + /// However, the function normalizes all integers to u16_max anyway. This means that if the sum of all + /// elements is larger or smaller than the amount of elements * u16_max, all elements + /// will be corrected for this deviation. + /// + /// # Args: + /// * `origin`: (Origin): + /// - The caller, a hotkey who wishes to set their weights. + /// + /// * `netuid` (u16): + /// - The network uid we are setting these weights on. + /// + /// * `dests` (Vec): + /// - The edge endpoint for the weight, i.e. j for w_ij. + /// + /// * 'weights' (Vec): + /// - The u16 integer encoded weights. Interpreted as rational + /// values in the range [0,1]. They must sum to in32::MAX. + /// + /// * 'version_key' ( u64 ): + /// - The network version key to check if the validator is up to date. + /// + /// # Event: + /// * WeightsSet; + /// - On successfully setting the weights on chain. + /// + /// # Raises: + /// * 'SubNetworkDoesNotExist': + /// - Attempting to set weights on a non-existent network. 
+ /// + /// * 'NotRegistered': + /// - Attempting to set weights from a non registered account. + /// + /// * 'WeightVecNotEqualSize': + /// - Attempting to set weights with uids not of same length. + /// + /// * 'DuplicateUids': + /// - Attempting to set weights with duplicate uids. + /// + /// * 'UidsLengthExceedUidsInSubNet': + /// - Attempting to set weights above the max allowed uids. + /// + /// * 'UidVecContainInvalidOne': + /// - Attempting to set weights with invalid uids. + /// + /// * 'WeightVecLengthIsLow': + /// - Attempting to set weights with fewer weights than min. + /// + /// * 'MaxWeightExceeded': + /// - Attempting to set weights with max value exceeding limit. + #[pallet::call_index(0)] + #[pallet::weight((Weight::from_parts(22_060_000_000, 0) + .saturating_add(T::DbWeight::get().reads(4106)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + pub fn set_weights( + origin: OriginFor, + netuid: u16, + dests: Vec, + weights: Vec, + version_key: u64, + ) -> DispatchResult { + if !Self::get_commit_reveal_weights_enabled(netuid) { + return Self::do_set_weights(origin, netuid, dests, weights, version_key); + } + + Err(Error::::CommitRevealEnabled.into()) + } + + /// ---- Used to commit a hash of your weight values to later be revealed. + /// + /// # Args: + /// * `origin`: (`::RuntimeOrigin`): + /// - The signature of the committing hotkey. + /// + /// * `netuid` (`u16`): + /// - The u16 network identifier. + /// + /// * `commit_hash` (`H256`): + /// - The hash representing the committed weights. + /// + /// # Raises: + /// * `WeightsCommitNotAllowed`: + /// - Attempting to commit when it is not allowed. + /// + #[pallet::call_index(96)] + #[pallet::weight((Weight::from_parts(46_000_000, 0) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] + pub fn commit_weights( + origin: T::RuntimeOrigin, + netuid: u16, + commit_hash: H256, + ) -> DispatchResult { + Self::do_commit_weights(origin, netuid, commit_hash) + } + + /// ---- Used to reveal the weights for a previously committed hash. + /// + /// # Args: + /// * `origin`: (`::RuntimeOrigin`): + /// - The signature of the revealing hotkey. + /// + /// * `netuid` (`u16`): + /// - The u16 network identifier. + /// + /// * `uids` (`Vec`): + /// - The uids for the weights being revealed. + /// + /// * `values` (`Vec`): + /// - The values of the weights being revealed. + /// + /// * `salt` (`Vec`): + /// - The random salt to protect from brute-force guessing attack in case of small weight changes bit-wise. + /// + /// * `version_key` (`u64`): + /// - The network version key. + /// + /// # Raises: + /// * `NoWeightsCommitFound`: + /// - Attempting to reveal weights without an existing commit. + /// + /// * `InvalidRevealCommitHashNotMatchTempo`: + /// - Attempting to reveal weights outside the valid tempo. + /// + /// * `InvalidRevealCommitHashNotMatch`: + /// - The revealed hash does not match the committed hash. 
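To make the commit/reveal flow concrete, here is a client-side sketch of producing a weights `commit_hash` and later checking a reveal against it. It assumes the commitment is BLAKE2-256 over the SCALE-encoded tuple `(hotkey, netuid, uids, values, salt, version_key)`; the authoritative encoding lives in the pallet's `do_commit_weights`/`do_reveal_weights` and should be checked against those.

```rust
use parity_scale_codec::Encode;
use sp_core::{hashing::blake2_256, H256};

/// Hash the weight data the way a commit is assumed to (encoding is an assumption).
fn weights_commit_hash(
    hotkey: &[u8; 32],
    netuid: u16,
    uids: &[u16],
    values: &[u16],
    salt: &[u8],
    version_key: u64,
) -> H256 {
    let encoded = (
        hotkey.to_vec(),
        netuid,
        uids.to_vec(),
        values.to_vec(),
        salt.to_vec(),
        version_key,
    )
        .encode();
    H256::from(blake2_256(&encoded))
}

fn main() {
    let hotkey = [7u8; 32];
    let uids = vec![0u16, 1, 2];
    let values = vec![100u16, 200, 300];
    let salt = b"random-salt".to_vec();

    let commit = weights_commit_hash(&hotkey, 1, &uids, &values, &salt, 0);
    // At reveal time the chain recomputes the hash from the revealed data
    // and rejects the call if it does not match the stored commitment.
    let reveal = weights_commit_hash(&hotkey, 1, &uids, &values, &salt, 0);
    assert_eq!(commit, reveal);
}
```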
+ /// + #[pallet::call_index(97)] + #[pallet::weight((Weight::from_parts(103_000_000, 0) + .saturating_add(T::DbWeight::get().reads(11)) + .saturating_add(T::DbWeight::get().writes(3)), DispatchClass::Normal, Pays::No))] + pub fn reveal_weights( + origin: T::RuntimeOrigin, + netuid: u16, + uids: Vec, + values: Vec, + salt: Vec, + version_key: u64, + ) -> DispatchResult { + Self::do_reveal_weights(origin, netuid, uids, values, salt, version_key) + } + + /// # Args: + /// * `origin`: (Origin): + /// - The caller, a hotkey who wishes to set their weights. + /// + /// * `netuid` (u16): + /// - The network uid we are setting these weights on. + /// + /// * `hotkey` (T::AccountId): + /// - The hotkey associated with the operation and the calling coldkey. + /// + /// * `dests` (Vec): + /// - The edge endpoint for the weight, i.e. j for w_ij. + /// + /// * 'weights' (Vec): + /// - The u16 integer encoded weights. Interpreted as rational + /// values in the range [0,1]. They must sum to in32::MAX. + /// + /// * 'version_key' ( u64 ): + /// - The network version key to check if the validator is up to date. + /// + /// # Event: + /// + /// * WeightsSet; + /// - On successfully setting the weights on chain. + /// + /// # Raises: + /// + /// * NonAssociatedColdKey; + /// - Attempting to set weights on a non-associated cold key. + /// + /// * 'SubNetworkDoesNotExist': + /// - Attempting to set weights on a non-existent network. + /// + /// * 'NotRootSubnet': + /// - Attempting to set weights on a subnet that is not the root network. + /// + /// * 'WeightVecNotEqualSize': + /// - Attempting to set weights with uids not of same length. + /// + /// * 'UidVecContainInvalidOne': + /// - Attempting to set weights with invalid uids. + /// + /// * 'NotRegistered': + /// - Attempting to set weights from a non registered account. + /// + /// * 'WeightVecLengthIsLow': + /// - Attempting to set weights with fewer weights than min. + /// + /// * 'IncorrectWeightVersionKey': + /// - Attempting to set weights with the incorrect network version key. + /// + /// * 'SettingWeightsTooFast': + /// - Attempting to set weights too fast. + /// + /// * 'WeightVecLengthIsLow': + /// - Attempting to set weights with fewer weights than min. + /// + /// * 'MaxWeightExceeded': + /// - Attempting to set weights with max value exceeding limit. + /// + #[pallet::call_index(8)] + #[pallet::weight((Weight::from_parts(10_151_000_000, 0) + .saturating_add(T::DbWeight::get().reads(4104)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + pub fn set_root_weights( + origin: OriginFor, + netuid: u16, + hotkey: T::AccountId, + dests: Vec, + weights: Vec, + version_key: u64, + ) -> DispatchResult { + Self::do_set_root_weights(origin, netuid, hotkey, dests, weights, version_key) + } + + /// --- Sets the key as a delegate. + /// + /// # Args: + /// * 'origin': (Origin): + /// - The signature of the caller's coldkey. + /// + /// * 'hotkey' (T::AccountId): + /// - The hotkey we are delegating (must be owned by the coldkey.) + /// + /// * 'take' (u64): + /// - The stake proportion that this hotkey takes from delegations. + /// + /// # Event: + /// * DelegateAdded; + /// - On successfully setting a hotkey as a delegate. + /// + /// # Raises: + /// * 'NotRegistered': + /// - The hotkey we are delegating is not registered on the network. + /// + /// * 'NonAssociatedColdKey': + /// - The hotkey we are delegating is not owned by the calling coldket. 
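The delegate `take` introduced above (and used by `decrease_take`/`increase_take` below) is encoded as a u16 fixed-point fraction where 65_535 stands for 100%, as the doc comments that follow spell out. A small self-contained helper illustrating the conversion; the helper names are mine, not the pallet's.

```rust
/// Convert a percentage (e.g. 1.0 for 1%) into the on-chain u16 take encoding,
/// where u16::MAX (65_535) represents 100%.
fn percent_to_take(percent: f64) -> u16 {
    ((percent / 100.0) * u16::MAX as f64).round() as u16
}

/// Convert the on-chain u16 take encoding back into a percentage.
fn take_to_percent(take: u16) -> f64 {
    take as f64 / u16::MAX as f64 * 100.0
}

fn main() {
    // 1% ~= 0.01 * 65_535 = 655, matching the worked example in the docs below.
    assert_eq!(percent_to_take(1.0), 655);
    // The documented delegate-take ceiling of 11_796 corresponds to roughly 18%.
    println!("11_796 -> {:.1}%", take_to_percent(11_796));
}
```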
+ /// + #[pallet::call_index(1)] + #[pallet::weight((Weight::from_parts(79_000_000, 0) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(3)), DispatchClass::Normal, Pays::No))] + pub fn become_delegate(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { + Self::do_become_delegate(origin, hotkey, Self::get_default_delegate_take()) + } + + /// --- Allows delegates to decrease their take value. + /// + /// # Args: + /// * 'origin': (::Origin): + /// - The signature of the caller's coldkey. + /// + /// * 'hotkey' (T::AccountId): + /// - The hotkey we are delegating (must be owned by the coldkey.) + /// + /// * 'netuid' (u16): + /// - Subnet ID to decrease take for + /// + /// * 'take' (u16): + /// - The new stake proportion that this hotkey takes from delegations. + /// The new value can be between 0 and 11_796 and should be strictly + /// lower than the previous value. If T is the new value (rational number), + /// then the parameter is calculated as [65535 * T]. For example, 1% would be + /// [0.01 * 65535] = [655.35] = 655 + /// + /// # Event: + /// * TakeDecreased; + /// - On successfully setting a decreased take for this hotkey. + /// + /// # Raises: + /// * 'NotRegistered': + /// - The hotkey we are delegating is not registered on the network. + /// + /// * 'NonAssociatedColdKey': + /// - The hotkey we are delegating is not owned by the calling coldkey. + /// + /// * 'DelegateTakeTooLow': + /// - The delegate is setting a take which is not lower than the previous. + /// + #[pallet::call_index(65)] + #[pallet::weight((0, DispatchClass::Normal, Pays::No))] + pub fn decrease_take( + origin: OriginFor, + hotkey: T::AccountId, + take: u16, + ) -> DispatchResult { + Self::do_decrease_take(origin, hotkey, take) + } + + /// --- Allows delegates to increase their take value. This call is rate-limited. + /// + /// # Args: + /// * 'origin': (::Origin): + /// - The signature of the caller's coldkey. + /// + /// * 'hotkey' (T::AccountId): + /// - The hotkey we are delegating (must be owned by the coldkey.) + /// + /// * 'take' (u16): + /// - The new stake proportion that this hotkey takes from delegations. + /// The new value can be between 0 and 11_796 and should be strictly + /// greater than the previous value. If T is the new value (rational number), + /// then the parameter is calculated as [65535 * T]. For example, 1% would be + /// [0.01 * 65535] = [655.35] = 655 + /// + /// # Event: + /// * TakeIncreased; + /// - On successfully setting an increased take for this hotkey. + /// + /// # Raises: + /// * 'NotRegistered': + /// - The hotkey we are delegating is not registered on the network. + /// + /// * 'NonAssociatedColdKey': + /// - The hotkey we are delegating is not owned by the calling coldkey. + /// + /// * 'DelegateTakeTooHigh': + /// - The delegate is setting a take which is not greater than the previous. + /// + #[pallet::call_index(66)] + #[pallet::weight((0, DispatchClass::Normal, Pays::No))] + pub fn increase_take( + origin: OriginFor, + hotkey: T::AccountId, + take: u16, + ) -> DispatchResult { + Self::do_increase_take(origin, hotkey, take) + } + + /// --- Adds stake to a hotkey. The call is made from the + /// coldkey account linked to the hotkey. + /// Only the associated coldkey is allowed to make staking and + /// unstaking requests. This protects the neuron against + /// attacks on its hotkey running in production code. + /// + /// # Args: + /// * 'origin': (Origin): + /// - The signature of the caller's coldkey.
+ /// + /// * 'hotkey' (T::AccountId): + /// - The associated hotkey account. + /// + /// * 'amount_staked' (u64): + /// - The amount of stake to be added to the hotkey staking account. + /// + /// # Event: + /// * StakeAdded; + /// - On successfully adding stake to a global account. + /// + /// # Raises: + /// * 'NotEnoughBalanceToStake': + /// - Not enough balance on the coldkey to add onto the global account. + /// + /// * 'NonAssociatedColdKey': + /// - The calling coldkey is not associated with this hotkey. + /// + /// * 'BalanceWithdrawalError': + /// - Errors stemming from transaction pallet. + /// + #[pallet::call_index(2)] + #[pallet::weight((Weight::from_parts(124_000_000, 0) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(7)), DispatchClass::Normal, Pays::No))] + pub fn add_stake( + origin: OriginFor, + hotkey: T::AccountId, + amount_staked: u64, + ) -> DispatchResult { + Self::do_add_stake(origin, hotkey, amount_staked) + } + + /// Remove stake from the staking account. The call must be made + /// from the coldkey account attached to the neuron metadata. Only this key + /// has permission to make staking and unstaking requests. + /// + /// # Args: + /// * 'origin': (Origin): + /// - The signature of the caller's coldkey. + /// + /// * 'hotkey' (T::AccountId): + /// - The associated hotkey account. + /// + /// * 'amount_unstaked' (u64): + /// - The amount of stake to be removed from the hotkey staking account. + /// + /// # Event: + /// * StakeRemoved; + /// - On successfully removing stake from the hotkey account. + /// + /// # Raises: + /// * 'NotRegistered': + /// - Thrown if the account we are attempting to unstake from is non-existent. + /// + /// * 'NonAssociatedColdKey': + /// - Thrown if the coldkey does not own the hotkey we are unstaking from. + /// + /// * 'NotEnoughStakeToWithdraw': + /// - Thrown if there is not enough stake on the hotkey to withdraw this amount. + /// + #[pallet::call_index(3)] + #[pallet::weight((Weight::from_parts(111_000_000, 0) + .saturating_add(Weight::from_parts(0, 43991)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(7)), DispatchClass::Normal, Pays::No))] + pub fn remove_stake( + origin: OriginFor, + hotkey: T::AccountId, + amount_unstaked: u64, + ) -> DispatchResult { + Self::do_remove_stake(origin, hotkey, amount_unstaked) + } + + /// Serves or updates axon / prometheus information for the neuron associated with the caller. If the caller is + /// already registered the metadata is updated. If the caller is not registered this call throws NotRegistered. + /// + /// # Args: + /// * 'origin': (Origin): + /// - The signature of the caller. + /// + /// * 'netuid' (u16): + /// - The u16 network identifier. + /// + /// * 'version' (u32): + /// - The bittensor version identifier. + /// + /// * 'ip' (u128): + /// - The endpoint ip information as a u128 encoded integer. + /// + /// * 'port' (u16): + /// - The endpoint port information as a u16 encoded integer. + /// + /// * 'ip_type' (u8): + /// - The endpoint ip version as a u8, 4 or 6. + /// + /// * 'protocol' (u8): + /// - UDP:1 or TCP:0 + /// + /// * 'placeholder1' (u8): + /// - Placeholder for further extra params. + /// + /// * 'placeholder2' (u8): + /// - Placeholder for further extra params. + /// + /// # Event: + /// * AxonServed; + /// - On successfully serving the axon info. + /// + /// # Raises: + /// * 'SubNetworkDoesNotExist': + /// - Attempting to serve an axon on a non-existent network.
+ /// + /// * 'NotRegistered': + /// - Attempting to set weights from a non registered account. + /// + /// * 'InvalidIpType': + /// - The ip type is not 4 or 6. + /// + /// * 'InvalidIpAddress': + /// - The numerically encoded ip address does not resolve to a proper ip. + /// + /// * 'ServingRateLimitExceeded': + /// - Attempting to set prometheus information withing the rate limit min. + /// + #[pallet::call_index(4)] + #[pallet::weight((Weight::from_parts(46_000_000, 0) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] + pub fn serve_axon( + origin: OriginFor, + netuid: u16, + version: u32, + ip: u128, + port: u16, + ip_type: u8, + protocol: u8, + placeholder1: u8, + placeholder2: u8, + ) -> DispatchResult { + Self::do_serve_axon( + origin, + netuid, + version, + ip, + port, + ip_type, + protocol, + placeholder1, + placeholder2, + ) + } + + /// ---- Set prometheus information for the neuron. + /// # Args: + /// * 'origin': (Origin): + /// - The signature of the calling hotkey. + /// + /// * 'netuid' (u16): + /// - The u16 network identifier. + /// + /// * 'version' (u16): + /// - The bittensor version identifier. + /// + /// * 'ip' (u128): + /// - The prometheus ip information as a u128 encoded integer. + /// + /// * 'port' (u16): + /// - The prometheus port information as a u16 encoded integer. + /// + /// * 'ip_type' (u8): + /// - The ip type v4 or v6. + /// + #[pallet::call_index(5)] + #[pallet::weight((Weight::from_parts(45_000_000, 0) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] + pub fn serve_prometheus( + origin: OriginFor, + netuid: u16, + version: u32, + ip: u128, + port: u16, + ip_type: u8, + ) -> DispatchResult { + Self::do_serve_prometheus(origin, netuid, version, ip, port, ip_type) + } + + /// ---- Registers a new neuron to the subnetwork. + /// + /// # Args: + /// * 'origin': (Origin): + /// - The signature of the calling hotkey. + /// + /// * 'netuid' (u16): + /// - The u16 network identifier. + /// + /// * 'block_number' ( u64 ): + /// - Block hash used to prove work done. + /// + /// * 'nonce' ( u64 ): + /// - Positive integer nonce used in POW. + /// + /// * 'work' ( Vec ): + /// - Vector encoded bytes representing work done. + /// + /// * 'hotkey' ( T::AccountId ): + /// - Hotkey to be registered to the network. + /// + /// * 'coldkey' ( T::AccountId ): + /// - Associated coldkey account. + /// + /// # Event: + /// * NeuronRegistered; + /// - On successfully registering a uid to a neuron slot on a subnetwork. + /// + /// # Raises: + /// * 'SubNetworkDoesNotExist': + /// - Attempting to register to a non existent network. + /// + /// * 'TooManyRegistrationsThisBlock': + /// - This registration exceeds the total allowed on this network this block. + /// + /// * 'HotKeyAlreadyRegisteredInSubNet': + /// - The hotkey is already registered on this network. + /// + /// * 'InvalidWorkBlock': + /// - The work has been performed on a stale, future, or non existent block. + /// + /// * 'InvalidDifficulty': + /// - The work does not match the difficulty. + /// + /// * 'InvalidSeal': + /// - The seal is incorrect. 
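`serve_axon` takes the endpoint IP as a `u128` plus an `ip_type` of 4 or 6, as documented above. A sketch of the encoding a client might use, relying only on standard `std::net` integer conversions; the pallet's own validation lives in `do_serve_axon`.

```rust
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};

/// Encode an IP address into the (ip, ip_type) pair expected by serve_axon.
fn encode_ip(addr: IpAddr) -> (u128, u8) {
    match addr {
        IpAddr::V4(v4) => (u32::from(v4) as u128, 4),
        IpAddr::V6(v6) => (u128::from(v6), 6),
    }
}

/// Decode the pair back into an IP address (for display or sanity checks).
fn decode_ip(ip: u128, ip_type: u8) -> Option<IpAddr> {
    match ip_type {
        4 => Some(IpAddr::V4(Ipv4Addr::from(ip as u32))),
        6 => Some(IpAddr::V6(Ipv6Addr::from(ip))),
        _ => None, // anything else would be rejected on-chain as InvalidIpType
    }
}

fn main() {
    let (ip, ip_type) = encode_ip("203.0.113.7".parse().unwrap());
    assert_eq!(decode_ip(ip, ip_type), Some("203.0.113.7".parse().unwrap()));
}
```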
+ /// + #[pallet::call_index(6)] + #[pallet::weight((Weight::from_parts(192_000_000, 0) + .saturating_add(T::DbWeight::get().reads(24)) + .saturating_add(T::DbWeight::get().writes(22)), DispatchClass::Normal, Pays::No))] + pub fn register( + origin: OriginFor, + netuid: u16, + block_number: u64, + nonce: u64, + work: Vec, + hotkey: T::AccountId, + coldkey: T::AccountId, + ) -> DispatchResult { + Self::do_registration(origin, netuid, block_number, nonce, work, hotkey, coldkey) + } + + /// Register the hotkey to root network + #[pallet::call_index(62)] + #[pallet::weight((Weight::from_parts(164_000_000, 0) + .saturating_add(T::DbWeight::get().reads(23)) + .saturating_add(T::DbWeight::get().writes(20)), DispatchClass::Normal, Pays::No))] + pub fn root_register(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { + Self::do_root_register(origin, hotkey) + } + + /// Attempt to adjust the senate membership to include a hotkey + #[pallet::call_index(63)] + #[pallet::weight((Weight::from_parts(0, 0) + .saturating_add(T::DbWeight::get().reads(0)) + .saturating_add(T::DbWeight::get().writes(0)), DispatchClass::Normal, Pays::Yes))] + pub fn adjust_senate(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { + Self::do_adjust_senate(origin, hotkey) + } + + /// User register a new subnetwork via burning token + #[pallet::call_index(7)] + #[pallet::weight((Weight::from_parts(177_000_000, 0) + .saturating_add(T::DbWeight::get().reads(26)) + .saturating_add(T::DbWeight::get().writes(24)), DispatchClass::Normal, Pays::No))] + pub fn burned_register( + origin: OriginFor, + netuid: u16, + hotkey: T::AccountId, + ) -> DispatchResult { + Self::do_burned_registration(origin, netuid, hotkey) + } + + /// The extrinsic for user to change its hotkey + #[pallet::call_index(70)] + #[pallet::weight((Weight::from_parts(1_940_000_000, 0) + .saturating_add(T::DbWeight::get().reads(272)) + .saturating_add(T::DbWeight::get().writes(527)), DispatchClass::Operational, Pays::No))] + pub fn swap_hotkey( + origin: OriginFor, + hotkey: T::AccountId, + new_hotkey: T::AccountId, + ) -> DispatchResultWithPostInfo { + Self::do_swap_hotkey(origin, &hotkey, &new_hotkey) + } + + /// The extrinsic for user to change the coldkey associated with their account. + /// + /// # Arguments + /// + /// * `origin` - The origin of the call, must be signed by the old coldkey. + /// * `old_coldkey` - The current coldkey associated with the account. + /// * `new_coldkey` - The new coldkey to be associated with the account. + /// + /// # Returns + /// + /// Returns a `DispatchResultWithPostInfo` indicating success or failure of the operation. + /// + /// # Weight + /// + /// Weight is calculated based on the number of database reads and writes. + #[pallet::call_index(71)] + #[pallet::weight((Weight::from_parts(127_713_000, 0) + .saturating_add(Weight::from_parts(0, 11645)) + .saturating_add(T::DbWeight::get().reads(18)) + .saturating_add(T::DbWeight::get().writes(12)), DispatchClass::Operational, Pays::No))] + pub fn swap_coldkey( + origin: OriginFor, + old_coldkey: T::AccountId, + new_coldkey: T::AccountId, + ) -> DispatchResultWithPostInfo { + // Ensure it's called with root privileges (scheduler has root privileges) + ensure_root(origin)?; + log::info!("swap_coldkey: {:?} -> {:?}", old_coldkey, new_coldkey); + + Self::do_swap_coldkey(&old_coldkey, &new_coldkey) + } + + /// Sets the childkey take for a given hotkey. + /// + /// This function allows a coldkey to set the childkey take for a given hotkey. 
+ /// The childkey take determines the proportion of stake that the hotkey keeps for itself + /// when distributing stake to its children. + /// + /// # Arguments: + /// * `origin` (::RuntimeOrigin): + /// - The signature of the calling coldkey. Setting childkey take can only be done by the coldkey. + /// + /// * `hotkey` (T::AccountId): + /// - The hotkey for which the childkey take will be set. + /// + /// * `take` (u16): + /// - The new childkey take value. This is a percentage represented as a value between 0 and 10000, + /// where 10000 represents 100%. + /// + /// # Events: + /// * `ChildkeyTakeSet`: + /// - On successfully setting the childkey take for a hotkey. + /// + /// # Errors: + /// * `NonAssociatedColdKey`: + /// - The coldkey does not own the hotkey. + /// * `InvalidChildkeyTake`: + /// - The provided take value is invalid (greater than the maximum allowed take). + /// * `TxChildkeyTakeRateLimitExceeded`: + /// - The rate limit for changing childkey take has been exceeded. + /// + #[pallet::call_index(75)] + #[pallet::weight(( + Weight::from_parts(34_000, 0) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)), + DispatchClass::Normal, + Pays::Yes +))] + pub fn set_childkey_take( + origin: OriginFor, + hotkey: T::AccountId, + netuid: u16, + take: u16, + ) -> DispatchResult { + let coldkey = ensure_signed(origin)?; + + // Call the utility function to set the childkey take + Self::do_set_childkey_take(coldkey, hotkey, netuid, take) + } + + // ---- SUDO ONLY FUNCTIONS ------------------------------------------------------------ + + /// Sets the transaction rate limit for changing childkey take. + /// + /// This function can only be called by the root origin. + /// + /// # Arguments: + /// * `origin` - The origin of the call, must be root. + /// * `tx_rate_limit` - The new rate limit in blocks. + /// + /// # Errors: + /// * `BadOrigin` - If the origin is not root. + /// + #[pallet::call_index(69)] + #[pallet::weight(( + Weight::from_parts(6_000, 0) + .saturating_add(T::DbWeight::get().writes(1)), + DispatchClass::Operational, + Pays::No +))] + pub fn sudo_set_tx_childkey_take_rate_limit( + origin: OriginFor, + tx_rate_limit: u64, + ) -> DispatchResult { + ensure_root(origin)?; + Self::set_tx_childkey_take_rate_limit(tx_rate_limit); + Ok(()) + } + + /// Sets the minimum allowed childkey take. + /// + /// This function can only be called by the root origin. + /// + /// # Arguments: + /// * `origin` - The origin of the call, must be root. + /// * `take` - The new minimum childkey take value. + /// + /// # Errors: + /// * `BadOrigin` - If the origin is not root. + /// + #[pallet::call_index(76)] + #[pallet::weight(( + Weight::from_parts(6_000, 0) + .saturating_add(T::DbWeight::get().writes(1)), + DispatchClass::Operational, + Pays::No + ))] + pub fn sudo_set_min_childkey_take(origin: OriginFor, take: u16) -> DispatchResult { + ensure_root(origin)?; + Self::set_min_childkey_take(take); + Ok(()) + } + + /// Sets the maximum allowed childkey take. + /// + /// This function can only be called by the root origin. + /// + /// # Arguments: + /// * `origin` - The origin of the call, must be root. + /// * `take` - The new maximum childkey take value. + /// + /// # Errors: + /// * `BadOrigin` - If the origin is not root. 
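Tying the childkey-take calls together: `set_childkey_take` accepts a value on the scale described above (10_000 = 100% per the doc comment), and the sudo setters define the floor and ceiling it is checked against. Below is a hedged sketch of that kind of bounds check, with made-up constants standing in for the configured minimum and maximum; the on-chain check may differ in detail.

```rust
/// Illustrative stand-ins for the configured bounds (not real chain values).
const MIN_CHILDKEY_TAKE: u16 = 0;
const MAX_CHILDKEY_TAKE: u16 = 1_800; // e.g. 18% on the 0..=10_000 scale used in the docs above

/// Sketch of the validation that makes set_childkey_take fail with
/// InvalidChildkeyTake when the requested take is out of bounds.
fn validate_childkey_take(take: u16) -> Result<(), &'static str> {
    if !(MIN_CHILDKEY_TAKE..=MAX_CHILDKEY_TAKE).contains(&take) {
        return Err("InvalidChildkeyTake");
    }
    Ok(())
}

fn main() {
    assert!(validate_childkey_take(500).is_ok()); // 5%
    assert!(validate_childkey_take(9_000).is_err()); // above the configured ceiling
}
```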
+ /// + #[pallet::call_index(77)] + #[pallet::weight(( + Weight::from_parts(6_000, 0) + .saturating_add(T::DbWeight::get().writes(1)), + DispatchClass::Operational, + Pays::No + ))] + pub fn sudo_set_max_childkey_take(origin: OriginFor, take: u16) -> DispatchResult { + ensure_root(origin)?; + Self::set_max_childkey_take(take); + Ok(()) + } + // ================================== + // ==== Parameter Sudo calls ======== + // ================================== + // Each function sets the corresponding hyper paramter on the specified network + // Args: + // * 'origin': (Origin): + // - The caller, must be sudo. + // + // * `netuid` (u16): + // - The network identifier. + // + // * `hyperparameter value` (u16): + // - The value of the hyper parameter. + // + + /// Authenticates a council proposal and dispatches a function call with `Root` origin. + /// + /// The dispatch origin for this call must be a council majority. + /// + /// ## Complexity + /// - O(1). + #[pallet::call_index(51)] + #[pallet::weight((Weight::from_parts(0, 0), DispatchClass::Operational, Pays::No))] + pub fn sudo( + origin: OriginFor, + call: Box, + ) -> DispatchResultWithPostInfo { + // This is a public call, so we ensure that the origin is a council majority. + T::CouncilOrigin::ensure_origin(origin)?; + + let result = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into()); + let error = result.map(|_| ()).map_err(|e| e.error); + Self::deposit_event(Event::Sudid(error)); + + return result; + } + + /// Authenticates a council proposal and dispatches a function call with `Root` origin. + /// This function does not check the weight of the call, and instead allows the + /// user to specify the weight of the call. + /// + /// The dispatch origin for this call must be a council majority. + /// + /// ## Complexity + /// - O(1). + #[allow(deprecated)] + #[pallet::call_index(52)] + #[pallet::weight((*weight, call.get_dispatch_info().class, Pays::No))] + pub fn sudo_unchecked_weight( + origin: OriginFor, + call: Box, + weight: Weight, + ) -> DispatchResultWithPostInfo { + // We dont need to check the weight witness, suppress warning. + // See https://github.com/paritytech/polkadot-sdk/pull/1818. + let _ = weight; + + // This is a public call, so we ensure that the origin is a council majority. 
+ T::CouncilOrigin::ensure_origin(origin)?; + + let result = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into()); + let error = result.map(|_| ()).map_err(|e| e.error); + Self::deposit_event(Event::Sudid(error)); + + return result; + } + + /// User vote on a proposal + #[pallet::call_index(55)] + #[pallet::weight((Weight::from_parts(0, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().reads(0)) + .saturating_add(T::DbWeight::get().writes(0)), DispatchClass::Operational))] + pub fn vote( + origin: OriginFor, + hotkey: T::AccountId, + proposal: T::Hash, + #[pallet::compact] index: u32, + approve: bool, + ) -> DispatchResultWithPostInfo { + Self::do_vote_root(origin, &hotkey, proposal, index, approve) + } + + /// User register a new subnetwork + #[pallet::call_index(59)] + #[pallet::weight((Weight::from_parts(157_000_000, 0) + .saturating_add(T::DbWeight::get().reads(16)) + .saturating_add(T::DbWeight::get().writes(30)), DispatchClass::Operational, Pays::No))] + pub fn register_network(origin: OriginFor) -> DispatchResult { + Self::user_add_network(origin, None) + } + + /// Facility extrinsic for user to get taken from faucet + /// It is only available when pow-faucet feature enabled + /// Just deployed in testnet and devnet for testing purpose + #[pallet::call_index(60)] + #[pallet::weight((Weight::from_parts(91_000_000, 0) + .saturating_add(T::DbWeight::get().reads(27)) + .saturating_add(T::DbWeight::get().writes(22)), DispatchClass::Normal, Pays::No))] + pub fn faucet( + origin: OriginFor, + block_number: u64, + nonce: u64, + work: Vec, + ) -> DispatchResult { + if cfg!(feature = "pow-faucet") { + return Self::do_faucet(origin, block_number, nonce, work); + } + + Err(Error::::FaucetDisabled.into()) + } + + /// Remove a user's subnetwork + /// The caller must be the owner of the network + #[pallet::call_index(61)] + #[pallet::weight((Weight::from_parts(119_000_000, 0) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(31)), DispatchClass::Operational, Pays::No))] + pub fn dissolve_network( + origin: OriginFor, + coldkey: T::AccountId, + netuid: u16, + ) -> DispatchResult { + ensure_root(origin)?; + Self::user_remove_network(coldkey, netuid) + } + + /// Set a single child for a given hotkey on a specified network. + /// + /// This function allows a coldkey to set a single child for a given hotkey on a specified network. + /// The proportion of the hotkey's stake to be allocated to the child is also specified. + /// + /// # Arguments: + /// * `origin` (::RuntimeOrigin): + /// - The signature of the calling coldkey. Setting a hotkey child can only be done by the coldkey. + /// + /// * `hotkey` (T::AccountId): + /// - The hotkey which will be assigned the child. + /// + /// * `child` (T::AccountId): + /// - The child which will be assigned to the hotkey. + /// + /// * `netuid` (u16): + /// - The u16 network identifier where the childkey will exist. + /// + /// * `proportion` (u64): + /// - Proportion of the hotkey's stake to be given to the child, the value must be u64 normalized. + /// + /// # Events: + /// * `ChildAddedSingular`: + /// - On successfully registering a child to a hotkey. + /// + /// # Errors: + /// * `SubNetworkDoesNotExist`: + /// - Attempting to register to a non-existent network. + /// * `RegistrationNotPermittedOnRootSubnet`: + /// - Attempting to register a child on the root network. 
+ /// * `NonAssociatedColdKey`: + /// - The coldkey does not own the hotkey or the child is the same as the hotkey. + /// * `HotKeyAccountNotExists`: + /// - The hotkey account does not exist. + /// + /// # Detailed Explanation of Checks: + /// 1. **Signature Verification**: Ensures that the caller has signed the transaction, verifying the coldkey. + /// 2. **Root Network Check**: Ensures that the delegation is not on the root network, as child hotkeys are not valid on the root. + /// 3. **Network Existence Check**: Ensures that the specified network exists. + /// 4. **Ownership Verification**: Ensures that the coldkey owns the hotkey. + /// 5. **Hotkey Account Existence Check**: Ensures that the hotkey account already exists. + /// 6. **Child-Hotkey Distinction**: Ensures that the child is not the same as the hotkey. + /// 7. **Old Children Cleanup**: Removes the hotkey from the parent list of its old children. + /// 8. **New Children Assignment**: Assigns the new child to the hotkey and updates the parent list for the new child. + // TODO: Benchmark this call + #[pallet::call_index(67)] + #[pallet::weight((Weight::from_parts(119_000_000, 0) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(31)), DispatchClass::Operational, Pays::Yes))] + pub fn set_children( + origin: T::RuntimeOrigin, + hotkey: T::AccountId, + netuid: u16, + children: Vec<(u64, T::AccountId)>, + ) -> DispatchResultWithPostInfo { + Self::do_set_children(origin, hotkey, netuid, children)?; + Ok(().into()) + } + + /// Schedules a coldkey swap operation to be executed at a future block. + /// + /// This function allows a user to schedule the swapping of their coldkey to a new one + /// at a specified future block. The swap is not executed immediately but is scheduled + /// to occur at the specified block number. + /// + /// # Arguments + /// + /// * `origin` - The origin of the call, which should be signed by the current coldkey owner. + /// * `new_coldkey` - The account ID of the new coldkey that will replace the current one. + /// * `when` - The block number at which the coldkey swap should be executed. + /// + /// # Returns + /// + /// Returns a `DispatchResultWithPostInfo` indicating whether the scheduling was successful. + /// + /// # Errors + /// + /// This function may return an error if: + /// * The origin is not signed. + /// * The scheduling fails due to conflicts or system constraints. + /// + /// # Notes + /// + /// - The actual swap is not performed by this function. It merely schedules the swap operation. + /// - The weight of this call is set to a fixed value and may need adjustment based on benchmarking. + /// + /// # TODO + /// + /// - Implement proper weight calculation based on the complexity of the operation. + /// - Consider adding checks to prevent scheduling too far into the future. 
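The `children` vector passed to `set_children` above pairs each child account with a u64-normalized share of the parent hotkey's stake, and `ProportionOverflow` (added to the errors later in this diff) guards against shares that exceed the full range. A sketch of how a caller might build and sanity-check that vector; the helper names and the 32-byte account placeholders are illustrative only.

```rust
/// Convert a fraction in [0, 1] into the u64-normalized proportion used by set_children.
fn fraction_to_proportion(fraction: f64) -> u64 {
    (fraction.clamp(0.0, 1.0) * u64::MAX as f64) as u64
}

/// Check that the child proportions do not overflow u64 in total,
/// which is the kind of condition ProportionOverflow guards against.
fn proportions_fit(children: &[(u64, [u8; 32])]) -> bool {
    children
        .iter()
        .try_fold(0u64, |acc, (p, _)| acc.checked_add(*p))
        .is_some()
}

fn main() {
    let child_a = [1u8; 32]; // placeholder account ids
    let child_b = [2u8; 32];
    let children = vec![
        (fraction_to_proportion(0.6), child_a),
        (fraction_to_proportion(0.4), child_b),
    ];
    assert!(proportions_fit(&children));
}
```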
+ /// TODO: Benchmark this call + #[pallet::call_index(73)] + #[pallet::weight((Weight::from_parts(119_000_000, 0) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(31)), DispatchClass::Operational, Pays::Yes))] + pub fn schedule_swap_coldkey( + origin: OriginFor, + new_coldkey: T::AccountId, + ) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + ensure!( + !ColdkeySwapScheduled::::contains_key(&who), + Error::::SwapAlreadyScheduled + ); + + let current_block: BlockNumberFor = >::block_number(); + let duration: BlockNumberFor = ColdkeySwapScheduleDuration::::get(); + let when: BlockNumberFor = current_block.saturating_add(duration); + + let call = Call::::swap_coldkey { + old_coldkey: who.clone(), + new_coldkey: new_coldkey.clone(), + }; + + let bound_call = T::Preimages::bound(LocalCallOf::::from(call.clone())) + .map_err(|_| Error::::FailedToSchedule)?; + + T::Scheduler::schedule( + DispatchTime::At(when), + None, + 63, + frame_system::RawOrigin::Root.into(), + bound_call, + ) + .map_err(|_| Error::::FailedToSchedule)?; + + ColdkeySwapScheduled::::insert(&who, ()); + // Emit the SwapScheduled event + Self::deposit_event(Event::ColdkeySwapScheduled { + old_coldkey: who.clone(), + new_coldkey: new_coldkey.clone(), + execution_block: when, + }); + + Ok(().into()) + } + + /// Schedule the dissolution of a network at a specified block number. + /// + /// # Arguments + /// + /// * `origin` - The origin of the call, must be signed by the sender. + /// * `netuid` - The u16 network identifier to be dissolved. + /// + /// # Returns + /// + /// Returns a `DispatchResultWithPostInfo` indicating success or failure of the operation. + /// + /// # Weight + /// + /// Weight is calculated based on the number of database reads and writes. + + #[pallet::call_index(74)] + #[pallet::weight((Weight::from_parts(119_000_000, 0) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(31)), DispatchClass::Operational, Pays::Yes))] + pub fn schedule_dissolve_network( + origin: OriginFor, + netuid: u16, + ) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + + let current_block: BlockNumberFor = >::block_number(); + let duration: BlockNumberFor = DissolveNetworkScheduleDuration::::get(); + let when: BlockNumberFor = current_block.saturating_add(duration); + + let call = Call::::dissolve_network { + coldkey: who.clone(), + netuid, + }; + + let bound_call = T::Preimages::bound(LocalCallOf::::from(call.clone())) + .map_err(|_| Error::::FailedToSchedule)?; + + T::Scheduler::schedule( + DispatchTime::At(when), + None, + 63, + frame_system::RawOrigin::Root.into(), + bound_call, + ) + .map_err(|_| Error::::FailedToSchedule)?; + + // Emit the SwapScheduled event + Self::deposit_event(Event::DissolveNetworkScheduled { + account: who.clone(), + netuid, + execution_block: when, + }); + + Ok(().into()) + } + + /// ---- Set prometheus information for the neuron. + /// # Args: + /// * 'origin': (Origin): + /// - The signature of the calling hotkey. + /// + /// * 'netuid' (u16): + /// - The u16 network identifier. + /// + /// * 'version' (u16): + /// - The bittensor version identifier. + /// + /// * 'ip' (u128): + /// - The prometheus ip information as a u128 encoded integer. + /// + /// * 'port' (u16): + /// - The prometheus port information as a u16 encoded integer. + /// + /// * 'ip_type' (u8): + /// - The ip type v4 or v6. 
+ /// + #[pallet::call_index(68)] + #[pallet::weight((Weight::from_parts(45_000_000, 0) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::Yes))] + pub fn set_identity( + origin: OriginFor, + name: Vec, + url: Vec, + image: Vec, + discord: Vec, + description: Vec, + additional: Vec, + ) -> DispatchResult { + Self::do_set_identity(origin, name, url, image, discord, description, additional) + } + + /// ---- Set the identity information for a subnet. + /// # Args: + /// * `origin` - (::Origin): + /// - The signature of the calling coldkey, which must be the owner of the subnet. + /// + /// * `netuid` (u16): + /// - The unique network identifier of the subnet. + /// + /// * `subnet_name` (Vec): + /// - The name of the subnet. + /// + /// * `github_repo` (Vec): + /// - The GitHub repository associated with the subnet identity. + /// + /// * `subnet_contact` (Vec): + /// - The contact information for the subnet. + #[pallet::call_index(78)] + #[pallet::weight((Weight::from_parts(45_000_000, 0) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::Yes))] + pub fn set_subnet_identity( + origin: OriginFor, + netuid: u16, + subnet_name: Vec, + github_repo: Vec, + subnet_contact: Vec, + ) -> DispatchResult { + Self::do_set_subnet_identity(origin, netuid, subnet_name, github_repo, subnet_contact) + } + + /// User register a new subnetwork + #[pallet::call_index(79)] + #[pallet::weight((Weight::from_parts(157_000_000, 0) + .saturating_add(T::DbWeight::get().reads(16)) + .saturating_add(T::DbWeight::get().writes(30)), DispatchClass::Operational, Pays::No))] + pub fn register_network_with_identity( + origin: OriginFor, + identity: Option, + ) -> DispatchResult { + Self::user_add_network(origin, identity) + } + } +} diff --git a/pallets/subtensor/src/errors.rs b/pallets/subtensor/src/macros/errors.rs similarity index 89% rename from pallets/subtensor/src/errors.rs rename to pallets/subtensor/src/macros/errors.rs index f26a1cd55..22a0a6f89 100644 --- a/pallets/subtensor/src/errors.rs +++ b/pallets/subtensor/src/macros/errors.rs @@ -158,5 +158,31 @@ mod errors { InsufficientBalanceToPerformColdkeySwap, /// The maximum number of coldkey destinations has been reached MaxColdkeyDestinationsReached, + /// Attempting to set an invalid child for a hotkey on a network. + InvalidChild, + /// Duplicate child when setting children. + DuplicateChild, + /// Proportion overflow when setting children. + ProportionOverflow, + /// Too many children MAX 5. + TooManyChildren, + /// Default transaction rate limit exceeded. + TxRateLimitExceeded, + /// Swap coldkey only callable by root. + SwapColdkeyOnlyCallableByRoot, + /// Swap already scheduled. + SwapAlreadyScheduled, + /// failed to swap coldkey + FailedToSchedule, + /// New coldkey is hotkey + NewColdKeyIsHotkey, + /// New coldkey is in arbitration + NewColdkeyIsInArbitration, + /// Childkey take is invalid. + InvalidChildkeyTake, + /// Childkey take rate limit exceeded. + TxChildkeyTakeRateLimitExceeded, + /// Invalid identity. 
+ InvalidIdentity, } } diff --git a/pallets/subtensor/src/events.rs b/pallets/subtensor/src/macros/events.rs similarity index 83% rename from pallets/subtensor/src/events.rs rename to pallets/subtensor/src/macros/events.rs index a5fb90e3f..ac6b69012 100644 --- a/pallets/subtensor/src/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -83,6 +83,14 @@ mod events { TxRateLimitSet(u64), /// setting the delegate take transaction rate limit. TxDelegateTakeRateLimitSet(u64), + /// setting the childkey take transaction rate limit. + TxChildKeyTakeRateLimitSet(u64), + /// minimum childkey take set + MinChildKeyTakeSet(u16), + /// maximum childkey take set + MaxChildKeyTakeSet(u16), + /// childkey take set + ChildKeyTakeSet(T::AccountId, u16), /// a sudo call is done. Sudid(DispatchResult), /// registration is allowed/disallowed for a subnet. @@ -164,12 +172,37 @@ mod events { /// The account ID of the new coldkey new_coldkey: T::AccountId, /// The arbitration block for the coldkey swap - arbitration_block: u64, + execution_block: BlockNumberFor, }, /// The arbitration period has been extended ArbitrationPeriodExtended { /// The account ID of the coldkey coldkey: T::AccountId, }, + /// The children of a hotkey have been set + SetChildren(T::AccountId, u16, Vec<(u64, T::AccountId)>), + /// The hotkey emission tempo has been set + HotkeyEmissionTempoSet(u64), + /// The network maximum stake has been set + NetworkMaxStakeSet(u16, u64), + /// The identity of a coldkey has been set + ChainIdentitySet(T::AccountId), + /// The identity of a subnet has been set + SubnetIdentitySet(u16), + /// The identity of a subnet has been removed + SubnetIdentityRemoved(u16), + /// A dissolve network extrinsic has been scheduled. + DissolveNetworkScheduled { + /// The account ID that scheduled the dissolve network extrinsic + account: T::AccountId, + /// The network ID that will be dissolved + netuid: u16, + /// The execution block number of the extrinsic + execution_block: BlockNumberFor, + }, + /// The duration of the coldkey swap schedule has been set + ColdkeySwapScheduleDurationSet(BlockNumberFor), + /// The duration of the dissolve network schedule has been set + DissolveNetworkScheduleDurationSet(BlockNumberFor), } } diff --git a/pallets/subtensor/src/macros/genesis.rs b/pallets/subtensor/src/macros/genesis.rs new file mode 100644 index 000000000..7d3768a81 --- /dev/null +++ b/pallets/subtensor/src/macros/genesis.rs @@ -0,0 +1,163 @@ +use frame_support::pallet_macros::pallet_section; + +/// A [`pallet_section`] that defines the genesis configuration for a pallet. +/// This can later be imported into the pallet using [`import_section`]. +#[pallet_section] +mod genesis { + + #[pallet::genesis_build] + impl BuildGenesisConfig for GenesisConfig { + fn build(&self) { + // Set initial total issuance from balances + TotalIssuance::::put(self.balances_issuance); + + // Subnet config values + let netuid: u16 = 3; + let tempo = 99; + let max_uids = 4096; + + // The functions for initializing new networks/setting defaults cannot be run directly from genesis functions like extrinsics would + // --- Set this network uid to alive. + NetworksAdded::::insert(netuid, true); + + // --- Fill tempo memory item. + Tempo::::insert(netuid, tempo); + + // --- Fill modality item. + // Only modality 0 exists (text) + NetworkModality::::insert(netuid, 0); + + // Make network parameters explicit.
+ if !Tempo::::contains_key(netuid) { + Tempo::::insert(netuid, Tempo::::get(netuid)); + } + if !Kappa::::contains_key(netuid) { + Kappa::::insert(netuid, Kappa::::get(netuid)); + } + if !Difficulty::::contains_key(netuid) { + Difficulty::::insert(netuid, Difficulty::::get(netuid)); + } + if !MaxAllowedUids::::contains_key(netuid) { + MaxAllowedUids::::insert(netuid, MaxAllowedUids::::get(netuid)); + } + if !ImmunityPeriod::::contains_key(netuid) { + ImmunityPeriod::::insert(netuid, ImmunityPeriod::::get(netuid)); + } + if !ActivityCutoff::::contains_key(netuid) { + ActivityCutoff::::insert(netuid, ActivityCutoff::::get(netuid)); + } + if !EmissionValues::::contains_key(netuid) { + EmissionValues::::insert(netuid, EmissionValues::::get(netuid)); + } + if !MaxWeightsLimit::::contains_key(netuid) { + MaxWeightsLimit::::insert(netuid, MaxWeightsLimit::::get(netuid)); + } + if !MinAllowedWeights::::contains_key(netuid) { + MinAllowedWeights::::insert(netuid, MinAllowedWeights::::get(netuid)); + } + if !RegistrationsThisInterval::::contains_key(netuid) { + RegistrationsThisInterval::::insert( + netuid, + RegistrationsThisInterval::::get(netuid), + ); + } + if !POWRegistrationsThisInterval::::contains_key(netuid) { + POWRegistrationsThisInterval::::insert( + netuid, + POWRegistrationsThisInterval::::get(netuid), + ); + } + if !BurnRegistrationsThisInterval::::contains_key(netuid) { + BurnRegistrationsThisInterval::::insert( + netuid, + BurnRegistrationsThisInterval::::get(netuid), + ); + } + + // Set max allowed uids + MaxAllowedUids::::insert(netuid, max_uids); + + let mut next_uid: u16 = 0; + + for (coldkey, hotkeys) in self.stakes.iter() { + for (hotkey, stake_uid) in hotkeys.iter() { + let (stake, uid) = stake_uid; + + // Expand Yuma Consensus with new position. + Rank::::mutate(netuid, |v| v.push(0)); + Trust::::mutate(netuid, |v| v.push(0)); + Active::::mutate(netuid, |v| v.push(true)); + Emission::::mutate(netuid, |v| v.push(0)); + Consensus::::mutate(netuid, |v| v.push(0)); + Incentive::::mutate(netuid, |v| v.push(0)); + Dividends::::mutate(netuid, |v| v.push(0)); + LastUpdate::::mutate(netuid, |v| v.push(0)); + PruningScores::::mutate(netuid, |v| v.push(0)); + ValidatorTrust::::mutate(netuid, |v| v.push(0)); + ValidatorPermit::::mutate(netuid, |v| v.push(false)); + + // Insert account information. + Keys::::insert(netuid, uid, hotkey.clone()); // Make hotkey - uid association. + Uids::::insert(netuid, hotkey.clone(), uid); // Make uid - hotkey association. + BlockAtRegistration::::insert(netuid, uid, 0); // Fill block at registration. + IsNetworkMember::::insert(hotkey.clone(), netuid, true); // Fill network is member. + + // Fill stake information. + Owner::::insert(hotkey.clone(), coldkey.clone()); + + TotalHotkeyStake::::insert(hotkey.clone(), stake); + TotalColdkeyStake::::insert( + coldkey.clone(), + TotalColdkeyStake::::get(coldkey).saturating_add(*stake), + ); + + // Update total issuance value + TotalIssuance::::put(TotalIssuance::::get().saturating_add(*stake)); + + Stake::::insert(hotkey.clone(), coldkey.clone(), stake); + + next_uid = next_uid.saturating_add(1); + } + } + + // Set correct length for Subnet neurons + SubnetworkN::::insert(netuid, next_uid); + + // --- Increase total network count. + TotalNetworks::::mutate(|n| *n = n.saturating_add(1)); + + // Get the root network uid. + let root_netuid: u16 = 0; + + // Set the root network as added. + NetworksAdded::::insert(root_netuid, true); + + // Increment the number of total networks. 
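The genesis loop above walks `self.stakes`, which maps each coldkey to a list of `(hotkey, (stake, uid))` entries and creates one neuron slot per entry. Below is a simplified sketch of such an input, with the shape inferred from that loop, so the field and type names here are assumptions rather than the pallet's actual GenesisConfig.

```rust
// Hypothetical, simplified mirror of the genesis input consumed by the loop above.
type AccountId = [u8; 32];

struct SubtensorGenesis {
    balances_issuance: u64,
    // coldkey -> [(hotkey, (stake in RAO, uid))]
    stakes: Vec<(AccountId, Vec<(AccountId, (u64, u16))>)>,
}

fn sample_genesis() -> SubtensorGenesis {
    let coldkey = [1u8; 32];
    let (hotkey_a, hotkey_b) = ([2u8; 32], [3u8; 32]);
    SubtensorGenesis {
        balances_issuance: 0,
        stakes: vec![(
            coldkey,
            vec![
                (hotkey_a, (1_000_000_000, 0)), // uid 0
                (hotkey_b, (2_000_000_000, 1)), // uid 1
            ],
        )],
    }
}

fn main() {
    let genesis = sample_genesis();
    assert_eq!(genesis.balances_issuance, 0);
    // One neuron slot is created per (hotkey, stake, uid) entry at genesis.
    let neurons: usize = genesis.stakes.iter().map(|(_, hks)| hks.len()).sum();
    assert_eq!(neurons, 2);
}
```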
+ TotalNetworks::::mutate(|n| *n = n.saturating_add(1)); + + // Set the number of validators to 1. + SubnetworkN::::insert(root_netuid, 0); + + // Set the maximum number to the number of senate members. + MaxAllowedUids::::insert(root_netuid, 64u16); + + // Set the maximum number to the number of validators to all members. + MaxAllowedValidators::::insert(root_netuid, 64u16); + + // Set the min allowed weights to zero, no weights restrictions. + MinAllowedWeights::::insert(root_netuid, 0); + + // Set the max weight limit to infitiy, no weight restrictions. + MaxWeightsLimit::::insert(root_netuid, u16::MAX); + + // Add default root tempo. + Tempo::::insert(root_netuid, 100); + + // Set the root network as open. + NetworkRegistrationAllowed::::insert(root_netuid, true); + + // Set target registrations for validators as 1 per block. + TargetRegistrationsPerInterval::::insert(root_netuid, 1); + } + } +} diff --git a/pallets/subtensor/src/macros/hooks.rs b/pallets/subtensor/src/macros/hooks.rs new file mode 100644 index 000000000..76f140002 --- /dev/null +++ b/pallets/subtensor/src/macros/hooks.rs @@ -0,0 +1,83 @@ +use frame_support::pallet_macros::pallet_section; + +/// A [`pallet_section`] that defines the events for a pallet. +/// This can later be imported into the pallet using [`import_section`]. +#[pallet_section] +mod hooks { + // ================ + // ==== Hooks ===== + // ================ + #[pallet::hooks] + impl Hooks> for Pallet { + // ---- Called on the initialization of this pallet. (the order of on_finalize calls is determined in the runtime) + // + // # Args: + // * 'n': (BlockNumberFor): + // - The number of the block we are initializing. + fn on_initialize(_block_number: BlockNumberFor) -> Weight { + let block_step_result = Self::block_step(); + match block_step_result { + Ok(_) => { + // --- If the block step was successful, return the weight. + log::debug!("Successfully ran block step."); + Weight::from_parts(110_634_229_000_u64, 0) + .saturating_add(T::DbWeight::get().reads(8304_u64)) + .saturating_add(T::DbWeight::get().writes(110_u64)) + } + Err(e) => { + // --- If the block step was unsuccessful, return the weight anyway. + log::error!("Error while stepping block: {:?}", e); + Weight::from_parts(110_634_229_000_u64, 0) + .saturating_add(T::DbWeight::get().reads(8304_u64)) + .saturating_add(T::DbWeight::get().writes(110_u64)) + } + } + } + + fn on_runtime_upgrade() -> frame_support::weights::Weight { + // --- Migrate storage + let mut weight = frame_support::weights::Weight::from_parts(0, 0); + + // Hex encoded foundation coldkey + let hex = hex_literal::hex![ + "feabaafee293d3b76dae304e2f9d885f77d2b17adab9e17e921b321eccd61c77" + ]; + weight = weight + // Initializes storage version (to 1) + .saturating_add(migrations::migrate_to_v1_separate_emission::migrate_to_v1_separate_emission::()) + // Storage version v1 -> v2 + .saturating_add(migrations::migrate_to_v2_fixed_total_stake::migrate_to_v2_fixed_total_stake::()) + // Doesn't check storage version. 
TODO: Remove after upgrade + .saturating_add(migrations::migrate_create_root_network::migrate_create_root_network::()) + // Storage version v2 -> v3 + .saturating_add(migrations::migrate_transfer_ownership_to_foundation::migrate_transfer_ownership_to_foundation::( + hex, + )) + // Storage version v3 -> v4 + .saturating_add(migrations::migrate_delete_subnet_21::migrate_delete_subnet_21::()) + // Storage version v4 -> v5 + .saturating_add(migrations::migrate_delete_subnet_3::migrate_delete_subnet_3::()) + // Doesn't check storage version. TODO: Remove after upgrade + // Storage version v5 -> v6 + .saturating_add(migrations::migrate_total_issuance::migrate_total_issuance::(false)) + // Populate OwnedHotkeys map for coldkey swap. Doesn't update storage vesion. + // Storage version v6 -> v7 + .saturating_add(migrations::migrate_populate_owned_hotkeys::migrate_populate_owned::()) + // Populate StakingHotkeys map for coldkey swap. Doesn't update storage vesion. + // Storage version v7 -> v8 + .saturating_add(migrations::migrate_populate_staking_hotkeys::migrate_populate_staking_hotkeys::()) + // Fix total coldkey stake. + // Storage version v8 -> v9 + .saturating_add(migrations::migrate_fix_total_coldkey_stake::migrate_fix_total_coldkey_stake::()) + // Migrate Delegate Ids on chain + .saturating_add(migrations::migrate_chain_identity::migrate_set_hotkey_identities::()); + weight + } + + #[cfg(feature = "try-runtime")] + fn try_state(_n: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { + Self::check_accounting_invariants()?; + Ok(()) + } + } +} diff --git a/pallets/subtensor/src/macros/mod.rs b/pallets/subtensor/src/macros/mod.rs new file mode 100644 index 000000000..e491ec8c4 --- /dev/null +++ b/pallets/subtensor/src/macros/mod.rs @@ -0,0 +1,6 @@ +pub mod config; +pub mod dispatches; +pub mod errors; +pub mod events; +pub mod genesis; +pub mod hooks; diff --git a/pallets/subtensor/src/migration.rs b/pallets/subtensor/src/migration.rs deleted file mode 100644 index 866ff08fd..000000000 --- a/pallets/subtensor/src/migration.rs +++ /dev/null @@ -1,692 +0,0 @@ -use super::*; -use alloc::string::String; -use frame_support::traits::DefensiveResult; -use frame_support::{ - pallet_prelude::{Identity, OptionQuery}, - storage_alias, - traits::{fungible::Inspect as _, Get, GetStorageVersion, StorageVersion}, - weights::Weight, -}; -use log::info; -use sp_runtime::Saturating; -use sp_std::vec::Vec; - -// TODO (camfairchild): TEST MIGRATION - -const LOG_TARGET: &str = "loadedemissionmigration"; - -pub mod deprecated_loaded_emission_format { - use super::*; - - type AccountIdOf = ::AccountId; - - #[storage_alias] - pub(super) type LoadedEmission = - StorageMap, Identity, u16, Vec<(AccountIdOf, u64)>, OptionQuery>; -} - -/// Migrates and fixes the total coldkey stake. -/// -/// This function iterates through all staking hotkeys, calculates the total stake for each coldkey, -/// and updates the `TotalColdkeyStake` storage accordingly. The migration is only performed if the -/// on-chain storage version is 6. -/// -/// # Returns -/// The weight of the migration process. -pub fn do_migrate_fix_total_coldkey_stake() -> Weight { - // Initialize the weight with one read operation. 
- let mut weight = T::DbWeight::get().reads(1); - - // Clear everything from the map first, no limit (u32::MAX) - let removal_results = TotalColdkeyStake::::clear(u32::MAX, None); - // 1 read/write per removal - let entries_removed: u64 = removal_results.backend.into(); - weight = - weight.saturating_add(T::DbWeight::get().reads_writes(entries_removed, entries_removed)); - - // Iterate through all staking hotkeys. - for (coldkey, hotkey_vec) in StakingHotkeys::::iter() { - // Init the zero value. - let mut coldkey_stake_sum: u64 = 0; - weight = weight.saturating_add(T::DbWeight::get().reads(1)); - - // Calculate the total stake for the current coldkey. - for hotkey in hotkey_vec { - // Cant fail on retrieval. - coldkey_stake_sum = - coldkey_stake_sum.saturating_add(Stake::::get(hotkey, coldkey.clone())); - weight = weight.saturating_add(T::DbWeight::get().reads(1)); - } - // Update the `TotalColdkeyStake` storage with the calculated stake sum. - // Cant fail on insert. - TotalColdkeyStake::::insert(coldkey.clone(), coldkey_stake_sum); - weight = weight.saturating_add(T::DbWeight::get().writes(1)); - } - weight -} - -/// Migrates and fixes the total coldkey stake. -/// -/// This function checks if the migration has already run, and if not, it performs the migration -/// to fix the total coldkey stake. It also marks the migration as completed after running. -/// -/// # Returns -/// The weight of the migration process. -pub fn migrate_fix_total_coldkey_stake() -> Weight { - let migration_name = b"fix_total_coldkey_stake_v7".to_vec(); - - // Initialize the weight with one read operation. - let mut weight = T::DbWeight::get().reads(1); - - // Check if the migration has already run - if HasMigrationRun::::get(&migration_name) { - log::info!( - "Migration '{:?}' has already run. Skipping.", - migration_name - ); - return Weight::zero(); - } - - log::info!( - "Running migration '{}'", - String::from_utf8_lossy(&migration_name) - ); - - // Run the migration - weight = weight.saturating_add(do_migrate_fix_total_coldkey_stake::()); - - // Mark the migration as completed - HasMigrationRun::::insert(&migration_name, true); - weight = weight.saturating_add(T::DbWeight::get().writes(1)); - - // Set the storage version to 7 - StorageVersion::new(7).put::>(); - weight = weight.saturating_add(T::DbWeight::get().writes(1)); - - log::info!( - "Migration '{:?}' completed. Storage version set to 7.", - String::from_utf8_lossy(&migration_name) - ); - - // Return the migration weight. - weight -} -/// Performs migration to update the total issuance based on the sum of stakes and total balances. -/// This migration is applicable only if the current storage version is 5, after which it updates the storage version to 6. -/// -/// # Returns -/// Weight of the migration process. 
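The `migrate_fix_total_coldkey_stake` wrapper above illustrates the guard pattern the newer migrations use: skip if a named flag was already recorded, do the work, record the flag, then bump the storage version. Here is a pallet-agnostic sketch of the same shape in plain Rust, standing in for the on-chain `HasMigrationRun` map and `StorageVersion`; it is an illustration, not FRAME code.

```rust
use std::collections::HashSet;

/// A toy stand-in for the on-chain HasMigrationRun map and storage version.
struct MigrationState {
    has_run: HashSet<Vec<u8>>,
    storage_version: u16,
}

/// Run `body` at most once, keyed by `name`, then record the flag and
/// bump the version: the same shape as the wrapper above.
fn run_once<F: FnOnce()>(state: &mut MigrationState, name: &[u8], new_version: u16, body: F) {
    if state.has_run.contains(name) {
        return; // already migrated; skip
    }
    body();
    state.has_run.insert(name.to_vec());
    state.storage_version = new_version;
}

fn main() {
    let mut state = MigrationState { has_run: HashSet::new(), storage_version: 6 };
    let mut runs = 0;
    run_once(&mut state, b"fix_total_coldkey_stake_v7", 7, || runs += 1);
    run_once(&mut state, b"fix_total_coldkey_stake_v7", 7, || runs += 1);
    assert_eq!((runs, state.storage_version), (1, 7));
}
```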
-pub fn migration5_total_issuance(test: bool) -> Weight { - let mut weight = T::DbWeight::get().reads(1); // Initialize migration weight - - // Execute migration if the current storage version is 5 - if Pallet::::on_chain_storage_version() == StorageVersion::new(5) || test { - // Calculate the sum of all stake values - let stake_sum: u64 = Stake::::iter().fold(0, |accumulator, (_, _, stake_value)| { - accumulator.saturating_add(stake_value) - }); - weight = weight - .saturating_add(T::DbWeight::get().reads_writes(Stake::::iter().count() as u64, 0)); - - // Calculate the sum of all stake values - let locked_sum: u64 = SubnetLocked::::iter() - .fold(0, |accumulator, (_, locked_value)| { - accumulator.saturating_add(locked_value) - }); - weight = weight.saturating_add( - T::DbWeight::get().reads_writes(SubnetLocked::::iter().count() as u64, 0), - ); - - // Retrieve the total balance sum - let total_balance = T::Currency::total_issuance(); - match TryInto::::try_into(total_balance) { - Ok(total_balance_sum) => { - weight = weight.saturating_add(T::DbWeight::get().reads(1)); - - // Compute the total issuance value - let total_issuance_value: u64 = stake_sum - .saturating_add(total_balance_sum) - .saturating_add(locked_sum); - - // Update the total issuance in storage - TotalIssuance::::put(total_issuance_value); - - // Update the storage version to 6 - StorageVersion::new(6).put::>(); - weight = weight.saturating_add(T::DbWeight::get().writes(1)); - } - Err(_) => { - log::error!("Failed to convert total balance to u64, bailing"); - } - } - } - - weight // Return the computed weight of the migration process -} - -pub fn migrate_transfer_ownership_to_foundation(coldkey: [u8; 32]) -> Weight { - let new_storage_version = 3; - - // Setup migration weight - let mut weight = T::DbWeight::get().reads(1); - - // Grab current version - let onchain_version = Pallet::::on_chain_storage_version(); - - // Only runs if we haven't already updated version past above new_storage_version. - if onchain_version < new_storage_version { - info!(target: LOG_TARGET_1, ">>> Migrating subnet 1 and 11 to foundation control {:?}", onchain_version); - - // We have to decode this using a byte slice as we don't have crypto-std - let coldkey_account: ::AccountId = - ::AccountId::decode(&mut &coldkey[..]) - .expect("coldkey is 32-byte array; qed"); - info!("Foundation coldkey: {:?}", coldkey_account); - - let current_block = Pallet::::get_current_block_as_u64(); - weight.saturating_accrue(T::DbWeight::get().reads(1)); - - // Migrate ownership and set creation time as now - SubnetOwner::::insert(1, coldkey_account.clone()); - SubnetOwner::::insert(11, coldkey_account); - - // We are setting the NetworkRegisteredAt storage to a future block to extend the immunity period to 2 weeks - NetworkRegisteredAt::::insert(1, current_block.saturating_add(13 * 7200)); - NetworkRegisteredAt::::insert(11, current_block); - - weight.saturating_accrue(T::DbWeight::get().writes(4)); - - // Update storage version. - StorageVersion::new(new_storage_version).put::>(); // Update to version so we don't run this again. - weight.saturating_accrue(T::DbWeight::get().writes(1)); - - weight - } else { - info!(target: LOG_TARGET_1, "Migration to v3 already done!"); - Weight::zero() - } -} - -pub fn migrate_create_root_network() -> Weight { - // Get the root network uid. - let root_netuid: u16 = 0; - - // Setup migration weight - let mut weight = T::DbWeight::get().reads(1); - - // Check if root network already exists. 
- if NetworksAdded::::get(root_netuid) { - // Since we read from the database once to determine this - return weight; - } - - // Set the root network as added. - NetworksAdded::::insert(root_netuid, true); - - // Increment the number of total networks. - TotalNetworks::::mutate(|n| n.saturating_inc()); - - // Set the maximum number to the number of senate members. - MaxAllowedUids::::insert(root_netuid, 64); - - // Set the maximum number to the number of validators to all members. - MaxAllowedValidators::::insert(root_netuid, 64); - - // Set the min allowed weights to zero, no weights restrictions. - MinAllowedWeights::::insert(root_netuid, 0); - - // Set the max weight limit to infitiy, no weight restrictions. - MaxWeightsLimit::::insert(root_netuid, u16::MAX); - - // Add default root tempo. - Tempo::::insert(root_netuid, 100); - - // Set the root network as open. - NetworkRegistrationAllowed::::insert(root_netuid, true); - - // Set target registrations for validators as 1 per block. - TargetRegistrationsPerInterval::::insert(root_netuid, 1); - - // Set weight setting rate limit to 1 day - //WeightsSetRateLimit::::insert(root_netuid, 7200); - - // Add our weights for writing to database - weight.saturating_accrue(T::DbWeight::get().writes(8)); - - // Empty senate members entirely, they will be filled by by registrations - // on the subnet. - for hotkey_i in T::SenateMembers::members().iter() { - T::TriumvirateInterface::remove_votes(hotkey_i).defensive_ok(); - T::SenateMembers::remove_member(hotkey_i).defensive_ok(); - - weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); - } - - weight -} - -pub fn migrate_delete_subnet_3() -> Weight { - let new_storage_version = 5; - - // Setup migration weight - let mut weight = T::DbWeight::get().reads(1); - - // Grab current version - let onchain_version = Pallet::::on_chain_storage_version(); - - // Only runs if we haven't already updated version past above new_storage_version. - if onchain_version < new_storage_version && Pallet::::if_subnet_exist(3) { - info!(target: LOG_TARGET_1, ">>> Removing subnet 3 {:?}", onchain_version); - - let netuid = 3; - - // We do this all manually as we don't want to call code related to giving subnet owner back their locked token cost. - // --- 2. Remove network count. - SubnetworkN::::remove(netuid); - - // --- 3. Remove network modality storage. - NetworkModality::::remove(netuid); - - // --- 4. Remove netuid from added networks. - NetworksAdded::::remove(netuid); - - // --- 6. Decrement the network counter. - TotalNetworks::::mutate(|n| n.saturating_dec()); - - // --- 7. Remove various network-related storages. - NetworkRegisteredAt::::remove(netuid); - - weight.saturating_accrue(T::DbWeight::get().writes(5)); - - // --- 8. Remove incentive mechanism memory. - let _ = Uids::::clear_prefix(netuid, u32::MAX, None); - let _ = Keys::::clear_prefix(netuid, u32::MAX, None); - let _ = Bonds::::clear_prefix(netuid, u32::MAX, None); - let _ = Weights::::clear_prefix(netuid, u32::MAX, None); - - weight.saturating_accrue(T::DbWeight::get().writes(4)); - - // --- 9. Remove various network-related parameters. 
- Rank::::remove(netuid); - Trust::::remove(netuid); - Active::::remove(netuid); - Emission::::remove(netuid); - Incentive::::remove(netuid); - Consensus::::remove(netuid); - Dividends::::remove(netuid); - PruningScores::::remove(netuid); - LastUpdate::::remove(netuid); - ValidatorPermit::::remove(netuid); - ValidatorTrust::::remove(netuid); - - weight.saturating_accrue(T::DbWeight::get().writes(11)); - - // --- 10. Erase network parameters. - Tempo::::remove(netuid); - Kappa::::remove(netuid); - Difficulty::::remove(netuid); - MaxAllowedUids::::remove(netuid); - ImmunityPeriod::::remove(netuid); - ActivityCutoff::::remove(netuid); - EmissionValues::::remove(netuid); - MaxWeightsLimit::::remove(netuid); - MinAllowedWeights::::remove(netuid); - RegistrationsThisInterval::::remove(netuid); - POWRegistrationsThisInterval::::remove(netuid); - BurnRegistrationsThisInterval::::remove(netuid); - - weight.saturating_accrue(T::DbWeight::get().writes(12)); - - // Update storage version. - StorageVersion::new(new_storage_version).put::>(); // Update version so we don't run this again. - // One write to storage version - weight.saturating_accrue(T::DbWeight::get().writes(1)); - - weight - } else { - info!(target: LOG_TARGET_1, "Migration to v3 already done!"); - Weight::zero() - } -} - -pub fn migrate_delete_subnet_21() -> Weight { - let new_storage_version = 4; - - // Setup migration weight - let mut weight = T::DbWeight::get().reads(1); - - // Grab current version - let onchain_version = Pallet::::on_chain_storage_version(); - - // Only runs if we haven't already updated version past above new_storage_version. - if onchain_version < new_storage_version && Pallet::::if_subnet_exist(21) { - info!(target: LOG_TARGET_1, ">>> Removing subnet 21 {:?}", onchain_version); - - let netuid = 21; - - // We do this all manually as we don't want to call code related to giving subnet owner back their locked token cost. - // --- 2. Remove network count. - SubnetworkN::::remove(netuid); - - // --- 3. Remove network modality storage. - NetworkModality::::remove(netuid); - - // --- 4. Remove netuid from added networks. - NetworksAdded::::remove(netuid); - - // --- 6. Decrement the network counter. - TotalNetworks::::mutate(|n| n.saturating_dec()); - - // --- 7. Remove various network-related storages. - NetworkRegisteredAt::::remove(netuid); - - weight.saturating_accrue(T::DbWeight::get().writes(5)); - - // --- 8. Remove incentive mechanism memory. - let _ = Uids::::clear_prefix(netuid, u32::MAX, None); - let _ = Keys::::clear_prefix(netuid, u32::MAX, None); - let _ = Bonds::::clear_prefix(netuid, u32::MAX, None); - let _ = Weights::::clear_prefix(netuid, u32::MAX, None); - - weight.saturating_accrue(T::DbWeight::get().writes(4)); - - // --- 9. Remove various network-related parameters. - Rank::::remove(netuid); - Trust::::remove(netuid); - Active::::remove(netuid); - Emission::::remove(netuid); - Incentive::::remove(netuid); - Consensus::::remove(netuid); - Dividends::::remove(netuid); - PruningScores::::remove(netuid); - LastUpdate::::remove(netuid); - ValidatorPermit::::remove(netuid); - ValidatorTrust::::remove(netuid); - - weight.saturating_accrue(T::DbWeight::get().writes(11)); - - // --- 10. Erase network parameters. 
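For orientation, the `writes(11)` / `writes(12)` totals accrued in these subnet-removal migrations simply count one database write per removed map entry. A toy model of that accounting, with `BTreeMap`s standing in for the per-subnet storage maps (map shapes and figures are illustrative assumptions, not the pallet's types):

```rust
use std::collections::BTreeMap;

// Remove one netuid's entry from each parameter map and count one write per removal.
fn remove_netuid_and_count_writes(netuid: u16, maps: &mut [BTreeMap<u16, u64>]) -> u64 {
    let mut writes = 0u64;
    for map in maps.iter_mut() {
        map.remove(&netuid);
        writes = writes.saturating_add(1);
    }
    writes
}

fn main() {
    // Eleven stand-in parameter maps, each holding an entry for netuid 21.
    let mut maps = vec![BTreeMap::from([(21u16, 1u64)]); 11];
    assert_eq!(remove_netuid_and_count_writes(21, &mut maps), 11);
}
```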
- Tempo::::remove(netuid); - Kappa::::remove(netuid); - Difficulty::::remove(netuid); - MaxAllowedUids::::remove(netuid); - ImmunityPeriod::::remove(netuid); - ActivityCutoff::::remove(netuid); - EmissionValues::::remove(netuid); - MaxWeightsLimit::::remove(netuid); - MinAllowedWeights::::remove(netuid); - RegistrationsThisInterval::::remove(netuid); - POWRegistrationsThisInterval::::remove(netuid); - BurnRegistrationsThisInterval::::remove(netuid); - - weight.saturating_accrue(T::DbWeight::get().writes(12)); - - // Update storage version. - StorageVersion::new(new_storage_version).put::>(); // Update version so we don't run this again. - // One write to storage version - weight.saturating_accrue(T::DbWeight::get().writes(1)); - - weight - } else { - info!(target: LOG_TARGET_1, "Migration to v4 already done!"); - Weight::zero() - } -} - -pub fn migrate_to_v1_separate_emission() -> Weight { - use deprecated_loaded_emission_format as old; - // Check storage version - let mut weight = T::DbWeight::get().reads_writes(1, 0); - - // Grab current version - let onchain_version = Pallet::::on_chain_storage_version(); - - // Only runs if we haven't already updated version to 1. - if onchain_version < 1 { - info!(target: LOG_TARGET, ">>> Updating the LoadedEmission to a new format {:?}", onchain_version); - - // We transform the storage values from the old into the new format. - - // Start by removing any undecodable entries. - let curr_loaded_emission: Vec = old::LoadedEmission::::iter_keys().collect(); - for netuid in curr_loaded_emission { - // Iterates over the netuids - weight.saturating_accrue(T::DbWeight::get().reads(1)); - if old::LoadedEmission::::try_get(netuid).is_err() { - weight.saturating_accrue(T::DbWeight::get().writes(1)); - old::LoadedEmission::::remove(netuid); - log::warn!( - "Was unable to decode old loaded_emisssion for netuid {}", - netuid - ); - } - } - - // Translate the old storage values into the new format. - LoadedEmission::::translate::, u64)>, _>( - |netuid: u16, - netuid_emissions: Vec<(AccountIdOf, u64)>| - -> Option, u64, u64)>> { - info!(target: LOG_TARGET, " Do migration of netuid: {:?}...", netuid); - - // We will assume all loaded emission is validator emissions, - // so this will get distributed over delegatees (nominators), if there are any - // This will NOT effect any servers that are not (also) a delegate validator. - // server_emission will be 0 for any alread loaded emission. - - let mut new_netuid_emissions = Vec::new(); - for (server, validator_emission) in netuid_emissions { - new_netuid_emissions.push((server, 0_u64, validator_emission)); - } - - // One read (old) and write (new) per netuid - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); - - Some(new_netuid_emissions) - }, - ); - - // Update storage version. - StorageVersion::new(1).put::>(); // Update to version 2 so we don't run this again. - // One write to storage version - weight.saturating_accrue(T::DbWeight::get().writes(1)); - - weight - } else { - info!(target: LOG_TARGET_1, "Migration to v2 already done!"); - Weight::zero() - } -} - -const LOG_TARGET_1: &str = "fixtotalstakestorage"; - -pub fn migrate_to_v2_fixed_total_stake() -> Weight { - let new_storage_version = 2; - - // Check storage version - let mut weight = T::DbWeight::get().reads(1); - - // Grab current version - let onchain_version = Pallet::::on_chain_storage_version(); - - // Only runs if we haven't already updated version past above new_storage_version. 
- if onchain_version < new_storage_version { - info!(target: LOG_TARGET_1, ">>> Fixing the TotalStake and TotalColdkeyStake storage {:?}", onchain_version); - - // Stake and TotalHotkeyStake are known to be accurate - // TotalColdkeyStake is known to be inaccurate - // TotalStake is known to be inaccurate - - TotalStake::::put(0); // Set to 0 - weight.saturating_accrue(T::DbWeight::get().writes(1)); - - // We iterate over TotalColdkeyStake keys and set them to 0 - let total_coldkey_stake_keys = TotalColdkeyStake::::iter_keys().collect::>(); - for coldkey in total_coldkey_stake_keys { - weight.saturating_accrue(T::DbWeight::get().reads(1)); - TotalColdkeyStake::::insert(coldkey, 0); // Set to 0 - weight.saturating_accrue(T::DbWeight::get().writes(1)); - } - - // Now we iterate over the entire stake map, and sum each coldkey stake - // We also track TotalStake - for (_, coldkey, stake) in Stake::::iter() { - weight.saturating_accrue(T::DbWeight::get().reads(1)); - // Get the current coldkey stake - let mut total_coldkey_stake = TotalColdkeyStake::::get(coldkey.clone()); - weight.saturating_accrue(T::DbWeight::get().reads(1)); - // Add the stake to the coldkey stake - total_coldkey_stake = total_coldkey_stake.saturating_add(stake); - // Update the coldkey stake - TotalColdkeyStake::::insert(coldkey, total_coldkey_stake); - weight.saturating_accrue(T::DbWeight::get().writes(1)); - - // Get the current total stake - let mut total_stake = TotalStake::::get(); - weight.saturating_accrue(T::DbWeight::get().reads(1)); - // Add the stake to the total stake - total_stake = total_stake.saturating_add(stake); - // Update the total stake - TotalStake::::put(total_stake); - weight.saturating_accrue(T::DbWeight::get().writes(1)); - } - - // Now both TotalStake and TotalColdkeyStake are accurate - - // Update storage version. - StorageVersion::new(new_storage_version).put::>(); // Update to version so we don't run this again. 
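The guard above is the standard "run once" pattern used by these versioned migrations: the body only executes while the recorded on-chain version is below the target, and the version is bumped afterwards so a re-run becomes a no-op. A minimal sketch with plain integers standing in for `StorageVersion`:

```rust
/// Run `body` only if the recorded version is below `target`, then bump the version.
fn run_versioned_migration(on_chain_version: &mut u16, target: u16, body: impl FnOnce()) -> bool {
    if *on_chain_version < target {
        body();
        *on_chain_version = target;
        true
    } else {
        false
    }
}

fn main() {
    let mut version = 1u16;
    assert!(run_versioned_migration(&mut version, 2, || println!("migrating to v2")));
    // A second attempt is skipped because the version was already bumped.
    assert!(!run_versioned_migration(&mut version, 2, || unreachable!()));
}
```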
- // One write to storage version - weight.saturating_accrue(T::DbWeight::get().writes(1)); - - weight - } else { - info!(target: LOG_TARGET_1, "Migration to v2 already done!"); - Weight::zero() - } -} - -/// Migrate the OwnedHotkeys map to the new storage format -pub fn migrate_populate_owned() -> Weight { - // Setup migration weight - let mut weight = T::DbWeight::get().reads(1); - let migration_name = "Populate OwnedHotkeys map"; - - // Check if this migration is needed (if OwnedHotkeys map is empty) - let migrate = OwnedHotkeys::::iter().next().is_none(); - - // Only runs if the migration is needed - if migrate { - info!(target: LOG_TARGET_1, ">>> Starting Migration: {}", migration_name); - - let mut longest_hotkey_vector: usize = 0; - let mut longest_coldkey: Option = None; - let mut keys_touched: u64 = 0; - let mut storage_reads: u64 = 0; - let mut storage_writes: u64 = 0; - - // Iterate through all Owner entries - Owner::::iter().for_each(|(hotkey, coldkey)| { - storage_reads = storage_reads.saturating_add(1); // Read from Owner storage - let mut hotkeys = OwnedHotkeys::::get(&coldkey); - storage_reads = storage_reads.saturating_add(1); // Read from OwnedHotkeys storage - - // Add the hotkey if it's not already in the vector - if !hotkeys.contains(&hotkey) { - hotkeys.push(hotkey); - keys_touched = keys_touched.saturating_add(1); - - // Update longest hotkey vector info - if longest_hotkey_vector < hotkeys.len() { - longest_hotkey_vector = hotkeys.len(); - longest_coldkey = Some(coldkey.clone()); - } - - // Update the OwnedHotkeys storage - OwnedHotkeys::::insert(&coldkey, hotkeys); - storage_writes = storage_writes.saturating_add(1); // Write to OwnedHotkeys storage - } - - // Accrue weight for reads and writes - weight = weight.saturating_add(T::DbWeight::get().reads_writes(2, 1)); - }); - - // Log migration results - info!( - target: LOG_TARGET_1, - "Migration {} finished. 
Keys touched: {}, Longest hotkey vector: {}, Storage reads: {}, Storage writes: {}", - migration_name, keys_touched, longest_hotkey_vector, storage_reads, storage_writes - ); - if let Some(c) = longest_coldkey { - info!(target: LOG_TARGET_1, "Longest hotkey vector is controlled by: {:?}", c); - } - - weight - } else { - info!(target: LOG_TARGET_1, "Migration {} already done!", migration_name); - Weight::zero() - } -} - -/// Populate the StakingHotkeys map from Stake map -pub fn migrate_populate_staking_hotkeys() -> Weight { - // Setup migration weight - let mut weight = T::DbWeight::get().reads(1); - let migration_name = "Populate StakingHotkeys map"; - - // Check if this migration is needed (if StakingHotkeys map is empty) - let migrate = StakingHotkeys::::iter().next().is_none(); - - // Only runs if the migration is needed - if migrate { - info!(target: LOG_TARGET_1, ">>> Starting Migration: {}", migration_name); - - let mut longest_hotkey_vector: usize = 0; - let mut longest_coldkey: Option = None; - let mut keys_touched: u64 = 0; - let mut storage_reads: u64 = 0; - let mut storage_writes: u64 = 0; - - // Iterate through all Owner entries - Stake::::iter().for_each(|(hotkey, coldkey, stake)| { - storage_reads = storage_reads.saturating_add(1); // Read from Owner storage - if stake > 0 { - let mut hotkeys = StakingHotkeys::::get(&coldkey); - storage_reads = storage_reads.saturating_add(1); // Read from StakingHotkeys storage - - // Add the hotkey if it's not already in the vector - if !hotkeys.contains(&hotkey) { - hotkeys.push(hotkey); - keys_touched = keys_touched.saturating_add(1); - - // Update longest hotkey vector info - if longest_hotkey_vector < hotkeys.len() { - longest_hotkey_vector = hotkeys.len(); - longest_coldkey = Some(coldkey.clone()); - } - - // Update the StakingHotkeys storage - StakingHotkeys::::insert(&coldkey, hotkeys); - storage_writes = storage_writes.saturating_add(1); // Write to StakingHotkeys storage - } - - // Accrue weight for reads and writes - weight = weight.saturating_add(T::DbWeight::get().reads_writes(2, 1)); - } - }); - - // Log migration results - info!( - target: LOG_TARGET_1, - "Migration {} finished. 
Keys touched: {}, Longest hotkey vector: {}, Storage reads: {}, Storage writes: {}", - migration_name, keys_touched, longest_hotkey_vector, storage_reads, storage_writes - ); - if let Some(c) = longest_coldkey { - info!(target: LOG_TARGET_1, "Longest hotkey vector is controlled by: {:?}", c); - } - - weight - } else { - info!(target: LOG_TARGET_1, "Migration {} already done!", migration_name); - Weight::zero() - } -} diff --git a/pallets/subtensor/src/migrations/migrate_chain_identity.rs b/pallets/subtensor/src/migrations/migrate_chain_identity.rs new file mode 100644 index 000000000..06ee5dd3f --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_chain_identity.rs @@ -0,0 +1,171 @@ +use crate::alloc::borrow::ToOwned; +use codec::Decode; +use scale_info::prelude::{string::String, vec::Vec}; +use serde::Deserialize; +use sp_core::{crypto::Ss58Codec, ConstU32}; +use sp_runtime::{AccountId32, BoundedVec}; + +use super::*; +use frame_support::{traits::Get, weights::Weight}; +use log; + +#[derive(Deserialize, Debug)] +struct RegistrationRecordJSON { + address: String, + name: String, + url: String, + description: String, +} + +fn string_to_bounded_vec(input: &str) -> Result>, &'static str> { + let vec_u8: Vec = input.to_owned().into_bytes(); + + // Check if the length is within bounds + if vec_u8.len() > 64 { + return Err("Input string is too long"); + } + + // Convert to BoundedVec + BoundedVec::>::try_from(vec_u8) + .map_err(|_| "Failed to convert to BoundedVec") +} + +pub fn migrate_set_hotkey_identities() -> Weight { + let migration_name = b"migrate_identities".to_vec(); + + // Initialize the weight with one read operation. + let mut weight = T::DbWeight::get().reads(1); + + // Check if the migration has already run + if HasMigrationRun::::get(&migration_name) { + log::info!( + "Migration '{:?}' has already run. Skipping.", + migration_name + ); + return weight; + } + log::info!( + "Running migration '{}'", + String::from_utf8_lossy(&migration_name) + ); + + // Include the JSON file with delegate info + let data = include_str!("../../../../docs/delegate-info.json"); + + // Iterate over all the delegate records + if let Ok(delegates) = serde_json::from_str::>(data) { + // Iterate through the delegates + for delegate in delegates.iter() { + // Convert fields to bounded vecs + let name_result = string_to_bounded_vec(&delegate.name); + let desc_result = string_to_bounded_vec(&delegate.description); + let url_result = string_to_bounded_vec(&delegate.url); + let hotkey: AccountId32 = match AccountId32::from_ss58check(&delegate.address) { + Ok(account) => account, + Err(_) => { + log::warn!( + "Invalid SS58 address: {:?}. Skipping this delegate.", + delegate.address + ); + continue; + } + }; + let decoded_hotkey: T::AccountId = match T::AccountId::decode(&mut hotkey.as_ref()) { + Ok(decoded) => decoded, + Err(e) => { + log::warn!("Failed to decode hotkey: {:?}. Skipping this delegate.", e); + continue; + } + }; + log::info!("Hotkey unwrapped: {:?}", decoded_hotkey); + + // If we should continue with real values. + let mut name: BoundedVec> = BoundedVec::default(); + let mut description: BoundedVec> = BoundedVec::default(); + let mut url: BoundedVec> = BoundedVec::default(); + if let Ok(n) = name_result { + name = n; + } + if let Ok(d) = desc_result { + description = d; + } + if let Ok(u) = url_result { + url = u; + } + + // Unwrap the real values. 
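Aside: the bounded conversion used earlier in this file can be shown in isolation. The sketch below mirrors `string_to_bounded_vec` rather than reproducing it; the 64-byte bound and the function name are assumptions for this example only.

```rust
use sp_core::ConstU32;
use sp_runtime::BoundedVec;

/// Convert a UTF-8 string into a length-bounded byte vector, rejecting long inputs.
fn to_bounded_64(input: &str) -> Result<BoundedVec<u8, ConstU32<64>>, &'static str> {
    BoundedVec::try_from(input.as_bytes().to_vec()).map_err(|_| "Input string is too long")
}

fn main() {
    assert!(to_bounded_64("short delegate name").is_ok());
    assert!(to_bounded_64(&"x".repeat(65)).is_err());
}
```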
+            let image: BoundedVec> = BoundedVec::default();
+            let discord: BoundedVec> = BoundedVec::default();
+            let additional: BoundedVec> = BoundedVec::default();
+
+            // Create the chain identity.
+            let identity = ChainIdentityOf {
+                name: name.into(),
+                url: url.into(),
+                image: image.into(),
+                discord: discord.into(),
+                description: description.into(),
+                additional: additional.into(),
+            };
+
+            // Log the identity details
+            log::info!("Setting identity for hotkey: {:?}", hotkey);
+            log::info!("Name: {:?}", String::from_utf8_lossy(&identity.name));
+            log::info!("URL: {:?}", String::from_utf8_lossy(&identity.url));
+            log::info!("Image: {:?}", String::from_utf8_lossy(&identity.image));
+            log::info!("Discord: {:?}", String::from_utf8_lossy(&identity.discord));
+            log::info!(
+                "Description: {:?}",
+                String::from_utf8_lossy(&identity.description)
+            );
+            log::info!(
+                "Additional: {:?}",
+                String::from_utf8_lossy(&identity.additional)
+            );
+
+            // Check that the combined and per-field lengths are within bounds.
+            let total_length = identity
+                .name
+                .len()
+                .saturating_add(identity.url.len())
+                .saturating_add(identity.image.len())
+                .saturating_add(identity.discord.len())
+                .saturating_add(identity.description.len())
+                .saturating_add(identity.additional.len());
+            let is_valid: bool = total_length <= 256 + 256 + 1024 + 256 + 1024 + 1024
+                && identity.name.len() <= 256
+                && identity.url.len() <= 256
+                && identity.image.len() <= 1024
+                && identity.discord.len() <= 256
+                && identity.description.len() <= 1024
+                && identity.additional.len() <= 1024;
+            if !is_valid {
+                log::warn!("Identity fields exceed size limits; skipping this delegate.");
+                continue;
+            }
+
+            // Get the owning coldkey.
+            let coldkey = Owner::<T>::get(decoded_hotkey.clone());
+            log::info!("Coldkey: {:?}", coldkey);
+
+            weight = weight.saturating_add(T::DbWeight::get().reads(1));
+
+            // Sink into the map.
+            Identities::<T>::insert(coldkey.clone(), identity.clone());
+            weight = weight.saturating_add(T::DbWeight::get().writes(1));
+        }
+    } else {
+        log::info!("Failed to decode JSON");
+    }
+    // Mark the migration as completed
+    HasMigrationRun::<T>::insert(&migration_name, true);
+    weight = weight.saturating_add(T::DbWeight::get().writes(1));
+
+    log::info!(
+        "Migration '{:?}' completed.",
+        String::from_utf8_lossy(&migration_name)
+    );
+
+    // Return the migration weight.
+    weight
+}
diff --git a/pallets/subtensor/src/migrations/migrate_create_root_network.rs b/pallets/subtensor/src/migrations/migrate_create_root_network.rs
new file mode 100644
index 000000000..c413d1f07
--- /dev/null
+++ b/pallets/subtensor/src/migrations/migrate_create_root_network.rs
@@ -0,0 +1,100 @@
+use super::*;
+use frame_support::{
+    pallet_prelude::{Identity, OptionQuery},
+    storage_alias,
+    traits::{DefensiveResult, Get},
+    weights::Weight,
+};
+use sp_std::vec::Vec;
+
+// TODO (camfairchild): TEST MIGRATION
+
+/// Module containing deprecated storage format for LoadedEmission
+pub mod deprecated_loaded_emission_format {
+    use super::*;
+
+    #[storage_alias]
+    pub(super) type LoadedEmission<T: Config> =
+        StorageMap<Pallet<T>, Identity, u16, Vec<(AccountIdOf<T>, u64)>, OptionQuery>;
+}
+
+/// Migrates the storage to create the root network
+///
+/// This function performs the following steps:
+/// 1. Checks if the root network already exists
+/// 2. If not, creates the root network with default settings
+/// 3.
Removes all existing senate members +/// +/// # Arguments +/// +/// * `T` - The Config trait of the pallet +/// +/// # Returns +/// +/// * `Weight` - The computational weight of this operation +/// +/// # Example +/// +/// ```ignore +/// let weight = migrate_create_root_network::(); +/// ``` +pub fn migrate_create_root_network() -> Weight { + // Define the root network UID + let root_netuid: u16 = 0; + + // Initialize weight counter + let mut weight = T::DbWeight::get().reads(1); + + // Check if root network already exists + if NetworksAdded::::get(root_netuid) { + // Return early if root network already exists + return weight; + } + + // Set the root network as added + NetworksAdded::::insert(root_netuid, true); + + // Increment the total number of networks + TotalNetworks::::mutate(|n| *n = n.saturating_add(1)); + + // Set the maximum number of UIDs to the number of senate members + MaxAllowedUids::::insert(root_netuid, 64); + + // Set the maximum number of validators to all members + MaxAllowedValidators::::insert(root_netuid, 64); + + // Set the minimum allowed weights to zero (no weight restrictions) + MinAllowedWeights::::insert(root_netuid, 0); + + // Set the maximum weight limit to u16::MAX (no weight restrictions) + MaxWeightsLimit::::insert(root_netuid, u16::MAX); + + // Set default root tempo + Tempo::::insert(root_netuid, 100); + + // Set the root network as open for registration + NetworkRegistrationAllowed::::insert(root_netuid, true); + + // Set target registrations for validators as 1 per block + TargetRegistrationsPerInterval::::insert(root_netuid, 1); + + // TODO: Consider if WeightsSetRateLimit should be set + // WeightsSetRateLimit::::insert(root_netuid, 7200); + + // Accrue weight for database writes + weight.saturating_accrue(T::DbWeight::get().writes(8)); + + // Remove all existing senate members + for hotkey_i in T::SenateMembers::members().iter() { + // Remove votes associated with the member + T::TriumvirateInterface::remove_votes(hotkey_i).defensive_ok(); + // Remove the member from the senate + T::SenateMembers::remove_member(hotkey_i).defensive_ok(); + + // Accrue weight for database operations + weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); + } + + log::info!("Migrated create root network"); + weight +} diff --git a/pallets/subtensor/src/migrations/migrate_delete_subnet_21.rs b/pallets/subtensor/src/migrations/migrate_delete_subnet_21.rs new file mode 100644 index 000000000..c917c7cab --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_delete_subnet_21.rs @@ -0,0 +1,128 @@ +use super::*; +use frame_support::{ + pallet_prelude::*, + storage_alias, + traits::{Get, GetStorageVersion, StorageVersion}, + weights::Weight, +}; +use log::info; +use sp_std::vec::Vec; + +/// Constant for logging purposes +const LOG_TARGET: &str = "migrate_delete_subnet_21"; + +/// Module containing deprecated storage format +pub mod deprecated_loaded_emission_format { + use super::*; + + #[storage_alias] + pub(super) type LoadedEmission = + StorageMap, Identity, u16, Vec<(AccountIdOf, u64)>, OptionQuery>; +} + +/// Migrates the storage to delete subnet 21 +/// +/// This function performs the following steps: +/// 1. Checks if the migration is necessary +/// 2. Removes all storage related to subnet 21 +/// 3. 
Updates the storage version +/// +/// # Arguments +/// +/// * `T` - The Config trait of the pallet +/// +/// # Returns +/// +/// * `Weight` - The computational weight of this operation +/// +/// # Example +/// +/// ```ignore +/// let weight = migrate_delete_subnet_21::(); +/// ``` +pub fn migrate_delete_subnet_21() -> Weight { + let new_storage_version = 4; + + // Setup migration weight + let mut weight = T::DbWeight::get().reads(1); + + // Grab current version + let onchain_version = Pallet::::on_chain_storage_version(); + + // Only runs if we haven't already updated version past above new_storage_version and subnet 21 exists. + if onchain_version < new_storage_version && Pallet::::if_subnet_exist(21) { + info!(target: LOG_TARGET, ">>> Removing subnet 21 {:?}", onchain_version); + + let netuid = 21; + + // We do this all manually as we don't want to call code related to giving subnet owner back their locked token cost. + // Remove network count + SubnetworkN::::remove(netuid); + + // Remove network modality storage + NetworkModality::::remove(netuid); + + // Remove netuid from added networks + NetworksAdded::::remove(netuid); + + // Decrement the network counter + TotalNetworks::::mutate(|n| *n = n.saturating_sub(1)); + + // Remove network registration time + NetworkRegisteredAt::::remove(netuid); + + weight.saturating_accrue(T::DbWeight::get().writes(5)); + + // Remove incentive mechanism memory + let _ = Uids::::clear_prefix(netuid, u32::MAX, None); + let _ = Keys::::clear_prefix(netuid, u32::MAX, None); + let _ = Bonds::::clear_prefix(netuid, u32::MAX, None); + let _ = Weights::::clear_prefix(netuid, u32::MAX, None); + + weight.saturating_accrue(T::DbWeight::get().writes(4)); + + // Remove various network-related parameters + Rank::::remove(netuid); + Trust::::remove(netuid); + Active::::remove(netuid); + Emission::::remove(netuid); + Incentive::::remove(netuid); + Consensus::::remove(netuid); + Dividends::::remove(netuid); + PruningScores::::remove(netuid); + LastUpdate::::remove(netuid); + ValidatorPermit::::remove(netuid); + ValidatorTrust::::remove(netuid); + + weight.saturating_accrue(T::DbWeight::get().writes(11)); + + // Erase network parameters + Tempo::::remove(netuid); + Kappa::::remove(netuid); + Difficulty::::remove(netuid); + MaxAllowedUids::::remove(netuid); + ImmunityPeriod::::remove(netuid); + ActivityCutoff::::remove(netuid); + EmissionValues::::remove(netuid); + MaxWeightsLimit::::remove(netuid); + MinAllowedWeights::::remove(netuid); + RegistrationsThisInterval::::remove(netuid); + POWRegistrationsThisInterval::::remove(netuid); + BurnRegistrationsThisInterval::::remove(netuid); + + weight.saturating_accrue(T::DbWeight::get().writes(12)); + + // Update storage version + StorageVersion::new(new_storage_version).put::>(); + weight.saturating_accrue(T::DbWeight::get().writes(1)); + + weight + } else { + info!(target: LOG_TARGET, "Migration to v4 already done or subnet 21 doesn't exist!"); + Weight::zero() + } +} + +// TODO: Add unit tests for this migration +// TODO: Consider adding error handling for storage operations +// TODO: Verify that all relevant storage items for subnet 21 are removed diff --git a/pallets/subtensor/src/migrations/migrate_delete_subnet_3.rs b/pallets/subtensor/src/migrations/migrate_delete_subnet_3.rs new file mode 100644 index 000000000..217696357 --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_delete_subnet_3.rs @@ -0,0 +1,131 @@ +use super::*; +use frame_support::{ + pallet_prelude::*, + storage_alias, + traits::{Get, 
GetStorageVersion, StorageVersion}, + weights::Weight, +}; +use log::info; +use sp_std::vec::Vec; + +/// Constant for logging purposes +const LOG_TARGET: &str = "migrate_delete_subnet_3"; + +/// Module containing deprecated storage format +pub mod deprecated_loaded_emission_format { + use super::*; + + #[storage_alias] + pub(super) type LoadedEmission = + StorageMap, Identity, u16, Vec<(AccountIdOf, u64)>, OptionQuery>; +} + +/// Migrates the storage to delete subnet 3 +/// +/// This function performs the following steps: +/// 1. Checks if the migration is necessary +/// 2. Removes all storage related to subnet 3 +/// 3. Updates the storage version +/// +/// # Arguments +/// +/// * `T` - The Config trait of the pallet +/// +/// # Returns +/// +/// * `Weight` - The computational weight of this operation +/// +/// # Example +/// +/// ```ignore +/// let weight = migrate_delete_subnet_3::(); +/// ``` +pub fn migrate_delete_subnet_3() -> Weight { + let new_storage_version = 5; + + // Initialize weight counter + let mut weight = T::DbWeight::get().reads(1); + + // Get current on-chain storage version + let onchain_version = Pallet::::on_chain_storage_version(); + + // Only proceed if current version is less than the new version and subnet 3 exists + if onchain_version < new_storage_version && Pallet::::if_subnet_exist(3) { + info!( + target: LOG_TARGET, + "Removing subnet 3. Current version: {:?}", + onchain_version + ); + + let netuid = 3; + + // Remove network count + SubnetworkN::::remove(netuid); + + // Remove network modality storage + NetworkModality::::remove(netuid); + + // Remove netuid from added networks + NetworksAdded::::remove(netuid); + + // Decrement the network counter + TotalNetworks::::mutate(|n| *n = n.saturating_sub(1)); + + // Remove network registration time + NetworkRegisteredAt::::remove(netuid); + + weight.saturating_accrue(T::DbWeight::get().writes(5)); + + // Remove incentive mechanism memory + let _ = Uids::::clear_prefix(netuid, u32::MAX, None); + let _ = Keys::::clear_prefix(netuid, u32::MAX, None); + let _ = Bonds::::clear_prefix(netuid, u32::MAX, None); + let _ = Weights::::clear_prefix(netuid, u32::MAX, None); + + weight.saturating_accrue(T::DbWeight::get().writes(4)); + + // Remove various network-related parameters + Rank::::remove(netuid); + Trust::::remove(netuid); + Active::::remove(netuid); + Emission::::remove(netuid); + Incentive::::remove(netuid); + Consensus::::remove(netuid); + Dividends::::remove(netuid); + PruningScores::::remove(netuid); + LastUpdate::::remove(netuid); + ValidatorPermit::::remove(netuid); + ValidatorTrust::::remove(netuid); + + weight.saturating_accrue(T::DbWeight::get().writes(11)); + + // Erase network parameters + Tempo::::remove(netuid); + Kappa::::remove(netuid); + Difficulty::::remove(netuid); + MaxAllowedUids::::remove(netuid); + ImmunityPeriod::::remove(netuid); + ActivityCutoff::::remove(netuid); + EmissionValues::::remove(netuid); + MaxWeightsLimit::::remove(netuid); + MinAllowedWeights::::remove(netuid); + RegistrationsThisInterval::::remove(netuid); + POWRegistrationsThisInterval::::remove(netuid); + BurnRegistrationsThisInterval::::remove(netuid); + + weight.saturating_accrue(T::DbWeight::get().writes(12)); + + // Update storage version + StorageVersion::new(new_storage_version).put::>(); + weight.saturating_accrue(T::DbWeight::get().writes(1)); + + weight + } else { + info!(target: LOG_TARGET, "Migration to v5 already completed or subnet 3 doesn't exist"); + Weight::zero() + } +} + +// TODO: Add unit tests for this 
migration
+// TODO: Consider adding error handling for storage operations
+// TODO: Verify that all relevant storage items for subnet 3 are removed
diff --git a/pallets/subtensor/src/migrations/migrate_fix_total_coldkey_stake.rs b/pallets/subtensor/src/migrations/migrate_fix_total_coldkey_stake.rs
new file mode 100644
index 000000000..d8534d03a
--- /dev/null
+++ b/pallets/subtensor/src/migrations/migrate_fix_total_coldkey_stake.rs
@@ -0,0 +1,91 @@
+use super::*;
+use alloc::string::String;
+use frame_support::{
+    pallet_prelude::{Identity, OptionQuery},
+    storage_alias,
+    traits::{Get, StorageVersion},
+    weights::Weight,
+};
+use sp_std::vec::Vec;
+
+// TODO (camfairchild): TEST MIGRATION
+pub mod deprecated_loaded_emission_format {
+    use super::*;
+
+    #[storage_alias]
+    pub(super) type LoadedEmission<T: Config> =
+        StorageMap<Pallet<T>, Identity, u16, Vec<(AccountIdOf<T>, u64)>, OptionQuery>;
+}
+
+/// Migrates and fixes the total coldkey stake.
+///
+/// This function iterates through all staking hotkeys, calculates the total stake for each coldkey,
+/// and updates the `TotalColdkeyStake` storage accordingly. The wrapper below skips this work if
+/// the migration has already been recorded in `HasMigrationRun`.
+///
+/// # Returns
+/// The weight of the migration process.
+pub fn do_migrate_fix_total_coldkey_stake<T: Config>() -> Weight {
+    // Initialize the weight with one read operation.
+    let mut weight = T::DbWeight::get().reads(1);
+
+    // Iterate through all staking hotkeys.
+    for (coldkey, hotkey_vec) in StakingHotkeys::<T>::iter() {
+        // Init the zero value.
+        let mut coldkey_stake_sum: u64 = 0;
+        weight = weight.saturating_add(T::DbWeight::get().reads(1));
+
+        // Calculate the total stake for the current coldkey.
+        for hotkey in hotkey_vec {
+            // Can't fail on retrieval.
+            coldkey_stake_sum =
+                coldkey_stake_sum.saturating_add(Stake::<T>::get(hotkey, coldkey.clone()));
+            weight = weight.saturating_add(T::DbWeight::get().reads(1));
+        }
+        // Update the `TotalColdkeyStake` storage with the calculated stake sum.
+        // Can't fail on insert.
+        TotalColdkeyStake::<T>::insert(coldkey.clone(), coldkey_stake_sum);
+        weight = weight.saturating_add(T::DbWeight::get().writes(1));
+    }
+    weight
+}
+// Public migrate function to be called by lib.rs on upgrade.
+pub fn migrate_fix_total_coldkey_stake<T: Config>() -> Weight {
+    let migration_name = b"fix_total_coldkey_stake_v7".to_vec();
+
+    // Initialize the weight with one read operation.
+    let mut weight = T::DbWeight::get().reads(1);
+
+    // Check if the migration has already run
+    if HasMigrationRun::<T>::get(&migration_name) {
+        log::info!(
+            "Migration '{:?}' has already run. Skipping.",
+            migration_name
+        );
+        return Weight::zero();
+    }
+
+    log::info!(
+        "Running migration '{}'",
+        String::from_utf8_lossy(&migration_name)
+    );
+
+    // Run the migration
+    weight = weight.saturating_add(do_migrate_fix_total_coldkey_stake::<T>());
+
+    // Mark the migration as completed
+    HasMigrationRun::<T>::insert(&migration_name, true);
+    weight = weight.saturating_add(T::DbWeight::get().writes(1));
+
+    // Set the storage version to 7
+    StorageVersion::new(7).put::<Pallet<T>>();
+    weight = weight.saturating_add(T::DbWeight::get().writes(1));
+
+    log::info!(
+        "Migration '{:?}' completed. Storage version set to 7.",
+        String::from_utf8_lossy(&migration_name)
+    );
+
+    // Return the migration weight.
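The recomputation performed by this migration can be modelled without the pallet's storage. A simplified sketch that folds a `(hotkey, coldkey) -> stake` table into per-coldkey totals; string keys and a `BTreeMap` are stand-ins, and the real migration walks `StakingHotkeys` and reads `Stake` per hotkey rather than iterating the table directly.

```rust
use std::collections::BTreeMap;

/// Fold a (hotkey, coldkey) -> stake table into per-coldkey totals.
fn recompute_coldkey_totals(stake: &BTreeMap<(String, String), u64>) -> BTreeMap<String, u64> {
    let mut totals: BTreeMap<String, u64> = BTreeMap::new();
    for ((_hotkey, coldkey), amount) in stake {
        let entry = totals.entry(coldkey.clone()).or_insert(0);
        *entry = entry.saturating_add(*amount);
    }
    totals
}

fn main() {
    let mut stake = BTreeMap::new();
    stake.insert(("hot1".to_string(), "cold1".to_string()), 100u64);
    stake.insert(("hot2".to_string(), "cold1".to_string()), 50);
    stake.insert(("hot3".to_string(), "cold2".to_string()), 7);
    let totals = recompute_coldkey_totals(&stake);
    assert_eq!(totals["cold1"], 150);
    assert_eq!(totals["cold2"], 7);
}
```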
+ weight +} diff --git a/pallets/subtensor/src/migrations/migrate_init_total_issuance.rs b/pallets/subtensor/src/migrations/migrate_init_total_issuance.rs new file mode 100644 index 000000000..a488771c5 --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_init_total_issuance.rs @@ -0,0 +1,83 @@ +use super::*; +use frame_support::pallet_prelude::OptionQuery; +use frame_support::{pallet_prelude::Identity, storage_alias}; +use sp_std::vec::Vec; + +// TODO: Implement comprehensive tests for this migration + +/// Module containing deprecated storage format for LoadedEmission +pub mod deprecated_loaded_emission_format { + use super::*; + + #[storage_alias] + pub(super) type LoadedEmission = + StorageMap, Identity, u16, Vec<(AccountIdOf, u64)>, OptionQuery>; +} + +pub mod initialise_total_issuance { + use frame_support::pallet_prelude::Weight; + use frame_support::traits::{fungible, OnRuntimeUpgrade}; + use sp_core::Get; + + use crate::*; + + pub struct Migration(PhantomData); + + impl OnRuntimeUpgrade for Migration { + /// Performs the migration to initialize and update the total issuance. + /// + /// This function does the following: + /// 1. Calculates the total locked tokens across all subnets + /// 2. Retrieves the total account balances and total stake + /// 3. Computes and updates the new total issuance + /// + /// Returns the weight of the migration operation. + fn on_runtime_upgrade() -> Weight { + // Calculate the total locked tokens across all subnets + let subnets_len = crate::SubnetLocked::::iter().count() as u64; + let total_subnet_locked: u64 = + crate::SubnetLocked::::iter().fold(0, |acc, (_, v)| acc.saturating_add(v)); + + // Retrieve the total balance of all accounts + let total_account_balances = <::Currency as fungible::Inspect< + ::AccountId, + >>::total_issuance(); + + // Get the total stake from the system + let total_stake = crate::TotalStake::::get(); + + // Retrieve the previous total issuance for logging purposes + let prev_total_issuance = crate::TotalIssuance::::get(); + + // Calculate the new total issuance + let new_total_issuance = total_account_balances + .saturating_add(total_stake) + .saturating_add(total_subnet_locked); + + // Update the total issuance in storage + crate::TotalIssuance::::put(new_total_issuance); + + // Log the change in total issuance + log::info!( + "Subtensor Pallet Total Issuance Updated: previous: {:?}, new: {:?}", + prev_total_issuance, + new_total_issuance + ); + + // Return the weight of the operation + // We performed subnets_len + 5 reads and 1 write + ::DbWeight::get() + .reads_writes(subnets_len.saturating_add(5), 1) + } + + /// Performs post-upgrade checks to ensure the migration was successful. + /// + /// This function is only compiled when the "try-runtime" feature is enabled. 
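The issuance figure computed by this migration (and by `migrate_total_issuance` later in this patch) is just the saturating sum of three quantities. A minimal stand-alone sketch, with plain `u64` values in place of the storage and currency reads; the figures in `main` are illustrative only.

```rust
/// New total issuance = total account balances + total stake + total subnet-locked funds.
fn recompute_total_issuance(account_balances: u64, total_stake: u64, subnet_locked: u64) -> u64 {
    account_balances
        .saturating_add(total_stake)
        .saturating_add(subnet_locked)
}

fn main() {
    assert_eq!(recompute_total_issuance(10_000, 1_000, 250), 11_250);
}
```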
+ #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + // Verify that all accounting invariants are satisfied after the migration + crate::Pallet::::check_accounting_invariants()?; + Ok(()) + } + } +} diff --git a/pallets/subtensor/src/migrations/migrate_populate_owned_hotkeys.rs b/pallets/subtensor/src/migrations/migrate_populate_owned_hotkeys.rs new file mode 100644 index 000000000..e8fd212ec --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_populate_owned_hotkeys.rs @@ -0,0 +1,82 @@ +use super::*; +use frame_support::{ + pallet_prelude::{Identity, OptionQuery}, + storage_alias, + traits::Get, + weights::Weight, +}; +use log::info; +use sp_std::vec::Vec; + +const LOG_TARGET_1: &str = "migrate_populate_owned"; + +/// Module containing deprecated storage format for LoadedEmission +pub mod deprecated_loaded_emission_format { + use super::*; + + #[storage_alias] + pub(super) type LoadedEmission = + StorageMap, Identity, u16, Vec<(AccountIdOf, u64)>, OptionQuery>; +} + +/// Migrate the OwnedHotkeys map to the new storage format +pub fn migrate_populate_owned() -> Weight { + // Setup migration weight + let mut weight = T::DbWeight::get().reads(1); + let migration_name = "Populate OwnedHotkeys map"; + + // Check if this migration is needed (if OwnedHotkeys map is empty) + let migrate = OwnedHotkeys::::iter().next().is_none(); + + // Only runs if the migration is needed + if migrate { + info!(target: LOG_TARGET_1, ">>> Starting Migration: {}", migration_name); + + let mut longest_hotkey_vector: usize = 0; + let mut longest_coldkey: Option = None; + let mut keys_touched: u64 = 0; + let mut storage_reads: u64 = 0; + let mut storage_writes: u64 = 0; + + // Iterate through all Owner entries + Owner::::iter().for_each(|(hotkey, coldkey)| { + storage_reads = storage_reads.saturating_add(1); // Read from Owner storage + let mut hotkeys = OwnedHotkeys::::get(&coldkey); + storage_reads = storage_reads.saturating_add(1); // Read from OwnedHotkeys storage + + // Add the hotkey if it's not already in the vector + if !hotkeys.contains(&hotkey) { + hotkeys.push(hotkey); + keys_touched = keys_touched.saturating_add(1); + + // Update longest hotkey vector info + if longest_hotkey_vector < hotkeys.len() { + longest_hotkey_vector = hotkeys.len(); + longest_coldkey = Some(coldkey.clone()); + } + + // Update the OwnedHotkeys storage + OwnedHotkeys::::insert(&coldkey, hotkeys); + storage_writes = storage_writes.saturating_add(1); // Write to OwnedHotkeys storage + } + + // Accrue weight for reads and writes + weight = weight.saturating_add(T::DbWeight::get().reads_writes(2, 1)); + }); + + // Log migration results + info!( + target: LOG_TARGET_1, + "Migration {} finished. 
Keys touched: {}, Longest hotkey vector: {}, Storage reads: {}, Storage writes: {}",
+            migration_name, keys_touched, longest_hotkey_vector, storage_reads, storage_writes
+        );
+        if let Some(c) = longest_coldkey {
+            info!(target: LOG_TARGET_1, "Longest hotkey vector is controlled by: {:?}", c);
+        }
+
+        weight
+    } else {
+        info!(target: LOG_TARGET_1, "Migration {} already done!", migration_name);
+        Weight::zero()
+    }
+}
diff --git a/pallets/subtensor/src/migrations/migrate_populate_staking_hotkeys.rs b/pallets/subtensor/src/migrations/migrate_populate_staking_hotkeys.rs
new file mode 100644
index 000000000..0245ae3c9
--- /dev/null
+++ b/pallets/subtensor/src/migrations/migrate_populate_staking_hotkeys.rs
@@ -0,0 +1,83 @@
+use super::*;
+use frame_support::{
+    pallet_prelude::{Identity, OptionQuery},
+    storage_alias,
+    traits::Get,
+    weights::Weight,
+};
+use log::info;
+use sp_std::vec::Vec;
+const LOG_TARGET_1: &str = "migrate_populate_staking_hotkeys";
+
+/// Module containing deprecated storage format for LoadedEmission
+pub mod deprecated_loaded_emission_format {
+    use super::*;
+
+    #[storage_alias]
+    pub(super) type LoadedEmission<T: Config> =
+        StorageMap<Pallet<T>, Identity, u16, Vec<(AccountIdOf<T>, u64)>, OptionQuery>;
+}
+
+/// Populate the StakingHotkeys map from the Stake map
+pub fn migrate_populate_staking_hotkeys<T: Config>() -> Weight {
+    // Setup migration weight
+    let mut weight = T::DbWeight::get().reads(1);
+    let migration_name = "Populate StakingHotkeys map";
+
+    // Check if this migration is needed (if StakingHotkeys map is empty)
+    let migrate = StakingHotkeys::<T>::iter().next().is_none();
+
+    // Only runs if the migration is needed
+    if migrate {
+        info!(target: LOG_TARGET_1, ">>> Starting Migration: {}", migration_name);
+
+        let mut longest_hotkey_vector: usize = 0;
+        let mut longest_coldkey: Option<T::AccountId> = None;
+        let mut keys_touched: u64 = 0;
+        let mut storage_reads: u64 = 0;
+        let mut storage_writes: u64 = 0;
+
+        // Iterate through all Stake entries
+        Stake::<T>::iter().for_each(|(hotkey, coldkey, stake)| {
+            storage_reads = storage_reads.saturating_add(1); // Read from Stake storage
+            if stake > 0 {
+                let mut hotkeys = StakingHotkeys::<T>::get(&coldkey);
+                storage_reads = storage_reads.saturating_add(1); // Read from StakingHotkeys storage
+
+                // Add the hotkey if it's not already in the vector
+                if !hotkeys.contains(&hotkey) {
+                    hotkeys.push(hotkey);
+                    keys_touched = keys_touched.saturating_add(1);
+
+                    // Update longest hotkey vector info
+                    if longest_hotkey_vector < hotkeys.len() {
+                        longest_hotkey_vector = hotkeys.len();
+                        longest_coldkey = Some(coldkey.clone());
+                    }
+
+                    // Update the StakingHotkeys storage
+                    StakingHotkeys::<T>::insert(&coldkey, hotkeys);
+                    storage_writes = storage_writes.saturating_add(1); // Write to StakingHotkeys storage
+                }
+
+                // Accrue weight for reads and writes
+                weight = weight.saturating_add(T::DbWeight::get().reads_writes(2, 1));
+            }
+        });
+
+        // Log migration results
+        info!(
+            target: LOG_TARGET_1,
+            "Migration {} finished.
Keys touched: {}, Longest hotkey vector: {}, Storage reads: {}, Storage writes: {}", + migration_name, keys_touched, longest_hotkey_vector, storage_reads, storage_writes + ); + if let Some(c) = longest_coldkey { + info!(target: LOG_TARGET_1, "Longest hotkey vector is controlled by: {:?}", c); + } + + weight + } else { + info!(target: LOG_TARGET_1, "Migration {} already done!", migration_name); + Weight::zero() + } +} diff --git a/pallets/subtensor/src/migrations/migrate_to_v1_separate_emission.rs b/pallets/subtensor/src/migrations/migrate_to_v1_separate_emission.rs new file mode 100644 index 000000000..5d28337dc --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_to_v1_separate_emission.rs @@ -0,0 +1,107 @@ +use super::*; +use frame_support::{ + pallet_prelude::*, + storage_alias, + traits::{Get, GetStorageVersion, StorageVersion}, + weights::Weight, +}; +use log::{info, warn}; +use sp_std::vec::Vec; + +/// Constant for logging purposes +const LOG_TARGET: &str = "loadedemissionmigration"; +const LOG_TARGET_1: &str = "fixtotalstakestorage"; + +/// Module containing deprecated storage format +pub mod deprecated_loaded_emission_format { + use super::*; + + type AccountIdOf = ::AccountId; + + #[storage_alias] + pub(super) type LoadedEmission = + StorageMap, Identity, u16, Vec<(AccountIdOf, u64)>, OptionQuery>; +} + +/// Migrates the LoadedEmission storage to a new format +/// +/// # Arguments +/// +/// * `T` - The runtime configuration trait +/// +/// # Returns +/// +/// * `Weight` - The computational weight of this operation +/// +/// # Example +/// +/// ```ignore +/// let weight = migrate_to_v1_separate_emission::(); +/// ``` +pub fn migrate_to_v1_separate_emission() -> Weight { + use deprecated_loaded_emission_format as old; + + // Initialize weight counter + let mut weight = T::DbWeight::get().reads_writes(1, 0); + + // Get current on-chain storage version + let onchain_version = Pallet::::on_chain_storage_version(); + + // Only proceed if current version is less than 1 + if onchain_version < 1 { + info!( + target: LOG_TARGET, + ">>> Updating the LoadedEmission to a new format {:?}", onchain_version + ); + + // Collect all network IDs (netuids) from old LoadedEmission storage + let curr_loaded_emission: Vec = old::LoadedEmission::::iter_keys().collect(); + + // Remove any undecodable entries + for netuid in curr_loaded_emission { + weight.saturating_accrue(T::DbWeight::get().reads(1)); + if old::LoadedEmission::::try_get(netuid).is_err() { + weight.saturating_accrue(T::DbWeight::get().writes(1)); + old::LoadedEmission::::remove(netuid); + warn!( + "Was unable to decode old loaded_emission for netuid {}", + netuid + ); + } + } + + // Translate old storage values to new format + LoadedEmission::::translate::, u64)>, _>( + |netuid: u16, + netuid_emissions: Vec<(AccountIdOf, u64)>| + -> Option, u64, u64)>> { + info!(target: LOG_TARGET, " Do migration of netuid: {:?}...", netuid); + + // Convert old format (server, validator_emission) to new format (server, server_emission, validator_emission) + // Assume all loaded emission is validator emissions + let new_netuid_emissions = netuid_emissions + .into_iter() + .map(|(server, validator_emission)| (server, 0_u64, validator_emission)) + .collect(); + + // Update weight for read and write operations + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + + Some(new_netuid_emissions) + }, + ); + + // Update storage version to 1 + StorageVersion::new(1).put::>(); + weight.saturating_accrue(T::DbWeight::get().writes(1)); + + 
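The format change performed by the `LoadedEmission::translate` call above can be shown in isolation. A self-contained sketch, using `u32` placeholders for account ids:

```rust
/// Old `(server, emission)` tuples become `(server, server_emission, validator_emission)`,
/// with all previously loaded emission treated as validator emission.
fn translate_loaded_emission(old: Vec<(u32, u64)>) -> Vec<(u32, u64, u64)> {
    old.into_iter()
        .map(|(server, validator_emission)| (server, 0_u64, validator_emission))
        .collect()
}

fn main() {
    assert_eq!(
        translate_loaded_emission(vec![(1, 500), (2, 125)]),
        vec![(1, 0, 500), (2, 0, 125)]
    );
}
```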
weight + } else { + info!(target: LOG_TARGET_1, "Migration to v1 already completed!"); + Weight::zero() + } +} + +// TODO: Add unit tests for this migration +// TODO: Consider adding error handling for edge cases +// TODO: Verify that all possible states of the old format are handled correctly diff --git a/pallets/subtensor/src/migrations/migrate_to_v2_fixed_total_stake.rs b/pallets/subtensor/src/migrations/migrate_to_v2_fixed_total_stake.rs new file mode 100644 index 000000000..f3e63b6fd --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_to_v2_fixed_total_stake.rs @@ -0,0 +1,104 @@ +use super::*; +use frame_support::{ + pallet_prelude::*, + storage_alias, + traits::{Get, GetStorageVersion, StorageVersion}, + weights::Weight, +}; +use log::info; +use sp_std::vec::Vec; + +/// Constant for logging purposes +const LOG_TARGET: &str = "fix_total_stake_storage"; + +/// Module containing deprecated storage format +pub mod deprecated_loaded_emission_format { + use super::*; + + #[storage_alias] + pub(super) type LoadedEmission = + StorageMap, Identity, u16, Vec<(AccountIdOf, u64)>, OptionQuery>; +} + +/// Migrates the storage to fix TotalStake and TotalColdkeyStake +/// +/// This function performs the following steps: +/// 1. Resets TotalStake to 0 +/// 2. Resets all TotalColdkeyStake entries to 0 +/// 3. Recalculates TotalStake and TotalColdkeyStake based on the Stake map +/// +/// # Arguments +/// +/// * `T` - The Config trait of the pallet +/// +/// # Returns +/// +/// * `Weight` - The computational weight of this operation +/// +/// # Example +/// +/// ```ignore +/// let weight = migrate_to_v2_fixed_total_stake::(); +/// ``` +pub fn migrate_to_v2_fixed_total_stake() -> Weight { + let new_storage_version = 2; + + // Initialize weight counter + let mut weight = T::DbWeight::get().reads(1); + + // Get current on-chain storage version + let onchain_version = Pallet::::on_chain_storage_version(); + + // Only proceed if current version is less than the new version + if onchain_version < new_storage_version { + info!( + target: LOG_TARGET, + "Fixing the TotalStake and TotalColdkeyStake storage. 
Current version: {:?}", + onchain_version + ); + + // Reset TotalStake to 0 + TotalStake::::put(0); + weight.saturating_accrue(T::DbWeight::get().writes(1)); + + // Reset all TotalColdkeyStake entries to 0 + let total_coldkey_stake_keys = TotalColdkeyStake::::iter_keys().collect::>(); + for coldkey in total_coldkey_stake_keys { + weight.saturating_accrue(T::DbWeight::get().reads(1)); + TotalColdkeyStake::::insert(coldkey, 0); + weight.saturating_accrue(T::DbWeight::get().writes(1)); + } + + // Recalculate TotalStake and TotalColdkeyStake based on the Stake map + for (_, coldkey, stake) in Stake::::iter() { + weight.saturating_accrue(T::DbWeight::get().reads(1)); + + // Update TotalColdkeyStake + let mut total_coldkey_stake = TotalColdkeyStake::::get(coldkey.clone()); + weight.saturating_accrue(T::DbWeight::get().reads(1)); + total_coldkey_stake = total_coldkey_stake.saturating_add(stake); + TotalColdkeyStake::::insert(coldkey, total_coldkey_stake); + weight.saturating_accrue(T::DbWeight::get().writes(1)); + + // Update TotalStake + let mut total_stake = TotalStake::::get(); + weight.saturating_accrue(T::DbWeight::get().reads(1)); + total_stake = total_stake.saturating_add(stake); + TotalStake::::put(total_stake); + weight.saturating_accrue(T::DbWeight::get().writes(1)); + } + + // Update storage version to prevent re-running this migration + StorageVersion::new(new_storage_version).put::>(); + weight.saturating_accrue(T::DbWeight::get().writes(1)); + + weight + } else { + info!(target: LOG_TARGET, "Migration to v2 already completed"); + Weight::zero() + } +} + +// TODO: Add unit tests for this migration function +// TODO: Consider adding error handling for potential arithmetic overflow +// TODO: Optimize the iteration over Stake map if possible to reduce database reads diff --git a/pallets/subtensor/src/migrations/migrate_total_issuance.rs b/pallets/subtensor/src/migrations/migrate_total_issuance.rs new file mode 100644 index 000000000..9a4085379 --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_total_issuance.rs @@ -0,0 +1,89 @@ +use super::*; +use frame_support::pallet_prelude::OptionQuery; +use frame_support::{ + pallet_prelude::Identity, + storage_alias, + traits::{fungible::Inspect, Get, GetStorageVersion, StorageVersion}, + weights::Weight, +}; +use sp_std::vec::Vec; + +// TODO: Implement comprehensive tests for this migration + +/// Module containing deprecated storage format for LoadedEmission +pub mod deprecated_loaded_emission_format { + use super::*; + + #[storage_alias] + pub(super) type LoadedEmission = + StorageMap, Identity, u16, Vec<(AccountIdOf, u64)>, OptionQuery>; +} + +/// Performs migration to update the total issuance based on the sum of stakes and total balances. +/// +/// This migration is applicable only if the current storage version is 5, after which it updates the storage version to 6. +/// +/// # Arguments +/// +/// * `test` - A boolean flag to force migration execution for testing purposes. +/// +/// # Returns +/// +/// * `Weight` - The computational weight of this operation. 
+/// +/// # Example +/// +/// ```ignore +/// let weight = migrate_total_issuance::(false); +/// ``` +pub fn migrate_total_issuance(test: bool) -> Weight { + // Initialize migration weight with the cost of reading the storage version + let mut weight = T::DbWeight::get().reads(1); + + // Execute migration if the current storage version is 5 or if in test mode + if Pallet::::on_chain_storage_version() == StorageVersion::new(5) || test { + // Calculate the sum of all stake values + let stake_sum: u64 = + Stake::::iter().fold(0, |acc, (_, _, stake)| acc.saturating_add(stake)); + // Add weight for reading all stake entries + weight = weight.saturating_add(T::DbWeight::get().reads(Stake::::iter().count() as u64)); + + // Calculate the sum of all locked subnet values + let locked_sum: u64 = + SubnetLocked::::iter().fold(0, |acc, (_, locked)| acc.saturating_add(locked)); + // Add weight for reading all subnet locked entries + weight = weight + .saturating_add(T::DbWeight::get().reads(SubnetLocked::::iter().count() as u64)); + + // Retrieve the total balance sum + let total_balance = T::Currency::total_issuance(); + // Add weight for reading total issuance + weight = weight.saturating_add(T::DbWeight::get().reads(1)); + + // Attempt to convert total balance to u64 + match TryInto::::try_into(total_balance) { + Ok(total_balance_sum) => { + // Compute the total issuance value + let total_issuance_value: u64 = stake_sum + .saturating_add(total_balance_sum) + .saturating_add(locked_sum); + + // Update the total issuance in storage + TotalIssuance::::put(total_issuance_value); + + // Update the storage version to 6 + StorageVersion::new(6).put::>(); + + // Add weight for writing total issuance and storage version + weight = weight.saturating_add(T::DbWeight::get().writes(2)); + } + Err(_) => { + // TODO: Implement proper error handling for conversion failure + log::error!("Failed to convert total balance to u64, migration aborted"); + } + } + } + + // Return the computed weight of the migration process + weight +} diff --git a/pallets/subtensor/src/migrations/migrate_transfer_ownership_to_foundation.rs b/pallets/subtensor/src/migrations/migrate_transfer_ownership_to_foundation.rs new file mode 100644 index 000000000..8d1bd437c --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_transfer_ownership_to_foundation.rs @@ -0,0 +1,87 @@ +use super::*; +use frame_support::{ + pallet_prelude::{Identity, OptionQuery}, + storage_alias, + traits::{GetStorageVersion, StorageVersion}, + weights::Weight, +}; +use log::info; +use sp_core::Get; +use sp_std::vec::Vec; + +/// Constant for logging purposes +const LOG_TARGET: &str = "migrate_transfer_ownership"; + +/// Module containing deprecated storage format +pub mod deprecated_loaded_emission_format { + use super::*; + + #[storage_alias] + pub(super) type LoadedEmission = + StorageMap, Identity, u16, Vec<(AccountIdOf, u64)>, OptionQuery>; +} + +/// Migrates subnet ownership to the foundation and updates related storage +/// +/// # Arguments +/// +/// * `coldkey` - 32-byte array representing the foundation's coldkey +/// +/// # Returns +/// +/// * `Weight` - The computational weight of this operation +/// +/// # Example +/// +/// ```ignore +/// let foundation_coldkey = [0u8; 32]; // Replace with actual foundation coldkey +/// let weight = migrate_transfer_ownership_to_foundation::(foundation_coldkey); +/// ``` +pub fn migrate_transfer_ownership_to_foundation(coldkey: [u8; 32]) -> Weight { + let new_storage_version = 3; + + // Initialize weight counter + let mut 
+
+    // Get current on-chain storage version
+    let onchain_version = Pallet::<T>::on_chain_storage_version();
+
+    // Only proceed if current version is less than the new version
+    if onchain_version < new_storage_version {
+        info!(
+            target: LOG_TARGET,
+            "Migrating subnet 1 and 11 to foundation control. Current version: {:?}",
+            onchain_version
+        );
+
+        // Decode the foundation's coldkey into an AccountId
+        // TODO: Consider error handling for decoding failure
+        let coldkey_account: T::AccountId = T::AccountId::decode(&mut &coldkey[..])
+            .expect("coldkey should be a valid 32-byte array");
+        info!(target: LOG_TARGET, "Foundation coldkey: {:?}", coldkey_account);
+
+        // Get the current block number
+        let current_block = Pallet::<T>::get_current_block_as_u64();
+        weight.saturating_accrue(T::DbWeight::get().reads(1));
+
+        // Transfer ownership of subnets 1 and 11 to the foundation
+        SubnetOwner::<T>::insert(1, coldkey_account.clone());
+        SubnetOwner::<T>::insert(11, coldkey_account);
+
+        // Set the registration time for subnet 1 to extend immunity period
+        NetworkRegisteredAt::<T>::insert(1, current_block.saturating_add(13 * 7200));
+        // Set the registration time for subnet 11 to the current block
+        NetworkRegisteredAt::<T>::insert(11, current_block);
+
+        weight.saturating_accrue(T::DbWeight::get().writes(4));
+
+        // Update the storage version to prevent re-running this migration
+        StorageVersion::new(new_storage_version).put::<Pallet<T>>();
+        weight.saturating_accrue(T::DbWeight::get().writes(1));
+
+        weight
+    } else {
+        info!(target: LOG_TARGET, "Migration to v3 already completed");
+        Weight::zero()
+    }
+}
diff --git a/pallets/subtensor/src/migrations/mod.rs b/pallets/subtensor/src/migrations/mod.rs
new file mode 100644
index 000000000..6036b23e0
--- /dev/null
+++ b/pallets/subtensor/src/migrations/mod.rs
@@ -0,0 +1,13 @@
+use super::*;
+pub mod migrate_chain_identity;
+pub mod migrate_create_root_network;
+pub mod migrate_delete_subnet_21;
+pub mod migrate_delete_subnet_3;
+pub mod migrate_fix_total_coldkey_stake;
+pub mod migrate_init_total_issuance;
+pub mod migrate_populate_owned_hotkeys;
+pub mod migrate_populate_staking_hotkeys;
+pub mod migrate_to_v1_separate_emission;
+pub mod migrate_to_v2_fixed_total_stake;
+pub mod migrate_total_issuance;
+pub mod migrate_transfer_ownership_to_foundation;
diff --git a/pallets/subtensor/src/delegate_info.rs b/pallets/subtensor/src/rpc_info/delegate_info.rs
similarity index 99%
rename from pallets/subtensor/src/delegate_info.rs
rename to pallets/subtensor/src/rpc_info/delegate_info.rs
index 56b25d230..a41b6e17e 100644
--- a/pallets/subtensor/src/delegate_info.rs
+++ b/pallets/subtensor/src/rpc_info/delegate_info.rs
@@ -148,7 +148,7 @@ impl<T: Config> Pallet<T> {
             }
         }
 
-        log::info!(
+        log::debug!(
            "Total delegated stake for coldkey {:?}: {}",
            coldkey,
            total_delegated
diff --git a/pallets/subtensor/src/rpc_info/mod.rs b/pallets/subtensor/src/rpc_info/mod.rs
new file mode 100644
index 000000000..7d050b601
--- /dev/null
+++ b/pallets/subtensor/src/rpc_info/mod.rs
@@ -0,0 +1,5 @@
+use super::*;
+pub mod delegate_info;
+pub mod neuron_info;
+pub mod stake_info;
+pub mod subnet_info;
diff --git a/pallets/subtensor/src/neuron_info.rs b/pallets/subtensor/src/rpc_info/neuron_info.rs
similarity index 96%
rename from pallets/subtensor/src/neuron_info.rs
rename to pallets/subtensor/src/rpc_info/neuron_info.rs
index a4b58d666..cadd4b6e3 100644
--- a/pallets/subtensor/src/neuron_info.rs
+++ b/pallets/subtensor/src/rpc_info/neuron_info.rs
@@ -117,14 +117,10 @@ impl<T: Config> Pallet<T> {
                 }
             })
             .collect::<Vec<(Compact<u16>, Compact<u16>)>>();
-
-        let stake: Vec<(T::AccountId, Compact<u64>)> =
-            <Stake<T> as IterableStorageDoubleMap<T::AccountId, T::AccountId, u64>>::iter_prefix(
-                hotkey.clone(),
-            )
-            .map(|(coldkey, stake)| (coldkey, stake.into()))
-            .collect();
-
+        let stake: Vec<(T::AccountId, Compact<u64>)> = vec![(
+            coldkey.clone(),
+            Self::get_stake_for_hotkey_on_subnet(&hotkey, netuid).into(),
+        )];
         let neuron = NeuronInfo {
             hotkey: hotkey.clone(),
             coldkey: coldkey.clone(),
diff --git a/pallets/subtensor/src/stake_info.rs b/pallets/subtensor/src/rpc_info/stake_info.rs
similarity index 100%
rename from pallets/subtensor/src/stake_info.rs
rename to pallets/subtensor/src/rpc_info/stake_info.rs
diff --git a/pallets/subtensor/src/subnet_info.rs b/pallets/subtensor/src/rpc_info/subnet_info.rs
similarity index 68%
rename from pallets/subtensor/src/subnet_info.rs
rename to pallets/subtensor/src/rpc_info/subnet_info.rs
index 4e9e756a0..9b22e0401 100644
--- a/pallets/subtensor/src/subnet_info.rs
+++ b/pallets/subtensor/src/rpc_info/subnet_info.rs
@@ -27,6 +27,30 @@ pub struct SubnetInfo<T: Config> {
     owner: T::AccountId,
 }
 
+#[freeze_struct("65f931972fa13222")]
+#[derive(Decode, Encode, PartialEq, Eq, Clone, Debug)]
+pub struct SubnetInfov2<T: Config> {
+    netuid: Compact<u16>,
+    rho: Compact<u16>,
+    kappa: Compact<u16>,
+    difficulty: Compact<u64>,
+    immunity_period: Compact<u16>,
+    max_allowed_validators: Compact<u16>,
+    min_allowed_weights: Compact<u16>,
+    max_weights_limit: Compact<u16>,
+    scaling_law_power: Compact<u16>,
+    subnetwork_n: Compact<u16>,
+    max_allowed_uids: Compact<u16>,
+    blocks_since_last_step: Compact<u64>,
+    tempo: Compact<u16>,
+    network_modality: Compact<u16>,
+    network_connect: Vec<[u16; 2]>,
+    emission_values: Compact<u64>,
+    burn: Compact<u64>,
+    owner: T::AccountId,
+    identity: Option<SubnetIdentityOf>,
+}
+
 #[freeze_struct("55b472510f10e76a")]
 #[derive(Decode, Encode, PartialEq, Eq, Clone, Debug)]
 pub struct SubnetHyperparams {
@@ -80,7 +104,6 @@ impl<T: Config> Pallet<T> {
         let network_modality = <NetworkModality<T>>::get(netuid);
         let emission_values = Self::get_emission_value(netuid);
         let burn: Compact<u64> = Self::get_burn_as_u64(netuid).into();
-
         // DEPRECATED
         let network_connect: Vec<[u16; 2]> = Vec::<[u16; 2]>::new();
         // DEPRECATED for ( _netuid_, con_req) in < NetworkConnect<T> as IterableStorageDoubleMap<u16, u16, u16> >::iter_prefix(netuid) {
@@ -131,6 +154,77 @@ impl<T: Config> Pallet<T> {
         subnets_info
     }
 
+    pub fn get_subnet_info_v2(netuid: u16) -> Option<SubnetInfov2<T>> {
+        if !Self::if_subnet_exist(netuid) {
+            return None;
+        }
+
+        let rho = Self::get_rho(netuid);
+        let kappa = Self::get_kappa(netuid);
+        let difficulty: Compact<u64> = Self::get_difficulty_as_u64(netuid).into();
+        let immunity_period = Self::get_immunity_period(netuid);
+        let max_allowed_validators = Self::get_max_allowed_validators(netuid);
+        let min_allowed_weights = Self::get_min_allowed_weights(netuid);
+        let max_weights_limit = Self::get_max_weight_limit(netuid);
+        let scaling_law_power = Self::get_scaling_law_power(netuid);
+        let subnetwork_n = Self::get_subnetwork_n(netuid);
+        let max_allowed_uids = Self::get_max_allowed_uids(netuid);
+        let blocks_since_last_step = Self::get_blocks_since_last_step(netuid);
+        let tempo = Self::get_tempo(netuid);
+        let network_modality = <NetworkModality<T>>::get(netuid);
+        let emission_values = Self::get_emission_value(netuid);
+        let burn: Compact<u64> = Self::get_burn_as_u64(netuid).into();
+        let identity: Option<SubnetIdentityOf> = SubnetIdentities::<T>::get(netuid);
+
+        // DEPRECATED
+        let network_connect: Vec<[u16; 2]> = Vec::<[u16; 2]>::new();
+        // DEPRECATED for ( _netuid_, con_req) in < NetworkConnect<T> as IterableStorageDoubleMap<u16, u16, u16> >::iter_prefix(netuid) {
+        //     network_connect.push([_netuid_, con_req]);
+        // }
+
+        Some(SubnetInfov2 {
+            rho: 
rho.into(), + kappa: kappa.into(), + difficulty, + immunity_period: immunity_period.into(), + netuid: netuid.into(), + max_allowed_validators: max_allowed_validators.into(), + min_allowed_weights: min_allowed_weights.into(), + max_weights_limit: max_weights_limit.into(), + scaling_law_power: scaling_law_power.into(), + subnetwork_n: subnetwork_n.into(), + max_allowed_uids: max_allowed_uids.into(), + blocks_since_last_step: blocks_since_last_step.into(), + tempo: tempo.into(), + network_modality: network_modality.into(), + network_connect, + emission_values: emission_values.into(), + burn, + owner: Self::get_subnet_owner(netuid), + identity, + }) + } + pub fn get_subnets_info_v2() -> Vec>> { + let mut subnet_netuids = Vec::::new(); + let mut max_netuid: u16 = 0; + for (netuid, added) in as IterableStorageMap>::iter() { + if added { + subnet_netuids.push(netuid); + if netuid > max_netuid { + max_netuid = netuid; + } + } + } + + let mut subnets_info = Vec::>>::new(); + for netuid_ in 0..=max_netuid { + if subnet_netuids.contains(&netuid_) { + subnets_info.push(Self::get_subnet_info(netuid_)); + } + } + + subnets_info + } pub fn get_subnet_hyperparams(netuid: u16) -> Option { if !Self::if_subnet_exist(netuid) { return None; diff --git a/pallets/subtensor/src/staking.rs b/pallets/subtensor/src/staking.rs deleted file mode 100644 index 199234a30..000000000 --- a/pallets/subtensor/src/staking.rs +++ /dev/null @@ -1,871 +0,0 @@ -use super::*; -use frame_support::{ - storage::IterableStorageDoubleMap, - traits::{ - tokens::{ - fungible::{Balanced as _, Inspect as _, Mutate as _}, - Fortitude, Precision, Preservation, - }, - Imbalance, - }, -}; - -impl Pallet { - /// ---- The implementation for the extrinsic become_delegate: signals that this hotkey allows delegated stake. - /// - /// # Args: - /// * 'origin': (RuntimeOrigin): - /// - The signature of the caller's coldkey. - /// - /// * 'hotkey' (T::AccountId): - /// - The hotkey we are delegating (must be owned by the coldkey.) - /// - /// * 'take' (u16): - /// - The stake proportion that this hotkey takes from delegations. - /// - /// # Event: - /// * DelegateAdded; - /// - On successfully setting a hotkey as a delegate. - /// - /// # Raises: - /// * 'NotRegistered': - /// - The hotkey we are delegating is not registered on the network. - /// - /// * 'NonAssociatedColdKey': - /// - The hotkey we are delegating is not owned by the calling coldket. - /// - /// * 'TxRateLimitExceeded': - /// - Thrown if key has hit transaction rate limit - /// - pub fn do_become_delegate( - origin: T::RuntimeOrigin, - hotkey: T::AccountId, - take: u16, - ) -> dispatch::DispatchResult { - // --- 1. We check the coldkey signuture. - let coldkey = ensure_signed(origin)?; - ensure!( - !Self::coldkey_in_arbitration(&coldkey), - Error::::ColdkeyIsInArbitration - ); - log::info!( - "do_become_delegate( origin:{:?} hotkey:{:?}, take:{:?} )", - coldkey, - hotkey, - take - ); - - // --- 2. Ensure we are delegating an known key. - // --- 3. Ensure that the coldkey is the owner. - Self::do_take_checks(&coldkey, &hotkey)?; - - // --- 4. Ensure we are not already a delegate (dont allow changing delegate take.) - ensure!( - !Self::hotkey_is_delegate(&hotkey), - Error::::HotKeyAlreadyDelegate - ); - - // --- 5. 
Ensure we don't exceed tx rate limit - let block: u64 = Self::get_current_block_as_u64(); - ensure!( - !Self::exceeds_tx_rate_limit(Self::get_last_tx_block(&coldkey), block), - Error::::DelegateTxRateLimitExceeded - ); - - // --- 5.1 Ensure take is within the min ..= InitialDefaultTake (18%) range - let min_take = MinTake::::get(); - let max_take = MaxTake::::get(); - ensure!(take >= min_take, Error::::DelegateTakeTooLow); - ensure!(take <= max_take, Error::::DelegateTakeTooHigh); - - // --- 6. Delegate the key. - Self::delegate_hotkey(&hotkey, take); - - // Set last block for rate limiting - Self::set_last_tx_block(&coldkey, block); - Self::set_last_tx_block_delegate_take(&coldkey, block); - - // --- 7. Emit the staking event. - log::info!( - "DelegateAdded( coldkey:{:?}, hotkey:{:?}, take:{:?} )", - coldkey, - hotkey, - take - ); - Self::deposit_event(Event::DelegateAdded(coldkey, hotkey, take)); - - // --- 8. Ok and return. - Ok(()) - } - - /// ---- The implementation for the extrinsic decrease_take - /// - /// # Args: - /// * 'origin': (::RuntimeOrigin): - /// - The signature of the caller's coldkey. - /// - /// * 'hotkey' (T::AccountId): - /// - The hotkey we are delegating (must be owned by the coldkey.) - /// - /// * 'take' (u16): - /// - The stake proportion that this hotkey takes from delegations for subnet ID. - /// - /// # Event: - /// * TakeDecreased; - /// - On successfully setting a decreased take for this hotkey. - /// - /// # Raises: - /// * 'NotRegistered': - /// - The hotkey we are delegating is not registered on the network. - /// - /// * 'NonAssociatedColdKey': - /// - The hotkey we are delegating is not owned by the calling coldket. - /// - /// * 'DelegateTakeTooLow': - /// - The delegate is setting a take which is not lower than the previous. - /// - pub fn do_decrease_take( - origin: T::RuntimeOrigin, - hotkey: T::AccountId, - take: u16, - ) -> dispatch::DispatchResult { - // --- 1. We check the coldkey signature. - let coldkey = ensure_signed(origin)?; - log::info!( - "do_decrease_take( origin:{:?} hotkey:{:?}, take:{:?} )", - coldkey, - hotkey, - take - ); - ensure!( - !Self::coldkey_in_arbitration(&coldkey), - Error::::ColdkeyIsInArbitration - ); - - // --- 2. Ensure we are delegating a known key. - // Ensure that the coldkey is the owner. - Self::do_take_checks(&coldkey, &hotkey)?; - - // --- 3. Ensure we are always strictly decreasing, never increasing take - if let Ok(current_take) = Delegates::::try_get(&hotkey) { - ensure!(take < current_take, Error::::DelegateTakeTooLow); - } - - // --- 3.1 Ensure take is within the min ..= InitialDefaultTake (18%) range - let min_take = MinTake::::get(); - ensure!(take >= min_take, Error::::DelegateTakeTooLow); - - // --- 4. Set the new take value. - Delegates::::insert(hotkey.clone(), take); - - // --- 5. Emit the take value. - log::info!( - "TakeDecreased( coldkey:{:?}, hotkey:{:?}, take:{:?} )", - coldkey, - hotkey, - take - ); - Self::deposit_event(Event::TakeDecreased(coldkey, hotkey, take)); - - // --- 6. Ok and return. - Ok(()) - } - - /// ---- The implementation for the extrinsic increase_take - /// - /// # Args: - /// * 'origin': (::RuntimeOrigin): - /// - The signature of the caller's coldkey. - /// - /// * 'hotkey' (T::AccountId): - /// - The hotkey we are delegating (must be owned by the coldkey.) - /// - /// * 'take' (u16): - /// - The stake proportion that this hotkey takes from delegations for subnet ID. 
- /// - /// # Event: - /// * TakeIncreased; - /// - On successfully setting a increased take for this hotkey. - /// - /// # Raises: - /// * 'NotRegistered': - /// - The hotkey we are delegating is not registered on the network. - /// - /// * 'NonAssociatedColdKey': - /// - The hotkey we are delegating is not owned by the calling coldket. - /// - /// * 'TxRateLimitExceeded': - /// - Thrown if key has hit transaction rate limit - /// - /// * 'DelegateTakeTooLow': - /// - The delegate is setting a take which is not greater than the previous. - /// - pub fn do_increase_take( - origin: T::RuntimeOrigin, - hotkey: T::AccountId, - take: u16, - ) -> dispatch::DispatchResult { - // --- 1. We check the coldkey signature. - let coldkey = ensure_signed(origin)?; - log::info!( - "do_increase_take( origin:{:?} hotkey:{:?}, take:{:?} )", - coldkey, - hotkey, - take - ); - ensure!( - !Self::coldkey_in_arbitration(&coldkey), - Error::::ColdkeyIsInArbitration - ); - - // --- 2. Ensure we are delegating a known key. - // Ensure that the coldkey is the owner. - Self::do_take_checks(&coldkey, &hotkey)?; - - // --- 3. Ensure we are strinctly increasing take - if let Ok(current_take) = Delegates::::try_get(&hotkey) { - ensure!(take > current_take, Error::::DelegateTakeTooLow); - } - - // --- 4. Ensure take is within the min ..= InitialDefaultTake (18%) range - let max_take = MaxTake::::get(); - ensure!(take <= max_take, Error::::DelegateTakeTooHigh); - - // --- 5. Enforce the rate limit (independently on do_add_stake rate limits) - let block: u64 = Self::get_current_block_as_u64(); - ensure!( - !Self::exceeds_tx_delegate_take_rate_limit( - Self::get_last_tx_block_delegate_take(&coldkey), - block - ), - Error::::DelegateTxRateLimitExceeded - ); - - // Set last block for rate limiting - Self::set_last_tx_block_delegate_take(&coldkey, block); - - // --- 6. Set the new take value. - Delegates::::insert(hotkey.clone(), take); - - // --- 7. Emit the take value. - log::info!( - "TakeIncreased( coldkey:{:?}, hotkey:{:?}, take:{:?} )", - coldkey, - hotkey, - take - ); - Self::deposit_event(Event::TakeIncreased(coldkey, hotkey, take)); - - // --- 8. Ok and return. - Ok(()) - } - - /// ---- The implementation for the extrinsic add_stake: Adds stake to a hotkey account. - /// - /// # Args: - /// * 'origin': (RuntimeOrigin): - /// - The signature of the caller's coldkey. - /// - /// * 'hotkey' (T::AccountId): - /// - The associated hotkey account. - /// - /// * 'stake_to_be_added' (u64): - /// - The amount of stake to be added to the hotkey staking account. - /// - /// # Event: - /// * StakeAdded; - /// - On the successfully adding stake to a global account. - /// - /// # Raises: - /// * 'NotEnoughBalanceToStake': - /// - Not enough balance on the coldkey to add onto the global account. - /// - /// * 'NonAssociatedColdKey': - /// - The calling coldkey is not associated with this hotkey. - /// - /// * 'BalanceWithdrawalError': - /// - Errors stemming from transaction pallet. - /// - /// * 'TxRateLimitExceeded': - /// - Thrown if key has hit transaction rate limit - /// - pub fn do_add_stake( - origin: T::RuntimeOrigin, - hotkey: T::AccountId, - stake_to_be_added: u64, - ) -> dispatch::DispatchResult { - // We check that the transaction is signed by the caller and retrieve the T::AccountId coldkey information. 
- let coldkey = ensure_signed(origin)?; - log::info!( - "do_add_stake( origin:{:?} hotkey:{:?}, stake_to_be_added:{:?} )", - coldkey, - hotkey, - stake_to_be_added - ); - ensure!( - !Self::coldkey_in_arbitration(&coldkey), - Error::::ColdkeyIsInArbitration - ); - - // Ensure the callers coldkey has enough stake to perform the transaction. - ensure!( - Self::can_remove_balance_from_coldkey_account(&coldkey, stake_to_be_added), - Error::::NotEnoughBalanceToStake - ); - - // Ensure that the hotkey account exists this is only possible through registration. - ensure!( - Self::hotkey_account_exists(&hotkey), - Error::::HotKeyAccountNotExists - ); - - // Ensure that the hotkey allows delegation or that the hotkey is owned by the calling coldkey. - ensure!( - Self::hotkey_is_delegate(&hotkey) || Self::coldkey_owns_hotkey(&coldkey, &hotkey), - Error::::HotKeyNotDelegateAndSignerNotOwnHotKey - ); - - // Ensure we don't exceed stake rate limit - let stakes_this_interval = - Self::get_stakes_this_interval_for_coldkey_hotkey(&coldkey, &hotkey); - ensure!( - stakes_this_interval < Self::get_target_stakes_per_interval(), - Error::::StakeRateLimitExceeded - ); - - // If this is a nomination stake, check if total stake after adding will be above - // the minimum required stake. - - // If coldkey is not owner of the hotkey, it's a nomination stake. - if !Self::coldkey_owns_hotkey(&coldkey, &hotkey) { - let total_stake_after_add = - Stake::::get(&hotkey, &coldkey).saturating_add(stake_to_be_added); - - ensure!( - total_stake_after_add >= NominatorMinRequiredStake::::get(), - Error::::NomStakeBelowMinimumThreshold - ); - } - - // Ensure the remove operation from the coldkey is a success. - let actual_amount_to_stake = - Self::remove_balance_from_coldkey_account(&coldkey, stake_to_be_added)?; - - // If we reach here, add the balance to the hotkey. - Self::increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, actual_amount_to_stake); - - // Set last block for rate limiting - let block: u64 = Self::get_current_block_as_u64(); - Self::set_last_tx_block(&coldkey, block); - - // Emit the staking event. - Self::set_stakes_this_interval_for_coldkey_hotkey( - &coldkey, - &hotkey, - stakes_this_interval.saturating_add(1), - block, - ); - log::info!( - "StakeAdded( hotkey:{:?}, stake_to_be_added:{:?} )", - hotkey, - actual_amount_to_stake - ); - Self::deposit_event(Event::StakeAdded(hotkey, actual_amount_to_stake)); - - // Ok and return. - Ok(()) - } - - /// ---- The implementation for the extrinsic remove_stake: Removes stake from a hotkey account and adds it onto a coldkey. - /// - /// # Args: - /// * 'origin': (RuntimeOrigin): - /// - The signature of the caller's coldkey. - /// - /// * 'hotkey' (T::AccountId): - /// - The associated hotkey account. - /// - /// * 'stake_to_be_added' (u64): - /// - The amount of stake to be added to the hotkey staking account. - /// - /// # Event: - /// * StakeRemoved; - /// - On the successfully removing stake from the hotkey account. - /// - /// # Raises: - /// * 'NotRegistered': - /// - Thrown if the account we are attempting to unstake from is non existent. - /// - /// * 'NonAssociatedColdKey': - /// - Thrown if the coldkey does not own the hotkey we are unstaking from. - /// - /// * 'NotEnoughStakeToWithdraw': - /// - Thrown if there is not enough stake on the hotkey to withdwraw this amount. 
- /// - /// * 'TxRateLimitExceeded': - /// - Thrown if key has hit transaction rate limit - /// - pub fn do_remove_stake( - origin: T::RuntimeOrigin, - hotkey: T::AccountId, - stake_to_be_removed: u64, - ) -> dispatch::DispatchResult { - // We check the transaction is signed by the caller and retrieve the T::AccountId coldkey information. - let coldkey = ensure_signed(origin)?; - log::info!( - "do_remove_stake( origin:{:?} hotkey:{:?}, stake_to_be_removed:{:?} )", - coldkey, - hotkey, - stake_to_be_removed - ); - ensure!( - !Self::coldkey_in_arbitration(&coldkey), - Error::::ColdkeyIsInArbitration - ); - - // Ensure that the hotkey account exists this is only possible through registration. - ensure!( - Self::hotkey_account_exists(&hotkey), - Error::::HotKeyAccountNotExists - ); - - // Ensure that the hotkey allows delegation or that the hotkey is owned by the calling coldkey. - ensure!( - Self::hotkey_is_delegate(&hotkey) || Self::coldkey_owns_hotkey(&coldkey, &hotkey), - Error::::HotKeyNotDelegateAndSignerNotOwnHotKey - ); - - // Ensure that the stake amount to be removed is above zero. - ensure!(stake_to_be_removed > 0, Error::::StakeToWithdrawIsZero); - - // Ensure that the hotkey has enough stake to withdraw. - ensure!( - Self::has_enough_stake(&coldkey, &hotkey, stake_to_be_removed), - Error::::NotEnoughStakeToWithdraw - ); - - // Ensure we don't exceed stake rate limit - let unstakes_this_interval = - Self::get_stakes_this_interval_for_coldkey_hotkey(&coldkey, &hotkey); - ensure!( - unstakes_this_interval < Self::get_target_stakes_per_interval(), - Error::::UnstakeRateLimitExceeded - ); - - // We remove the balance from the hotkey. - Self::decrease_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake_to_be_removed); - - // We add the balance to the coldkey. If the above fails we will not credit this coldkey. - Self::add_balance_to_coldkey_account(&coldkey, stake_to_be_removed); - - // If the stake is below the minimum, we clear the nomination from storage. - // This only applies to nominator stakes. - // If the coldkey does not own the hotkey, it's a nominator stake. - let new_stake = Self::get_stake_for_coldkey_and_hotkey(&coldkey, &hotkey); - Self::clear_small_nomination_if_required(&hotkey, &coldkey, new_stake); - - // Set last block for rate limiting - let block: u64 = Self::get_current_block_as_u64(); - Self::set_last_tx_block(&coldkey, block); - - // Emit the unstaking event. - Self::set_stakes_this_interval_for_coldkey_hotkey( - &coldkey, - &hotkey, - unstakes_this_interval.saturating_add(1), - block, - ); - log::info!( - "StakeRemoved( hotkey:{:?}, stake_to_be_removed:{:?} )", - hotkey, - stake_to_be_removed - ); - Self::deposit_event(Event::StakeRemoved(hotkey, stake_to_be_removed)); - - // Done and ok. - Ok(()) - } - - // Returns true if the passed hotkey allow delegative staking. - // - pub fn hotkey_is_delegate(hotkey: &T::AccountId) -> bool { - Delegates::::contains_key(hotkey) - } - - // Sets the hotkey as a delegate with take. - // - pub fn delegate_hotkey(hotkey: &T::AccountId, take: u16) { - Delegates::::insert(hotkey, take); - } - - // Returns the total amount of stake in the staking table. - // - pub fn get_total_stake() -> u64 { - TotalStake::::get() - } - - // Increases the total amount of stake by the passed amount. - // - pub fn increase_total_stake(increment: u64) { - TotalStake::::put(Self::get_total_stake().saturating_add(increment)); - } - - // Decreases the total amount of stake by the passed amount. 
- // - pub fn decrease_total_stake(decrement: u64) { - TotalStake::::put(Self::get_total_stake().saturating_sub(decrement)); - } - - // Returns the total amount of stake under a hotkey (delegative or otherwise) - // - pub fn get_total_stake_for_hotkey(hotkey: &T::AccountId) -> u64 { - TotalHotkeyStake::::get(hotkey) - } - - // Returns the total amount of stake held by the coldkey (delegative or otherwise) - // - pub fn get_total_stake_for_coldkey(coldkey: &T::AccountId) -> u64 { - TotalColdkeyStake::::get(coldkey) - } - - // Returns the stake under the cold - hot pairing in the staking table. - // - pub fn get_stake_for_coldkey_and_hotkey(coldkey: &T::AccountId, hotkey: &T::AccountId) -> u64 { - Stake::::get(hotkey, coldkey) - } - - // Retrieves the total stakes for a given hotkey (account ID) for the current staking interval. - pub fn get_stakes_this_interval_for_coldkey_hotkey( - coldkey: &T::AccountId, - hotkey: &T::AccountId, - ) -> u64 { - // Retrieve the configured stake interval duration from storage. - let stake_interval = StakeInterval::::get(); - - // Obtain the current block number as an unsigned 64-bit integer. - let current_block = Self::get_current_block_as_u64(); - - // Fetch the total stakes and the last block number when stakes were made for the hotkey. - let (stakes, block_last_staked_at) = - TotalHotkeyColdkeyStakesThisInterval::::get(coldkey, hotkey); - - // Calculate the block number after which the stakes for the hotkey should be reset. - let block_to_reset_after = block_last_staked_at.saturating_add(stake_interval); - - // If the current block number is beyond the reset point, - // it indicates the end of the staking interval for the hotkey. - if block_to_reset_after <= current_block { - // Reset the stakes for this hotkey for the current interval. - Self::set_stakes_this_interval_for_coldkey_hotkey( - coldkey, - hotkey, - 0, - block_last_staked_at, - ); - // Return 0 as the stake amount since we've just reset the stakes. - return 0; - } - - // If the staking interval has not yet ended, return the current stake amount. - stakes - } - - pub fn get_target_stakes_per_interval() -> u64 { - TargetStakesPerInterval::::get() - } - - // Creates a cold - hot pairing account if the hotkey is not already an active account. - // - pub fn create_account_if_non_existent(coldkey: &T::AccountId, hotkey: &T::AccountId) { - if !Self::hotkey_account_exists(hotkey) { - Stake::::insert(hotkey, coldkey, 0); - Owner::::insert(hotkey, coldkey); - - // Update OwnedHotkeys map - let mut hotkeys = OwnedHotkeys::::get(coldkey); - if !hotkeys.contains(hotkey) { - hotkeys.push(hotkey.clone()); - OwnedHotkeys::::insert(coldkey, hotkeys); - } - - // Update StakingHotkeys map - let mut staking_hotkeys = StakingHotkeys::::get(coldkey); - if !staking_hotkeys.contains(hotkey) { - staking_hotkeys.push(hotkey.clone()); - StakingHotkeys::::insert(coldkey, staking_hotkeys); - } - } - } - - // Returns the coldkey owning this hotkey. This function should only be called for active accounts. - // - pub fn get_owning_coldkey_for_hotkey(hotkey: &T::AccountId) -> T::AccountId { - Owner::::get(hotkey) - } - - // Returns the hotkey take - // - pub fn get_hotkey_take(hotkey: &T::AccountId) -> u16 { - Delegates::::get(hotkey) - } - - // Returns true if the hotkey account has been created. - // - pub fn hotkey_account_exists(hotkey: &T::AccountId) -> bool { - Owner::::contains_key(hotkey) - } - - // Return true if the passed coldkey owns the hotkey. 
- // - pub fn coldkey_owns_hotkey(coldkey: &T::AccountId, hotkey: &T::AccountId) -> bool { - if Self::hotkey_account_exists(hotkey) { - Owner::::get(hotkey) == *coldkey - } else { - false - } - } - - // Returns true if the cold-hot staking account has enough balance to fufil the decrement. - // - pub fn has_enough_stake(coldkey: &T::AccountId, hotkey: &T::AccountId, decrement: u64) -> bool { - Self::get_stake_for_coldkey_and_hotkey(coldkey, hotkey) >= decrement - } - - // Increases the stake on the hotkey account under its owning coldkey. - // - pub fn increase_stake_on_hotkey_account(hotkey: &T::AccountId, increment: u64) { - Self::increase_stake_on_coldkey_hotkey_account( - &Self::get_owning_coldkey_for_hotkey(hotkey), - hotkey, - increment, - ); - } - - // Decreases the stake on the hotkey account under its owning coldkey. - // - pub fn decrease_stake_on_hotkey_account(hotkey: &T::AccountId, decrement: u64) { - Self::decrease_stake_on_coldkey_hotkey_account( - &Self::get_owning_coldkey_for_hotkey(hotkey), - hotkey, - decrement, - ); - } - - // Increases the stake on the cold - hot pairing by increment while also incrementing other counters. - // This function should be called rather than set_stake under account. - // - pub fn increase_stake_on_coldkey_hotkey_account( - coldkey: &T::AccountId, - hotkey: &T::AccountId, - increment: u64, - ) { - TotalColdkeyStake::::insert( - coldkey, - TotalColdkeyStake::::get(coldkey).saturating_add(increment), - ); - TotalHotkeyStake::::insert( - hotkey, - TotalHotkeyStake::::get(hotkey).saturating_add(increment), - ); - Stake::::insert( - hotkey, - coldkey, - Stake::::get(hotkey, coldkey).saturating_add(increment), - ); - TotalStake::::put(TotalStake::::get().saturating_add(increment)); - - // Update StakingHotkeys map - let mut staking_hotkeys = StakingHotkeys::::get(coldkey); - if !staking_hotkeys.contains(hotkey) { - staking_hotkeys.push(hotkey.clone()); - StakingHotkeys::::insert(coldkey, staking_hotkeys); - } - } - - // Decreases the stake on the cold - hot pairing by the decrement while decreasing other counters. - // - pub fn decrease_stake_on_coldkey_hotkey_account( - coldkey: &T::AccountId, - hotkey: &T::AccountId, - decrement: u64, - ) { - TotalColdkeyStake::::mutate(coldkey, |old| *old = old.saturating_sub(decrement)); - TotalHotkeyStake::::insert( - hotkey, - TotalHotkeyStake::::get(hotkey).saturating_sub(decrement), - ); - Stake::::insert( - hotkey, - coldkey, - Stake::::get(hotkey, coldkey).saturating_sub(decrement), - ); - TotalStake::::put(TotalStake::::get().saturating_sub(decrement)); - - // TODO: Tech debt: Remove StakingHotkeys entry if stake goes to 0 - } - - /// Empties the stake associated with a given coldkey-hotkey account pairing. - /// This function retrieves the current stake for the specified coldkey-hotkey pairing, - /// then subtracts this stake amount from both the TotalColdkeyStake and TotalHotkeyStake. - /// It also removes the stake entry for the hotkey-coldkey pairing and adjusts the TotalStake - /// and TotalIssuance by subtracting the removed stake amount. - /// - /// Returns the amount of stake that was removed. - /// - /// # Arguments - /// - /// * `coldkey` - A reference to the AccountId of the coldkey involved in the staking. - /// * `hotkey` - A reference to the AccountId of the hotkey associated with the coldkey. 
- pub fn empty_stake_on_coldkey_hotkey_account( - coldkey: &T::AccountId, - hotkey: &T::AccountId, - ) -> u64 { - let current_stake: u64 = Stake::::get(hotkey, coldkey); - TotalColdkeyStake::::mutate(coldkey, |old| *old = old.saturating_sub(current_stake)); - TotalHotkeyStake::::mutate(hotkey, |stake| *stake = stake.saturating_sub(current_stake)); - Stake::::remove(hotkey, coldkey); - TotalStake::::mutate(|stake| *stake = stake.saturating_sub(current_stake)); - TotalIssuance::::mutate(|issuance| *issuance = issuance.saturating_sub(current_stake)); - - // Update StakingHotkeys map - let mut staking_hotkeys = StakingHotkeys::::get(coldkey); - staking_hotkeys.retain(|h| h != hotkey); - StakingHotkeys::::insert(coldkey, staking_hotkeys); - - current_stake - } - - /// Clears the nomination for an account, if it is a nominator account and the stake is below the minimum required threshold. - pub fn clear_small_nomination_if_required( - hotkey: &T::AccountId, - coldkey: &T::AccountId, - stake: u64, - ) { - // Verify if the account is a nominator account by checking ownership of the hotkey by the coldkey. - if !Self::coldkey_owns_hotkey(coldkey, hotkey) { - // If the stake is below the minimum required, it's considered a small nomination and needs to be cleared. - if stake < Self::get_nominator_min_required_stake() { - // Remove the stake from the nominator account. (this is a more forceful unstake operation which ) - // Actually deletes the staking account. - let cleared_stake = Self::empty_stake_on_coldkey_hotkey_account(coldkey, hotkey); - // Add the stake to the coldkey account. - Self::add_balance_to_coldkey_account(coldkey, cleared_stake); - } - } - } - - /// Clears small nominations for all accounts. - /// - /// WARN: This is an O(N) operation, where N is the number of staking accounts. It should be - /// used with caution. - pub fn clear_small_nominations() { - // Loop through all staking accounts to identify and clear nominations below the minimum stake. - for (hotkey, coldkey, stake) in Stake::::iter() { - Self::clear_small_nomination_if_required(&hotkey, &coldkey, stake); - } - } - - pub fn add_balance_to_coldkey_account( - coldkey: &T::AccountId, - amount: <::Currency as fungible::Inspect<::AccountId>>::Balance, - ) { - // infallible - let _ = T::Currency::deposit(coldkey, amount, Precision::BestEffort); - } - - pub fn set_balance_on_coldkey_account( - coldkey: &T::AccountId, - amount: <::Currency as fungible::Inspect<::AccountId>>::Balance, - ) { - T::Currency::set_balance(coldkey, amount); - } - - pub fn can_remove_balance_from_coldkey_account( - coldkey: &T::AccountId, - amount: <::Currency as fungible::Inspect<::AccountId>>::Balance, - ) -> bool { - let current_balance = Self::get_coldkey_balance(coldkey); - if amount > current_balance { - return false; - } - - // This bit is currently untested. 
@todo - - T::Currency::can_withdraw(coldkey, amount) - .into_result(false) - .is_ok() - } - - pub fn get_coldkey_balance( - coldkey: &T::AccountId, - ) -> <::Currency as fungible::Inspect<::AccountId>>::Balance - { - T::Currency::reducible_balance(coldkey, Preservation::Expendable, Fortitude::Polite) - } - - #[must_use = "Balance must be used to preserve total issuance of token"] - pub fn remove_balance_from_coldkey_account( - coldkey: &T::AccountId, - amount: <::Currency as fungible::Inspect<::AccountId>>::Balance, - ) -> Result { - if amount == 0 { - return Ok(0); - } - - let credit = T::Currency::withdraw( - coldkey, - amount, - Precision::BestEffort, - Preservation::Preserve, - Fortitude::Polite, - ) - .map_err(|_| Error::::BalanceWithdrawalError)? - .peek(); - - if credit == 0 { - return Err(Error::::ZeroBalanceAfterWithdrawn.into()); - } - - Ok(credit) - } - - pub fn kill_coldkey_account( - coldkey: &T::AccountId, - amount: <::Currency as fungible::Inspect<::AccountId>>::Balance, - ) -> Result { - if amount == 0 { - return Ok(0); - } - - let credit = T::Currency::withdraw( - coldkey, - amount, - Precision::Exact, - Preservation::Expendable, - Fortitude::Force, - ) - .map_err(|_| Error::::BalanceWithdrawalError)? - .peek(); - - if credit == 0 { - return Err(Error::::ZeroBalanceAfterWithdrawn.into()); - } - - Ok(credit) - } - - pub fn unstake_all_coldkeys_from_hotkey_account(hotkey: &T::AccountId) { - // Iterate through all coldkeys that have a stake on this hotkey account. - for (delegate_coldkey_i, stake_i) in - as IterableStorageDoubleMap>::iter_prefix( - hotkey, - ) - { - // Remove the stake from the coldkey - hotkey pairing. - Self::decrease_stake_on_coldkey_hotkey_account(&delegate_coldkey_i, hotkey, stake_i); - - // Add the balance to the coldkey account. - Self::add_balance_to_coldkey_account(&delegate_coldkey_i, stake_i); - } - } -} diff --git a/pallets/subtensor/src/staking/add_stake.rs b/pallets/subtensor/src/staking/add_stake.rs new file mode 100644 index 000000000..c9cbd7e04 --- /dev/null +++ b/pallets/subtensor/src/staking/add_stake.rs @@ -0,0 +1,115 @@ +use super::*; + +impl Pallet { + /// ---- The implementation for the extrinsic add_stake: Adds stake to a hotkey account. + /// + /// # Args: + /// * 'origin': (RuntimeOrigin): + /// - The signature of the caller's coldkey. + /// + /// * 'hotkey' (T::AccountId): + /// - The associated hotkey account. + /// + /// * 'stake_to_be_added' (u64): + /// - The amount of stake to be added to the hotkey staking account. + /// + /// # Event: + /// * StakeAdded; + /// - On the successfully adding stake to a global account. + /// + /// # Raises: + /// * 'NotEnoughBalanceToStake': + /// - Not enough balance on the coldkey to add onto the global account. + /// + /// * 'NonAssociatedColdKey': + /// - The calling coldkey is not associated with this hotkey. + /// + /// * 'BalanceWithdrawalError': + /// - Errors stemming from transaction pallet. + /// + /// * 'TxRateLimitExceeded': + /// - Thrown if key has hit transaction rate limit + /// + pub fn do_add_stake( + origin: T::RuntimeOrigin, + hotkey: T::AccountId, + stake_to_be_added: u64, + ) -> dispatch::DispatchResult { + // We check that the transaction is signed by the caller and retrieve the T::AccountId coldkey information. + let coldkey = ensure_signed(origin)?; + log::debug!( + "do_add_stake( origin:{:?} hotkey:{:?}, stake_to_be_added:{:?} )", + coldkey, + hotkey, + stake_to_be_added + ); + + // Ensure the callers coldkey has enough stake to perform the transaction. 
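+        // Illustrative figures (not taken from the runtime): with a reducible coldkey
+        // balance of 1_000_000_000 RAO, a `stake_to_be_added` of 2_000_000_000 RAO fails
+        // this check and the call returns `NotEnoughBalanceToStake` before any storage
+        // is written.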
+ ensure!( + Self::can_remove_balance_from_coldkey_account(&coldkey, stake_to_be_added), + Error::::NotEnoughBalanceToStake + ); + + // Ensure that the hotkey account exists this is only possible through registration. + ensure!( + Self::hotkey_account_exists(&hotkey), + Error::::HotKeyAccountNotExists + ); + + // Ensure that the hotkey allows delegation or that the hotkey is owned by the calling coldkey. + ensure!( + Self::hotkey_is_delegate(&hotkey) || Self::coldkey_owns_hotkey(&coldkey, &hotkey), + Error::::HotKeyNotDelegateAndSignerNotOwnHotKey + ); + + // Ensure we don't exceed stake rate limit + let stakes_this_interval = + Self::get_stakes_this_interval_for_coldkey_hotkey(&coldkey, &hotkey); + ensure!( + stakes_this_interval < Self::get_target_stakes_per_interval(), + Error::::StakeRateLimitExceeded + ); + + // Set the last time the stake increased for nominator drain protection. + LastAddStakeIncrease::::insert(&hotkey, &coldkey, Self::get_current_block_as_u64()); + + // If coldkey is not owner of the hotkey, it's a nomination stake. + if !Self::coldkey_owns_hotkey(&coldkey, &hotkey) { + let total_stake_after_add = + Stake::::get(&hotkey, &coldkey).saturating_add(stake_to_be_added); + + ensure!( + total_stake_after_add >= NominatorMinRequiredStake::::get(), + Error::::NomStakeBelowMinimumThreshold + ); + } + + // Ensure the remove operation from the coldkey is a success. + let actual_amount_to_stake = + Self::remove_balance_from_coldkey_account(&coldkey, stake_to_be_added)?; + + // If we reach here, add the balance to the hotkey. + Self::increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, actual_amount_to_stake); + + // Set last block for rate limiting + let block: u64 = Self::get_current_block_as_u64(); + Self::set_last_tx_block(&coldkey, block); + + // Emit the staking event. + Self::set_stakes_this_interval_for_coldkey_hotkey( + &coldkey, + &hotkey, + stakes_this_interval.saturating_add(1), + block, + ); + log::debug!( + "StakeAdded( hotkey:{:?}, stake_to_be_added:{:?} )", + hotkey, + actual_amount_to_stake + ); + Self::deposit_event(Event::StakeAdded(hotkey, actual_amount_to_stake)); + + // Ok and return. + Ok(()) + } +} diff --git a/pallets/subtensor/src/staking/become_delegate.rs b/pallets/subtensor/src/staking/become_delegate.rs new file mode 100644 index 000000000..fd600453f --- /dev/null +++ b/pallets/subtensor/src/staking/become_delegate.rs @@ -0,0 +1,86 @@ +use super::*; + +impl Pallet { + /// ---- The implementation for the extrinsic become_delegate: signals that this hotkey allows delegated stake. + /// + /// # Args: + /// * 'origin': (RuntimeOrigin): + /// - The signature of the caller's coldkey. + /// + /// * 'hotkey' (T::AccountId): + /// - The hotkey we are delegating (must be owned by the coldkey.) + /// + /// * 'take' (u16): + /// - The stake proportion that this hotkey takes from delegations. + /// + /// # Event: + /// * DelegateAdded; + /// - On successfully setting a hotkey as a delegate. + /// + /// # Raises: + /// * 'NotRegistered': + /// - The hotkey we are delegating is not registered on the network. + /// + /// * 'NonAssociatedColdKey': + /// - The hotkey we are delegating is not owned by the calling coldket. + /// + /// * 'TxRateLimitExceeded': + /// - Thrown if key has hit transaction rate limit + /// + pub fn do_become_delegate( + origin: T::RuntimeOrigin, + hotkey: T::AccountId, + take: u16, + ) -> dispatch::DispatchResult { + // --- 1. We check the coldkey signuture. 
+ let coldkey = ensure_signed(origin)?; + log::debug!( + "do_become_delegate( origin:{:?} hotkey:{:?}, take:{:?} )", + coldkey, + hotkey, + take + ); + + // --- 2. Ensure we are delegating an known key. + // --- 3. Ensure that the coldkey is the owner. + Self::do_take_checks(&coldkey, &hotkey)?; + + // --- 4. Ensure we are not already a delegate (dont allow changing delegate take.) + ensure!( + !Self::hotkey_is_delegate(&hotkey), + Error::::HotKeyAlreadyDelegate + ); + + // --- 5. Ensure we don't exceed tx rate limit + let block: u64 = Self::get_current_block_as_u64(); + ensure!( + !Self::exceeds_tx_rate_limit(Self::get_last_tx_block(&coldkey), block), + Error::::DelegateTxRateLimitExceeded + ); + + // --- 5.1 Ensure take is within the min ..= InitialDefaultDelegateTake (18%) range + let min_take = MinDelegateTake::::get(); + let max_take = MaxDelegateTake::::get(); + ensure!(take >= min_take, Error::::DelegateTakeTooLow); + ensure!(take <= max_take, Error::::DelegateTakeTooHigh); + + // --- 6. Delegate the key. + Self::delegate_hotkey(&hotkey, take); + + // Set last block for rate limiting + Self::set_last_tx_block(&coldkey, block); + Self::set_last_tx_block_delegate_take(&coldkey, block); + + // --- 7. Emit the staking event. + log::debug!( + "DelegateAdded( coldkey:{:?}, hotkey:{:?}, take:{:?} )", + coldkey, + hotkey, + take + ); + Self::deposit_event(Event::DelegateAdded(coldkey, hotkey, take)); + + // --- 8. Ok and return. + Ok(()) + } +} diff --git a/pallets/subtensor/src/staking/decrease_take.rs b/pallets/subtensor/src/staking/decrease_take.rs new file mode 100644 index 000000000..8742f809d --- /dev/null +++ b/pallets/subtensor/src/staking/decrease_take.rs @@ -0,0 +1,72 @@ +use super::*; + +impl Pallet { + /// ---- The implementation for the extrinsic decrease_take + /// + /// # Args: + /// * 'origin': (::RuntimeOrigin): + /// - The signature of the caller's coldkey. + /// + /// * 'hotkey' (T::AccountId): + /// - The hotkey we are delegating (must be owned by the coldkey.) + /// + /// * 'take' (u16): + /// - The stake proportion that this hotkey takes from delegations for subnet ID. + /// + /// # Event: + /// * TakeDecreased; + /// - On successfully setting a decreased take for this hotkey. + /// + /// # Raises: + /// * 'NotRegistered': + /// - The hotkey we are delegating is not registered on the network. + /// + /// * 'NonAssociatedColdKey': + /// - The hotkey we are delegating is not owned by the calling coldket. + /// + /// * 'DelegateTakeTooLow': + /// - The delegate is setting a take which is not lower than the previous. + /// + pub fn do_decrease_take( + origin: T::RuntimeOrigin, + hotkey: T::AccountId, + take: u16, + ) -> dispatch::DispatchResult { + // --- 1. We check the coldkey signature. + let coldkey = ensure_signed(origin)?; + log::debug!( + "do_decrease_take( origin:{:?} hotkey:{:?}, take:{:?} )", + coldkey, + hotkey, + take + ); + + // --- 2. Ensure we are delegating a known key. + // Ensure that the coldkey is the owner. + Self::do_take_checks(&coldkey, &hotkey)?; + + // --- 3. Ensure we are always strictly decreasing, never increasing take + if let Ok(current_take) = Delegates::::try_get(&hotkey) { + ensure!(take < current_take, Error::::DelegateTakeTooLow); + } + + // --- 3.1 Ensure take is within the min ..= InitialDefaultDelegateTake (18%) range + let min_take = MinDelegateTake::::get(); + ensure!(take >= min_take, Error::::DelegateTakeTooLow); + + // --- 4. Set the new take value. + Delegates::::insert(hotkey.clone(), take); + + // --- 5. Emit the take value. 
+ log::debug!( + "TakeDecreased( coldkey:{:?}, hotkey:{:?}, take:{:?} )", + coldkey, + hotkey, + take + ); + Self::deposit_event(Event::TakeDecreased(coldkey, hotkey, take)); + + // --- 6. Ok and return. + Ok(()) + } +} diff --git a/pallets/subtensor/src/staking/helpers.rs b/pallets/subtensor/src/staking/helpers.rs new file mode 100644 index 000000000..0328d94e6 --- /dev/null +++ b/pallets/subtensor/src/staking/helpers.rs @@ -0,0 +1,436 @@ +use super::*; +use frame_support::{ + storage::IterableStorageDoubleMap, + traits::{ + tokens::{ + fungible::{Balanced as _, Inspect as _, Mutate as _}, + Fortitude, Precision, Preservation, + }, + Imbalance, + }, +}; + +impl Pallet { + // Returns true if the passed hotkey allow delegative staking. + // + pub fn hotkey_is_delegate(hotkey: &T::AccountId) -> bool { + Delegates::::contains_key(hotkey) + } + + // Sets the hotkey as a delegate with take. + // + pub fn delegate_hotkey(hotkey: &T::AccountId, take: u16) { + Delegates::::insert(hotkey, take); + } + + // Returns the total amount of stake in the staking table. + // + pub fn get_total_stake() -> u64 { + TotalStake::::get() + } + + // Increases the total amount of stake by the passed amount. + // + pub fn increase_total_stake(increment: u64) { + TotalStake::::put(Self::get_total_stake().saturating_add(increment)); + } + + // Decreases the total amount of stake by the passed amount. + // + pub fn decrease_total_stake(decrement: u64) { + TotalStake::::put(Self::get_total_stake().saturating_sub(decrement)); + } + + // Returns the total amount of stake under a hotkey (delegative or otherwise) + // + pub fn get_total_stake_for_hotkey(hotkey: &T::AccountId) -> u64 { + TotalHotkeyStake::::get(hotkey) + } + + // Returns the total amount of stake held by the coldkey (delegative or otherwise) + // + pub fn get_total_stake_for_coldkey(coldkey: &T::AccountId) -> u64 { + TotalColdkeyStake::::get(coldkey) + } + + // Returns the stake under the cold - hot pairing in the staking table. + // + pub fn get_stake_for_coldkey_and_hotkey(coldkey: &T::AccountId, hotkey: &T::AccountId) -> u64 { + Stake::::get(hotkey, coldkey) + } + + // Retrieves the total stakes for a given hotkey (account ID) for the current staking interval. + pub fn get_stakes_this_interval_for_coldkey_hotkey( + coldkey: &T::AccountId, + hotkey: &T::AccountId, + ) -> u64 { + // Retrieve the configured stake interval duration from storage. + let stake_interval = StakeInterval::::get(); + + // Obtain the current block number as an unsigned 64-bit integer. + let current_block = Self::get_current_block_as_u64(); + + // Fetch the total stakes and the last block number when stakes were made for the hotkey. + let (stakes, block_last_staked_at) = + TotalHotkeyColdkeyStakesThisInterval::::get(coldkey, hotkey); + + // Calculate the block number after which the stakes for the hotkey should be reset. + let block_to_reset_after = block_last_staked_at.saturating_add(stake_interval); + + // If the current block number is beyond the reset point, + // it indicates the end of the staking interval for the hotkey. + if block_to_reset_after <= current_block { + // Reset the stakes for this hotkey for the current interval. + Self::set_stakes_this_interval_for_coldkey_hotkey( + coldkey, + hotkey, + 0, + block_last_staked_at, + ); + // Return 0 as the stake amount since we've just reset the stakes. + return 0; + } + + // If the staking interval has not yet ended, return the current stake amount. 
+ stakes + } + + pub fn get_target_stakes_per_interval() -> u64 { + TargetStakesPerInterval::::get() + } + + // Creates a cold - hot pairing account if the hotkey is not already an active account. + // + pub fn create_account_if_non_existent(coldkey: &T::AccountId, hotkey: &T::AccountId) { + if !Self::hotkey_account_exists(hotkey) { + Stake::::insert(hotkey, coldkey, 0); + Owner::::insert(hotkey, coldkey); + + // Update OwnedHotkeys map + let mut hotkeys = OwnedHotkeys::::get(coldkey); + if !hotkeys.contains(hotkey) { + hotkeys.push(hotkey.clone()); + OwnedHotkeys::::insert(coldkey, hotkeys); + } + + // Update StakingHotkeys map + let mut staking_hotkeys = StakingHotkeys::::get(coldkey); + if !staking_hotkeys.contains(hotkey) { + staking_hotkeys.push(hotkey.clone()); + StakingHotkeys::::insert(coldkey, staking_hotkeys); + } + } + } + + /// Returns the coldkey owning this hotkey. This function should only be called for active accounts. + /// + /// # Arguments + /// * `hotkey` - The hotkey account ID. + /// + /// # Returns + /// The coldkey account ID that owns the hotkey. + pub fn get_owning_coldkey_for_hotkey(hotkey: &T::AccountId) -> T::AccountId { + Owner::::get(hotkey) + } + + /// Returns the hotkey take. + /// + /// # Arguments + /// * `hotkey` - The hotkey account ID. + /// + /// # Returns + /// The take value of the hotkey. + pub fn get_hotkey_take(hotkey: &T::AccountId) -> u16 { + Delegates::::get(hotkey) + } + + /// Returns true if the hotkey account has been created. + /// + /// # Arguments + /// * `hotkey` - The hotkey account ID. + /// + /// # Returns + /// True if the hotkey account exists, false otherwise. + pub fn hotkey_account_exists(hotkey: &T::AccountId) -> bool { + Owner::::contains_key(hotkey) + } + + /// Returns true if the passed coldkey owns the hotkey. + /// + /// # Arguments + /// * `coldkey` - The coldkey account ID. + /// * `hotkey` - The hotkey account ID. + /// + /// # Returns + /// True if the coldkey owns the hotkey, false otherwise. + pub fn coldkey_owns_hotkey(coldkey: &T::AccountId, hotkey: &T::AccountId) -> bool { + if Self::hotkey_account_exists(hotkey) { + Owner::::get(hotkey) == *coldkey + } else { + false + } + } + + /// Returns true if the cold-hot staking account has enough balance to fulfill the decrement. + /// + /// # Arguments + /// * `coldkey` - The coldkey account ID. + /// * `hotkey` - The hotkey account ID. + /// * `decrement` - The amount to be decremented. + /// + /// # Returns + /// True if the account has enough balance, false otherwise. + pub fn has_enough_stake(coldkey: &T::AccountId, hotkey: &T::AccountId, decrement: u64) -> bool { + Self::get_stake_for_coldkey_and_hotkey(coldkey, hotkey) >= decrement + } + + /// Increases the stake on the hotkey account under its owning coldkey. + /// + /// # Arguments + /// * `hotkey` - The hotkey account ID. + /// * `increment` - The amount to be incremented. + pub fn increase_stake_on_hotkey_account(hotkey: &T::AccountId, increment: u64) { + Self::increase_stake_on_coldkey_hotkey_account( + &Self::get_owning_coldkey_for_hotkey(hotkey), + hotkey, + increment, + ); + } + + /// Decreases the stake on the hotkey account under its owning coldkey. + /// + /// # Arguments + /// * `hotkey` - The hotkey account ID. + /// * `decrement` - The amount to be decremented. 
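+    ///
+    /// # Example
+    ///
+    /// A minimal usage sketch (hypothetical amount; assumes `hotkey` is an existing,
+    /// registered hotkey account):
+    ///
+    /// ```ignore
+    /// // Remove 1_000_000 RAO of stake from the hotkey and its owning coldkey totals.
+    /// Pallet::<T>::decrease_stake_on_hotkey_account(&hotkey, 1_000_000);
+    /// ```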
+ pub fn decrease_stake_on_hotkey_account(hotkey: &T::AccountId, decrement: u64) { + Self::decrease_stake_on_coldkey_hotkey_account( + &Self::get_owning_coldkey_for_hotkey(hotkey), + hotkey, + decrement, + ); + } + + // Increases the stake on the cold - hot pairing by increment while also incrementing other counters. + // This function should be called rather than set_stake under account. + // + pub fn increase_stake_on_coldkey_hotkey_account( + coldkey: &T::AccountId, + hotkey: &T::AccountId, + increment: u64, + ) { + log::debug!( + "Increasing stake: coldkey: {:?}, hotkey: {:?}, amount: {}", + coldkey, + hotkey, + increment + ); + + TotalColdkeyStake::::insert( + coldkey, + TotalColdkeyStake::::get(coldkey).saturating_add(increment), + ); + TotalHotkeyStake::::insert( + hotkey, + TotalHotkeyStake::::get(hotkey).saturating_add(increment), + ); + Stake::::insert( + hotkey, + coldkey, + Stake::::get(hotkey, coldkey).saturating_add(increment), + ); + TotalStake::::put(TotalStake::::get().saturating_add(increment)); + + // Update StakingHotkeys map + let mut staking_hotkeys = StakingHotkeys::::get(coldkey); + if !staking_hotkeys.contains(hotkey) { + staking_hotkeys.push(hotkey.clone()); + StakingHotkeys::::insert(coldkey, staking_hotkeys); + } + } + + // Decreases the stake on the cold - hot pairing by the decrement while decreasing other counters. + // + pub fn decrease_stake_on_coldkey_hotkey_account( + coldkey: &T::AccountId, + hotkey: &T::AccountId, + decrement: u64, + ) { + TotalColdkeyStake::::mutate(coldkey, |old| *old = old.saturating_sub(decrement)); + TotalHotkeyStake::::insert( + hotkey, + TotalHotkeyStake::::get(hotkey).saturating_sub(decrement), + ); + Stake::::insert( + hotkey, + coldkey, + Stake::::get(hotkey, coldkey).saturating_sub(decrement), + ); + TotalStake::::put(TotalStake::::get().saturating_sub(decrement)); + + // TODO: Tech debt: Remove StakingHotkeys entry if stake goes to 0 + } + + /// Empties the stake associated with a given coldkey-hotkey account pairing. + /// This function retrieves the current stake for the specified coldkey-hotkey pairing, + /// then subtracts this stake amount from both the TotalColdkeyStake and TotalHotkeyStake. + /// It also removes the stake entry for the hotkey-coldkey pairing and adjusts the TotalStake + /// and TotalIssuance by subtracting the removed stake amount. + /// + /// Returns the amount of stake that was removed. + /// + /// # Arguments + /// + /// * `coldkey` - A reference to the AccountId of the coldkey involved in the staking. + /// * `hotkey` - A reference to the AccountId of the hotkey associated with the coldkey. + pub fn empty_stake_on_coldkey_hotkey_account( + coldkey: &T::AccountId, + hotkey: &T::AccountId, + ) -> u64 { + let current_stake: u64 = Stake::::get(hotkey, coldkey); + TotalColdkeyStake::::mutate(coldkey, |old| *old = old.saturating_sub(current_stake)); + TotalHotkeyStake::::mutate(hotkey, |stake| *stake = stake.saturating_sub(current_stake)); + Stake::::remove(hotkey, coldkey); + TotalStake::::mutate(|stake| *stake = stake.saturating_sub(current_stake)); + + // Update StakingHotkeys map + let mut staking_hotkeys = StakingHotkeys::::get(coldkey); + staking_hotkeys.retain(|h| h != hotkey); + StakingHotkeys::::insert(coldkey, staking_hotkeys); + + current_stake + } + + /// Clears the nomination for an account, if it is a nominator account and the stake is below the minimum required threshold. 
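+    ///
+    /// # Example
+    ///
+    /// A minimal usage sketch (hypothetical accounts; assumes `coldkey` does not own
+    /// `hotkey`, i.e. the stake is a nomination):
+    ///
+    /// ```ignore
+    /// let stake = Pallet::<T>::get_stake_for_coldkey_and_hotkey(&coldkey, &hotkey);
+    /// Pallet::<T>::clear_small_nomination_if_required(&hotkey, &coldkey, stake);
+    /// ```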
+ pub fn clear_small_nomination_if_required( + hotkey: &T::AccountId, + coldkey: &T::AccountId, + stake: u64, + ) { + // Verify if the account is a nominator account by checking ownership of the hotkey by the coldkey. + if !Self::coldkey_owns_hotkey(coldkey, hotkey) { + // If the stake is below the minimum required, it's considered a small nomination and needs to be cleared. + if stake < Self::get_nominator_min_required_stake() { + // Remove the stake from the nominator account. (this is a more forceful unstake operation which ) + // Actually deletes the staking account. + let cleared_stake = Self::empty_stake_on_coldkey_hotkey_account(coldkey, hotkey); + // Add the stake to the coldkey account. + Self::add_balance_to_coldkey_account(coldkey, cleared_stake); + } + } + } + + /// Clears small nominations for all accounts. + /// + /// WARN: This is an O(N) operation, where N is the number of staking accounts. It should be + /// used with caution. + pub fn clear_small_nominations() { + // Loop through all staking accounts to identify and clear nominations below the minimum stake. + for (hotkey, coldkey, stake) in Stake::::iter() { + Self::clear_small_nomination_if_required(&hotkey, &coldkey, stake); + } + } + + pub fn add_balance_to_coldkey_account( + coldkey: &T::AccountId, + amount: <::Currency as fungible::Inspect<::AccountId>>::Balance, + ) { + // infallible + let _ = T::Currency::deposit(coldkey, amount, Precision::BestEffort); + } + + pub fn set_balance_on_coldkey_account( + coldkey: &T::AccountId, + amount: <::Currency as fungible::Inspect<::AccountId>>::Balance, + ) { + T::Currency::set_balance(coldkey, amount); + } + + pub fn can_remove_balance_from_coldkey_account( + coldkey: &T::AccountId, + amount: <::Currency as fungible::Inspect<::AccountId>>::Balance, + ) -> bool { + let current_balance = Self::get_coldkey_balance(coldkey); + if amount > current_balance { + return false; + } + + // This bit is currently untested. @todo + + T::Currency::can_withdraw(coldkey, amount) + .into_result(false) + .is_ok() + } + + pub fn get_coldkey_balance( + coldkey: &T::AccountId, + ) -> <::Currency as fungible::Inspect<::AccountId>>::Balance + { + T::Currency::reducible_balance(coldkey, Preservation::Expendable, Fortitude::Polite) + } + + #[must_use = "Balance must be used to preserve total issuance of token"] + pub fn remove_balance_from_coldkey_account( + coldkey: &T::AccountId, + amount: <::Currency as fungible::Inspect<::AccountId>>::Balance, + ) -> Result { + if amount == 0 { + return Ok(0); + } + + let credit = T::Currency::withdraw( + coldkey, + amount, + Precision::BestEffort, + Preservation::Preserve, + Fortitude::Polite, + ) + .map_err(|_| Error::::BalanceWithdrawalError)? + .peek(); + + if credit == 0 { + return Err(Error::::ZeroBalanceAfterWithdrawn.into()); + } + + Ok(credit) + } + + pub fn kill_coldkey_account( + coldkey: &T::AccountId, + amount: <::Currency as fungible::Inspect<::AccountId>>::Balance, + ) -> Result { + if amount == 0 { + return Ok(0); + } + + let credit = T::Currency::withdraw( + coldkey, + amount, + Precision::Exact, + Preservation::Expendable, + Fortitude::Force, + ) + .map_err(|_| Error::::BalanceWithdrawalError)? + .peek(); + + if credit == 0 { + return Err(Error::::ZeroBalanceAfterWithdrawn.into()); + } + + Ok(credit) + } + + pub fn unstake_all_coldkeys_from_hotkey_account(hotkey: &T::AccountId) { + // Iterate through all coldkeys that have a stake on this hotkey account. 
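+        // Note: this walks every (coldkey, stake) entry under the hotkey, so its cost is
+        // O(number of nominating coldkeys); each entry is unstaked in full and credited
+        // back to that coldkey's free balance below.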
+        for (delegate_coldkey_i, stake_i) in
+            <Stake<T> as IterableStorageDoubleMap<T::AccountId, T::AccountId, u64>>::iter_prefix(
+                hotkey,
+            )
+        {
+            // Remove the stake from the coldkey - hotkey pairing.
+            Self::decrease_stake_on_coldkey_hotkey_account(&delegate_coldkey_i, hotkey, stake_i);
+
+            // Add the balance to the coldkey account.
+            Self::add_balance_to_coldkey_account(&delegate_coldkey_i, stake_i);
+        }
+    }
+}
diff --git a/pallets/subtensor/src/staking/increase_take.rs b/pallets/subtensor/src/staking/increase_take.rs
new file mode 100644
index 000000000..021818447
--- /dev/null
+++ b/pallets/subtensor/src/staking/increase_take.rs
@@ -0,0 +1,88 @@
+use super::*;
+
+impl<T: Config> Pallet<T> {
+    /// ---- The implementation for the extrinsic increase_take.
+    ///
+    /// # Args:
+    /// * 'origin': (<T as frame_system::Config>::RuntimeOrigin):
+    ///     - The signature of the caller's coldkey.
+    ///
+    /// * 'hotkey' (T::AccountId):
+    ///     - The hotkey we are delegating (must be owned by the coldkey.)
+    ///
+    /// * 'take' (u16):
+    ///     - The stake proportion that this hotkey takes from delegations.
+    ///
+    /// # Event:
+    /// * TakeIncreased;
+    ///     - On successfully setting an increased take for this hotkey.
+    ///
+    /// # Raises:
+    /// * 'NotRegistered':
+    ///     - The hotkey we are delegating is not registered on the network.
+    ///
+    /// * 'NonAssociatedColdKey':
+    ///     - The hotkey we are delegating is not owned by the calling coldkey.
+    ///
+    /// * 'TxRateLimitExceeded':
+    ///     - Thrown if the key has hit the transaction rate limit.
+    ///
+    /// * 'DelegateTakeTooLow':
+    ///     - The delegate is setting a take which is not greater than the previous.
+    ///
+    pub fn do_increase_take(
+        origin: T::RuntimeOrigin,
+        hotkey: T::AccountId,
+        take: u16,
+    ) -> dispatch::DispatchResult {
+        // --- 1. We check the coldkey signature.
+        let coldkey = ensure_signed(origin)?;
+        log::debug!(
+            "do_increase_take( origin:{:?} hotkey:{:?}, take:{:?} )",
+            coldkey,
+            hotkey,
+            take
+        );
+
+        // --- 2. Ensure we are delegating a known key.
+        // Ensure that the coldkey is the owner.
+        Self::do_take_checks(&coldkey, &hotkey)?;
+
+        // --- 3. Ensure we are strictly increasing the take.
+        if let Ok(current_take) = Delegates::<T>::try_get(&hotkey) {
+            ensure!(take > current_take, Error::<T>::DelegateTakeTooLow);
+        }
+
+        // --- 4. Ensure take is within the min ..= InitialDefaultDelegateTake (18%) range.
+        let max_take = MaxDelegateTake::<T>::get();
+        ensure!(take <= max_take, Error::<T>::DelegateTakeTooHigh);
+
+        // --- 5. Enforce the rate limit (independently of the do_add_stake rate limits).
+        let block: u64 = Self::get_current_block_as_u64();
+        ensure!(
+            !Self::exceeds_tx_delegate_take_rate_limit(
+                Self::get_last_tx_block_delegate_take(&coldkey),
+                block
+            ),
+            Error::<T>::DelegateTxRateLimitExceeded
+        );
+
+        // Set last block for rate limiting.
+        Self::set_last_tx_block_delegate_take(&coldkey, block);
+
+        // --- 6. Set the new take value.
+        Delegates::<T>::insert(hotkey.clone(), take);
+
+        // --- 7. Emit the take value.
+        log::debug!(
+            "TakeIncreased( coldkey:{:?}, hotkey:{:?}, take:{:?} )",
+            coldkey,
+            hotkey,
+            take
+        );
+        Self::deposit_event(Event::TakeIncreased(coldkey, hotkey, take));
+
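
// A sketch (not part of the patch) of the validation order in do_increase_take, restated
// on plain integers: the new take must be strictly greater than the current one, must not
// exceed the configured maximum, and the coldkey must be outside its rate-limit window.
// The block-delta comparison and the sample limits are assumptions for illustration; the
// real checks live in exceeds_tx_delegate_take_rate_limit and MaxDelegateTake.
#[derive(Debug, PartialEq)]
enum TakeError {
    TooLow,
    TooHigh,
    RateLimited,
}

fn check_increase_take(
    current_take: Option<u16>,
    new_take: u16,
    max_take: u16,
    last_tx_block: u64,
    current_block: u64,
    rate_limit_blocks: u64,
) -> Result<(), TakeError> {
    // 1. The new take must be strictly greater than the existing one, if any.
    if let Some(current) = current_take {
        if new_take <= current {
            return Err(TakeError::TooLow);
        }
    }
    // 2. The new take must not exceed the configured maximum.
    if new_take > max_take {
        return Err(TakeError::TooHigh);
    }
    // 3. The coldkey must be outside its rate-limit window (assumed semantics).
    if current_block.saturating_sub(last_tx_block) < rate_limit_blocks {
        return Err(TakeError::RateLimited);
    }
    Ok(())
}

fn main() {
    assert_eq!(check_increase_take(Some(10), 10, 100, 0, 50, 5), Err(TakeError::TooLow));
    assert_eq!(check_increase_take(Some(10), 200, 100, 0, 50, 5), Err(TakeError::TooHigh));
    assert_eq!(check_increase_take(Some(10), 20, 100, 48, 50, 5), Err(TakeError::RateLimited));
    assert!(check_increase_take(Some(10), 20, 100, 0, 50, 5).is_ok());
}
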
+        // --- 8. Ok and return.
+        Ok(())
+    }
+}
diff --git a/pallets/subtensor/src/staking/mod.rs b/pallets/subtensor/src/staking/mod.rs
new file mode 100644
index 000000000..0b3894b61
--- /dev/null
+++ b/pallets/subtensor/src/staking/mod.rs
@@ -0,0 +1,8 @@
+use super::*;
+pub mod add_stake;
+pub mod become_delegate;
+pub mod decrease_take;
+pub mod helpers;
+pub mod increase_take;
+pub mod remove_stake;
+pub mod set_children;
diff --git a/pallets/subtensor/src/staking/remove_stake.rs b/pallets/subtensor/src/staking/remove_stake.rs
new file mode 100644
index 000000000..4118e8d07
--- /dev/null
+++ b/pallets/subtensor/src/staking/remove_stake.rs
@@ -0,0 +1,109 @@
+use super::*;
+
+impl<T: Config> Pallet<T> {
+    /// ---- The implementation for the extrinsic remove_stake: Removes stake from a hotkey account and adds it onto a coldkey.
+    ///
+    /// # Args:
+    /// * 'origin': (RuntimeOrigin):
+    ///     - The signature of the caller's coldkey.
+    ///
+    /// * 'hotkey' (T::AccountId):
+    ///     - The associated hotkey account.
+    ///
+    /// * 'stake_to_be_removed' (u64):
+    ///     - The amount of stake to be removed from the hotkey staking account.
+    ///
+    /// # Event:
+    /// * StakeRemoved;
+    ///     - On successfully removing stake from the hotkey account.
+    ///
+    /// # Raises:
+    /// * 'NotRegistered':
+    ///     - Thrown if the account we are attempting to unstake from is non-existent.
+    ///
+    /// * 'NonAssociatedColdKey':
+    ///     - Thrown if the coldkey does not own the hotkey we are unstaking from.
+    ///
+    /// * 'NotEnoughStakeToWithdraw':
+    ///     - Thrown if there is not enough stake on the hotkey to withdraw this amount.
+    ///
+    /// * 'TxRateLimitExceeded':
+    ///     - Thrown if the key has hit the transaction rate limit.
+    ///
+    pub fn do_remove_stake(
+        origin: T::RuntimeOrigin,
+        hotkey: T::AccountId,
+        stake_to_be_removed: u64,
+    ) -> dispatch::DispatchResult {
+        // We check the transaction is signed by the caller and retrieve the T::AccountId coldkey information.
+        let coldkey = ensure_signed(origin)?;
+        log::debug!(
+            "do_remove_stake( origin:{:?} hotkey:{:?}, stake_to_be_removed:{:?} )",
+            coldkey,
+            hotkey,
+            stake_to_be_removed
+        );
+
+        // Ensure that the hotkey account exists; this is only possible through registration.
+        ensure!(
+            Self::hotkey_account_exists(&hotkey),
+            Error::<T>::HotKeyAccountNotExists
+        );
+
+        // Ensure that the hotkey allows delegation or that the hotkey is owned by the calling coldkey.
+        ensure!(
+            Self::hotkey_is_delegate(&hotkey) || Self::coldkey_owns_hotkey(&coldkey, &hotkey),
+            Error::<T>::HotKeyNotDelegateAndSignerNotOwnHotKey
+        );
+
+        // Ensure that the stake amount to be removed is above zero.
+        ensure!(stake_to_be_removed > 0, Error::<T>::StakeToWithdrawIsZero);
+
+        // Ensure that the hotkey has enough stake to withdraw.
+        ensure!(
+            Self::has_enough_stake(&coldkey, &hotkey, stake_to_be_removed),
+            Error::<T>::NotEnoughStakeToWithdraw
+        );
+
+        // Ensure we don't exceed the unstake rate limit.
+        let unstakes_this_interval =
+            Self::get_stakes_this_interval_for_coldkey_hotkey(&coldkey, &hotkey);
+        ensure!(
+            unstakes_this_interval < Self::get_target_stakes_per_interval(),
+            Error::<T>::UnstakeRateLimitExceeded
+        );
+
+        // We remove the balance from the hotkey.
+        Self::decrease_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake_to_be_removed);
+
+        // We add the balance to the coldkey. If the above fails, we will not credit this coldkey.
+        Self::add_balance_to_coldkey_account(&coldkey, stake_to_be_removed);
+
+        // If the stake is below the minimum, we clear the nomination from storage.
+        // This only applies to nominator stakes.
+ // If the coldkey does not own the hotkey, it's a nominator stake. + let new_stake = Self::get_stake_for_coldkey_and_hotkey(&coldkey, &hotkey); + Self::clear_small_nomination_if_required(&hotkey, &coldkey, new_stake); + + // Set last block for rate limiting + let block: u64 = Self::get_current_block_as_u64(); + Self::set_last_tx_block(&coldkey, block); + + // Emit the unstaking event. + Self::set_stakes_this_interval_for_coldkey_hotkey( + &coldkey, + &hotkey, + unstakes_this_interval.saturating_add(1), + block, + ); + log::debug!( + "StakeRemoved( hotkey:{:?}, stake_to_be_removed:{:?} )", + hotkey, + stake_to_be_removed + ); + Self::deposit_event(Event::StakeRemoved(hotkey, stake_to_be_removed)); + + // Done and ok. + Ok(()) + } +} diff --git a/pallets/subtensor/src/staking/set_children.rs b/pallets/subtensor/src/staking/set_children.rs new file mode 100644 index 000000000..9029d58ca --- /dev/null +++ b/pallets/subtensor/src/staking/set_children.rs @@ -0,0 +1,304 @@ +use super::*; + +impl Pallet { + /// ---- The implementation for the extrinsic do_set_child_singular: Sets a single child. + /// + /// This function allows a coldkey to set children keys. + /// + /// # Arguments: + /// * `origin` (::RuntimeOrigin): + /// - The signature of the calling coldkey. Setting a hotkey child can only be done by the coldkey. + /// + /// * `hotkey` (T::AccountId): + /// - The hotkey which will be assigned the child. + /// + /// * `netuid` (u16): + /// - The u16 network identifier where the child keys will exist. + /// + /// * `children` Vec[(u64, T::AccountId)]: + /// - A list of children with their proportions. + /// + /// # Events: + /// * `ChildrenAdded`: + /// - On successfully registering children to a hotkey. + /// + /// # Errors: + /// * `SubNetworkDoesNotExist`: + /// - Attempting to register to a non-existent network. + /// * `RegistrationNotPermittedOnRootSubnet`: + /// - Attempting to register a child on the root network. + /// * `NonAssociatedColdKey`: + /// - The coldkey does not own the hotkey or the child is the same as the hotkey. + /// * `HotKeyAccountNotExists`: + /// - The hotkey account does not exist. + /// + /// # Detailed Explanation of Checks: + /// 1. **Signature Verification**: Ensures that the caller has signed the transaction, verifying the coldkey. + /// 2. **Root Network Check**: Ensures that the delegation is not on the root network, as child hotkeys are not valid on the root. + /// 3. **Network Existence Check**: Ensures that the specified network exists. + /// 4. **Ownership Verification**: Ensures that the coldkey owns the hotkey. + /// 5. **Hotkey Account Existence Check**: Ensures that the hotkey account already exists. + /// 6. **Child-Hotkey Distinction**: Ensures that the child is not the same as the hotkey. + /// 7. **Old Children Cleanup**: Removes the hotkey from the parent list of its old children. + /// 8. **New Children Assignment**: Assigns the new child to the hotkey and updates the parent list for the new child. + /// + pub fn do_set_children( + origin: T::RuntimeOrigin, + hotkey: T::AccountId, + netuid: u16, + children: Vec<(u64, T::AccountId)>, + ) -> DispatchResult { + // --- 1. Check that the caller has signed the transaction. (the coldkey of the pairing) + let coldkey = ensure_signed(origin)?; + log::trace!( + "do_set_children( coldkey:{:?} hotkey:{:?} netuid:{:?} children:{:?} )", + coldkey, + netuid, + hotkey, + children + ); + + // Ensure the hotkey passes the rate limit. 
+ ensure!( + Self::passes_rate_limit_on_subnet( + &TransactionType::SetChildren, // Set children. + &hotkey, // Specific to a hotkey. + netuid, // Specific to a subnet. + ), + Error::::TxRateLimitExceeded + ); + + // Set last transaction block + let current_block = Self::get_current_block_as_u64(); + Self::set_last_transaction_block( + &hotkey, + netuid, + &TransactionType::SetChildren, + current_block, + ); + + // --- 2. Check that this delegation is not on the root network. Child hotkeys are not valid on root. + ensure!( + netuid != Self::get_root_netuid(), + Error::::RegistrationNotPermittedOnRootSubnet + ); + + // --- 3. Check that the network we are trying to create the child on exists. + ensure!( + Self::if_subnet_exist(netuid), + Error::::SubNetworkDoesNotExist + ); + + // --- 4. Check that the coldkey owns the hotkey. + ensure!( + Self::coldkey_owns_hotkey(&coldkey, &hotkey), + Error::::NonAssociatedColdKey + ); + + // --- 4.1. Ensure that the number of children does not exceed 5. + ensure!(children.len() <= 5, Error::::TooManyChildren); + + // --- 5. Ensure that each child is not the hotkey. + for (_, child_i) in &children { + ensure!(child_i != &hotkey, Error::::InvalidChild); + } + // --- 5.1. Ensure that the sum of the proportions does not exceed u64::MAX. + let _total_proportion: u64 = children + .iter() + .try_fold(0u64, |acc, &(proportion, _)| acc.checked_add(proportion)) + .ok_or(Error::::ProportionOverflow)?; + + // --- 5.2. Ensure there are no duplicates in the list of children. + let mut unique_children = Vec::new(); + for (_, child_i) in &children { + ensure!( + !unique_children.contains(child_i), + Error::::DuplicateChild + ); + unique_children.push(child_i.clone()); + } + + // --- 6. Erase myself from old children's parents. + let old_children: Vec<(u64, T::AccountId)> = ChildKeys::::get(hotkey.clone(), netuid); + + // --- 6.0. Iterate over all my old children and remove myself from their parent's map. + for (_, old_child_i) in old_children.clone().iter() { + // --- 6.1. Get the old child's parents on this network. + let my_old_child_parents: Vec<(u64, T::AccountId)> = + ParentKeys::::get(old_child_i.clone(), netuid); + + // --- 6.2. Filter my hotkey from my old children's parents list. + let filtered_parents: Vec<(u64, T::AccountId)> = my_old_child_parents + .into_iter() + .filter(|(_, parent)| *parent != hotkey) + .collect(); + + // --- 6.3. Update the parent list in storage + ParentKeys::::insert(old_child_i, netuid, filtered_parents); + } + + // --- 7.1. Insert my new children + proportion list into the map. + ChildKeys::::insert(hotkey.clone(), netuid, children.clone()); + + // --- 7.2. Update the parents list for my new children. + for (proportion, new_child_i) in children.clone().iter() { + // --- 8.2.1. Get the child's parents on this network. + let mut new_child_previous_parents: Vec<(u64, T::AccountId)> = + ParentKeys::::get(new_child_i.clone(), netuid); + + // --- 7.2.2. Append my hotkey and proportion to my new child's parents list. + // NOTE: There are no duplicates possible because I previously removed my self from my old children. + new_child_previous_parents.push((*proportion, hotkey.clone())); + + // --- 7.2.3. Update the parents list in storage. + ParentKeys::::insert(new_child_i.clone(), netuid, new_child_previous_parents); + } + + // --- 8. Log and return. 
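
// A standalone sketch (not part of the patch) of the ChildKeys/ParentKeys bookkeeping in
// steps 6-7 of do_set_children above, using std HashMaps and u32 ids in place of storage
// maps and AccountIds: detach the hotkey from its old children's parent lists, then attach
// it, with the new proportions, to each new child's parent list. The proportion values are
// just sample numbers.
use std::collections::HashMap;

type Key = u32;

fn set_children(
    child_keys: &mut HashMap<Key, Vec<(u64, Key)>>,  // hotkey -> (proportion, child)
    parent_keys: &mut HashMap<Key, Vec<(u64, Key)>>, // child  -> (proportion, parent)
    hotkey: Key,
    new_children: Vec<(u64, Key)>,
) {
    // 6. Remove `hotkey` from the parent lists of its old children.
    for (_, old_child) in child_keys.get(&hotkey).cloned().unwrap_or_default() {
        if let Some(parents) = parent_keys.get_mut(&old_child) {
            parents.retain(|(_, parent)| *parent != hotkey);
        }
    }
    // 7.1 Store the new child list for the hotkey.
    child_keys.insert(hotkey, new_children.clone());
    // 7.2 Append (proportion, hotkey) to each new child's parent list.
    for (proportion, child) in new_children {
        parent_keys.entry(child).or_default().push((proportion, hotkey));
    }
}

fn main() {
    let (mut children, mut parents) = (HashMap::new(), HashMap::new());
    set_children(&mut children, &mut parents, 1, vec![(u64::MAX / 2, 2)]);
    set_children(&mut children, &mut parents, 1, vec![(u64::MAX, 3)]);
    assert!(parents.get(&2).map(|p| p.is_empty()).unwrap_or(true)); // old child detached
    assert_eq!(parents[&3], vec![(u64::MAX, 1)]);                   // new child attached
}
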
+ log::trace!( + "SetChildren( netuid:{:?}, hotkey:{:?}, children:{:?} )", + hotkey, + netuid, + children.clone() + ); + Self::deposit_event(Event::SetChildren(hotkey.clone(), netuid, children.clone())); + + // Ok and return. + Ok(()) + } + + /* Retrieves the list of children for a given hotkey and network. + /// + /// # Arguments + /// * `hotkey` - The hotkey whose children are to be retrieved. + /// * `netuid` - The network identifier. + /// + /// # Returns + /// * `Vec<(u64, T::AccountId)>` - A vector of tuples containing the proportion and child account ID. + /// + /// # Example + /// ``` + /// let children = SubtensorModule::get_children(&hotkey, netuid); + */ + pub fn get_children(hotkey: &T::AccountId, netuid: u16) -> Vec<(u64, T::AccountId)> { + ChildKeys::::get(hotkey, netuid) + } + + /* Retrieves the list of parents for a given child and network. + /// + /// # Arguments + /// * `child` - The child whose parents are to be retrieved. + /// * `netuid` - The network identifier. + /// + /// # Returns + /// * `Vec<(u64, T::AccountId)>` - A vector of tuples containing the proportion and parent account ID. + /// + /// # Example + /// ``` + /// let parents = SubtensorModule::get_parents(&child, netuid); + */ + pub fn get_parents(child: &T::AccountId, netuid: u16) -> Vec<(u64, T::AccountId)> { + ParentKeys::::get(child, netuid) + } + + /// Sets the childkey take for a given hotkey. + /// + /// This function allows a coldkey to set the childkey take for a given hotkey. + /// The childkey take determines the proportion of stake that the hotkey keeps for itself + /// when distributing stake to its children. + /// + /// # Arguments: + /// * `coldkey` (T::AccountId): + /// - The coldkey that owns the hotkey. + /// + /// * `hotkey` (T::AccountId): + /// - The hotkey for which the childkey take will be set. + /// + /// * `take` (u16): + /// - The new childkey take value. This is a percentage represented as a value between 0 and 10000, + /// where 10000 represents 100%. + /// + /// # Returns: + /// * `DispatchResult` - The result of the operation. + /// + /// # Errors: + /// * `NonAssociatedColdKey`: + /// - The coldkey does not own the hotkey. + /// * `InvalidChildkeyTake`: + /// - The provided take value is invalid (greater than the maximum allowed take). + /// * `TxChildkeyTakeRateLimitExceeded`: + /// - The rate limit for changing childkey take has been exceeded. + pub fn do_set_childkey_take( + coldkey: T::AccountId, + hotkey: T::AccountId, + netuid: u16, + take: u16, + ) -> DispatchResult { + // Ensure the coldkey owns the hotkey + ensure!( + Self::coldkey_owns_hotkey(&coldkey, &hotkey), + Error::::NonAssociatedColdKey + ); + + // Ensure the take value is valid + ensure!( + take <= Self::get_max_childkey_take(), + Error::::InvalidChildkeyTake + ); + + // Ensure the hotkey passes the rate limit. + ensure!( + Self::passes_rate_limit_on_subnet( + &TransactionType::SetChildkeyTake, // Set childkey take. + &hotkey, // Specific to a hotkey. + netuid, // Specific to a subnet. 
+ ), + Error::::TxChildkeyTakeRateLimitExceeded + ); + + // Set last transaction block + let current_block = Self::get_current_block_as_u64(); + Self::set_last_transaction_block( + &hotkey, + netuid, + &TransactionType::SetChildkeyTake, + current_block, + ); + + // Set the new childkey take value for the given hotkey and network + ChildkeyTake::::insert(hotkey.clone(), netuid, take); + + // Update the last transaction block + Self::set_last_transaction_block( + &hotkey, + netuid, + &TransactionType::SetChildkeyTake, + current_block, + ); + + // Emit the event + Self::deposit_event(Event::ChildKeyTakeSet(hotkey.clone(), take)); + log::debug!( + "Childkey take set for hotkey: {:?} and take: {:?}", + hotkey, + take + ); + Ok(()) + } + + /// Gets the childkey take for a given hotkey. + /// + /// This function retrieves the current childkey take value for a specified hotkey. + /// If no specific take value has been set, it returns the default childkey take. + /// + /// # Arguments: + /// * `hotkey` (&T::AccountId): + /// - The hotkey for which to retrieve the childkey take. + /// + /// # Returns: + /// * `u16` - The childkey take value. This is a percentage represented as a value between 0 and 10000, + /// where 10000 represents 100%. + pub fn get_childkey_take(hotkey: &T::AccountId, netuid: u16) -> u16 { + ChildkeyTake::::get(hotkey, netuid) + } +} diff --git a/pallets/subtensor/src/subnets/mod.rs b/pallets/subtensor/src/subnets/mod.rs new file mode 100644 index 000000000..43bdfec43 --- /dev/null +++ b/pallets/subtensor/src/subnets/mod.rs @@ -0,0 +1,5 @@ +use super::*; +pub mod registration; +pub mod serving; +pub mod uids; +pub mod weights; diff --git a/pallets/subtensor/src/registration.rs b/pallets/subtensor/src/subnets/registration.rs similarity index 84% rename from pallets/subtensor/src/registration.rs rename to pallets/subtensor/src/subnets/registration.rs index 6b73f2fc3..9310b7a3f 100644 --- a/pallets/subtensor/src/registration.rs +++ b/pallets/subtensor/src/subnets/registration.rs @@ -41,11 +41,7 @@ impl Pallet { ) -> DispatchResult { // --- 1. Check that the caller has signed the transaction. (the coldkey of the pairing) let coldkey = ensure_signed(origin)?; - ensure!( - !Self::coldkey_in_arbitration(&coldkey), - Error::::ColdkeyIsInArbitration - ); - log::info!( + log::debug!( "do_registration( coldkey:{:?} netuid:{:?} hotkey:{:?} )", coldkey, netuid, @@ -135,7 +131,7 @@ impl Pallet { // --- 12.1.2 Expand subnetwork with new account. Self::append_neuron(netuid, &hotkey, current_block_number); - log::info!("add new neuron account"); + log::debug!("add new neuron account"); } else { // --- 13.1.1 Replacement required. // We take the neuron with the lowest pruning score here. @@ -143,7 +139,7 @@ impl Pallet { // --- 13.1.1 Replace the neuron account with the new info. Self::replace_neuron(netuid, subnetwork_uid, &hotkey, current_block_number); - log::info!("prune neuron"); + log::debug!("prune neuron"); } // --- 14. Record the registration and increment block and interval counters. @@ -153,7 +149,7 @@ impl Pallet { Self::increase_rao_recycled(netuid, Self::get_burn_as_u64(netuid)); // --- 15. Deposit successful event. - log::info!( + log::debug!( "NeuronRegistered( netuid:{:?} uid:{:?} hotkey:{:?} ) ", netuid, subnetwork_uid, @@ -224,7 +220,7 @@ impl Pallet { // --- 1. Check that the caller has signed the transaction. // TODO( const ): This not be the hotkey signature or else an exterior actor can register the hotkey and potentially control it? 
let signing_origin = ensure_signed(origin)?; - log::info!( + log::debug!( "do_registration( origin:{:?} netuid:{:?} hotkey:{:?}, coldkey:{:?} )", signing_origin, netuid, @@ -330,7 +326,7 @@ impl Pallet { // --- 11.1.2 Expand subnetwork with new account. Self::append_neuron(netuid, &hotkey, current_block_number); - log::info!("add new neuron account"); + log::debug!("add new neuron account"); } else { // --- 11.1.1 Replacement required. // We take the neuron with the lowest pruning score here. @@ -338,7 +334,7 @@ impl Pallet { // --- 11.1.1 Replace the neuron account with the new info. Self::replace_neuron(netuid, subnetwork_uid, &hotkey, current_block_number); - log::info!("prune neuron"); + log::debug!("prune neuron"); } // --- 12. Record the registration and increment block and interval counters. @@ -347,7 +343,7 @@ impl Pallet { RegistrationsThisBlock::::mutate(netuid, |val| val.saturating_inc()); // --- 13. Deposit successful event. - log::info!( + log::debug!( "NeuronRegistered( netuid:{:?} uid:{:?} hotkey:{:?} ) ", netuid, subnetwork_uid, @@ -370,7 +366,7 @@ impl Pallet { // --- 1. Check that the caller has signed the transaction. let coldkey = ensure_signed(origin)?; - log::info!("do_faucet( coldkey:{:?} )", coldkey); + log::debug!("do_faucet( coldkey:{:?} )", coldkey); // --- 2. Ensure the passed block number is valid, not in the future or too old. // Work must have been done within 3 blocks (stops long range attacks). @@ -404,7 +400,7 @@ impl Pallet { Self::add_balance_to_coldkey_account(&coldkey, balance_to_add); // --- 6. Deposit successful event. - log::info!( + log::debug!( "Faucet( coldkey:{:?} amount:{:?} ) ", coldkey, balance_to_add @@ -423,65 +419,67 @@ impl Pallet { } /// Determine which peer to prune from the network by finding the element with the lowest pruning score out of - /// immunity period. If all neurons are in immunity period, return node with lowest prunning score. - /// This function will always return an element to prune. + /// immunity period. If there is a tie for lowest pruning score, the neuron registered earliest is pruned. + /// If all neurons are in immunity period, the neuron with the lowest pruning score is pruned. If there is a tie for + /// the lowest pruning score, the immune neuron registered earliest is pruned. + /// Ties for earliest registration are broken by the neuron with the lowest uid. pub fn get_neuron_to_prune(netuid: u16) -> u16 { let mut min_score: u16 = u16::MAX; - let mut min_score_in_immunity_period = u16::MAX; - let mut uid_with_min_score = 0; - let mut uid_with_min_score_in_immunity_period: u16 = 0; + let mut min_score_in_immunity: u16 = u16::MAX; + let mut earliest_registration: u64 = u64::MAX; + let mut earliest_registration_in_immunity: u64 = u64::MAX; + let mut uid_to_prune: u16 = 0; + let mut uid_to_prune_in_immunity: u16 = 0; + + // This boolean is used instead of checking if min_score == u16::MAX, to avoid the case + // where all non-immune neurons have pruning score u16::MAX + // This may be unlikely in practice. + let mut found_non_immune = false; let neurons_n = Self::get_subnetwork_n(netuid); if neurons_n == 0 { return 0; // If there are no neurons in this network. 
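
// For illustration only (not part of the patch): the immunity test consulted below via
// get_neuron_is_immune, restated on plain integers. The removed code in this same hunk
// computed it inline as current_block - block_at_registration < immunity_period; the
// names here are illustrative, not the pallet's API.
fn is_immune(current_block: u64, block_at_registration: u64, immunity_period: u64) -> bool {
    current_block.saturating_sub(block_at_registration) < immunity_period
}

fn main() {
    assert!(is_immune(100, 95, 10));  // registered 5 blocks ago, period of 10
    assert!(!is_immune(100, 80, 10)); // registered 20 blocks ago: out of immunity
}
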
} - let current_block: u64 = Self::get_current_block_as_u64(); - let immunity_period: u64 = Self::get_immunity_period(netuid) as u64; - for neuron_uid_i in 0..neurons_n { - let pruning_score: u16 = Self::get_pruning_score_for_uid(netuid, neuron_uid_i); + for neuron_uid in 0..neurons_n { + let pruning_score: u16 = Self::get_pruning_score_for_uid(netuid, neuron_uid); let block_at_registration: u64 = - Self::get_neuron_block_at_registration(netuid, neuron_uid_i); - #[allow(clippy::comparison_chain)] - if min_score == pruning_score { - if current_block.saturating_sub(block_at_registration) < immunity_period { - //neuron is in immunity period - if min_score_in_immunity_period > pruning_score { - min_score_in_immunity_period = pruning_score; - uid_with_min_score_in_immunity_period = neuron_uid_i; - } - } else { - uid_with_min_score = neuron_uid_i; + Self::get_neuron_block_at_registration(netuid, neuron_uid); + let is_immune = Self::get_neuron_is_immune(netuid, neuron_uid); + + if is_immune { + // if the immune neuron has a lower pruning score than the minimum for immune neurons, + // or, if the pruning scores are equal and the immune neuron was registered earlier than the current minimum for immune neurons, + // then update the minimum pruning score and the uid to prune for immune neurons + if pruning_score < min_score_in_immunity + || (pruning_score == min_score_in_immunity + && block_at_registration < earliest_registration_in_immunity) + { + min_score_in_immunity = pruning_score; + earliest_registration_in_immunity = block_at_registration; + uid_to_prune_in_immunity = neuron_uid; } - } - // Find min pruning score. - else if min_score > pruning_score { - if current_block.saturating_sub(block_at_registration) < immunity_period { - //neuron is in immunity period - if min_score_in_immunity_period > pruning_score { - min_score_in_immunity_period = pruning_score; - uid_with_min_score_in_immunity_period = neuron_uid_i; - } - } else { + } else { + found_non_immune = true; + // if the non-immune neuron has a lower pruning score than the minimum for non-immune neurons, + // or, if the pruning scores are equal and the non-immune neuron was registered earlier than the current minimum for non-immune neurons, + // then update the minimum pruning score and the uid to prune for non-immune neurons + if pruning_score < min_score + || (pruning_score == min_score && block_at_registration < earliest_registration) + { min_score = pruning_score; - uid_with_min_score = neuron_uid_i; + earliest_registration = block_at_registration; + uid_to_prune = neuron_uid; } } } - if min_score == u16::MAX { - //all neuorns are in immunity period - Self::set_pruning_score_for_uid( - netuid, - uid_with_min_score_in_immunity_period, - u16::MAX, - ); - uid_with_min_score_in_immunity_period + + if found_non_immune { + Self::set_pruning_score_for_uid(netuid, uid_to_prune, u16::MAX); + uid_to_prune } else { - // We replace the pruning score here with u16 max to ensure that all peers always have a - // pruning score. In the event that every peer has been pruned this function will prune - // the last element in the network continually. 
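
// A standalone restatement (not part of the patch) of the selection rule implemented by
// the rewritten get_neuron_to_prune: non-immune neurons are preferred; within each group
// the lowest pruning score wins, ties go to the earliest registration block, and remaining
// ties to the lowest uid. The side effect of resetting the chosen neuron's pruning score
// to u16::MAX, and the "return 0 when the subnet is empty" convention, are omitted here;
// the tuple input is invented for illustration.
#[derive(Clone, Copy)]
struct Neuron {
    uid: u16,
    pruning_score: u16,
    registered_at: u64,
    is_immune: bool,
}

fn neuron_to_prune(neurons: &[Neuron]) -> Option<u16> {
    let pick = |immune: bool| {
        neurons
            .iter()
            .filter(|n| n.is_immune == immune)
            .min_by_key(|n| (n.pruning_score, n.registered_at, n.uid))
            .map(|n| n.uid)
    };
    // Non-immune candidates win; fall back to immune ones only if none exist.
    pick(false).or_else(|| pick(true))
}

fn main() {
    let neurons = [
        Neuron { uid: 0, pruning_score: 5, registered_at: 10, is_immune: false },
        Neuron { uid: 1, pruning_score: 5, registered_at: 3, is_immune: false }, // earlier
        Neuron { uid: 2, pruning_score: 1, registered_at: 50, is_immune: true }, // immune
    ];
    assert_eq!(neuron_to_prune(&neurons), Some(1));
}
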
- Self::set_pruning_score_for_uid(netuid, uid_with_min_score, u16::MAX); - uid_with_min_score + Self::set_pruning_score_for_uid(netuid, uid_to_prune_in_immunity, u16::MAX); + uid_to_prune_in_immunity } } @@ -547,6 +545,21 @@ impl Pallet { H256::from_slice(&keccak_256_seal_hash_vec) } + pub fn hash_hotkey_to_u64(hotkey: &T::AccountId) -> u64 { + let binding = hotkey.encode(); + let (hotkey_bytes, _) = binding.split_at(32); + let mut full_bytes = [0u8; 64]; + // Copy the hotkey_bytes into the first half of full_bytes + full_bytes[..32].copy_from_slice(hotkey_bytes); + let keccak_256_seal_hash_vec: [u8; 32] = keccak_256(&full_bytes[..]); + let hash_u64: u64 = u64::from_le_bytes( + keccak_256_seal_hash_vec[0..8] + .try_into() + .unwrap_or_default(), + ); + hash_u64 + } + pub fn create_seal_hash(block_number_u64: u64, nonce_u64: u64, hotkey: &T::AccountId) -> H256 { let nonce = nonce_u64.to_le_bytes(); let block_hash_at_number: H256 = Self::get_block_hash_from_u64(block_number_u64); diff --git a/pallets/subtensor/src/serving.rs b/pallets/subtensor/src/subnets/serving.rs similarity index 98% rename from pallets/subtensor/src/serving.rs rename to pallets/subtensor/src/subnets/serving.rs index eb7fa4369..1a9240c36 100644 --- a/pallets/subtensor/src/serving.rs +++ b/pallets/subtensor/src/subnets/serving.rs @@ -106,7 +106,7 @@ impl Pallet { Axons::::insert(netuid, hotkey_id.clone(), prev_axon); // We deposit axon served event. - log::info!("AxonServed( hotkey:{:?} ) ", hotkey_id.clone()); + log::debug!("AxonServed( hotkey:{:?} ) ", hotkey_id.clone()); Self::deposit_event(Event::AxonServed(netuid, hotkey_id)); // Return is successful dispatch. @@ -204,7 +204,7 @@ impl Pallet { Prometheus::::insert(netuid, hotkey_id.clone(), prev_prometheus); // We deposit prometheus served event. - log::info!("PrometheusServed( hotkey:{:?} ) ", hotkey_id.clone()); + log::debug!("PrometheusServed( hotkey:{:?} ) ", hotkey_id.clone()); Self::deposit_event(Event::PrometheusServed(netuid, hotkey_id)); // Return is successful dispatch. diff --git a/pallets/subtensor/src/uids.rs b/pallets/subtensor/src/subnets/uids.rs similarity index 100% rename from pallets/subtensor/src/uids.rs rename to pallets/subtensor/src/subnets/uids.rs diff --git a/pallets/subtensor/src/weights.rs b/pallets/subtensor/src/subnets/weights.rs similarity index 98% rename from pallets/subtensor/src/weights.rs rename to pallets/subtensor/src/subnets/weights.rs index 1866f8e62..1a53e44cc 100644 --- a/pallets/subtensor/src/weights.rs +++ b/pallets/subtensor/src/subnets/weights.rs @@ -1,5 +1,5 @@ use super::*; -use crate::math::*; +use crate::epoch::math::*; use sp_core::H256; use sp_runtime::traits::{BlakeTwo256, Hash}; use sp_std::vec; @@ -28,7 +28,7 @@ impl Pallet { ) -> DispatchResult { let who = ensure_signed(origin)?; - log::info!("do_commit_weights( hotkey:{:?} netuid:{:?})", who, netuid); + log::debug!("do_commit_weights( hotkey:{:?} netuid:{:?})", who, netuid); ensure!( Self::get_commit_reveal_weights_enabled(netuid), @@ -89,7 +89,7 @@ impl Pallet { ) -> DispatchResult { let who = ensure_signed(origin.clone())?; - log::info!("do_reveal_weights( hotkey:{:?} netuid:{:?})", who, netuid); + log::debug!("do_reveal_weights( hotkey:{:?} netuid:{:?})", who, netuid); ensure!( Self::get_commit_reveal_weights_enabled(netuid), @@ -188,7 +188,7 @@ impl Pallet { ) -> dispatch::DispatchResult { // --- 1. Check the caller's signature. This is the hotkey of a registered account. 
let hotkey = ensure_signed(origin)?; - log::info!( + log::debug!( "do_set_weights( origin:{:?} netuid:{:?}, uids:{:?}, values:{:?})", hotkey, netuid, @@ -289,7 +289,7 @@ impl Pallet { Self::set_last_update_for_uid(netuid, neuron_uid, current_block); // --- 19. Emit the tracking event. - log::info!( + log::debug!( "WeightsSet( netuid:{:?}, neuron_uid:{:?} )", netuid, neuron_uid @@ -308,7 +308,7 @@ impl Pallet { /// pub fn check_version_key(netuid: u16, version_key: u64) -> bool { let network_version_key: u64 = WeightsVersionKey::::get(netuid); - log::info!( + log::debug!( "check_version_key( network_version_key:{:?}, version_key:{:?} )", network_version_key, version_key diff --git a/pallets/subtensor/src/swap.rs b/pallets/subtensor/src/swap.rs deleted file mode 100644 index 8e4ca5cc9..000000000 --- a/pallets/subtensor/src/swap.rs +++ /dev/null @@ -1,1055 +0,0 @@ -use super::*; -use crate::MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP; -use frame_support::traits::fungible::Mutate; -use frame_support::traits::tokens::Preservation; -use frame_support::{storage::IterableStorageDoubleMap, weights::Weight}; -use sp_core::{Get, U256}; - -impl Pallet { - /// Swaps the hotkey of a coldkey account. - /// - /// # Arguments - /// - /// * `origin` - The origin of the transaction, and also the coldkey account. - /// * `old_hotkey` - The old hotkey to be swapped. - /// * `new_hotkey` - The new hotkey to replace the old one. - /// - /// # Returns - /// - /// * `DispatchResultWithPostInfo` - The result of the dispatch. - /// - /// # Errors - /// - /// * `NonAssociatedColdKey` - If the coldkey does not own the old hotkey. - /// * `HotKeySetTxRateLimitExceeded` - If the transaction rate limit is exceeded. - /// * `NewHotKeyIsSameWithOld` - If the new hotkey is the same as the old hotkey. - /// * `HotKeyAlreadyRegisteredInSubNet` - If the new hotkey is already registered in the subnet. - /// * `NotEnoughBalanceToPaySwapHotKey` - If there is not enough balance to pay for the swap. 
- pub fn do_swap_hotkey( - origin: T::RuntimeOrigin, - old_hotkey: &T::AccountId, - new_hotkey: &T::AccountId, - ) -> DispatchResultWithPostInfo { - let coldkey = ensure_signed(origin)?; - - ensure!( - !Self::coldkey_in_arbitration(&coldkey), - Error::::ColdkeyIsInArbitration - ); - - let mut weight = T::DbWeight::get().reads(2); - - ensure!(old_hotkey != new_hotkey, Error::::NewHotKeyIsSameWithOld); - ensure!( - !Self::is_hotkey_registered_on_any_network(new_hotkey), - Error::::HotKeyAlreadyRegisteredInSubNet - ); - - weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 0)); - ensure!( - Self::coldkey_owns_hotkey(&coldkey, old_hotkey), - Error::::NonAssociatedColdKey - ); - - let block: u64 = Self::get_current_block_as_u64(); - ensure!( - !Self::exceeds_tx_rate_limit(Self::get_last_tx_block(&coldkey), block), - Error::::HotKeySetTxRateLimitExceeded - ); - - weight.saturating_accrue( - T::DbWeight::get().reads((TotalNetworks::::get().saturating_add(1u16)) as u64), - ); - - let swap_cost = Self::get_key_swap_cost(); - log::debug!("Swap cost: {:?}", swap_cost); - - ensure!( - Self::can_remove_balance_from_coldkey_account(&coldkey, swap_cost), - Error::::NotEnoughBalanceToPaySwapHotKey - ); - let actual_burn_amount = Self::remove_balance_from_coldkey_account(&coldkey, swap_cost)?; - Self::burn_tokens(actual_burn_amount); - - Self::swap_owner(old_hotkey, new_hotkey, &coldkey, &mut weight); - Self::swap_total_hotkey_stake(old_hotkey, new_hotkey, &mut weight); - Self::swap_delegates(old_hotkey, new_hotkey, &mut weight); - Self::swap_stake(old_hotkey, new_hotkey, &mut weight); - - // Store the value of is_network_member for the old key - let netuid_is_member: Vec = Self::get_netuid_is_member(old_hotkey, &mut weight); - - Self::swap_is_network_member(old_hotkey, new_hotkey, &netuid_is_member, &mut weight); - Self::swap_axons(old_hotkey, new_hotkey, &netuid_is_member, &mut weight); - Self::swap_keys(old_hotkey, new_hotkey, &netuid_is_member, &mut weight); - Self::swap_loaded_emission(old_hotkey, new_hotkey, &netuid_is_member, &mut weight); - Self::swap_uids(old_hotkey, new_hotkey, &netuid_is_member, &mut weight); - Self::swap_prometheus(old_hotkey, new_hotkey, &netuid_is_member, &mut weight); - Self::swap_senate_member(old_hotkey, new_hotkey, &mut weight)?; - - Self::swap_total_hotkey_coldkey_stakes_this_interval(old_hotkey, new_hotkey, &mut weight); - - Self::set_last_tx_block(&coldkey, block); - weight.saturating_accrue(T::DbWeight::get().writes(1)); - - Self::deposit_event(Event::HotkeySwapped { - coldkey, - old_hotkey: old_hotkey.clone(), - new_hotkey: new_hotkey.clone(), - }); - - Ok(Some(weight).into()) - } - - /// Swaps the coldkey associated with a set of hotkeys from an old coldkey to a new coldkey. - /// - /// # Arguments - /// - /// * `origin` - The origin of the call, which must be signed by the old coldkey. - /// * `old_coldkey` - The account ID of the old coldkey. - /// * `new_coldkey` - The account ID of the new coldkey. - /// - /// # Returns - /// - /// Returns a `DispatchResultWithPostInfo` indicating success or failure, along with the weight consumed. - /// - /// # Errors - /// - /// This function will return an error if: - /// - The caller is not the old coldkey. - /// - The new coldkey is the same as the old coldkey. - /// - The new coldkey is already associated with other hotkeys. - /// - The transaction rate limit for coldkey swaps has been exceeded. - /// - There's not enough balance to pay for the swap. 
- /// - /// # Events - /// - /// Emits a `ColdkeySwapped` event when successful. - /// - /// # Weight - /// - /// Weight is tracked and updated throughout the function execution. - pub fn do_swap_coldkey( - origin: T::RuntimeOrigin, - new_coldkey: &T::AccountId, - ) -> DispatchResultWithPostInfo { - let old_coldkey = ensure_signed(origin)?; - ensure!( - !Self::coldkey_in_arbitration(&old_coldkey), - Error::::ColdkeyIsInArbitration - ); - - let mut weight: Weight = T::DbWeight::get().reads(2); - - // Check that the coldkey is a new key (does not exist elsewhere.) - ensure!( - !Self::coldkey_has_associated_hotkeys(new_coldkey), - Error::::ColdKeyAlreadyAssociated - ); - // Check that the new coldkey is not a hotkey. - ensure!( - !Self::hotkey_account_exists(new_coldkey), - Error::::ColdKeyAlreadyAssociated - ); - - // Calculate and charge the swap fee - let swap_cost = Self::get_key_swap_cost(); - log::debug!("Coldkey swap cost: {:?}", swap_cost); - - ensure!( - Self::can_remove_balance_from_coldkey_account(&old_coldkey, swap_cost), - Error::::NotEnoughBalanceToPaySwapColdKey - ); - let actual_burn_amount = - Self::remove_balance_from_coldkey_account(&old_coldkey, swap_cost)?; - Self::burn_tokens(actual_burn_amount); - - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); - - // Actually do the swap. - weight = weight.saturating_add( - Self::perform_swap_coldkey(&old_coldkey, new_coldkey) - .map_err(|_| Error::::ColdkeySwapError)?, - ); - - Self::set_last_tx_block(new_coldkey, Self::get_current_block_as_u64()); - weight.saturating_accrue(T::DbWeight::get().writes(1)); - - Self::deposit_event(Event::ColdkeySwapped { - old_coldkey: old_coldkey.clone(), - new_coldkey: new_coldkey.clone(), - }); - - Ok(Some(weight).into()) - } - - /// Checks if a coldkey is currently in arbitration. - /// - /// # Arguments - /// - /// * `coldkey` - The account ID of the coldkey to check. - /// - /// # Returns - /// - /// * `bool` - True if the coldkey is in arbitration, false otherwise. - /// - /// # Notes - /// - /// This function compares the arbitration block number of the coldkey with the current block number. - pub fn coldkey_in_arbitration(coldkey: &T::AccountId) -> bool { - ColdkeyArbitrationBlock::::get(coldkey) > Self::get_current_block_as_u64() - } - - /// Returns the remaining arbitration period for a given coldkey. - /// - /// # Arguments - /// - /// * `coldkey` - The account ID of the coldkey to check. - /// - /// # Returns - /// - /// * `u64` - The remaining arbitration period in blocks. - /// - /// - /// # Notes - /// - /// This function calculates the remaining arbitration period by subtracting the current block number - /// from the arbitration block number of the coldkey. 
- pub fn get_remaining_arbitration_period(coldkey: &T::AccountId) -> u64 { - let current_block: u64 = Self::get_current_block_as_u64(); - let arbitration_block: u64 = ColdkeyArbitrationBlock::::get(coldkey); - if arbitration_block > current_block { - arbitration_block.saturating_sub(current_block) - } else { - 0 - } - } - - pub fn meets_min_allowed_coldkey_balance(coldkey: &T::AccountId) -> bool { - let all_staked_keys: Vec = StakingHotkeys::::get(coldkey); - let mut total_staking_balance: u64 = 0; - for hotkey in all_staked_keys { - total_staking_balance = total_staking_balance - .saturating_add(Self::get_stake_for_coldkey_and_hotkey(coldkey, &hotkey)); - } - total_staking_balance = - total_staking_balance.saturating_add(Self::get_coldkey_balance(coldkey)); - total_staking_balance >= MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP - } - - /// Schedules a coldkey swap to a new coldkey with arbitration. - /// - /// # Arguments - /// - /// * `old_coldkey` - The account ID of the old coldkey. - /// * `new_coldkey` - The account ID of the new coldkey. - /// * `work` - The proof of work submitted by the caller. - /// * `block_number` - The block number at which the work was performed. - /// * `nonce` - The nonce used for the proof of work. - /// - /// # Returns - /// - /// * `DispatchResult` - The result of the dispatch. - /// - /// # Errors - /// - - /// - `SameColdkey`: The old coldkey is the same as the new coldkey. - /// - `DuplicateColdkey`: The new coldkey is already in the list of destination coldkeys. - /// - `MaxColdkeyDestinationsReached`: There are already the maximum allowed destination coldkeys for the old coldkey. - /// - `InsufficientBalanceToPerformColdkeySwap`: The old coldkey doesn't have the minimum required TAO balance. - /// - `InvalidDifficulty`: The proof of work is invalid or doesn't meet the required difficulty. - /// - /// # Notes - /// - /// This function ensures that the new coldkey is not already in the list of destination coldkeys. - /// It also checks for a minimum TAO balance and verifies the proof of work. - /// The difficulty of the proof of work increases exponentially with each subsequent call. 
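
// A small sketch (not part of the patch) of the exponential difficulty schedule described
// above and implemented by the deleted calculate_pow_difficulty further below:
// difficulty(n) = base_difficulty * 2^n, where n is the number of destination coldkeys
// already scheduled. u128 with saturating math stands in for the pallet's U256 and its
// BaseDifficulty storage value.
fn pow_difficulty(base_difficulty: u128, swap_attempts: u32) -> u128 {
    base_difficulty.saturating_mul(1u128.checked_shl(swap_attempts).unwrap_or(u128::MAX))
}

fn main() {
    let base = 10_000_000u128; // sample base difficulty
    assert_eq!(pow_difficulty(base, 0), base);
    assert_eq!(pow_difficulty(base, 1), base * 2);
    assert_eq!(pow_difficulty(base, 5), base * 32);
}
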
- pub fn do_schedule_coldkey_swap( - old_coldkey: &T::AccountId, - new_coldkey: &T::AccountId, - work: Vec, - block_number: u64, - nonce: u64, - ) -> DispatchResult { - ensure!(old_coldkey != new_coldkey, Error::::SameColdkey); - - // Check if the old_coldkey is a subnet owner for any network - let is_subnet_owner = (0..=TotalNetworks::::get()) - .any(|netuid| SubnetOwner::::get(netuid) == *old_coldkey); - - // Check if the old_coldkey has more than 500 TAO delegated - let total_delegated = Self::get_total_delegated_stake(old_coldkey); - let has_sufficient_delegation = total_delegated > 500_000_000_000; // 500 TAO in RAO - - // Only check the minimum balance if the old_coldkey is not a subnet owner - // and doesn't have sufficient delegation - if !(is_subnet_owner || has_sufficient_delegation) { - ensure!( - Self::meets_min_allowed_coldkey_balance(old_coldkey), - Error::::InsufficientBalanceToPerformColdkeySwap - ); - } - - // Get current destination coldkeys - let mut destination_coldkeys: Vec = - ColdkeySwapDestinations::::get(old_coldkey.clone()); - - // Calculate difficulty based on the number of existing destination coldkeys - let difficulty = Self::calculate_pow_difficulty(destination_coldkeys.len() as u32); - let work_hash = Self::vec_to_hash(work.clone()); - ensure!( - Self::hash_meets_difficulty(&work_hash, difficulty), - Error::::InvalidDifficulty - ); - - // Verify work is the product of the nonce, the block number, and coldkey - let seal = Self::create_seal_hash(block_number, nonce, old_coldkey); - ensure!(seal == work_hash, Error::::InvalidSeal); - - // Check if the new coldkey is already in the swap wallets list - ensure!( - !destination_coldkeys.contains(new_coldkey), - Error::::DuplicateColdkey - ); - - // If the destinations keys are empty or have less than the maximum allowed, we will add the new coldkey to the list - const MAX_COLDKEY_DESTINATIONS: usize = 10; - - if destination_coldkeys.len() < MAX_COLDKEY_DESTINATIONS { - destination_coldkeys.push(new_coldkey.clone()); - ColdkeySwapDestinations::::insert(old_coldkey.clone(), destination_coldkeys.clone()); - } else { - return Err(Error::::MaxColdkeyDestinationsReached.into()); - } - - // It is the first time we have seen this key - if destination_coldkeys.len() == 1_usize { - // Set the arbitration block for this coldkey - let arbitration_block: u64 = - Self::get_current_block_as_u64().saturating_add(ArbitrationPeriod::::get()); - ColdkeyArbitrationBlock::::insert(old_coldkey.clone(), arbitration_block); - - // Update the list of coldkeys to arbitrate on this block - let mut key_to_arbitrate_on_this_block: Vec = - ColdkeysToSwapAtBlock::::get(arbitration_block); - if !key_to_arbitrate_on_this_block.contains(old_coldkey) { - key_to_arbitrate_on_this_block.push(old_coldkey.clone()); - } - ColdkeysToSwapAtBlock::::insert(arbitration_block, key_to_arbitrate_on_this_block); - } - - // Emit an event indicating that a coldkey swap has been scheduled - Self::deposit_event(Event::ColdkeySwapScheduled { - old_coldkey: old_coldkey.clone(), - new_coldkey: new_coldkey.clone(), - arbitration_block: ColdkeyArbitrationBlock::::get(old_coldkey), - }); - - Ok(()) - } - - /// Calculate the proof of work difficulty based on the number of swap attempts - #[allow(clippy::arithmetic_side_effects)] - pub fn calculate_pow_difficulty(swap_attempts: u32) -> U256 { - let base_difficulty: U256 = U256::from(BaseDifficulty::::get()); // Base difficulty - base_difficulty.saturating_mul(U256::from(2).pow(U256::from(swap_attempts))) - } - - /// 
Arbitrates coldkeys that are scheduled to be swapped on this block. - /// - /// This function retrieves the list of coldkeys scheduled to be swapped on the current block, - /// and processes each coldkey by either extending the arbitration period or performing the swap - /// to the new coldkey. - /// - /// # Returns - /// - /// * `Weight` - The total weight consumed by this operation - pub fn swap_coldkeys_this_block(_weight_limit: &Weight) -> Result { - let mut weight_used = frame_support::weights::Weight::from_parts(0, 0); - - let current_block: u64 = Self::get_current_block_as_u64(); - log::debug!("Swapping coldkeys for block: {:?}", current_block); - - let source_coldkeys: Vec = ColdkeysToSwapAtBlock::::get(current_block); - ColdkeysToSwapAtBlock::::remove(current_block); - weight_used = weight_used.saturating_add(T::DbWeight::get().reads_writes(1, 1)); - - let mut keys_swapped = 0u64; - for coldkey_i in source_coldkeys.iter() { - // TODO: need a sane way to terminate early without locking users in. - // we should update the swap time - // if weight_used.ref_time() > weight_limit.ref_time() { - // log::warn!("Could not finish swapping all coldkeys this block due to weight limit, breaking after swapping {} keys.", keys_swapped); - // break; - // } - - let destinations_coldkeys: Vec = - ColdkeySwapDestinations::::get(coldkey_i); - weight_used = weight_used.saturating_add(T::DbWeight::get().reads(1)); - - if destinations_coldkeys.len() > 1 { - // Do not remove ColdkeySwapDestinations if there are multiple destinations - ColdkeyArbitrationBlock::::insert(coldkey_i.clone(), u64::MAX); - Self::deposit_event(Event::ArbitrationPeriodExtended { - coldkey: coldkey_i.clone(), - }); - } else if let Some(new_coldkey) = destinations_coldkeys.first() { - // Only remove ColdkeySwapDestinations if there's a single destination - ColdkeySwapDestinations::::remove(&coldkey_i); - weight_used = weight_used.saturating_add(T::DbWeight::get().writes(1)); - Self::perform_swap_coldkey(coldkey_i, new_coldkey).map(|weight| { - weight_used = weight_used.saturating_add(weight); - keys_swapped = keys_swapped.saturating_add(1); - })?; - } - } - - Ok(weight_used) - } - - pub fn perform_swap_coldkey( - old_coldkey: &T::AccountId, - new_coldkey: &T::AccountId, - ) -> Result { - log::info!( - "Performing swap for coldkey: {:?} to {:?}", - old_coldkey, - new_coldkey - ); - // Init the weight. - let mut weight = frame_support::weights::Weight::from_parts(0, 0); - - // Swap coldkey references in storage maps - // NOTE The order of these calls is important - Self::swap_stake_for_coldkey(old_coldkey, new_coldkey, &mut weight); - Self::swap_total_hotkey_coldkey_stakes_this_interval_for_coldkey( - old_coldkey, - new_coldkey, - &mut weight, - ); - Self::swap_subnet_owner_for_coldkey(old_coldkey, new_coldkey, &mut weight); - - // Transfer any remaining balance from old_coldkey to new_coldkey - let remaining_balance = Self::get_coldkey_balance(old_coldkey); - if remaining_balance > 0 { - if let Err(e) = Self::kill_coldkey_account(old_coldkey, remaining_balance) { - return Err(e.into()); - } - Self::add_balance_to_coldkey_account(new_coldkey, remaining_balance); - } - - // Swap the coldkey. - let total_balance: u64 = Self::get_coldkey_balance(old_coldkey); - if total_balance > 0 { - // Attempt to transfer the entire total balance to new_coldkey. 
- T::Currency::transfer( - old_coldkey, - new_coldkey, - total_balance, - Preservation::Expendable, - )?; - } - - Ok(weight) - } - - /// Retrieves the network membership status for a given hotkey. - /// - /// # Arguments - /// - /// * `old_hotkey` - The hotkey to check for network membership. - /// - /// # Returns - /// - /// * `Vec` - A vector of network IDs where the hotkey is a member. - pub fn get_netuid_is_member(old_hotkey: &T::AccountId, weight: &mut Weight) -> Vec { - let netuid_is_member: Vec = - as IterableStorageDoubleMap<_, _, _>>::iter_prefix(old_hotkey) - .map(|(netuid, _)| netuid) - .collect(); - weight.saturating_accrue(T::DbWeight::get().reads(netuid_is_member.len() as u64)); - netuid_is_member - } - - /// Swaps the owner of the hotkey. - /// - /// # Arguments - /// - /// * `old_hotkey` - The old hotkey. - /// * `new_hotkey` - The new hotkey. - /// * `coldkey` - The coldkey owning the hotkey. - /// * `weight` - The weight of the transaction. - /// - pub fn swap_owner( - old_hotkey: &T::AccountId, - new_hotkey: &T::AccountId, - coldkey: &T::AccountId, - weight: &mut Weight, - ) { - Owner::::remove(old_hotkey); - Owner::::insert(new_hotkey, coldkey.clone()); - - // Update OwnedHotkeys map - let mut hotkeys = OwnedHotkeys::::get(coldkey); - if !hotkeys.contains(new_hotkey) { - hotkeys.push(new_hotkey.clone()); - } - hotkeys.retain(|hk| *hk != *old_hotkey); - OwnedHotkeys::::insert(coldkey, hotkeys); - - weight.saturating_accrue(T::DbWeight::get().writes(2)); - } - - /// Swaps the total stake of the hotkey. - /// - /// # Arguments - /// - /// * `old_hotkey` - The old hotkey. - /// * `new_hotkey` - The new hotkey. - /// * `weight` - The weight of the transaction. - /// - /// # Weight Calculation - /// - /// * Reads: 1 if the old hotkey exists, otherwise 1 for the failed read. - /// * Writes: 2 if the old hotkey exists (one for removal and one for insertion). - pub fn swap_total_hotkey_stake( - old_hotkey: &T::AccountId, - new_hotkey: &T::AccountId, - weight: &mut Weight, - ) { - if let Ok(total_hotkey_stake) = TotalHotkeyStake::::try_get(old_hotkey) { - TotalHotkeyStake::::remove(old_hotkey); - TotalHotkeyStake::::insert(new_hotkey, total_hotkey_stake); - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); - } else { - weight.saturating_accrue(T::DbWeight::get().reads(1)); - } - } - - /// Swaps the delegates of the hotkey. - /// - /// # Arguments - /// - /// * `old_hotkey` - The old hotkey. - /// * `new_hotkey` - The new hotkey. - /// * `weight` - The weight of the transaction. - /// - /// # Weight Calculation - /// - /// * Reads: 1 if the old hotkey exists, otherwise 1 for the failed read. - /// * Writes: 2 if the old hotkey exists (one for removal and one for insertion). - pub fn swap_delegates( - old_hotkey: &T::AccountId, - new_hotkey: &T::AccountId, - weight: &mut Weight, - ) { - if let Ok(delegate_take) = Delegates::::try_get(old_hotkey) { - Delegates::::remove(old_hotkey); - Delegates::::insert(new_hotkey, delegate_take); - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); - } else { - weight.saturating_accrue(T::DbWeight::get().reads(1)); - } - } - - /// Swaps the stake of the hotkey. - /// - /// # Arguments - /// - /// * `old_hotkey` - The old hotkey. - /// * `new_hotkey` - The new hotkey. - /// * `weight` - The weight of the transaction. 
- pub fn swap_stake(old_hotkey: &T::AccountId, new_hotkey: &T::AccountId, weight: &mut Weight) { - let mut writes: u64 = 0; - let stakes: Vec<(T::AccountId, u64)> = Stake::::iter_prefix(old_hotkey).collect(); - let stake_count = stakes.len() as u32; - - for (coldkey, stake_amount) in stakes { - Stake::::insert(new_hotkey, &coldkey, stake_amount); - writes = writes.saturating_add(1u64); // One write for insert - - // Update StakingHotkeys map - let mut staking_hotkeys = StakingHotkeys::::get(&coldkey); - if !staking_hotkeys.contains(new_hotkey) { - staking_hotkeys.push(new_hotkey.clone()); - writes = writes.saturating_add(1u64); // One write for insert - } - if let Some(pos) = staking_hotkeys.iter().position(|x| x == old_hotkey) { - staking_hotkeys.remove(pos); - writes = writes.saturating_add(1u64); // One write for remove - } - StakingHotkeys::::insert(coldkey.clone(), staking_hotkeys); - writes = writes.saturating_add(1u64); // One write for insert - } - - // Clear the prefix for the old hotkey after transferring all stakes - let _ = Stake::::clear_prefix(old_hotkey, stake_count, None); - writes = writes.saturating_add(1); // One write for insert; // One write for clear_prefix - - // TODO: Remove all entries for old hotkey from StakingHotkeys map - - weight.saturating_accrue(T::DbWeight::get().writes(writes)); - } - - /// Swaps the network membership status of the hotkey. - /// - /// # Arguments - /// - /// * `old_hotkey` - The old hotkey. - /// * `new_hotkey` - The new hotkey. - /// * `netuid_is_member` - A vector of network IDs where the hotkey is a member. - /// * `weight` - The weight of the transaction. - pub fn swap_is_network_member( - old_hotkey: &T::AccountId, - new_hotkey: &T::AccountId, - netuid_is_member: &[u16], - weight: &mut Weight, - ) { - let _ = IsNetworkMember::::clear_prefix(old_hotkey, netuid_is_member.len() as u32, None); - weight.saturating_accrue(T::DbWeight::get().writes(netuid_is_member.len() as u64)); - for netuid in netuid_is_member.iter() { - IsNetworkMember::::insert(new_hotkey, netuid, true); - weight.saturating_accrue(T::DbWeight::get().writes(1)); - } - } - - /// Swaps the axons of the hotkey. - /// - /// # Arguments - /// - /// * `old_hotkey` - The old hotkey. - /// * `new_hotkey` - The new hotkey. - /// * `netuid_is_member` - A vector of network IDs where the hotkey is a member. - /// * `weight` - The weight of the transaction. - /// - /// # Weight Calculation - /// - /// * Reads: 1 for each network ID if the old hotkey exists in that network. - /// * Writes: 2 for each network ID if the old hotkey exists in that network (one for removal and one for insertion). - pub fn swap_axons( - old_hotkey: &T::AccountId, - new_hotkey: &T::AccountId, - netuid_is_member: &[u16], - weight: &mut Weight, - ) { - for netuid in netuid_is_member.iter() { - if let Ok(axon_info) = Axons::::try_get(netuid, old_hotkey) { - Axons::::remove(netuid, old_hotkey); - Axons::::insert(netuid, new_hotkey, axon_info); - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); - } else { - weight.saturating_accrue(T::DbWeight::get().reads(1)); - } - } - } - - /// Swaps the references in the keys storage map of the hotkey. - /// - /// # Arguments - /// - /// * `old_hotkey` - The old hotkey. - /// * `new_hotkey` - The new hotkey. - /// * `netuid_is_member` - A vector of network IDs where the hotkey is a member. - /// * `weight` - The weight of the transaction. 
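
// For illustration only (not part of the patch): the recurring pattern in the deleted
// swap_* helpers of this file is to move a keyed entry from the old hotkey to the new one
// and account for benchmark weight (one read plus two writes when the entry exists, one
// read when it does not). The HashMap and the (reads, writes) tuple below are stand-ins
// for FRAME storage and Weight.
use std::collections::HashMap;

fn move_entry<V>(map: &mut HashMap<u32, V>, old: u32, new: u32, weight: &mut (u64, u64)) {
    if let Some(value) = map.remove(&old) {
        map.insert(new, value);
        weight.0 += 1; // one read
        weight.1 += 2; // one removal + one insertion
    } else {
        weight.0 += 1; // a failed lookup still costs a read
    }
}

fn main() {
    let mut axons: HashMap<u32, &str> = HashMap::from([(1, "axon-info")]);
    let mut weight = (0u64, 0u64);
    move_entry(&mut axons, 1, 2, &mut weight);
    move_entry(&mut axons, 7, 2, &mut weight); // missing key: read only
    assert_eq!(axons.get(&2), Some(&"axon-info"));
    assert_eq!(weight, (2, 2));
}
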
- pub fn swap_keys( - old_hotkey: &T::AccountId, - new_hotkey: &T::AccountId, - netuid_is_member: &[u16], - weight: &mut Weight, - ) { - let mut writes: u64 = 0; - for netuid in netuid_is_member { - let keys: Vec<(u16, T::AccountId)> = Keys::::iter_prefix(netuid).collect(); - for (uid, key) in keys { - if key == *old_hotkey { - log::info!("old hotkey found: {:?}", old_hotkey); - Keys::::insert(netuid, uid, new_hotkey.clone()); - } - writes = writes.saturating_add(2u64); - } - } - log::info!("writes: {:?}", writes); - weight.saturating_accrue(T::DbWeight::get().writes(writes)); - } - - /// Swaps the loaded emission of the hotkey. - /// - /// # Arguments - /// - /// * `old_hotkey` - The old hotkey. - /// * `new_hotkey` - The new hotkey. - /// * `netuid_is_member` - A vector of network IDs where the hotkey is a member. - /// * `weight` - The weight of the transaction. - /// - pub fn swap_loaded_emission( - old_hotkey: &T::AccountId, - new_hotkey: &T::AccountId, - netuid_is_member: &[u16], - weight: &mut Weight, - ) { - for netuid in netuid_is_member { - if let Some(mut emissions) = LoadedEmission::::get(netuid) { - for emission in emissions.iter_mut() { - if emission.0 == *old_hotkey { - emission.0 = new_hotkey.clone(); - } - } - LoadedEmission::::insert(netuid, emissions); - } - } - weight.saturating_accrue(T::DbWeight::get().writes(netuid_is_member.len() as u64)); - } - - /// Swaps the UIDs of the hotkey. - /// - /// # Arguments - /// - /// * `old_hotkey` - The old hotkey. - /// * `new_hotkey` - The new hotkey. - /// * `netuid_is_member` - A vector of network IDs where the hotkey is a member. - /// * `weight` - The weight of the transaction. - /// - pub fn swap_uids( - old_hotkey: &T::AccountId, - new_hotkey: &T::AccountId, - netuid_is_member: &[u16], - weight: &mut Weight, - ) { - for netuid in netuid_is_member.iter() { - if let Ok(uid) = Uids::::try_get(netuid, old_hotkey) { - Uids::::remove(netuid, old_hotkey); - Uids::::insert(netuid, new_hotkey, uid); - weight.saturating_accrue(T::DbWeight::get().writes(2)); - } - } - } - - /// Swaps the Prometheus data of the hotkey. - /// - /// # Arguments - /// - /// * `old_hotkey` - The old hotkey. - /// * `new_hotkey` - The new hotkey. - /// * `netuid_is_member` - A vector of network IDs where the hotkey is a member. - /// * `weight` - The weight of the transaction. - /// - /// # Weight Calculation - /// - /// * Reads: 1 for each network ID if the old hotkey exists in that network. - /// * Writes: 2 for each network ID if the old hotkey exists in that network (one for removal and one for insertion). - pub fn swap_prometheus( - old_hotkey: &T::AccountId, - new_hotkey: &T::AccountId, - netuid_is_member: &[u16], - weight: &mut Weight, - ) { - for netuid in netuid_is_member.iter() { - if let Ok(prometheus_info) = Prometheus::::try_get(netuid, old_hotkey) { - Prometheus::::remove(netuid, old_hotkey); - Prometheus::::insert(netuid, new_hotkey, prometheus_info); - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); - } else { - weight.saturating_accrue(T::DbWeight::get().reads(1)); - } - } - } - - /// Swaps the total hotkey-coldkey stakes for the current interval. - /// - /// # Arguments - /// - /// * `old_hotkey` - The old hotkey. - /// * `new_hotkey` - The new hotkey. - /// * `weight` - The weight of the transaction. 
- /// - pub fn swap_total_hotkey_coldkey_stakes_this_interval( - old_hotkey: &T::AccountId, - new_hotkey: &T::AccountId, - weight: &mut Weight, - ) { - let stakes: Vec<(T::AccountId, (u64, u64))> = - TotalHotkeyColdkeyStakesThisInterval::::iter_prefix(old_hotkey).collect(); - log::info!("Stakes to swap: {:?}", stakes); - for (coldkey, stake) in stakes { - log::info!( - "Swapping stake for coldkey: {:?}, stake: {:?}", - coldkey, - stake - ); - TotalHotkeyColdkeyStakesThisInterval::::insert(new_hotkey, &coldkey, stake); - TotalHotkeyColdkeyStakesThisInterval::::remove(old_hotkey, &coldkey); - weight.saturating_accrue(T::DbWeight::get().writes(2)); // One write for insert and one for remove - } - } - - /// Swaps the total stake associated with a coldkey from the old coldkey to the new coldkey. - /// - /// # Arguments - /// - /// * `old_coldkey` - The AccountId of the old coldkey. - /// * `new_coldkey` - The AccountId of the new coldkey. - /// * `weight` - Mutable reference to the weight of the transaction. - /// - /// # Effects - /// - /// * Removes the total stake from the old coldkey. - /// * Inserts the total stake for the new coldkey. - /// * Updates the transaction weight. - pub fn swap_total_coldkey_stake( - old_coldkey: &T::AccountId, - new_coldkey: &T::AccountId, - weight: &mut Weight, - ) { - let stake = TotalColdkeyStake::::get(old_coldkey); - TotalColdkeyStake::::remove(old_coldkey); - TotalColdkeyStake::::insert(new_coldkey, stake); - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); - } - - /// Swaps the stake associated with a coldkey from the old coldkey to the new coldkey. - /// - /// # Arguments - /// - /// * `old_coldkey` - The AccountId of the old coldkey. - /// * `new_coldkey` - The AccountId of the new coldkey. - /// * `weight` - Mutable reference to the weight of the transaction. - /// - /// # Effects - /// - /// * Transfers all stakes from the old coldkey to the new coldkey. - /// * Updates the ownership of hotkeys. - /// * Updates the total stake for both old and new coldkeys. - /// * Updates the transaction weight. 
- /// - - pub fn swap_stake_for_coldkey( - old_coldkey: &T::AccountId, - new_coldkey: &T::AccountId, - weight: &mut Weight, - ) { - // Retrieve the list of hotkeys owned by the old coldkey - let old_owned_hotkeys: Vec = OwnedHotkeys::::get(old_coldkey); - - // Initialize the total transferred stake to zero - let mut total_transferred_stake: u64 = 0u64; - - // Log the total stake of old and new coldkeys before the swap - log::info!( - "Before swap - Old coldkey total stake: {}", - TotalColdkeyStake::::get(old_coldkey) - ); - log::info!( - "Before swap - New coldkey total stake: {}", - TotalColdkeyStake::::get(new_coldkey) - ); - - // Iterate over each hotkey owned by the old coldkey - for hotkey in old_owned_hotkeys.iter() { - // Retrieve and remove the stake associated with the hotkey and old coldkey - let stake: u64 = Stake::::take(hotkey, old_coldkey); - log::info!("Transferring stake for hotkey {:?}: {}", hotkey, stake); - if stake > 0 { - // Insert the stake for the hotkey and new coldkey - let old_stake = Stake::::get(hotkey, new_coldkey); - Stake::::insert(hotkey, new_coldkey, stake.saturating_add(old_stake)); - total_transferred_stake = total_transferred_stake.saturating_add(stake); - - // Update the owner of the hotkey to the new coldkey - Owner::::insert(hotkey, new_coldkey); - - // Update the transaction weight - weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); - } - } - log::info!( - "Starting transfer of delegated stakes for old coldkey: {:?}", - old_coldkey - ); - - for staking_hotkey in StakingHotkeys::::get(old_coldkey) { - log::info!("Processing staking hotkey: {:?}", staking_hotkey); - if Stake::::contains_key(staking_hotkey.clone(), old_coldkey) { - let hotkey = &staking_hotkey; - // Retrieve and remove the stake associated with the hotkey and old coldkey - let stake: u64 = Stake::::get(hotkey, old_coldkey); - Stake::::remove(hotkey, old_coldkey); - log::info!( - "Transferring delegated stake for hotkey {:?}: {}", - hotkey, - stake - ); - if stake > 0 { - // Insert the stake for the hotkey and new coldkey - let old_stake = Stake::::get(hotkey, new_coldkey); - Stake::::insert(hotkey, new_coldkey, stake.saturating_add(old_stake)); - total_transferred_stake = total_transferred_stake.saturating_add(stake); - log::info!( - "Updated stake for hotkey {:?} under new coldkey {:?}: {}", - hotkey, - new_coldkey, - stake.saturating_add(old_stake) - ); - - // Update the transaction weight - weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 1)); - } - } else { - log::info!( - "No stake found for staking hotkey {:?} under old coldkey {:?}", - staking_hotkey, - old_coldkey - ); - weight.saturating_accrue(T::DbWeight::get().reads(1)); - } - } - - log::info!( - "Completed transfer of delegated stakes for old coldkey: {:?}", - old_coldkey - ); - - // Log the total transferred stake - log::info!("Total transferred stake: {}", total_transferred_stake); - - // Update the total stake for both old and new coldkeys if any stake was transferred - if total_transferred_stake > 0 { - let old_coldkey_stake: u64 = TotalColdkeyStake::::take(old_coldkey); // Remove it here. 
- let new_coldkey_stake: u64 = TotalColdkeyStake::::get(new_coldkey); - - TotalColdkeyStake::::insert(old_coldkey, 0); - TotalColdkeyStake::::insert( - new_coldkey, - new_coldkey_stake.saturating_add(old_coldkey_stake), - ); - - log::info!("Updated old coldkey stake from {} to 0", old_coldkey_stake); - log::info!( - "Updated new coldkey stake from {} to {}", - new_coldkey_stake, - new_coldkey_stake.saturating_add(old_coldkey_stake) - ); - - // Update the transaction weight - weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); - } - - // Update the list of owned hotkeys for both old and new coldkeys - - let mut new_owned_hotkeys = OwnedHotkeys::::get(new_coldkey); - for hotkey in old_owned_hotkeys { - if !new_owned_hotkeys.contains(&hotkey) { - new_owned_hotkeys.push(hotkey); - } - } - - OwnedHotkeys::::insert(new_coldkey, new_owned_hotkeys); - OwnedHotkeys::::remove(old_coldkey); - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); - - // Update the staking hotkeys for both old and new coldkeys - let staking_hotkeys: Vec = StakingHotkeys::::get(old_coldkey); - - let mut existing_staking_hotkeys = StakingHotkeys::::get(new_coldkey); - for hotkey in staking_hotkeys { - if !existing_staking_hotkeys.contains(&hotkey) { - existing_staking_hotkeys.push(hotkey); - } - } - - StakingHotkeys::::remove(old_coldkey); - StakingHotkeys::::insert(new_coldkey, existing_staking_hotkeys); - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); - - // Log the total stake of old and new coldkeys after the swap - log::info!( - "After swap - Old coldkey total stake: {}", - TotalColdkeyStake::::get(old_coldkey) - ); - log::info!( - "After swap - New coldkey total stake: {}", - TotalColdkeyStake::::get(new_coldkey) - ); - } - - /// Swaps the total hotkey-coldkey stakes for the current interval from the old coldkey to the new coldkey. - /// - /// # Arguments - /// - /// * `old_coldkey` - The AccountId of the old coldkey. - /// * `new_coldkey` - The AccountId of the new coldkey. - /// * `weight` - Mutable reference to the weight of the transaction. - /// - /// # Effects - /// - /// * Removes all total hotkey-coldkey stakes for the current interval associated with the old coldkey. - /// * Inserts all total hotkey-coldkey stakes for the current interval for the new coldkey. - /// * Updates the transaction weight. - pub fn swap_total_hotkey_coldkey_stakes_this_interval_for_coldkey( - old_coldkey: &T::AccountId, - new_coldkey: &T::AccountId, - weight: &mut Weight, - ) { - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 0)); - for hotkey in OwnedHotkeys::::get(old_coldkey).iter() { - let (stake, block) = - TotalHotkeyColdkeyStakesThisInterval::::get(&hotkey, old_coldkey); - TotalHotkeyColdkeyStakesThisInterval::::remove(&hotkey, old_coldkey); - TotalHotkeyColdkeyStakesThisInterval::::insert(&hotkey, new_coldkey, (stake, block)); - weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); - } - } - - /// Checks if a coldkey has any associated hotkeys. - /// - /// # Arguments - /// - /// * `coldkey` - The AccountId of the coldkey to check. - /// - /// # Returns - /// - /// * `bool` - True if the coldkey has any associated hotkeys, false otherwise. - pub fn coldkey_has_associated_hotkeys(coldkey: &T::AccountId) -> bool { - !StakingHotkeys::::get(coldkey).is_empty() - } - - /// Swaps the subnet owner from the old coldkey to the new coldkey for all networks where the old coldkey is the owner. 
- /// - /// # Arguments - /// - /// * `old_coldkey` - The AccountId of the old coldkey. - /// * `new_coldkey` - The AccountId of the new coldkey. - /// * `weight` - Mutable reference to the weight of the transaction. - /// - /// # Effects - /// - /// * Updates the subnet owner to the new coldkey for all networks where the old coldkey was the owner. - /// * Updates the transaction weight. - pub fn swap_subnet_owner_for_coldkey( - old_coldkey: &T::AccountId, - new_coldkey: &T::AccountId, - weight: &mut Weight, - ) { - for netuid in 0..=TotalNetworks::::get() { - let subnet_owner = SubnetOwner::::get(netuid); - if subnet_owner == *old_coldkey { - SubnetOwner::::insert(netuid, new_coldkey.clone()); - weight.saturating_accrue(T::DbWeight::get().writes(1)); - } - } - weight.saturating_accrue(T::DbWeight::get().reads(TotalNetworks::::get() as u64)); - } - - pub fn swap_senate_member( - old_hotkey: &T::AccountId, - new_hotkey: &T::AccountId, - weight: &mut Weight, - ) -> DispatchResult { - weight.saturating_accrue(T::DbWeight::get().reads(1)); - if T::SenateMembers::is_member(old_hotkey) { - T::SenateMembers::swap_member(old_hotkey, new_hotkey).map_err(|e| e.error)?; - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); - } - Ok(()) - } -} diff --git a/pallets/subtensor/src/swap/mod.rs b/pallets/subtensor/src/swap/mod.rs new file mode 100644 index 000000000..4e4b92907 --- /dev/null +++ b/pallets/subtensor/src/swap/mod.rs @@ -0,0 +1,3 @@ +use super::*; +pub mod swap_coldkey; +pub mod swap_hotkey; diff --git a/pallets/subtensor/src/swap/swap_coldkey.rs b/pallets/subtensor/src/swap/swap_coldkey.rs new file mode 100644 index 000000000..bcbd2a330 --- /dev/null +++ b/pallets/subtensor/src/swap/swap_coldkey.rs @@ -0,0 +1,232 @@ +use super::*; +use frame_support::weights::Weight; +use sp_core::Get; + +impl Pallet { + /// Swaps the coldkey associated with a set of hotkeys from an old coldkey to a new coldkey. + /// + /// # Arguments + /// + /// * `origin` - The origin of the call, which must be signed by the old coldkey. + /// * `new_coldkey` - The account ID of the new coldkey. + /// + /// # Returns + /// + /// Returns a `DispatchResultWithPostInfo` indicating success or failure, along with the weight consumed. + /// + /// # Errors + /// + /// This function will return an error if: + /// - The caller is not a valid signed origin. + /// - The old coldkey (caller) is in arbitration. + /// - The new coldkey is already associated with other hotkeys or is a hotkey itself. + /// - There's not enough balance to pay for the swap. + /// + /// # Events + /// + /// Emits a `ColdkeySwapped` event when successful. + /// + /// # Weight + /// + /// Weight is tracked and updated throughout the function execution. + pub fn do_swap_coldkey( + old_coldkey: &T::AccountId, + new_coldkey: &T::AccountId, + ) -> DispatchResultWithPostInfo { + // 2. Initialize the weight for this operation + let mut weight: Weight = T::DbWeight::get().reads(2); + // 3. Ensure the new coldkey is not associated with any hotkeys + ensure!( + StakingHotkeys::::get(new_coldkey).is_empty(), + Error::::ColdKeyAlreadyAssociated + ); + weight = weight.saturating_add(T::DbWeight::get().reads(1)); + + // 4. Ensure the new coldkey is not a hotkey + ensure!( + !Self::hotkey_account_exists(new_coldkey), + Error::::NewColdKeyIsHotkey + ); + weight = weight.saturating_add(T::DbWeight::get().reads(1)); + + // 5. 
Swap the identity if the old coldkey has one + if let Some(identity) = Identities::::take(old_coldkey) { + Identities::::insert(new_coldkey, identity); + } + + // 6. Calculate the swap cost and ensure sufficient balance + let swap_cost = Self::get_key_swap_cost(); + ensure!( + Self::can_remove_balance_from_coldkey_account(old_coldkey, swap_cost), + Error::::NotEnoughBalanceToPaySwapColdKey + ); + + // 7. Remove and burn the swap cost from the old coldkey's account + let actual_burn_amount = Self::remove_balance_from_coldkey_account(old_coldkey, swap_cost)?; + Self::burn_tokens(actual_burn_amount); + + // 8. Update the weight for the balance operations + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + + // 9. Perform the actual coldkey swap + let _ = Self::perform_swap_coldkey(old_coldkey, new_coldkey, &mut weight); + + // 10. Update the last transaction block for the new coldkey + Self::set_last_tx_block(new_coldkey, Self::get_current_block_as_u64()); + weight.saturating_accrue(T::DbWeight::get().writes(1)); + + // 11. Remove the coldkey swap scheduled record + ColdkeySwapScheduled::::remove(old_coldkey); + + // 12. Emit the ColdkeySwapped event + Self::deposit_event(Event::ColdkeySwapped { + old_coldkey: old_coldkey.clone(), + new_coldkey: new_coldkey.clone(), + }); + + // 12. Return the result with the updated weight + Ok(Some(weight).into()) + } + + /// Performs the actual coldkey swap operation, transferring all associated data and balances from the old coldkey to the new coldkey. + /// + /// # Arguments + /// + /// * `old_coldkey` - The account ID of the old coldkey. + /// * `new_coldkey` - The account ID of the new coldkey. + /// * `weight` - A mutable reference to the current transaction weight. + /// + /// # Returns + /// + /// Returns a `DispatchResult` indicating success or failure of the operation. + /// + /// # Steps + /// + /// 1. Swap TotalHotkeyColdkeyStakesThisInterval: + /// - For each hotkey owned by the old coldkey, transfer its stake and block data to the new coldkey. + /// + /// 2. Swap subnet ownership: + /// - For each subnet, if the old coldkey is the owner, transfer ownership to the new coldkey. + /// + /// 3. Swap Stakes: + /// - For each hotkey staking for the old coldkey, transfer its stake to the new coldkey. + /// + /// 4. Swap total coldkey stake: + /// - Transfer the total stake from the old coldkey to the new coldkey. + /// + /// 5. Swap StakingHotkeys: + /// - Transfer the list of staking hotkeys from the old coldkey to the new coldkey. + /// + /// 6. Swap hotkey owners: + /// - For each hotkey owned by the old coldkey, transfer ownership to the new coldkey. + /// - Update the list of owned hotkeys for both old and new coldkeys. + /// + /// 7. Transfer remaining balance: + /// - Transfer any remaining balance from the old coldkey to the new coldkey. + /// + /// Throughout the process, the function updates the transaction weight to reflect the operations performed. + /// + /// # Notes + /// + /// This function is a critical part of the coldkey swap process and should be called only after all necessary checks and validations have been performed. + pub fn perform_swap_coldkey( + old_coldkey: &T::AccountId, + new_coldkey: &T::AccountId, + weight: &mut Weight, + ) -> DispatchResult { + // 1. Swap TotalHotkeyColdkeyStakesThisInterval + // TotalHotkeyColdkeyStakesThisInterval: MAP ( hotkey, coldkey ) --> ( stake, block ) | Stake of the hotkey for the coldkey. 
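// For orientation before the per-map steps that follow, a minimal end-to-end sketch of
// `do_swap_coldkey` above (illustrative only, assuming the pallet's existing test mock:
// a `Test` runtime, `new_test_ext`, `U256` account ids; registration and staking setup is elided):
//
//     new_test_ext(1).execute_with(|| {
//         let old_coldkey = U256::from(1);
//         let new_coldkey = U256::from(2);
//         SubtensorModule::add_balance_to_coldkey_account(&old_coldkey, 100_000_000_000);
//         // ... register a hotkey under `old_coldkey` and add stake ...
//         assert_ok!(SubtensorModule::do_swap_coldkey(&old_coldkey, &new_coldkey));
//         assert_eq!(SubtensorModule::get_coldkey_balance(&old_coldkey), 0);
//         assert!(StakingHotkeys::<Test>::get(old_coldkey).is_empty());
//     });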
+ for hotkey in OwnedHotkeys::::get(old_coldkey).iter() { + let (stake, block) = + TotalHotkeyColdkeyStakesThisInterval::::get(&hotkey, old_coldkey); + TotalHotkeyColdkeyStakesThisInterval::::remove(&hotkey, old_coldkey); + TotalHotkeyColdkeyStakesThisInterval::::insert(&hotkey, new_coldkey, (stake, block)); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); + } + + // 2. Swap subnet owner. + // SubnetOwner: MAP ( netuid ) --> (coldkey) | Owner of the subnet. + for netuid in Self::get_all_subnet_netuids() { + let subnet_owner = SubnetOwner::::get(netuid); + if subnet_owner == *old_coldkey { + SubnetOwner::::insert(netuid, new_coldkey.clone()); + } + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + } + + // 3. Swap Stake. + // Stake: MAP ( hotkey, coldkey ) --> u64 | Stake of the hotkey for the coldkey. + for hotkey in StakingHotkeys::::get(old_coldkey) { + // Get the stake on the old (hot,coldkey) account. + let old_stake: u64 = Stake::::get(&hotkey, old_coldkey); + // Get the stake on the new (hot,coldkey) account. + let new_stake: u64 = Stake::::get(&hotkey, new_coldkey); + // Add the stake to new account. + Stake::::insert(&hotkey, new_coldkey, new_stake.saturating_add(old_stake)); + // Remove the value from the old account. + Stake::::remove(&hotkey, old_coldkey); + // Add the weight for the read and write. + weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); + } + + // 4. Swap total coldkey stake. + // TotalColdkeyStake: MAP ( coldkey ) --> u64 | Total stake of the coldkey. + let old_coldkey_stake: u64 = TotalColdkeyStake::::get(old_coldkey); + // Get the stake of the new coldkey. + let new_coldkey_stake: u64 = TotalColdkeyStake::::get(new_coldkey); + // Remove the value from the old account. + TotalColdkeyStake::::insert(old_coldkey, 0); + // Add the stake to new account. + TotalColdkeyStake::::insert( + new_coldkey, + new_coldkey_stake.saturating_add(old_coldkey_stake), + ); + weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); + + // 5. Swap StakingHotkeys. + // StakingHotkeys: MAP ( coldkey ) --> Vec | Hotkeys staking for the coldkey. + let old_staking_hotkeys: Vec = StakingHotkeys::::get(old_coldkey); + let mut new_staking_hotkeys: Vec = StakingHotkeys::::get(new_coldkey); + for hotkey in old_staking_hotkeys { + // If the hotkey is not already in the new coldkey, add it. + if !new_staking_hotkeys.contains(&hotkey) { + new_staking_hotkeys.push(hotkey); + } + } + StakingHotkeys::::remove(old_coldkey); + StakingHotkeys::::insert(new_coldkey, new_staking_hotkeys); + weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); + + // 6. Swap hotkey owners. + // Owner: MAP ( hotkey ) --> coldkey | Owner of the hotkey. + // OwnedHotkeys: MAP ( coldkey ) --> Vec | Hotkeys owned by the coldkey. + let old_owned_hotkeys: Vec = OwnedHotkeys::::get(old_coldkey); + let mut new_owned_hotkeys: Vec = OwnedHotkeys::::get(new_coldkey); + for owned_hotkey in old_owned_hotkeys.iter() { + // Remove the hotkey from the old coldkey. + Owner::::remove(owned_hotkey); + // Add the hotkey to the new coldkey. + Owner::::insert(owned_hotkey, new_coldkey.clone()); + // Addd the owned hotkey to the new set of owned hotkeys. + if !new_owned_hotkeys.contains(owned_hotkey) { + new_owned_hotkeys.push(owned_hotkey.clone()); + } + } + OwnedHotkeys::::remove(old_coldkey); + OwnedHotkeys::::insert(new_coldkey, new_owned_hotkeys); + weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); + + // 7. Transfer remaining balance. 
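// A small worked example of the merge convention used in steps 3 and 4 above (values are
// arbitrary): stake is added onto whatever the new coldkey already holds, never overwritten,
// using saturating arithmetic:
//
//     let stake_on_old_coldkey: u64 = 700;
//     let stake_already_on_new_coldkey: u64 = 300;
//     let merged = stake_already_on_new_coldkey.saturating_add(stake_on_old_coldkey);
//     assert_eq!(merged, 1_000);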
+ // Balance: MAP ( coldkey ) --> u64 | Balance of the coldkey. + // Transfer any remaining balance from old_coldkey to new_coldkey + let remaining_balance = Self::get_coldkey_balance(old_coldkey); + if remaining_balance > 0 { + Self::kill_coldkey_account(old_coldkey, remaining_balance)?; + Self::add_balance_to_coldkey_account(new_coldkey, remaining_balance); + } + weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); + + // Return ok. + Ok(()) + } +} diff --git a/pallets/subtensor/src/swap/swap_hotkey.rs b/pallets/subtensor/src/swap/swap_hotkey.rs new file mode 100644 index 000000000..793e34bff --- /dev/null +++ b/pallets/subtensor/src/swap/swap_hotkey.rs @@ -0,0 +1,362 @@ +use super::*; +use frame_support::weights::Weight; +use sp_core::Get; + +impl Pallet { + /// Swaps the hotkey of a coldkey account. + /// + /// # Arguments + /// + /// * `origin` - The origin of the transaction, and also the coldkey account. + /// * `old_hotkey` - The old hotkey to be swapped. + /// * `new_hotkey` - The new hotkey to replace the old one. + /// + /// # Returns + /// + /// * `DispatchResultWithPostInfo` - The result of the dispatch. + /// + /// # Errors + /// + /// * `NonAssociatedColdKey` - If the coldkey does not own the old hotkey. + /// * `HotKeySetTxRateLimitExceeded` - If the transaction rate limit is exceeded. + /// * `NewHotKeyIsSameWithOld` - If the new hotkey is the same as the old hotkey. + /// * `HotKeyAlreadyRegisteredInSubNet` - If the new hotkey is already registered in the subnet. + /// * `NotEnoughBalanceToPaySwapHotKey` - If there is not enough balance to pay for the swap. + pub fn do_swap_hotkey( + origin: T::RuntimeOrigin, + old_hotkey: &T::AccountId, + new_hotkey: &T::AccountId, + ) -> DispatchResultWithPostInfo { + // 1. Ensure the origin is signed and get the coldkey + let coldkey = ensure_signed(origin)?; + + // 2. Initialize the weight for this operation + let mut weight = T::DbWeight::get().reads(2); + + // 3. Ensure the new hotkey is different from the old one + ensure!(old_hotkey != new_hotkey, Error::::NewHotKeyIsSameWithOld); + + // 4. Ensure the new hotkey is not already registered on any network + ensure!( + !Self::is_hotkey_registered_on_any_network(new_hotkey), + Error::::HotKeyAlreadyRegisteredInSubNet + ); + + // 5. Update the weight for the checks above + weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 0)); + + // 6. Ensure the coldkey owns the old hotkey + ensure!( + Self::coldkey_owns_hotkey(&coldkey, old_hotkey), + Error::::NonAssociatedColdKey + ); + + // 7. Get the current block number + let block: u64 = Self::get_current_block_as_u64(); + + // 8. Ensure the transaction rate limit is not exceeded + ensure!( + !Self::exceeds_tx_rate_limit(Self::get_last_tx_block(&coldkey), block), + Error::::HotKeySetTxRateLimitExceeded + ); + + // 9. Update the weight for reading the total networks + weight.saturating_accrue( + T::DbWeight::get().reads((TotalNetworks::::get().saturating_add(1u16)) as u64), + ); + + // 10. Get the cost for swapping the key + let swap_cost = Self::get_key_swap_cost(); + log::debug!("Swap cost: {:?}", swap_cost); + + // 11. Ensure the coldkey has enough balance to pay for the swap + ensure!( + Self::can_remove_balance_from_coldkey_account(&coldkey, swap_cost), + Error::::NotEnoughBalanceToPaySwapHotKey + ); + + // 12. Remove the swap cost from the coldkey's account + let actual_burn_amount = Self::remove_balance_from_coldkey_account(&coldkey, swap_cost)?; + + // 13. 
Burn the tokens + Self::burn_tokens(actual_burn_amount); + + // 14. Perform the hotkey swap + let _ = Self::perform_hotkey_swap(old_hotkey, new_hotkey, &coldkey, &mut weight); + + // 15. Update the last transaction block for the coldkey + Self::set_last_tx_block(&coldkey, block); + weight.saturating_accrue(T::DbWeight::get().writes(1)); + + // 16. Emit an event for the hotkey swap + Self::deposit_event(Event::HotkeySwapped { + coldkey, + old_hotkey: old_hotkey.clone(), + new_hotkey: new_hotkey.clone(), + }); + + // 17. Return the weight of the operation + Ok(Some(weight).into()) + } + + /// Performs the hotkey swap operation, transferring all associated data and state from the old hotkey to the new hotkey. + /// + /// This function executes a series of steps to ensure a complete transfer of all relevant information: + /// 1. Swaps the owner of the hotkey. + /// 2. Updates the list of owned hotkeys for the coldkey. + /// 3. Transfers the total hotkey stake. + /// 4. Moves all stake-related data for the interval. + /// 5. Updates the last transaction block for the new hotkey. + /// 6. Transfers the delegate take information. + /// 7. Swaps Senate membership if applicable. + /// 8. Updates delegate information. + /// 9. For each subnet: + /// - Updates network membership status. + /// - Transfers UID and key information. + /// - Moves Prometheus data. + /// - Updates axon information. + /// - Transfers weight commits. + /// - Updates loaded emission data. + /// 10. Transfers all stake information, including updating staking hotkeys for each coldkey. + /// + /// Throughout the process, the function accumulates the computational weight of operations performed. + /// + /// # Arguments + /// * `old_hotkey` - The AccountId of the current hotkey to be replaced. + /// * `new_hotkey` - The AccountId of the new hotkey to replace the old one. + /// * `coldkey` - The AccountId of the coldkey that owns both hotkeys. + /// * `weight` - A mutable reference to the Weight, updated as operations are performed. + /// + /// # Returns + /// * `DispatchResult` - Ok(()) if the swap was successful, or an error if any operation failed. + /// + /// # Note + /// This function performs extensive storage reads and writes, which can be computationally expensive. + /// The accumulated weight should be carefully considered in the context of block limits. + pub fn perform_hotkey_swap( + old_hotkey: &T::AccountId, + new_hotkey: &T::AccountId, + coldkey: &T::AccountId, + weight: &mut Weight, + ) -> DispatchResult { + // 1. Swap owner. + // Owner( hotkey ) -> coldkey -- the coldkey that owns the hotkey. + Owner::::remove(old_hotkey); + Owner::::insert(new_hotkey, coldkey.clone()); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + + // 2. Swap OwnedHotkeys. + // OwnedHotkeys( coldkey ) -> Vec -- the hotkeys that the coldkey owns. + let mut hotkeys = OwnedHotkeys::::get(coldkey); + // Add the new key if needed. + if !hotkeys.contains(new_hotkey) { + hotkeys.push(new_hotkey.clone()); + } + // Remove the old key. + hotkeys.retain(|hk| *hk != *old_hotkey); + OwnedHotkeys::::insert(coldkey, hotkeys); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + + // 3. Swap total hotkey stake. + // TotalHotkeyStake( hotkey ) -> stake -- the total stake that the hotkey has across all delegates. + let old_total_hotkey_stake = TotalHotkeyStake::::get(old_hotkey); // Get the old total hotkey stake. + let new_total_hotkey_stake = TotalHotkeyStake::::get(new_hotkey); // Get the new total hotkey stake. 
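// As a usage sketch for the `do_swap_hotkey` entry point above (illustrative only, assuming the
// pallet's test mock: a `Test` runtime with `use frame_system::Config`, `new_test_ext`,
// `add_network`, `step_block`, `U256` ids, and default swap-cost/rate-limit settings that
// permit the call):
//
//     new_test_ext(1).execute_with(|| {
//         let coldkey = U256::from(1);
//         let (old_hotkey, new_hotkey) = (U256::from(2), U256::from(3));
//         add_network(1, 13, 0);
//         SubtensorModule::add_balance_to_coldkey_account(&coldkey, 100_000_000_000);
//         assert_ok!(SubtensorModule::burned_register(
//             <<Test as Config>::RuntimeOrigin>::signed(coldkey),
//             1,
//             old_hotkey
//         ));
//         step_block(1);
//         assert_ok!(SubtensorModule::do_swap_hotkey(
//             <<Test as Config>::RuntimeOrigin>::signed(coldkey),
//             &old_hotkey,
//             &new_hotkey
//         ));
//         assert_eq!(Owner::<Test>::get(new_hotkey), coldkey);
//     });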
+ TotalHotkeyStake::::remove(old_hotkey); // Remove the old total hotkey stake. + TotalHotkeyStake::::insert( + new_hotkey, + old_total_hotkey_stake.saturating_add(new_total_hotkey_stake), + ); // Insert the new total hotkey stake via the addition. + weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); + + // 4. Swap total hotkey stakes. + // TotalHotkeyColdkeyStakesThisInterval( hotkey ) --> (u64: stakes, u64: block_number) + let stake_tuples: Vec<(T::AccountId, (u64, u64))> = + TotalHotkeyColdkeyStakesThisInterval::::iter_prefix(old_hotkey).collect(); + for (coldkey, stake_tup) in stake_tuples { + // NOTE: You could use this to increase your allowed stake operations but this would cost. + TotalHotkeyColdkeyStakesThisInterval::::insert(new_hotkey, &coldkey, stake_tup); + TotalHotkeyColdkeyStakesThisInterval::::remove(old_hotkey, &coldkey); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); + } + + // 5. Swap LastTxBlock + // LastTxBlock( hotkey ) --> u64 -- the last transaction block for the hotkey. + LastTxBlock::::remove(old_hotkey); + LastTxBlock::::insert(new_hotkey, Self::get_current_block_as_u64()); + weight.saturating_accrue(T::DbWeight::get().reads_writes(0, 2)); + + // 6. Swap LastTxBlockDelegateTake + // LastTxBlockDelegateTake( hotkey ) --> u64 -- the last transaction block for the hotkey delegate take. + LastTxBlockDelegateTake::::remove(old_hotkey); + LastTxBlockDelegateTake::::insert(new_hotkey, Self::get_current_block_as_u64()); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); + + // 7. Swap Senate members. + // Senate( hotkey ) --> ? + if T::SenateMembers::is_member(old_hotkey) { + T::SenateMembers::swap_member(old_hotkey, new_hotkey).map_err(|e| e.error)?; + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); + } + + // 8. Swap delegates. + // Delegates( hotkey ) -> take value -- the hotkey delegate take value. + if Delegates::::contains_key(old_hotkey) { + let old_delegate_take = Delegates::::get(old_hotkey); + Delegates::::remove(old_hotkey); + Delegates::::insert(new_hotkey, old_delegate_take); + weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); + } + // 9. Swap all subnet specific info. + let all_netuids: Vec = Self::get_all_subnet_netuids(); + for netuid in all_netuids { + // 9.1 Remove the previous hotkey and insert the new hotkey from membership. + // IsNetworkMember( hotkey, netuid ) -> bool -- is the hotkey a subnet member. + let is_network_member: bool = IsNetworkMember::::get(old_hotkey, netuid); + IsNetworkMember::::remove(old_hotkey, netuid); + IsNetworkMember::::insert(new_hotkey, netuid, is_network_member); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); + + // 9.2 Swap Uids + Keys. + // Keys( netuid, hotkey ) -> uid -- the uid the hotkey has in the network if it is a member. + // Uids( netuid, hotkey ) -> uid -- the uids that the hotkey has. + if is_network_member { + // 9.2.1 Swap the UIDS + if let Ok(old_uid) = Uids::::try_get(netuid, old_hotkey) { + Uids::::remove(netuid, old_hotkey); + Uids::::insert(netuid, new_hotkey, old_uid); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); + + // 9.2.2 Swap the keys. + Keys::::insert(netuid, old_uid, new_hotkey.clone()); + weight.saturating_accrue(T::DbWeight::get().reads_writes(0, 1)); + } + } + + // 9.3 Swap Prometheus. + // Prometheus( netuid, hotkey ) -> prometheus -- the prometheus data that a hotkey has in the network. 
+ if is_network_member { + if let Ok(old_prometheus_info) = Prometheus::::try_get(netuid, old_hotkey) { + Prometheus::::remove(netuid, old_hotkey); + Prometheus::::insert(netuid, new_hotkey, old_prometheus_info); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); + } + } + + // 9.4. Swap axons. + // Axons( netuid, hotkey ) -> axon -- the axon that the hotkey has. + if is_network_member { + if let Ok(old_axon_info) = Axons::::try_get(netuid, old_hotkey) { + Axons::::remove(netuid, old_hotkey); + Axons::::insert(netuid, new_hotkey, old_axon_info); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); + } + } + + // 9.5 Swap WeightCommits + // WeightCommits( hotkey ) --> Vec -- the weight commits for the hotkey. + if is_network_member { + if let Ok(old_weight_commits) = WeightCommits::::try_get(netuid, old_hotkey) { + WeightCommits::::remove(netuid, old_hotkey); + WeightCommits::::insert(netuid, new_hotkey, old_weight_commits); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); + } + } + + // 9.6. Swap the subnet loaded emission. + // LoadedEmission( netuid ) --> Vec<(hotkey, u64)> -- the loaded emission for the subnet. + if is_network_member { + if let Some(mut old_loaded_emission) = LoadedEmission::::get(netuid) { + for emission in old_loaded_emission.iter_mut() { + if emission.0 == *old_hotkey { + emission.0 = new_hotkey.clone(); + } + } + LoadedEmission::::remove(netuid); + LoadedEmission::::insert(netuid, old_loaded_emission); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); + } + } + } + + // 10. Swap Stake. + // Stake( hotkey, coldkey ) -> stake -- the stake that the hotkey controls on behalf of the coldkey. + let stakes: Vec<(T::AccountId, u64)> = Stake::::iter_prefix(old_hotkey).collect(); + // Clear the entire old prefix here. + let _ = Stake::::clear_prefix(old_hotkey, stakes.len() as u32, None); + // Iterate over all the staking rows and insert them into the new hotkey. + for (coldkey, old_stake_amount) in stakes { + weight.saturating_accrue(T::DbWeight::get().reads(1)); + + // Swap Stake value + // Stake( hotkey, coldkey ) -> stake -- the stake that the hotkey controls on behalf of the coldkey. + // Get the new stake value. + let new_stake_value: u64 = Stake::::get(new_hotkey, &coldkey); + // Insert the new stake value. + Stake::::insert( + new_hotkey, + &coldkey, + new_stake_value.saturating_add(old_stake_amount), + ); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + + // Swap StakingHotkeys. + // StakingHotkeys( coldkey ) --> Vec -- the hotkeys that the coldkey stakes. + let mut staking_hotkeys = StakingHotkeys::::get(&coldkey); + staking_hotkeys.retain(|hk| *hk != *old_hotkey && *hk != *new_hotkey); + staking_hotkeys.push(new_hotkey.clone()); + StakingHotkeys::::insert(coldkey.clone(), staking_hotkeys); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + } + + // 11. Swap ChildKeys. + // ChildKeys( parent, netuid ) --> Vec<(proportion,child)> -- the child keys of the parent. + for netuid in Self::get_all_subnet_netuids() { + // Get the children of the old hotkey for this subnet + let my_children: Vec<(u64, T::AccountId)> = ChildKeys::::get(old_hotkey, netuid); + // Remove the old hotkey's child entries + ChildKeys::::remove(old_hotkey, netuid); + // Insert the same child entries for the new hotkey + ChildKeys::::insert(new_hotkey, netuid, my_children); + } + + // 12. Swap ParentKeys. + // ParentKeys( child, netuid ) --> Vec<(proportion,parent)> -- the parent keys of the child. 
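// Concretely (illustrative values): if, on subnet `n`, ParentKeys(old_hotkey, n) == [(p, parent)]
// and ChildKeys(parent, n) == [(p, old_hotkey)], then after this step both sides reference the
// new key: ParentKeys(new_hotkey, n) == [(p, parent)] and ChildKeys(parent, n) == [(p, new_hotkey)].
// The child-list rewrite is a key substitution that preserves each proportion:
//
//     let p: u64 = u64::MAX / 2; // some proportion value, for illustration
//     let children_before: Vec<(u64, u8)> = vec![(p, 2u8)]; // 2 stands in for old_hotkey
//     let children_after: Vec<(u64, u8)> =
//         children_before.iter().map(|(w, _)| (*w, 3u8)).collect(); // 3 stands in for new_hotkey
//     assert_eq!(children_after, vec![(p, 3u8)]);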
+ for netuid in Self::get_all_subnet_netuids() { + // Get the parents of the old hotkey for this subnet + let parents: Vec<(u64, T::AccountId)> = ParentKeys::::get(old_hotkey, netuid); + // Remove the old hotkey's parent entries + ParentKeys::::remove(old_hotkey, netuid); + // Insert the same parent entries for the new hotkey + ParentKeys::::insert(new_hotkey, netuid, parents.clone()); + for (_, parent_key_i) in parents { + // For each parent, update their children list + let mut parent_children: Vec<(u64, T::AccountId)> = + ChildKeys::::get(parent_key_i.clone(), netuid); + for child in parent_children.iter_mut() { + // If the child is the old hotkey, replace it with the new hotkey + if child.1 == *old_hotkey { + child.1 = new_hotkey.clone(); + } + } + // Update the parent's children list + ChildKeys::::insert(parent_key_i, netuid, parent_children); + } + } + + // Return successful after swapping all the relevant terms. + Ok(()) + } + + pub fn swap_senate_member( + old_hotkey: &T::AccountId, + new_hotkey: &T::AccountId, + weight: &mut Weight, + ) -> DispatchResult { + weight.saturating_accrue(T::DbWeight::get().reads(1)); + if T::SenateMembers::is_member(old_hotkey) { + T::SenateMembers::swap_member(old_hotkey, new_hotkey).map_err(|e| e.error)?; + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); + } + Ok(()) + } +} diff --git a/pallets/subtensor/src/utils/identity.rs b/pallets/subtensor/src/utils/identity.rs new file mode 100644 index 000000000..7babb04f4 --- /dev/null +++ b/pallets/subtensor/src/utils/identity.rs @@ -0,0 +1,194 @@ +use super::*; +use frame_support::ensure; +use frame_system::ensure_signed; +use sp_std::vec::Vec; + +impl Pallet { + /// Sets the identity for a coldkey. + /// + /// This function allows a user to set or update their identity information associated with their coldkey. + /// It checks if the caller has at least one registered hotkey, validates the provided identity information, + /// and then stores it in the blockchain state. + /// + /// # Arguments + /// + /// * `origin` - The origin of the call, which should be a signed extrinsic. + /// * `name` - The name to be associated with the identity. + /// * `url` - A URL associated with the identity. + /// * `image` - An image URL or identifier for the identity. + /// * `discord` - Discord information for the identity. + /// * `description` - A description of the identity. + /// * `additional` - Any additional information for the identity. + /// + /// # Returns + /// + /// Returns `Ok(())` if the identity is successfully set, otherwise returns an error. 
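// A minimal, illustrative call sequence (assuming the `Test` mock runtime and a `coldkey` that
// already owns a hotkey registered on some subnet; field contents are placeholders):
//
//     assert_ok!(SubtensorModule::do_set_identity(
//         <<Test as Config>::RuntimeOrigin>::signed(coldkey),
//         b"my-validator".to_vec(),        // name
//         b"https://example.com".to_vec(), // url
//         vec![],                          // image
//         vec![],                          // discord
//         b"a short description".to_vec(), // description
//         vec![],                          // additional
//     ));
//     assert!(Identities::<Test>::get(coldkey).is_some());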
+ pub fn do_set_identity( + origin: T::RuntimeOrigin, + name: Vec, + url: Vec, + image: Vec, + discord: Vec, + description: Vec, + additional: Vec, + ) -> dispatch::DispatchResult { + // Ensure the call is signed and get the signer's (coldkey) account + let coldkey = ensure_signed(origin)?; + + // Retrieve all hotkeys associated with this coldkey + let hotkeys: Vec = OwnedHotkeys::::get(coldkey.clone()); + + // Ensure that at least one of the associated hotkeys is registered on any network + ensure!( + hotkeys + .iter() + .any(|hotkey| Self::is_hotkey_registered_on_any_network(hotkey)), + Error::::HotKeyNotRegisteredInNetwork + ); + + // Create the identity struct with the provided information + let identity = ChainIdentityOf { + name, + url, + image, + discord, + description, + additional, + }; + + // Validate the created identity + ensure!( + Self::is_valid_identity(&identity), + Error::::InvalidIdentity + ); + + // Store the validated identity in the blockchain state + Identities::::insert(coldkey.clone(), identity.clone()); + + // Log the identity set event + log::debug!("ChainIdentitySet( coldkey:{:?} ) ", coldkey.clone()); + + // Emit an event to notify that an identity has been set + Self::deposit_event(Event::ChainIdentitySet(coldkey.clone())); + + // Return Ok to indicate successful execution + Ok(()) + } + + /// Sets the identity for a subnet. + /// + /// This function allows the owner of a subnet to set or update the identity information associated with the subnet. + /// It verifies that the caller is the owner of the specified subnet, validates the provided identity information, + /// and then stores it in the blockchain state. + /// + /// # Arguments + /// + /// * `origin` - The origin of the call, which should be a signed extrinsic. + /// * `netuid` - The unique identifier for the subnet. + /// * `subnet_name` - The name of the subnet to be associated with the identity. + /// * `github_repo` - The GitHub repository URL associated with the subnet identity. + /// * `subnet_contact` - Contact information for the subnet. + /// + /// # Returns + /// + /// Returns `Ok(())` if the subnet identity is successfully set, otherwise returns an error. + pub fn do_set_subnet_identity( + origin: T::RuntimeOrigin, + netuid: u16, + subnet_name: Vec, + github_repo: Vec, + subnet_contact: Vec, + ) -> dispatch::DispatchResult { + // Ensure the call is signed and get the signer's (coldkey) account + let coldkey = ensure_signed(origin)?; + + // Ensure that the coldkey owns the subnet + ensure!( + Self::get_subnet_owner(netuid) == coldkey, + Error::::NotSubnetOwner + ); + + // Create the identity struct with the provided information + let identity: SubnetIdentityOf = SubnetIdentityOf { + subnet_name, + github_repo, + subnet_contact, + }; + + // Validate the created identity + ensure!( + Self::is_valid_subnet_identity(&identity), + Error::::InvalidIdentity + ); + + // Store the validated identity in the blockchain state + SubnetIdentities::::insert(netuid, identity.clone()); + + // Log the identity set event + log::info!("SubnetIdentitySet( netuid:{:?} ) ", netuid); + + // Emit an event to notify that an identity has been set + Self::deposit_event(Event::SubnetIdentitySet(netuid)); + + // Return Ok to indicate successful execution + Ok(()) + } + + /// Validates the given ChainIdentityOf struct. 
+ /// + /// This function checks if the total length of all fields in the ChainIdentityOf struct + /// is less than or equal to 3840 bytes, and if each individual field is also + /// within its respective maximum byte limit. + /// + /// # Arguments + /// + /// * `identity` - A reference to the ChainIdentityOf struct to be validated. + /// + /// # Returns + /// + /// * `bool` - Returns true if the Identity is valid, false otherwise. + pub fn is_valid_identity(identity: &ChainIdentityOf) -> bool { + let total_length = identity + .name + .len() + .saturating_add(identity.url.len()) + .saturating_add(identity.image.len()) + .saturating_add(identity.discord.len()) + .saturating_add(identity.description.len()) + .saturating_add(identity.additional.len()); + + total_length <= 256 + 256 + 1024 + 256 + 1024 + 1024 + && identity.name.len() <= 256 + && identity.url.len() <= 256 + && identity.image.len() <= 1024 + && identity.discord.len() <= 256 + && identity.description.len() <= 1024 + && identity.additional.len() <= 1024 + } + + /// Validates the given SubnetIdentityOf struct. + /// + /// This function checks if the total length of all fields in the SubnetIdentityOf struct + /// is less than or equal to 2304 bytes, and if each individual field is also + /// within its respective maximum byte limit. + /// + /// # Arguments + /// + /// * `identity` - A reference to the SubnetIdentityOf struct to be validated. + /// + /// # Returns + /// + /// * `bool` - Returns true if the SubnetIdentity is valid, false otherwise. + pub fn is_valid_subnet_identity(identity: &SubnetIdentityOf) -> bool { + let total_length = identity + .subnet_name + .len() + .saturating_add(identity.github_repo.len()) + .saturating_add(identity.subnet_contact.len()); + + total_length <= 256 + 1024 + 1024 + && identity.subnet_name.len() <= 256 + && identity.github_repo.len() <= 1024 + && identity.subnet_contact.len() <= 1024 + } +} diff --git a/pallets/subtensor/src/utils.rs b/pallets/subtensor/src/utils/misc.rs similarity index 83% rename from pallets/subtensor/src/utils.rs rename to pallets/subtensor/src/utils/misc.rs index c61133e94..76546a1a2 100644 --- a/pallets/subtensor/src/utils.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -1,10 +1,11 @@ use super::*; use crate::{ - system::{ensure_root, ensure_signed_or_root}, + system::{ensure_root, ensure_signed_or_root, pallet_prelude::BlockNumberFor}, Error, }; use sp_core::Get; use sp_core::U256; +use sp_runtime::Saturating; use substrate_fixed::types::I32F32; impl Pallet { @@ -122,12 +123,12 @@ impl Pallet { Active::::insert(netuid, updated_active_vec); } pub fn set_pruning_score_for_uid(netuid: u16, uid: u16, pruning_score: u16) { - log::info!("netuid = {:?}", netuid); - log::info!( + log::debug!("netuid = {:?}", netuid); + log::debug!( "SubnetworkN::::get( netuid ) = {:?}", SubnetworkN::::get(netuid) ); - log::info!("uid = {:?}", uid); + log::debug!("uid = {:?}", uid); assert!(uid < SubnetworkN::::get(netuid)); PruningScores::::mutate(netuid, |v| { if let Some(s) = v.get_mut(uid as usize) { @@ -275,38 +276,6 @@ impl Pallet { Ok(()) } - // ======================== - // ==== Rate Limiting ===== - // ======================== - pub fn set_last_tx_block(key: &T::AccountId, block: u64) { - LastTxBlock::::insert(key, block) - } - pub fn get_last_tx_block(key: &T::AccountId) -> u64 { - LastTxBlock::::get(key) - } - pub fn set_last_tx_block_delegate_take(key: &T::AccountId, block: u64) { - LastTxBlockDelegateTake::::insert(key, block) - } - pub fn get_last_tx_block_delegate_take(key: &T::AccountId) ->
u64 { - LastTxBlockDelegateTake::::get(key) - } - pub fn exceeds_tx_rate_limit(prev_tx_block: u64, current_block: u64) -> bool { - let rate_limit: u64 = Self::get_tx_rate_limit(); - if rate_limit == 0 || prev_tx_block == 0 { - return false; - } - - current_block.saturating_sub(prev_tx_block) <= rate_limit - } - pub fn exceeds_tx_delegate_take_rate_limit(prev_tx_block: u64, current_block: u64) -> bool { - let rate_limit: u64 = Self::get_tx_delegate_take_rate_limit(); - if rate_limit == 0 || prev_tx_block == 0 { - return false; - } - - current_block.saturating_sub(prev_tx_block) <= rate_limit - } - // ======================== // === Token Management === // ======================== @@ -316,25 +285,20 @@ impl Pallet { pub fn coinbase(amount: u64) { TotalIssuance::::put(TotalIssuance::::get().saturating_add(amount)); } - pub fn get_default_take() -> u16 { - // Default to maximum - MaxTake::::get() - } - pub fn set_max_take(default_take: u16) { - MaxTake::::put(default_take); - Self::deposit_event(Event::DefaultTakeSet(default_take)); - } - pub fn get_min_take() -> u16 { - MinTake::::get() - } pub fn set_subnet_locked_balance(netuid: u16, amount: u64) { SubnetLocked::::insert(netuid, amount); } - pub fn get_subnet_locked_balance(netuid: u16) -> u64 { SubnetLocked::::get(netuid) } + pub fn get_total_subnet_locked() -> u64 { + let mut total_subnet_locked: u64 = 0; + for (_, locked) in SubnetLocked::::iter() { + total_subnet_locked.saturating_accrue(locked); + } + total_subnet_locked + } // ======================== // ========= Sudo ========= @@ -356,18 +320,49 @@ impl Pallet { Self::deposit_event(Event::TxDelegateTakeRateLimitSet(tx_rate_limit)); } pub fn set_min_delegate_take(take: u16) { - MinTake::::put(take); + MinDelegateTake::::put(take); Self::deposit_event(Event::MinDelegateTakeSet(take)); } pub fn set_max_delegate_take(take: u16) { - MaxTake::::put(take); + MaxDelegateTake::::put(take); Self::deposit_event(Event::MaxDelegateTakeSet(take)); } pub fn get_min_delegate_take() -> u16 { - MinTake::::get() + MinDelegateTake::::get() } pub fn get_max_delegate_take() -> u16 { - MaxTake::::get() + MaxDelegateTake::::get() + } + pub fn get_default_delegate_take() -> u16 { + // Default to maximum + MaxDelegateTake::::get() + } + // get_default_childkey_take + pub fn get_default_childkey_take() -> u16 { + // Default to maximum + MinChildkeyTake::::get() + } + pub fn get_tx_childkey_take_rate_limit() -> u64 { + TxChildkeyTakeRateLimit::::get() + } + pub fn set_tx_childkey_take_rate_limit(tx_rate_limit: u64) { + TxChildkeyTakeRateLimit::::put(tx_rate_limit); + Self::deposit_event(Event::TxChildKeyTakeRateLimitSet(tx_rate_limit)); + } + pub fn set_min_childkey_take(take: u16) { + MinChildkeyTake::::put(take); + Self::deposit_event(Event::MinChildKeyTakeSet(take)); + } + pub fn set_max_childkey_take(take: u16) { + MaxChildkeyTake::::put(take); + Self::deposit_event(Event::MaxChildKeyTakeSet(take)); + } + pub fn get_min_childkey_take() -> u16 { + MinChildkeyTake::::get() + } + + pub fn get_max_childkey_take() -> u16 { + MaxChildkeyTake::::get() } pub fn get_serving_rate_limit(netuid: u16) -> u64 { @@ -460,6 +455,13 @@ impl Pallet { ImmunityPeriod::::insert(netuid, immunity_period); Self::deposit_event(Event::ImmunityPeriodSet(netuid, immunity_period)); } + /// Check if a neuron is in immunity based on the current block + pub fn get_neuron_is_immune(netuid: u16, uid: u16) -> bool { + let registered_at = Self::get_neuron_block_at_registration(netuid, uid); + let current_block = 
Self::get_current_block_as_u64(); + let immunity_period = Self::get_immunity_period(netuid); + current_block.saturating_sub(registered_at) < u64::from(immunity_period) + } pub fn get_min_allowed_weights(netuid: u16) -> u16 { MinAllowedWeights::::get(netuid) @@ -695,4 +697,87 @@ impl Pallet { pub fn get_liquid_alpha_enabled(netuid: u16) -> bool { LiquidAlphaOn::::get(netuid) } + + /// Gets the current hotkey emission tempo. + /// + /// # Returns + /// * `u64` - The current emission tempo value. + pub fn get_hotkey_emission_tempo() -> u64 { + HotkeyEmissionTempo::::get() + } + + /// Sets the hotkey emission tempo. + /// + /// # Arguments + /// * `emission_tempo` - The new emission tempo value to set. + pub fn set_hotkey_emission_tempo(emission_tempo: u64) { + HotkeyEmissionTempo::::set(emission_tempo); + Self::deposit_event(Event::HotkeyEmissionTempoSet(emission_tempo)); + } + + pub fn get_pending_hotkey_emission(hotkey: &T::AccountId) -> u64 { + PendingdHotkeyEmission::::get(hotkey) + } + + /// Retrieves the maximum stake allowed for a given network. + /// + /// # Arguments + /// + /// * `netuid` - The unique identifier of the network. + /// + /// # Returns + /// + /// * `u64` - The maximum stake allowed for the specified network. + pub fn get_network_max_stake(netuid: u16) -> u64 { + NetworkMaxStake::::get(netuid) + } + + /// Sets the maximum stake allowed for a given network. + /// + /// # Arguments + /// + /// * `netuid` - The unique identifier of the network. + /// * `max_stake` - The new maximum stake value to set. + /// + /// # Effects + /// + /// * Updates the NetworkMaxStake storage. + /// * Emits a NetworkMaxStakeSet event. + pub fn set_network_max_stake(netuid: u16, max_stake: u64) { + // Update the NetworkMaxStake storage with the new max_stake value + NetworkMaxStake::::insert(netuid, max_stake); + + // Emit an event to notify listeners about the change + Self::deposit_event(Event::NetworkMaxStakeSet(netuid, max_stake)); + } + + /// Sets the schedule duration for coldkey swaps. + /// + /// # Arguments + /// + /// * `duration` - The number of blocks to wait before a scheduled coldkey swap is executed. + /// + /// # Effects + /// + /// * Updates the ColdkeySwapScheduleDuration storage. + /// * Emits a ColdkeySwapScheduleDurationSet event. + pub fn set_coldkey_swap_schedule_duration(duration: BlockNumberFor) { + ColdkeySwapScheduleDuration::::set(duration); + Self::deposit_event(Event::ColdkeySwapScheduleDurationSet(duration)); + } + + /// Sets the schedule duration for dissolving a network. + /// + /// # Arguments + /// + /// * `duration` - The number of blocks to wait before a scheduled network dissolution is executed. + /// + /// # Effects + /// + /// * Updates the DissolveNetworkScheduleDuration storage. + /// * Emits a DissolveNetworkScheduleDurationSet event.
+ pub fn set_dissolve_network_schedule_duration(duration: BlockNumberFor) { + DissolveNetworkScheduleDuration::::set(duration); + Self::deposit_event(Event::DissolveNetworkScheduleDurationSet(duration)); + } } diff --git a/pallets/subtensor/src/utils/mod.rs b/pallets/subtensor/src/utils/mod.rs new file mode 100644 index 000000000..a42c91119 --- /dev/null +++ b/pallets/subtensor/src/utils/mod.rs @@ -0,0 +1,5 @@ +use super::*; +pub mod identity; +pub mod misc; +pub mod rate_limiting; +pub mod try_state; diff --git a/pallets/subtensor/src/utils/rate_limiting.rs b/pallets/subtensor/src/utils/rate_limiting.rs new file mode 100644 index 000000000..b02ad9855 --- /dev/null +++ b/pallets/subtensor/src/utils/rate_limiting.rs @@ -0,0 +1,125 @@ +use super::*; +use sp_core::Get; + +/// Enum representing different types of transactions +#[derive(Copy, Clone)] +pub enum TransactionType { + SetChildren, + SetChildkeyTake, + Unknown, +} + +/// Implement conversion from TransactionType to u16 +impl From for u16 { + fn from(tx_type: TransactionType) -> Self { + match tx_type { + TransactionType::SetChildren => 0, + TransactionType::SetChildkeyTake => 1, + TransactionType::Unknown => 2, + } + } +} + +/// Implement conversion from u16 to TransactionType +impl From for TransactionType { + fn from(value: u16) -> Self { + match value { + 0 => TransactionType::SetChildren, + 1 => TransactionType::SetChildkeyTake, + _ => TransactionType::Unknown, + } + } +} +impl Pallet { + // ======================== + // ==== Rate Limiting ===== + // ======================== + /// Get the rate limit for a specific transaction type + pub fn get_rate_limit(tx_type: &TransactionType) -> u64 { + match tx_type { + TransactionType::SetChildren => (DefaultTempo::::get().saturating_mul(2)).into(), // Cannot set children twice within the default tempo period. 
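// Illustrative arithmetic for the subnet-scoped check below (`passes_rate_limit_on_subnet`):
// if DefaultTempo were 360 blocks, SetChildren would be limited to once per 720 blocks, so a
// call at block 1_300 with the last call recorded at block 500 passes, while block 1_000 would not:
//
//     let (limit, last_block) = (720u64, 500u64);
//     assert!(1_300u64.saturating_sub(last_block) >= limit);
//     assert!(!(1_000u64.saturating_sub(last_block) >= limit));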
+ TransactionType::SetChildkeyTake => TxChildkeyTakeRateLimit::::get(), + TransactionType::Unknown => 0, // Default to no limit for unknown types (no limit) + } + } + + /// Check if a transaction should be rate limited on a specific subnet + pub fn passes_rate_limit_on_subnet( + tx_type: &TransactionType, + hotkey: &T::AccountId, + netuid: u16, + ) -> bool { + let block: u64 = Self::get_current_block_as_u64(); + let limit: u64 = Self::get_rate_limit(tx_type); + let last_block: u64 = Self::get_last_transaction_block(hotkey, netuid, tx_type); + + // Allow the first transaction (when last_block is 0) or if the rate limit has passed + last_block == 0 || block.saturating_sub(last_block) >= limit + } + + /// Check if a transaction should be rate limited globally + pub fn passes_rate_limit_globally(tx_type: &TransactionType, hotkey: &T::AccountId) -> bool { + let netuid: u16 = u16::MAX; + let block: u64 = Self::get_current_block_as_u64(); + let limit: u64 = Self::get_rate_limit(tx_type); + let last_block: u64 = Self::get_last_transaction_block(hotkey, netuid, tx_type); + block.saturating_sub(last_block) >= limit + } + + /// Get the block number of the last transaction for a specific hotkey, network, and transaction type + pub fn get_last_transaction_block( + hotkey: &T::AccountId, + netuid: u16, + tx_type: &TransactionType, + ) -> u64 { + let tx_as_u16: u16 = (*tx_type).into(); + TransactionKeyLastBlock::::get((hotkey, netuid, tx_as_u16)) + } + + /// Set the block number of the last transaction for a specific hotkey, network, and transaction type + pub fn set_last_transaction_block( + hotkey: &T::AccountId, + netuid: u16, + tx_type: &TransactionType, + block: u64, + ) { + let tx_as_u16: u16 = (*tx_type).into(); + TransactionKeyLastBlock::::insert((hotkey, netuid, tx_as_u16), block); + } + + pub fn set_last_tx_block(key: &T::AccountId, block: u64) { + LastTxBlock::::insert(key, block) + } + pub fn get_last_tx_block(key: &T::AccountId) -> u64 { + LastTxBlock::::get(key) + } + pub fn set_last_tx_block_delegate_take(key: &T::AccountId, block: u64) { + LastTxBlockDelegateTake::::insert(key, block) + } + pub fn get_last_tx_block_delegate_take(key: &T::AccountId) -> u64 { + LastTxBlockDelegateTake::::get(key) + } + + pub fn set_last_tx_block_childkey_take(key: &T::AccountId, block: u64) { + LastTxBlockChildKeyTake::::insert(key, block) + } + pub fn get_last_tx_block_childkey_take(key: &T::AccountId) -> u64 { + LastTxBlockChildKeyTake::::get(key) + } + pub fn exceeds_tx_rate_limit(prev_tx_block: u64, current_block: u64) -> bool { + let rate_limit: u64 = Self::get_tx_rate_limit(); + if rate_limit == 0 || prev_tx_block == 0 { + return false; + } + + current_block.saturating_sub(prev_tx_block) <= rate_limit + } + pub fn exceeds_tx_delegate_take_rate_limit(prev_tx_block: u64, current_block: u64) -> bool { + let rate_limit: u64 = Self::get_tx_delegate_take_rate_limit(); + if rate_limit == 0 || prev_tx_block == 0 { + return false; + } + + current_block.saturating_sub(prev_tx_block) <= rate_limit + } +} diff --git a/pallets/subtensor/src/utils/try_state.rs b/pallets/subtensor/src/utils/try_state.rs new file mode 100644 index 000000000..4763c0484 --- /dev/null +++ b/pallets/subtensor/src/utils/try_state.rs @@ -0,0 +1,49 @@ +use super::*; + +impl Pallet { + /// Checks if the accounting invariants for [`TotalStake`], [`TotalSubnetLocked`], and [`TotalIssuance`] are correct. + /// + /// This function verifies that: + /// 1. The sum of all stakes matches the [`TotalStake`]. + /// 2. 
The [`TotalSubnetLocked`] is correctly calculated. + /// 3. The [`TotalIssuance`] equals the sum of currency issuance, total stake, and total subnet locked. + /// + /// # Returns + /// + /// Returns `Ok(())` if all invariants are correct, otherwise returns an error. + #[cfg(feature = "try-runtime")] + pub fn check_accounting_invariants() -> Result<(), sp_runtime::TryRuntimeError> { + use frame_support::traits::fungible::Inspect; + + // Calculate the total staked amount + let mut total_staked: u64 = 0; + for (_account, _netuid, stake) in Stake::::iter() { + total_staked = total_staked.saturating_add(stake); + } + + // Verify that the calculated total stake matches the stored TotalStake + ensure!( + total_staked == TotalStake::::get(), + "TotalStake does not match total staked", + ); + + // Get the total subnet locked amount + let total_subnet_locked: u64 = Self::get_total_subnet_locked(); + + // Get the total currency issuance + let currency_issuance: u64 = T::Currency::total_issuance(); + + // Calculate the expected total issuance + let expected_total_issuance: u64 = currency_issuance + .saturating_add(total_staked) + .saturating_add(total_subnet_locked); + + // Verify that the calculated total issuance matches the stored TotalIssuance + ensure!( + TotalIssuance::::get() == expected_total_issuance, + "TotalIssuance accounting discrepancy", + ); + + Ok(()) + } +} diff --git a/pallets/subtensor/tests/block_step.rs b/pallets/subtensor/tests/block_step.rs deleted file mode 100644 index 87fdb8bcf..000000000 --- a/pallets/subtensor/tests/block_step.rs +++ /dev/null @@ -1,940 +0,0 @@ -#![allow(clippy::unwrap_used)] - -mod mock; -use frame_support::assert_ok; -use frame_system::Config; -use mock::*; -use sp_core::U256; - -#[test] -fn test_loaded_emission() { - new_test_ext(1).execute_with(|| { - let n: u16 = 100; - let netuid: u16 = 1; - let tempo: u16 = 10; - let netuids: Vec = vec![1]; - let emission: Vec = vec![1000000000]; - add_network(netuid, tempo, 0); - SubtensorModule::set_max_allowed_uids(netuid, n); - SubtensorModule::set_adjustment_alpha(netuid, 58000); // Set to old value. - SubtensorModule::set_emission_values(&netuids, emission).unwrap(); - for i in 0..n { - SubtensorModule::append_neuron(netuid, &U256::from(i), 0); - } - assert!(SubtensorModule::get_loaded_emission_tuples(netuid).is_none()); - - // Try loading at block 0 - let block: u64 = 0; - assert_eq!( - SubtensorModule::blocks_until_next_epoch(netuid, tempo, block), - 8 - ); - SubtensorModule::generate_emission(block); - assert!(SubtensorModule::get_loaded_emission_tuples(netuid).is_none()); - - // Try loading at block = 9; - let block: u64 = 8; - assert_eq!( - SubtensorModule::blocks_until_next_epoch(netuid, tempo, block), - 0 - ); - SubtensorModule::generate_emission(block); - assert!(SubtensorModule::get_loaded_emission_tuples(netuid).is_some()); - assert_eq!( - SubtensorModule::get_loaded_emission_tuples(netuid) - .unwrap() - .len(), - n as usize - ); - - // Try draining the emission tuples - // None remaining because we are at epoch. - let block: u64 = 8; - SubtensorModule::drain_emission(block); - assert!(SubtensorModule::get_loaded_emission_tuples(netuid).is_none()); - - // Generate more emission. 
- SubtensorModule::generate_emission(8); - assert_eq!( - SubtensorModule::get_loaded_emission_tuples(netuid) - .unwrap() - .len(), - n as usize - ); - - for block in 9..19 { - let mut n_remaining: usize = 0; - let mut n_to_drain: usize = 0; - if let Some(tuples) = SubtensorModule::get_loaded_emission_tuples(netuid) { - n_remaining = tuples.len(); - n_to_drain = - SubtensorModule::tuples_to_drain_this_block(netuid, tempo, block, tuples.len()); - } - SubtensorModule::drain_emission(block); // drain it with 9 more blocks to go - if let Some(tuples) = SubtensorModule::get_loaded_emission_tuples(netuid) { - assert_eq!(tuples.len(), n_remaining - n_to_drain); - } - log::info!("n_to_drain: {:?}", n_to_drain); - log::info!( - "SubtensorModule::get_loaded_emission_tuples( netuid ).len(): {:?}", - n_remaining - n_to_drain - ); - } - }) -} - -#[test] -fn test_tuples_to_drain_this_block() { - new_test_ext(1).execute_with(|| { - // pub fn tuples_to_drain_this_block( netuid: u16, tempo: u16, block_number: u64, n_remaining: usize ) -> usize { - assert_eq!(SubtensorModule::tuples_to_drain_this_block(0, 1, 0, 10), 10); // drain all epoch block. - assert_eq!(SubtensorModule::tuples_to_drain_this_block(0, 0, 0, 10), 10); // drain all no tempo. - assert_eq!(SubtensorModule::tuples_to_drain_this_block(0, 10, 0, 10), 2); // drain 10 / ( 10 / 2 ) = 2 - assert_eq!(SubtensorModule::tuples_to_drain_this_block(0, 20, 0, 10), 1); // drain 10 / ( 20 / 2 ) = 1 - assert_eq!(SubtensorModule::tuples_to_drain_this_block(0, 10, 0, 20), 5); // drain 20 / ( 9 / 2 ) = 5 - assert_eq!(SubtensorModule::tuples_to_drain_this_block(0, 20, 0, 0), 0); // nothing to drain. - assert_eq!(SubtensorModule::tuples_to_drain_this_block(0, 10, 1, 20), 5); // drain 19 / ( 10 / 2 ) = 4 - assert_eq!( - SubtensorModule::tuples_to_drain_this_block(0, 10, 10, 20), - 4 - ); // drain 19 / ( 10 / 2 ) = 4 - assert_eq!( - SubtensorModule::tuples_to_drain_this_block(0, 10, 15, 20), - 10 - ); // drain 19 / ( 10 / 2 ) = 4 - assert_eq!( - SubtensorModule::tuples_to_drain_this_block(0, 10, 19, 20), - 20 - ); // drain 19 / ( 10 / 2 ) = 4 - assert_eq!( - SubtensorModule::tuples_to_drain_this_block(0, 10, 20, 20), - 20 - ); // drain 19 / ( 10 / 2 ) = 4 - for i in 0..10 { - for j in 0..10 { - for k in 0..10 { - for l in 0..10 { - assert!(SubtensorModule::tuples_to_drain_this_block(i, j, k, l) <= 10); - } - } - } - } - }) -} - -#[test] -fn test_blocks_until_epoch() { - new_test_ext(1).execute_with(|| { - // Check tempo = 0 block = * netuid = * - assert_eq!(SubtensorModule::blocks_until_next_epoch(0, 0, 0), 1000); - - // Check tempo = 1 block = * netuid = * - assert_eq!(SubtensorModule::blocks_until_next_epoch(0, 1, 0), 0); - assert_eq!(SubtensorModule::blocks_until_next_epoch(1, 1, 0), 1); - assert_eq!(SubtensorModule::blocks_until_next_epoch(0, 1, 1), 1); - assert_eq!(SubtensorModule::blocks_until_next_epoch(1, 1, 1), 0); - assert_eq!(SubtensorModule::blocks_until_next_epoch(0, 1, 2), 0); - assert_eq!(SubtensorModule::blocks_until_next_epoch(1, 1, 2), 1); - for i in 0..100 { - if i % 2 == 0 { - assert_eq!(SubtensorModule::blocks_until_next_epoch(0, 1, i), 0); - assert_eq!(SubtensorModule::blocks_until_next_epoch(1, 1, i), 1); - } else { - assert_eq!(SubtensorModule::blocks_until_next_epoch(0, 1, i), 1); - assert_eq!(SubtensorModule::blocks_until_next_epoch(1, 1, i), 0); - } - } - - // Check general case. 
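
For reference, the general-case loop just below asserts the closed form tempo - (block + netuid + 1) % (tempo + 1), which also produces the values checked earlier in this file (8 blocks remaining for netuid 1, tempo 10 at block 0, and 0 remaining at block 8). A minimal standalone sketch of that shape, assuming the tempo == 0 branch simply returns the 1000 these tests expect:

    fn blocks_until_next_epoch(netuid: u16, tempo: u16, block_number: u64) -> u64 {
        if tempo == 0 {
            // Matches the assertion above that a zero tempo yields 1000.
            return 1000;
        }
        // The netuid offsets the phase so subnets do not all hit their epoch on the same block.
        (tempo as u64).saturating_sub((block_number + netuid as u64 + 1) % (tempo as u64 + 1))
    }

    // blocks_until_next_epoch(1, 10, 0) == 8 and blocks_until_next_epoch(1, 10, 8) == 0,
    // matching the assertions in test_loaded_emission above.
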
- for netuid in 0..30_u16 { - for block in 0..30_u64 { - for tempo in 1..30_u16 { - assert_eq!( - SubtensorModule::blocks_until_next_epoch(netuid, tempo, block), - tempo as u64 - (block + netuid as u64 + 1) % (tempo as u64 + 1) - ); - } - } - } - }); -} - -// /******************************************** -// block_step::adjust_registration_terms_for_networks tests -// *********************************************/ -#[test] -fn test_burn_adjustment() { - new_test_ext(1).execute_with(|| { - let netuid: u16 = 1; - let tempo: u16 = 13; - let burn_cost: u64 = 1000; - let adjustment_interval = 1; - let target_registrations_per_interval = 1; - add_network(netuid, tempo, 0); - SubtensorModule::set_burn(netuid, burn_cost); - SubtensorModule::set_adjustment_interval(netuid, adjustment_interval); - SubtensorModule::set_adjustment_alpha(netuid, 58000); // Set to old value. - SubtensorModule::set_target_registrations_per_interval( - netuid, - target_registrations_per_interval, - ); - assert_eq!( - SubtensorModule::get_adjustment_interval(netuid), - adjustment_interval - ); // Sanity check the adjustment interval. - - // Register key 1. - let hotkey_account_id_1 = U256::from(1); - let coldkey_account_id_1 = U256::from(1); - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id_1, 10000); - assert_ok!(SubtensorModule::burned_register( - <::RuntimeOrigin>::signed(hotkey_account_id_1), - netuid, - hotkey_account_id_1 - )); - - // Register key 2. - let hotkey_account_id_2 = U256::from(2); - let coldkey_account_id_2 = U256::from(2); - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id_2, 10000); - assert_ok!(SubtensorModule::burned_register( - <::RuntimeOrigin>::signed(hotkey_account_id_2), - netuid, - hotkey_account_id_2 - )); - - // We are over the number of regs allowed this interval. - // Step the block and trigger the adjustment. - step_block(1); - - // Check the adjusted burn. - assert_eq!(SubtensorModule::get_burn_as_u64(netuid), 1500); - }); -} - -#[test] -fn test_burn_adjustment_with_moving_average() { - new_test_ext(1).execute_with(|| { - let netuid: u16 = 1; - let tempo: u16 = 13; - let burn_cost: u64 = 1000; - let adjustment_interval = 1; - let target_registrations_per_interval = 1; - add_network(netuid, tempo, 0); - SubtensorModule::set_burn(netuid, burn_cost); - SubtensorModule::set_adjustment_interval(netuid, adjustment_interval); - SubtensorModule::set_adjustment_alpha(netuid, 58000); // Set to old value. - SubtensorModule::set_target_registrations_per_interval( - netuid, - target_registrations_per_interval, - ); - // Set alpha here. - SubtensorModule::set_adjustment_alpha(netuid, u64::MAX / 2); - - // Register key 1. - let hotkey_account_id_1 = U256::from(1); - let coldkey_account_id_1 = U256::from(1); - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id_1, 10000); - assert_ok!(SubtensorModule::burned_register( - <::RuntimeOrigin>::signed(hotkey_account_id_1), - netuid, - hotkey_account_id_1 - )); - - // Register key 2. - let hotkey_account_id_2 = U256::from(2); - let coldkey_account_id_2 = U256::from(2); - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id_2, 10000); - assert_ok!(SubtensorModule::burned_register( - <::RuntimeOrigin>::signed(hotkey_account_id_2), - netuid, - hotkey_account_id_2 - )); - - // We are over the number of regs allowed this interval. - // Step the block and trigger the adjustment. - step_block(1); - - // Check the adjusted burn. 
- // 0.5 * 1000 + 0.5 * 1500 = 1250 - assert_eq!(SubtensorModule::get_burn_as_u64(netuid), 1250); - }); -} - -#[test] -#[allow(unused_assignments)] -fn test_burn_adjustment_case_a() { - // Test case A of the difficulty and burn adjustment algorithm. - // ==================== - // There are too many registrations this interval and most of them are pow registrations - // this triggers an increase in the pow difficulty. - new_test_ext(1).execute_with(|| { - let netuid: u16 = 1; - let tempo: u16 = 13; - let burn_cost: u64 = 1000; - let adjustment_interval = 1; - let target_registrations_per_interval = 1; - let start_diff: u64 = 10_000; - let mut curr_block_num = 0; - add_network(netuid, tempo, 0); - SubtensorModule::set_burn(netuid, burn_cost); - SubtensorModule::set_difficulty(netuid, start_diff); - SubtensorModule::set_min_difficulty(netuid, start_diff); - SubtensorModule::set_adjustment_interval(netuid, adjustment_interval); - SubtensorModule::set_adjustment_alpha(netuid, 58000); // Set to old value. - SubtensorModule::set_target_registrations_per_interval( - netuid, - target_registrations_per_interval, - ); - - // Register key 1. This is a burn registration. - let hotkey_account_id_1 = U256::from(1); - let coldkey_account_id_1 = U256::from(1); - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id_1, 10000); - assert_ok!(SubtensorModule::burned_register( - <::RuntimeOrigin>::signed(hotkey_account_id_1), - netuid, - hotkey_account_id_1 - )); - - // Register key 2. This is a POW registration - let hotkey_account_id_2 = U256::from(2); - let coldkey_account_id_2 = U256::from(2); - let (nonce0, work0): (u64, Vec) = SubtensorModule::create_work_for_block_number( - netuid, - curr_block_num, - 0, - &hotkey_account_id_2, - ); - let result0 = SubtensorModule::register( - <::RuntimeOrigin>::signed(hotkey_account_id_2), - netuid, - curr_block_num, - nonce0, - work0, - hotkey_account_id_2, - coldkey_account_id_2, - ); - assert_ok!(result0); - - // Register key 3. This is a POW registration - let hotkey_account_id_3 = U256::from(3); - let coldkey_account_id_3 = U256::from(3); - let (nonce1, work1): (u64, Vec) = SubtensorModule::create_work_for_block_number( - netuid, - curr_block_num, - 11231312312, - &hotkey_account_id_3, - ); - let result1 = SubtensorModule::register( - <::RuntimeOrigin>::signed(hotkey_account_id_3), - netuid, - curr_block_num, - nonce1, - work1, - hotkey_account_id_3, - coldkey_account_id_3, - ); - assert_ok!(result1); - - // We are over the number of regs allowed this interval. - // Most of them are POW registrations (2 out of 3) - // Step the block and trigger the adjustment. - step_block(1); - curr_block_num += 1; - - // Check the adjusted POW difficulty has INCREASED. - // and the burn has not changed. - let adjusted_burn = SubtensorModule::get_burn_as_u64(netuid); - assert_eq!(adjusted_burn, burn_cost); - - let adjusted_diff = SubtensorModule::get_difficulty_as_u64(netuid); - assert!(adjusted_diff > start_diff); - assert_eq!(adjusted_diff, 20_000); - }); -} - -#[test] -#[allow(unused_assignments)] -fn test_burn_adjustment_case_b() { - // Test case B of the difficulty and burn adjustment algorithm. - // ==================== - // There are too many registrations this interval and most of them are burn registrations - // this triggers an increase in the burn cost. 
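
For readers checking the expected numbers in these adjustment tests, every asserted value is consistent with a ratio step of current * (total_registrations + target) / (2 * target), blended with the previous value by the adjustment alpha (a fraction of u64::MAX, which is why the legacy 58000 used here behaves like alpha of roughly zero and u64::MAX / 2 like one half). Which of burn cost or difficulty actually gets updated depends on the mix of burn and POW registrations, as the case comments describe; the ratio itself uses the interval's total registration count. A rough f64 sketch of that inferred shape, not the pallet's exact fixed-point arithmetic:

    fn sketch_adjusted(current: u64, total_registrations: u64, target: u64, alpha: f64) -> u64 {
        // Ratio step: scale by how far this interval's registrations overshot the target.
        let next = current * (total_registrations + target) / (2 * target);
        // EMA step: alpha weights the previous value, (1 - alpha) the ratio-adjusted one.
        (alpha * current as f64 + (1.0 - alpha) * next as f64) as u64
    }

    // sketch_adjusted(1000, 2, 1, 0.0) == 1500  // test_burn_adjustment
    // sketch_adjusted(1000, 2, 1, 0.5) == 1250  // 0.5 * 1000 + 0.5 * 1500, moving-average test
    // sketch_adjusted(1000, 3, 1, 0.0) == 2000  // case B burn
    // sketch_adjusted(1000, 3, 4, 0.0) == 875   // case C burn
    // sketch_adjusted(1000, 0, 1, 0.0) == 500   // case E with zero registrations
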
- new_test_ext(1).execute_with(|| { - let netuid: u16 = 1; - let tempo: u16 = 13; - let burn_cost: u64 = 1000; - let adjustment_interval = 1; - let target_registrations_per_interval = 1; - let start_diff: u64 = 10_000; - let mut curr_block_num = 0; - add_network(netuid, tempo, 0); - SubtensorModule::set_burn(netuid, burn_cost); - SubtensorModule::set_difficulty(netuid, start_diff); - SubtensorModule::set_adjustment_interval(netuid, adjustment_interval); - SubtensorModule::set_adjustment_alpha(netuid, 58000); // Set to old value. - SubtensorModule::set_target_registrations_per_interval( - netuid, - target_registrations_per_interval, - ); - - // Register key 1. - let hotkey_account_id_1 = U256::from(1); - let coldkey_account_id_1 = U256::from(1); - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id_1, 10000); - assert_ok!(SubtensorModule::burned_register( - <::RuntimeOrigin>::signed(hotkey_account_id_1), - netuid, - hotkey_account_id_1 - )); - - // Register key 2. - let hotkey_account_id_2 = U256::from(2); - let coldkey_account_id_2 = U256::from(2); - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id_2, 10000); - assert_ok!(SubtensorModule::burned_register( - <::RuntimeOrigin>::signed(hotkey_account_id_2), - netuid, - hotkey_account_id_2 - )); - - // Register key 3. This one is a POW registration - let hotkey_account_id_3 = U256::from(3); - let coldkey_account_id_3 = U256::from(3); - let (nonce, work): (u64, Vec) = SubtensorModule::create_work_for_block_number( - netuid, - curr_block_num, - 0, - &hotkey_account_id_3, - ); - let result = SubtensorModule::register( - <::RuntimeOrigin>::signed(hotkey_account_id_3), - netuid, - curr_block_num, - nonce, - work, - hotkey_account_id_3, - coldkey_account_id_3, - ); - assert_ok!(result); - - // We are over the number of regs allowed this interval. - // Most of them are burn registrations (2 out of 3) - // Step the block and trigger the adjustment. - step_block(1); - curr_block_num += 1; - - // Check the adjusted burn has INCREASED. - // and the difficulty has not changed. - let adjusted_burn = SubtensorModule::get_burn_as_u64(netuid); - assert!(adjusted_burn > burn_cost); - assert_eq!(adjusted_burn, 2_000); - - let adjusted_diff = SubtensorModule::get_difficulty_as_u64(netuid); - assert_eq!(adjusted_diff, start_diff); - }); -} - -#[test] -#[allow(unused_assignments)] -fn test_burn_adjustment_case_c() { - // Test case C of the difficulty and burn adjustment algorithm. - // ==================== - // There are not enough registrations this interval and most of them are POW registrations - // this triggers a decrease in the burn cost - new_test_ext(1).execute_with(|| { - let netuid: u16 = 1; - let tempo: u16 = 13; - let burn_cost: u64 = 1000; - let adjustment_interval = 1; - let target_registrations_per_interval = 4; // Needs registrations < 4 to trigger - let start_diff: u64 = 10_000; - let mut curr_block_num = 0; - add_network(netuid, tempo, 0); - SubtensorModule::set_burn(netuid, burn_cost); - SubtensorModule::set_difficulty(netuid, start_diff); - SubtensorModule::set_adjustment_interval(netuid, adjustment_interval); - SubtensorModule::set_adjustment_alpha(netuid, 58000); // Set to old value. - SubtensorModule::set_target_registrations_per_interval( - netuid, - target_registrations_per_interval, - ); - - // Register key 1. 
This is a BURN registration - let hotkey_account_id_1 = U256::from(1); - let coldkey_account_id_1 = U256::from(1); - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id_1, 10000); - assert_ok!(SubtensorModule::burned_register( - <::RuntimeOrigin>::signed(hotkey_account_id_1), - netuid, - hotkey_account_id_1 - )); - - // Register key 2. This is a POW registration - let hotkey_account_id_2 = U256::from(2); - let coldkey_account_id_2 = U256::from(2); - let (nonce0, work0): (u64, Vec) = SubtensorModule::create_work_for_block_number( - netuid, - curr_block_num, - 0, - &hotkey_account_id_2, - ); - let result0 = SubtensorModule::register( - <::RuntimeOrigin>::signed(hotkey_account_id_2), - netuid, - curr_block_num, - nonce0, - work0, - hotkey_account_id_2, - coldkey_account_id_2, - ); - assert_ok!(result0); - - // Register key 3. This is a POW registration - let hotkey_account_id_3 = U256::from(3); - let coldkey_account_id_3 = U256::from(3); - let (nonce1, work1): (u64, Vec) = SubtensorModule::create_work_for_block_number( - netuid, - curr_block_num, - 11231312312, - &hotkey_account_id_3, - ); - let result1 = SubtensorModule::register( - <::RuntimeOrigin>::signed(hotkey_account_id_3), - netuid, - curr_block_num, - nonce1, - work1, - hotkey_account_id_3, - coldkey_account_id_3, - ); - assert_ok!(result1); - - // We are UNDER the number of regs allowed this interval. - // Most of them are POW registrations (2 out of 3) - // Step the block and trigger the adjustment. - step_block(1); - curr_block_num += 1; - - // Check the adjusted burn has DECREASED. - // and the difficulty has not changed. - let adjusted_burn = SubtensorModule::get_burn_as_u64(netuid); - assert!(adjusted_burn < burn_cost); - assert_eq!(adjusted_burn, 875); - - let adjusted_diff = SubtensorModule::get_difficulty_as_u64(netuid); - assert_eq!(adjusted_diff, start_diff); - }); -} - -#[test] -#[allow(unused_assignments)] -fn test_burn_adjustment_case_d() { - // Test case D of the difficulty and burn adjustment algorithm. - // ==================== - // There are not enough registrations this interval and most of them are BURN registrations - // this triggers a decrease in the POW difficulty - new_test_ext(1).execute_with(|| { - let netuid: u16 = 1; - let tempo: u16 = 13; - let burn_cost: u64 = 1000; - let adjustment_interval = 1; - let target_registrations_per_interval = 4; // Needs registrations < 4 to trigger - let start_diff: u64 = 10_000; - let mut curr_block_num = 0; - add_network(netuid, tempo, 0); - SubtensorModule::set_burn(netuid, burn_cost); - SubtensorModule::set_difficulty(netuid, start_diff); - SubtensorModule::set_min_difficulty(netuid, 1); - SubtensorModule::set_adjustment_interval(netuid, adjustment_interval); - SubtensorModule::set_adjustment_alpha(netuid, 58000); // Set to old value. - SubtensorModule::set_target_registrations_per_interval( - netuid, - target_registrations_per_interval, - ); - - // Register key 1. This is a BURN registration - let hotkey_account_id_1 = U256::from(1); - let coldkey_account_id_1 = U256::from(1); - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id_1, 10000); - assert_ok!(SubtensorModule::burned_register( - <::RuntimeOrigin>::signed(hotkey_account_id_1), - netuid, - hotkey_account_id_1 - )); - - // Register key 2. 
This is a BURN registration - let hotkey_account_id_2 = U256::from(2); - let coldkey_account_id_2 = U256::from(2); - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id_2, 10000); - assert_ok!(SubtensorModule::burned_register( - <::RuntimeOrigin>::signed(hotkey_account_id_2), - netuid, - hotkey_account_id_2 - )); - - // Register key 3. This is a POW registration - let hotkey_account_id_3 = U256::from(3); - let coldkey_account_id_3 = U256::from(3); - let (nonce1, work1): (u64, Vec) = SubtensorModule::create_work_for_block_number( - netuid, - curr_block_num, - 11231312312, - &hotkey_account_id_3, - ); - let result1 = SubtensorModule::register( - <::RuntimeOrigin>::signed(hotkey_account_id_3), - netuid, - curr_block_num, - nonce1, - work1, - hotkey_account_id_3, - coldkey_account_id_3, - ); - assert_ok!(result1); - - // We are UNDER the number of regs allowed this interval. - // Most of them are BURN registrations (2 out of 3) - // Step the block and trigger the adjustment. - step_block(1); - curr_block_num += 1; - - // Check the adjusted POW difficulty has DECREASED. - // and the burn has not changed. - let adjusted_burn = SubtensorModule::get_burn_as_u64(netuid); - assert_eq!(adjusted_burn, burn_cost); - - let adjusted_diff = SubtensorModule::get_difficulty_as_u64(netuid); - assert!(adjusted_diff < start_diff); - assert_eq!(adjusted_diff, 8750); - }); -} - -#[test] -#[allow(unused_assignments)] -fn test_burn_adjustment_case_e() { - // Test case E of the difficulty and burn adjustment algorithm. - // ==================== - // There are not enough registrations this interval and nobody registered either POW or BURN - // this triggers a decrease in the BURN cost and POW difficulty - new_test_ext(1).execute_with(|| { - let netuid: u16 = 1; - let tempo: u16 = 13; - let burn_cost: u64 = 1000; - let adjustment_interval = 1; - let target_registrations_per_interval: u16 = 3; - let start_diff: u64 = 10_000; - let mut curr_block_num = 0; - add_network(netuid, tempo, 0); - SubtensorModule::set_max_registrations_per_block(netuid, 10); - SubtensorModule::set_burn(netuid, burn_cost); - SubtensorModule::set_difficulty(netuid, start_diff); - SubtensorModule::set_min_difficulty(netuid, 1); - SubtensorModule::set_adjustment_interval(netuid, adjustment_interval); - SubtensorModule::set_adjustment_alpha(netuid, 58000); // Set to old value. - SubtensorModule::set_target_registrations_per_interval( - netuid, - target_registrations_per_interval, - ); - - // Register key 1. This is a POW registration - let hotkey_account_id_1 = U256::from(1); - let coldkey_account_id_1 = U256::from(1); - let (nonce1, work1): (u64, Vec) = SubtensorModule::create_work_for_block_number( - netuid, - curr_block_num, - 11231312312, - &hotkey_account_id_1, - ); - let result1 = SubtensorModule::register( - <::RuntimeOrigin>::signed(hotkey_account_id_1), - netuid, - curr_block_num, - nonce1, - work1, - hotkey_account_id_1, - coldkey_account_id_1, - ); - assert_ok!(result1); - - // Register key 2. This is a BURN registration - let hotkey_account_id_2 = U256::from(2); - let coldkey_account_id_2 = U256::from(2); - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id_2, 10000); - assert_ok!(SubtensorModule::burned_register( - <::RuntimeOrigin>::signed(hotkey_account_id_2), - netuid, - hotkey_account_id_2 - )); - - step_block(1); - curr_block_num += 1; - - // We are UNDER the number of regs allowed this interval. - // And the number of regs of each type is equal - - // Check the adjusted BURN has DECREASED. 
- let adjusted_burn = SubtensorModule::get_burn_as_u64(netuid); - assert!(adjusted_burn < burn_cost); - assert_eq!(adjusted_burn, 833); - - // Check the adjusted POW difficulty has DECREASED. - let adjusted_diff = SubtensorModule::get_difficulty_as_u64(netuid); - assert!(adjusted_diff < start_diff); - assert_eq!(adjusted_diff, 8_333); - }); -} - -#[test] -#[allow(unused_assignments)] -fn test_burn_adjustment_case_f() { - // Test case F of the difficulty and burn adjustment algorithm. - // ==================== - // There are too many registrations this interval and the pow and burn registrations are equal - // this triggers an increase in the burn cost and pow difficulty - new_test_ext(1).execute_with(|| { - let netuid: u16 = 1; - let tempo: u16 = 13; - let burn_cost: u64 = 1000; - let adjustment_interval = 1; - let target_registrations_per_interval: u16 = 1; - let start_diff: u64 = 10_000; - let mut curr_block_num = 0; - add_network(netuid, tempo, 0); - SubtensorModule::set_max_registrations_per_block(netuid, 10); - SubtensorModule::set_burn(netuid, burn_cost); - SubtensorModule::set_difficulty(netuid, start_diff); - SubtensorModule::set_min_difficulty(netuid, start_diff); - SubtensorModule::set_adjustment_interval(netuid, adjustment_interval); - SubtensorModule::set_adjustment_alpha(netuid, 58000); // Set to old value. - SubtensorModule::set_target_registrations_per_interval( - netuid, - target_registrations_per_interval, - ); - - // Register key 1. This is a POW registration - let hotkey_account_id_1 = U256::from(1); - let coldkey_account_id_1 = U256::from(1); - let (nonce1, work1): (u64, Vec) = SubtensorModule::create_work_for_block_number( - netuid, - curr_block_num, - 11231312312, - &hotkey_account_id_1, - ); - let result1 = SubtensorModule::register( - <::RuntimeOrigin>::signed(hotkey_account_id_1), - netuid, - curr_block_num, - nonce1, - work1, - hotkey_account_id_1, - coldkey_account_id_1, - ); - assert_ok!(result1); - - // Register key 2. This is a BURN registration - let hotkey_account_id_2 = U256::from(2); - let coldkey_account_id_2 = U256::from(2); - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id_2, 10000); - assert_ok!(SubtensorModule::burned_register( - <::RuntimeOrigin>::signed(hotkey_account_id_2), - netuid, - hotkey_account_id_2 - )); - - step_block(1); - curr_block_num += 1; - // We are OVER the number of regs allowed this interval. - // And the number of regs of each type is equal - - // Check the adjusted BURN has INCREASED. - let adjusted_burn = SubtensorModule::get_burn_as_u64(netuid); - assert!(adjusted_burn > burn_cost); - assert_eq!(adjusted_burn, 1_500); - - // Check the adjusted POW difficulty has INCREASED. - let adjusted_diff = SubtensorModule::get_difficulty_as_u64(netuid); - assert!(adjusted_diff > start_diff); - assert_eq!(adjusted_diff, 15_000); - }); -} - -#[test] -fn test_burn_adjustment_case_e_zero_registrations() { - // Test case E of the difficulty and burn adjustment algorithm. - // ==================== - // There are not enough registrations this interval and nobody registered either POW or BURN - // this triggers a decrease in the BURN cost and POW difficulty - - // BUT there are zero registrations this interval. 
- new_test_ext(1).execute_with(|| { - let netuid: u16 = 1; - let tempo: u16 = 13; - let burn_cost: u64 = 1000; - let adjustment_interval = 1; - let target_registrations_per_interval: u16 = 1; - let start_diff: u64 = 10_000; - add_network(netuid, tempo, 0); - SubtensorModule::set_max_registrations_per_block(netuid, 10); - SubtensorModule::set_burn(netuid, burn_cost); - SubtensorModule::set_difficulty(netuid, start_diff); - SubtensorModule::set_min_difficulty(netuid, 1); - SubtensorModule::set_adjustment_interval(netuid, adjustment_interval); - SubtensorModule::set_adjustment_alpha(netuid, 58000); // Set to old value. - SubtensorModule::set_target_registrations_per_interval( - netuid, - target_registrations_per_interval, - ); - - // No registrations this interval of any kind. - step_block(1); - - // We are UNDER the number of regs allowed this interval. - // And the number of regs of each type is equal - - // Check the adjusted BURN has DECREASED. - let adjusted_burn = SubtensorModule::get_burn_as_u64(netuid); - assert!(adjusted_burn < burn_cost); - assert_eq!(adjusted_burn, 500); - - // Check the adjusted POW difficulty has DECREASED. - let adjusted_diff = SubtensorModule::get_difficulty_as_u64(netuid); - assert!(adjusted_diff < start_diff); - assert_eq!(adjusted_diff, 5_000); - }); -} - -#[test] -fn test_emission_based_on_registration_status() { - new_test_ext(1).execute_with(|| { - let n: u16 = 100; - let netuid_off: u16 = 1; - let netuid_on: u16 = 2; - let tempo: u16 = 1; - let netuids: Vec = vec![netuid_off, netuid_on]; - let emissions: Vec = vec![1000000000, 1000000000]; - - // Add subnets with registration turned off and on - add_network(netuid_off, tempo, 0); - add_network(netuid_on, tempo, 0); - SubtensorModule::set_max_allowed_uids(netuid_off, n); - SubtensorModule::set_max_allowed_uids(netuid_on, n); - SubtensorModule::set_emission_values(&netuids, emissions).unwrap(); - SubtensorModule::set_network_registration_allowed(netuid_off, false); - SubtensorModule::set_network_registration_allowed(netuid_on, true); - - // Populate the subnets with neurons - for i in 0..n { - SubtensorModule::append_neuron(netuid_off, &U256::from(i), 0); - SubtensorModule::append_neuron(netuid_on, &U256::from(i), 0); - } - - // Generate emission at block 0 - let block: u64 = 0; - SubtensorModule::generate_emission(block); - - // Verify that no emission tuples are loaded for the subnet with registration off - assert!(SubtensorModule::get_loaded_emission_tuples(netuid_off).is_none()); - - // Verify that emission tuples are loaded for the subnet with registration on - assert!(SubtensorModule::get_loaded_emission_tuples(netuid_on).is_some()); - assert_eq!( - SubtensorModule::get_loaded_emission_tuples(netuid_on) - .unwrap() - .len(), - n as usize - ); - - // Step to the next epoch block - let epoch_block: u16 = tempo; - step_block(epoch_block); - - // Verify that no emission tuples are loaded for the subnet with registration off - assert!(SubtensorModule::get_loaded_emission_tuples(netuid_off).is_none()); - log::info!( - "Emissions for netuid with registration off: {:?}", - SubtensorModule::get_loaded_emission_tuples(netuid_off) - ); - - // Verify that emission tuples are loaded for the subnet with registration on - assert!(SubtensorModule::get_loaded_emission_tuples(netuid_on).is_some()); - log::info!( - "Emissions for netuid with registration on: {:?}", - SubtensorModule::get_loaded_emission_tuples(netuid_on) - ); - assert_eq!( - SubtensorModule::get_loaded_emission_tuples(netuid_on) - .unwrap() - .len(), - 
n as usize - ); - - let block: u64 = 0; - // drain the emission tuples for the subnet with registration on - SubtensorModule::drain_emission(block); - // Turn on registration for the subnet with registration off - SubtensorModule::set_network_registration_allowed(netuid_off, true); - SubtensorModule::set_network_registration_allowed(netuid_on, false); - - // Generate emission at the next block - let next_block: u64 = block + 1; - SubtensorModule::generate_emission(next_block); - - // Verify that emission tuples are now loaded for the subnet with registration turned on - assert!(SubtensorModule::get_loaded_emission_tuples(netuid_off).is_some()); - log::info!( - "Emissions for netuid with registration on: {:?}", - SubtensorModule::get_loaded_emission_tuples(netuid_on) - ); - assert!(SubtensorModule::get_loaded_emission_tuples(netuid_on).is_none()); - assert_eq!( - SubtensorModule::get_loaded_emission_tuples(netuid_off) - .unwrap() - .len(), - n as usize - ); - }); -} - -#[test] -fn test_epoch_runs_when_registration_disabled() { - new_test_ext(1).execute_with(|| { - let n: u16 = 100; - let netuid_off: u16 = 1; - let tempo: u16 = 1; - let netuids: Vec = vec![netuid_off]; - let emissions: Vec = vec![1000000000]; - - // Add subnets with registration turned off and on - add_network(netuid_off, tempo, 0); - SubtensorModule::set_max_allowed_uids(netuid_off, n); - SubtensorModule::set_emission_values(&netuids, emissions).unwrap(); - SubtensorModule::set_network_registration_allowed(netuid_off, false); - - // Populate the subnets with neurons - for i in 0..n { - SubtensorModule::append_neuron(netuid_off, &U256::from(i), 0); - } - - // Generate emission at block 1 - let block: u64 = 1; - SubtensorModule::generate_emission(block); - - step_block(1); // Now block 2 - - // Verify blocks since last step was set - assert_eq!(SubtensorModule::get_blocks_since_last_step(netuid_off), 1); - - // Step to the next epoch block - let epoch_block: u16 = tempo; - step_block(epoch_block); - - // Verify blocks since last step was set, this indicates we ran the epoch - assert_eq!( - SubtensorModule::get_blocks_since_last_step(netuid_off), - 0_u64 - ); - assert!(SubtensorModule::get_loaded_emission_tuples(netuid_off).is_some()); - }); -} diff --git a/pallets/subtensor/tests/children.rs b/pallets/subtensor/tests/children.rs new file mode 100644 index 000000000..2b99030ab --- /dev/null +++ b/pallets/subtensor/tests/children.rs @@ -0,0 +1,3239 @@ +#![allow(clippy::indexing_slicing)] +use crate::mock::*; +use frame_support::{assert_err, assert_noop, assert_ok}; +mod mock; +use pallet_subtensor::{utils::rate_limiting::TransactionType, *}; +use sp_core::U256; + +// 1: Successful setting of a single child +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_do_set_child_singular_success --exact --nocapture +#[test] +fn test_do_set_child_singular_success() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let child = U256::from(3); + let netuid: u16 = 1; + let proportion: u64 = 1000; + + // Add network and register hotkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Set child + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(proportion, child)] + )); + + // Verify child assignment + let children = SubtensorModule::get_children(&hotkey, netuid); + assert_eq!(children, vec![(proportion, child)]); + }); +} + +// 2: Attempt to set child in non-existent network +// 
SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_do_set_child_singular_network_does_not_exist --exact --nocapture +#[test] +fn test_do_set_child_singular_network_does_not_exist() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let child = U256::from(3); + let netuid: u16 = 999; // Non-existent network + let proportion: u64 = 1000; + + // Attempt to set child + assert_err!( + SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(proportion, child)] + ), + Error::::SubNetworkDoesNotExist + ); + }); +} + +// 3: Attempt to set invalid child (same as hotkey) +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_do_set_child_singular_invalid_child --exact --nocapture +#[test] +fn test_do_set_child_singular_invalid_child() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let netuid: u16 = 1; + let proportion: u64 = 1000; + + // Add network and register hotkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Attempt to set child as the same hotkey + assert_err!( + SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![ + (proportion, hotkey) // Invalid child + ] + ), + Error::::InvalidChild + ); + }); +} + +// 4: Attempt to set child with non-associated coldkey +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_do_set_child_singular_non_associated_coldkey --exact --nocapture +#[test] +fn test_do_set_child_singular_non_associated_coldkey() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let child = U256::from(3); + let netuid: u16 = 1; + let proportion: u64 = 1000; + + // Add network and register hotkey with a different coldkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, U256::from(999), 0); + + // Attempt to set child + assert_err!( + SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(proportion, child)] + ), + Error::::NonAssociatedColdKey + ); + }); +} + +// 5: Attempt to set child in root network +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_do_set_child_singular_root_network --exact --nocapture +#[test] +fn test_do_set_child_singular_root_network() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let child = U256::from(3); + let netuid: u16 = SubtensorModule::get_root_netuid(); // Root network + let proportion: u64 = 1000; + + // Add network and register hotkey + add_network(netuid, 13, 0); + + // Attempt to set child + assert_err!( + SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(proportion, child)] + ), + Error::::RegistrationNotPermittedOnRootSubnet + ); + }); +} + +// 6: Cleanup of old children when setting new ones +// This test verifies that when new children are set, the old ones are properly removed. 
+// It checks: +// - Setting an initial child +// - Replacing it with a new child +// - Ensuring the old child is no longer associated +// - Confirming the new child is correctly assigned +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_do_set_child_singular_old_children_cleanup --exact --nocapture +#[test] +fn test_do_set_child_singular_old_children_cleanup() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let old_child = U256::from(3); + let new_child = U256::from(4); + let netuid: u16 = 1; + let proportion: u64 = 1000; + + // Add network and register hotkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Set old child + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(proportion, old_child)] + )); + + // Set new child + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(proportion, new_child)] + )); + + // Verify old child is removed + let old_child_parents = SubtensorModule::get_parents(&old_child, netuid); + assert!(old_child_parents.is_empty()); + + // Verify new child assignment + let new_child_parents = SubtensorModule::get_parents(&new_child, netuid); + assert_eq!(new_child_parents, vec![(proportion, hotkey)]); + }); +} + +// 7: Verify new children assignment +// This test checks if new children are correctly assigned to a parent. +// It verifies: +// - Setting a child for a parent +// - Confirming the child is correctly listed under the parent +// - Ensuring the parent is correctly listed for the child +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_do_set_child_singular_new_children_assignment --exact --nocapture +#[test] +fn test_do_set_child_singular_new_children_assignment() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let child = U256::from(3); + let netuid: u16 = 1; + let proportion: u64 = 1000; + + // Add network and register hotkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Set child + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(proportion, child)] + )); + + // Verify child assignment + let children = SubtensorModule::get_children(&hotkey, netuid); + assert_eq!(children, vec![(proportion, child)]); + + // Verify parent assignment + let parents = SubtensorModule::get_parents(&child, netuid); + assert_eq!(parents, vec![(proportion, hotkey)]); + }); +} + +// 8: Test edge cases for proportion values +// This test verifies that the system correctly handles minimum and maximum proportion values. 
+// It checks: +// - Setting a child with the minimum possible proportion (0) +// - Setting a child with the maximum possible proportion (u64::MAX) +// - Confirming both assignments are processed correctly +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_do_set_child_singular_proportion_edge_cases --exact --nocapture +#[test] +fn test_do_set_child_singular_proportion_edge_cases() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let child = U256::from(3); + let netuid: u16 = 1; + + // Add network and register hotkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Set child with minimum proportion + let min_proportion: u64 = 0; + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(min_proportion, child)] + )); + + // Verify child assignment with minimum proportion + let children = SubtensorModule::get_children(&hotkey, netuid); + assert_eq!(children, vec![(min_proportion, child)]); + + // Set child with maximum proportion + let max_proportion: u64 = u64::MAX; + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(max_proportion, child)] + )); + + // Verify child assignment with maximum proportion + let children = SubtensorModule::get_children(&hotkey, netuid); + assert_eq!(children, vec![(max_proportion, child)]); + }); +} + +// 9: Test setting multiple children +// This test verifies that when multiple children are set, only the last one remains. +// It checks: +// - Setting an initial child +// - Setting a second child +// - Confirming only the second child remains associated +// - Verifying the first child is no longer associated +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_do_set_child_singular_multiple_children --exact --nocapture +#[test] +fn test_do_set_child_singular_multiple_children() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let child1 = U256::from(3); + let child2 = U256::from(4); + let netuid: u16 = 1; + let proportion1: u64 = 500; + let proportion2: u64 = 500; + + // Add network and register hotkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Set first child + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(proportion1, child1)] + )); + + // Set second child + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(proportion2, child2)] + )); + + // Verify children assignment + let children = SubtensorModule::get_children(&hotkey, netuid); + assert_eq!(children, vec![(proportion2, child2)]); + + // Verify parent assignment for both children + let parents1 = SubtensorModule::get_parents(&child1, netuid); + assert!(parents1.is_empty()); // Old child should be removed + + let parents2 = SubtensorModule::get_parents(&child2, netuid); + assert_eq!(parents2, vec![(proportion2, hotkey)]); + }); +} + +// 10: Test adding a singular child with various error conditions +// This test checks different scenarios when adding a child, including: +// - Attempting to set a child in a non-existent network +// - Trying to set a child with an unassociated coldkey +// - Setting an invalid child +// - Successfully setting a valid child +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_add_singular_child --exact --nocapture 
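
A note on the proportion values used throughout these tests: they are u64 weights read as a fraction of u64::MAX, so u64::MAX assigns the parent's full weight to the child (test 11 further down asserts exactly that: the parent's 2000 stake ends up entirely on the child, which then reports 4000). A hypothetical helper illustrating that reading; the pallet's own rounding may differ:

    fn child_share(parent_stake: u64, proportion: u64) -> u64 {
        // proportion is interpreted as proportion / u64::MAX of the parent's stake;
        // widen to u128 so the product cannot overflow.
        ((parent_stake as u128 * proportion as u128) / u64::MAX as u128) as u64
    }

    // child_share(2000, u64::MAX) == 2000, leaving the parent with 0,
    // matching test_get_stake_for_hotkey_on_subnet below.
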
+#[test] +fn test_add_singular_child() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let child = U256::from(1); + let hotkey = U256::from(1); + let coldkey = U256::from(2); + assert_eq!( + SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(u64::MAX, child)] + ), + Err(Error::::SubNetworkDoesNotExist.into()) + ); + add_network(netuid, 0, 0); + assert_eq!( + SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(u64::MAX, child)] + ), + Err(Error::::NonAssociatedColdKey.into()) + ); + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + assert_eq!( + SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(u64::MAX, child)] + ), + Err(Error::::InvalidChild.into()) + ); + let child = U256::from(3); + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(u64::MAX, child)] + )); + }) +} + +// 11: Test getting stake for a hotkey on a subnet +// This test verifies the correct calculation of stake for a parent and child neuron: +// - Sets up a network with a parent and child neuron +// - Stakes tokens to both parent and child from different coldkeys +// - Establishes a parent-child relationship with 100% stake allocation +// - Checks that the parent's stake is correctly transferred to the child +// - Ensures the total stake is preserved in the system +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_get_stake_for_hotkey_on_subnet --exact --nocapture +#[test] +fn test_get_stake_for_hotkey_on_subnet() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let parent = U256::from(1); + let child = U256::from(2); + let coldkey1 = U256::from(3); + let coldkey2 = U256::from(4); + + add_network(netuid, 0, 0); + register_ok_neuron(netuid, parent, coldkey1, 0); + register_ok_neuron(netuid, child, coldkey2, 0); + + // Stake 1000 to parent from coldkey1 + SubtensorModule::increase_stake_on_coldkey_hotkey_account(&coldkey1, &parent, 1000); + // Stake 1000 to parent from coldkey2 + SubtensorModule::increase_stake_on_coldkey_hotkey_account(&coldkey2, &parent, 1000); + // Stake 1000 to child from coldkey1 + SubtensorModule::increase_stake_on_coldkey_hotkey_account(&coldkey1, &child, 1000); + // Stake 1000 to child from coldkey2 + SubtensorModule::increase_stake_on_coldkey_hotkey_account(&coldkey2, &child, 1000); + + // Set parent-child relationship with 100% stake allocation + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey1), + parent, + netuid, + vec![(u64::MAX, child)] + )); + + let parent_stake = SubtensorModule::get_stake_for_hotkey_on_subnet(&parent, netuid); + let child_stake = SubtensorModule::get_stake_for_hotkey_on_subnet(&child, netuid); + + log::info!("Parent stake: {}", parent_stake); + log::info!("Child stake: {}", child_stake); + + // The parent should have 0 stake as it's all allocated to the child + assert_eq!(parent_stake, 0); + // The child should have its original stake (2000) plus the parent's stake (2000) + assert_eq!(child_stake, 4000); + + // Ensure total stake is preserved + assert_eq!(parent_stake + child_stake, 4000); + }); +} + +// 12: Test revoking a singular child successfully +// This test checks the process of revoking a child neuron: +// - Sets up a network with a parent and child neuron +// - Establishes a parent-child relationship +// - Revokes the child relationship +// - Verifies that the child is removed from 
the parent's children list +// - Ensures the parent is removed from the child's parents list +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_do_revoke_child_singular_success --exact --nocapture +#[test] +fn test_do_revoke_child_singular_success() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let child = U256::from(3); + let netuid: u16 = 1; + let proportion: u64 = 1000; + + // Add network and register hotkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Set child + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(proportion, child)] + )); + + // Verify child assignment + let children = SubtensorModule::get_children(&hotkey, netuid); + assert_eq!(children, vec![(proportion, child)]); + + // Revoke child + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![] + )); + + // Verify child removal + let children = SubtensorModule::get_children(&hotkey, netuid); + assert!(children.is_empty()); + + // Verify parent removal + let parents = SubtensorModule::get_parents(&child, netuid); + assert!(parents.is_empty()); + }); +} + +// 13: Test revoking a child in a non-existent network +// This test verifies that attempting to revoke a child in a non-existent network results in an error: +// - Attempts to revoke a child in a network that doesn't exist +// - Checks that the appropriate error is returned +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_do_revoke_child_singular_network_does_not_exist --exact --nocapture +#[test] +fn test_do_revoke_child_singular_network_does_not_exist() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let netuid: u16 = 999; // Non-existent network + + // Attempt to revoke child + assert_err!( + SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![] + ), + Error::::SubNetworkDoesNotExist + ); + }); +} + +// 14: Test revoking a child with a non-associated coldkey +// This test ensures that attempting to revoke a child using an unassociated coldkey results in an error: +// - Sets up a network with a hotkey registered to a different coldkey +// - Attempts to revoke a child using an unassociated coldkey +// - Verifies that the appropriate error is returned +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_do_revoke_child_singular_non_associated_coldkey --exact --nocapture +#[test] +fn test_do_revoke_child_singular_non_associated_coldkey() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let netuid: u16 = 1; + + // Add network and register hotkey with a different coldkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, U256::from(999), 0); + + // Attempt to revoke child + assert_err!( + SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![] + ), + Error::::NonAssociatedColdKey + ); + }); +} + +// 15: Test revoking a non-associated child +// This test verifies that attempting to revoke a child that is not associated with the parent results in an error: +// - Sets up a network and registers a hotkey +// - Attempts to revoke a child that was never associated with the parent +// - Checks that the appropriate error is returned +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- 
test_do_revoke_child_singular_child_not_associated --exact --nocapture +#[test] +fn test_do_revoke_child_singular_child_not_associated() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let child = U256::from(3); + let netuid: u16 = 1; + + // Add network and register hotkey + add_network(netuid, 13, 0); + // Attempt to revoke child that is not associated + assert_err!( + SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(u64::MAX, child)] + ), + Error::::NonAssociatedColdKey + ); + }); +} + +// 16: Test setting multiple children successfully +// This test verifies that multiple children can be set for a parent successfully: +// - Sets up a network and registers a hotkey +// - Sets multiple children with different proportions +// - Verifies that the children are correctly assigned to the parent +// - Checks that the parent is correctly assigned to each child +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_do_set_children_multiple_success --exact --nocapture +#[test] +fn test_do_set_children_multiple_success() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let child1 = U256::from(3); + let child2 = U256::from(4); + let netuid: u16 = 1; + let proportion1: u64 = 1000; + let proportion2: u64 = 2000; + + // Add network and register hotkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Set multiple children + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(proportion1, child1), (proportion2, child2)] + )); + + // Verify children assignment + let children = SubtensorModule::get_children(&hotkey, netuid); + assert_eq!(children, vec![(proportion1, child1), (proportion2, child2)]); + + // Verify parent assignment for both children + let parents1 = SubtensorModule::get_parents(&child1, netuid); + assert_eq!(parents1, vec![(proportion1, hotkey)]); + + let parents2 = SubtensorModule::get_parents(&child2, netuid); + assert_eq!(parents2, vec![(proportion2, hotkey)]); + }); +} + +// 17: Test setting multiple children in a non-existent network +// This test ensures that attempting to set multiple children in a non-existent network results in an error: +// - Attempts to set children in a network that doesn't exist +// - Verifies that the appropriate error is returned +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_do_set_children_multiple_network_does_not_exist --exact --nocapture +#[test] +fn test_do_set_children_multiple_network_does_not_exist() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let child1 = U256::from(3); + let netuid: u16 = 999; // Non-existent network + let proportion: u64 = 1000; + + // Attempt to set children + assert_err!( + SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(proportion, child1)] + ), + Error::::SubNetworkDoesNotExist + ); + }); +} + +// 18: Test setting multiple children with an invalid child +// This test verifies that attempting to set multiple children with an invalid child (same as parent) results in an error: +// - Sets up a network and registers a hotkey +// - Attempts to set a child that is the same as the parent hotkey +// - Checks that the appropriate error is returned +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- 
test_do_set_children_multiple_invalid_child --exact --nocapture +#[test] +fn test_do_set_children_multiple_invalid_child() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let netuid: u16 = 1; + let proportion: u64 = 1000; + + // Add network and register hotkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Attempt to set child as the same hotkey + assert_err!( + SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(proportion, hotkey)] + ), + Error::::InvalidChild + ); + }); +} + +// 19: Test setting multiple children with a non-associated coldkey +// This test ensures that attempting to set multiple children using an unassociated coldkey results in an error: +// - Sets up a network with a hotkey registered to a different coldkey +// - Attempts to set children using an unassociated coldkey +// - Verifies that the appropriate error is returned +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_do_set_children_multiple_non_associated_coldkey --exact --nocapture +#[test] +fn test_do_set_children_multiple_non_associated_coldkey() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let child = U256::from(3); + let netuid: u16 = 1; + let proportion: u64 = 1000; + + // Add network and register hotkey with a different coldkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, U256::from(999), 0); + + // Attempt to set children + assert_err!( + SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(proportion, child)] + ), + Error::::NonAssociatedColdKey + ); + }); +} + +// 20: Test setting multiple children in root network +// This test verifies that attempting to set children in the root network results in an error: +// - Sets up the root network +// - Attempts to set children in the root network +// - Checks that the appropriate error is returned +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_do_set_children_multiple_root_network --exact --nocapture +#[test] +fn test_do_set_children_multiple_root_network() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let child = U256::from(3); + let netuid: u16 = SubtensorModule::get_root_netuid(); // Root network + let proportion: u64 = 1000; + + // Add network and register hotkey + add_network(netuid, 13, 0); + + // Attempt to set children + assert_err!( + SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(proportion, child)] + ), + Error::::RegistrationNotPermittedOnRootSubnet + ); + }); +} + +// 21: Test cleanup of old children when setting multiple new ones +// This test ensures that when new children are set, the old ones are properly removed: +// - Sets up a network and registers a hotkey +// - Sets an initial child +// - Replaces it with multiple new children +// - Verifies that the old child is no longer associated +// - Confirms the new children are correctly assigned +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_do_set_children_multiple_old_children_cleanup --exact --nocapture +#[test] +fn test_do_set_children_multiple_old_children_cleanup() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let old_child = U256::from(3); + let new_child1 = U256::from(4); + let new_child2 = U256::from(5); + let 
netuid: u16 = 1; + let proportion: u64 = 1000; + + // Add network and register hotkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Set old child + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(proportion, old_child)] + )); + + // Set new children + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(proportion, new_child1), (proportion, new_child2)] + )); + + // Verify old child is removed + let old_child_parents = SubtensorModule::get_parents(&old_child, netuid); + assert!(old_child_parents.is_empty()); + + // Verify new children assignment + let new_child1_parents = SubtensorModule::get_parents(&new_child1, netuid); + assert_eq!(new_child1_parents, vec![(proportion, hotkey)]); + + let new_child2_parents = SubtensorModule::get_parents(&new_child2, netuid); + assert_eq!(new_child2_parents, vec![(proportion, hotkey)]); + }); +} + +// 22: Test setting multiple children with edge case proportions +// This test verifies the behavior when setting multiple children with minimum and maximum proportions: +// - Sets up a network and registers a hotkey +// - Sets two children with minimum and maximum proportions respectively +// - Verifies that the children are correctly assigned with their respective proportions +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_do_set_children_multiple_proportion_edge_cases --exact --nocapture +#[test] +fn test_do_set_children_multiple_proportion_edge_cases() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let child1 = U256::from(3); + let child2 = U256::from(4); + let netuid: u16 = 1; + + // Add network and register hotkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Set children with minimum and maximum proportions + let min_proportion: u64 = 0; + let max_proportion: u64 = u64::MAX; + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(min_proportion, child1), (max_proportion, child2)] + )); + + // Verify children assignment + let children = SubtensorModule::get_children(&hotkey, netuid); + assert_eq!( + children, + vec![(min_proportion, child1), (max_proportion, child2)] + ); + }); +} + +// 23: Test overwriting existing children with new ones +// This test ensures that when new children are set, they correctly overwrite the existing ones: +// - Sets up a network and registers a hotkey +// - Sets initial children +// - Overwrites with new children +// - Verifies that the final children assignment is correct +// - Checks that old children are properly removed and new ones are correctly assigned +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_do_set_children_multiple_overwrite_existing --exact --nocapture +#[test] +fn test_do_set_children_multiple_overwrite_existing() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let child1 = U256::from(3); + let child2 = U256::from(4); + let child3 = U256::from(5); + let netuid: u16 = 1; + let proportion: u64 = 1000; + + // Add network and register hotkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Set initial children + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(proportion, child1), (proportion, child2)] + )); + + // 
Overwrite with new children + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(proportion * 2, child2), (proportion * 3, child3)] + )); + + // Verify final children assignment + let children = SubtensorModule::get_children(&hotkey, netuid); + assert_eq!( + children, + vec![(proportion * 2, child2), (proportion * 3, child3)] + ); + + // Verify parent assignment for all children + let parents1 = SubtensorModule::get_parents(&child1, netuid); + assert!(parents1.is_empty()); + + let parents2 = SubtensorModule::get_parents(&child2, netuid); + assert_eq!(parents2, vec![(proportion * 2, hotkey)]); + + let parents3 = SubtensorModule::get_parents(&child3, netuid); + assert_eq!(parents3, vec![(proportion * 3, hotkey)]); + }); +} + +// 24: Test childkey take functionality +// This test verifies the functionality of setting and getting childkey take: +// - Sets up a network and registers a hotkey +// - Checks default, minimum, and maximum childkey take values +// - Sets a new childkey take value +// - Verifies the new take value is stored correctly +// - Attempts to set an invalid take value and checks for appropriate error +// - Tries to set take with a non-associated coldkey and verifies the error +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_childkey_take_functionality --exact --nocapture +#[test] +fn test_childkey_take_functionality() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let netuid: u16 = 1; + + // Add network and register hotkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Test default and min childkey take + let default_take = SubtensorModule::get_default_childkey_take(); + let min_take = SubtensorModule::get_min_childkey_take(); + log::info!("Default take: {}, Min take: {}", default_take, min_take); + + // Check if default take and min take are the same + assert_eq!( + default_take, min_take, + "Default take should be equal to min take" + ); + + // Log the actual value of MaxChildkeyTake + log::info!( + "MaxChildkeyTake value: {:?}", + MaxChildkeyTake::<Test>::get() + ); + + // Test setting childkey take + let new_take: u16 = SubtensorModule::get_max_childkey_take() / 2; // 50% of max_take + assert_ok!(SubtensorModule::set_childkey_take( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + new_take + )); + + // Verify childkey take was set correctly + let stored_take = SubtensorModule::get_childkey_take(&hotkey, netuid); + log::info!("Stored take: {}", stored_take); + assert_eq!(stored_take, new_take); + + // Test setting childkey take outside of allowed range + let invalid_take: u16 = SubtensorModule::get_max_childkey_take() + 1; + assert_noop!( + SubtensorModule::set_childkey_take( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + invalid_take + ), + Error::<Test>::InvalidChildkeyTake + ); + + // Test setting childkey take with non-associated coldkey + let non_associated_coldkey = U256::from(999); + assert_noop!( + SubtensorModule::set_childkey_take( + RuntimeOrigin::signed(non_associated_coldkey), + hotkey, + netuid, + new_take + ), + Error::<Test>::NonAssociatedColdKey + ); + }); +} + +// 25: Test childkey take rate limiting +// This test verifies the rate limiting functionality for setting childkey take: +// - Sets up a network and registers a hotkey +// - Sets a rate limit for childkey take changes +// - Performs multiple attempts to set childkey take +// - Verifies that rate limiting prevents frequent changes +// - 
Advances blocks to bypass rate limit and confirms successful change +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_childkey_take_rate_limiting --exact --nocapture +#[test] +fn test_childkey_take_rate_limiting() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let netuid: u16 = 1; + + // Add network and register hotkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Set a rate limit for childkey take changes + let rate_limit: u64 = 100; + SubtensorModule::set_tx_childkey_take_rate_limit(rate_limit); + + log::info!( + "Set TxChildkeyTakeRateLimit: {:?}", + TxChildkeyTakeRateLimit::<Test>::get() + ); + + // Helper function to log rate limit information + let log_rate_limit_info = || { + let current_block = SubtensorModule::get_current_block_as_u64(); + let last_block = SubtensorModule::get_last_transaction_block( + &hotkey, + netuid, + &TransactionType::SetChildkeyTake, + ); + let passes = SubtensorModule::passes_rate_limit_on_subnet( + &TransactionType::SetChildkeyTake, + &hotkey, + netuid, + ); + let limit = SubtensorModule::get_rate_limit(&TransactionType::SetChildkeyTake); + log::info!( + "Rate limit info: current_block: {}, last_block: {}, limit: {}, passes: {}, diff: {}", + current_block, + last_block, + limit, + passes, + current_block.saturating_sub(last_block) + ); + }; + + // First transaction (should succeed) + log_rate_limit_info(); + assert_ok!(SubtensorModule::set_childkey_take( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + 500 + )); + log_rate_limit_info(); + + // Second transaction (should fail due to rate limit) + log_rate_limit_info(); + assert_noop!( + SubtensorModule::set_childkey_take(RuntimeOrigin::signed(coldkey), hotkey, netuid, 600), + Error::<Test>::TxChildkeyTakeRateLimitExceeded + ); + log_rate_limit_info(); + + // Advance the block number to just before the rate limit + run_to_block(rate_limit - 1); + + // Third transaction (should still fail) + log_rate_limit_info(); + assert_noop!( + SubtensorModule::set_childkey_take(RuntimeOrigin::signed(coldkey), hotkey, netuid, 650), + Error::<Test>::TxChildkeyTakeRateLimitExceeded + ); + log_rate_limit_info(); + + // Advance the block number to just after the rate limit + run_to_block(rate_limit + 1); + + // Fourth transaction (should succeed) + log_rate_limit_info(); + assert_ok!(SubtensorModule::set_childkey_take( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + 700 + )); + log_rate_limit_info(); + + // Verify the final take was set + let stored_take = SubtensorModule::get_childkey_take(&hotkey, netuid); + assert_eq!(stored_take, 700); + }); +} + +// 26: Test childkey take functionality across multiple networks +// This test verifies the childkey take functionality across multiple networks: +// - Creates multiple networks and sets up neurons +// - Sets unique childkey take values for each network +// - Verifies that each network has a different childkey take value +// - Attempts to set childkey take again (should fail due to rate limit) +// - Advances blocks to bypass rate limit and successfully updates take value +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_multiple_networks_childkey_take --exact --nocapture +#[test] +fn test_multiple_networks_childkey_take() { + new_test_ext(1).execute_with(|| { + const NUM_NETWORKS: u16 = 10; + let coldkey = U256::from(1); + let hotkey = U256::from(2); + + // Create networks 1 through 9 and set up neurons (skip network 0) + for netuid in 1..NUM_NETWORKS { + 
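// Note on the loop below: each iteration can set a childkey take in the same block because,
// as the rate-limit logging helper in test 25 suggests, the limit is tracked per
// (hotkey, netuid, TransactionType) rather than globally. A rough sketch of the check,
// assuming only the fields logged there (current_block, last_block, limit):
//     passes = current_block.saturating_sub(last_block) >= limit
// This is an illustrative reading of passes_rate_limit_on_subnet, not its verbatim body.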
// Add network + add_network(netuid, 13, 0); + + // Register neuron + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Set a unique childkey take value for each network + let take_value = (netuid + 1) * 100; // Values will be 200, 300, ..., 1000 + assert_ok!(SubtensorModule::set_childkey_take( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + take_value + )); + + // Verify the childkey take was set correctly + let stored_take = SubtensorModule::get_childkey_take(&hotkey, netuid); + assert_eq!( + stored_take, take_value, + "Childkey take not set correctly for network {}", + netuid + ); + + // Log the set value + log::info!("Network {}: Childkey take set to {}", netuid, take_value); + } + + // Verify all networks have different childkey take values + for i in 1..NUM_NETWORKS { + for j in (i + 1)..NUM_NETWORKS { + let take_i = SubtensorModule::get_childkey_take(&hotkey, i); + let take_j = SubtensorModule::get_childkey_take(&hotkey, j); + assert_ne!( + take_i, take_j, + "Childkey take values should be different for networks {} and {}", + i, j + ); + } + } + + // Attempt to set childkey take again (should fail due to rate limit) + let result = + SubtensorModule::set_childkey_take(RuntimeOrigin::signed(coldkey), hotkey, 1, 1100); + assert_noop!(result, Error::<Test>::TxChildkeyTakeRateLimitExceeded); + + // Advance blocks to bypass rate limit + run_to_block(SubtensorModule::get_tx_childkey_take_rate_limit() + 1); + + // Now setting childkey take should succeed + assert_ok!(SubtensorModule::set_childkey_take( + RuntimeOrigin::signed(coldkey), + hotkey, + 1, + 1100 + )); + + // Verify the new take value + let new_take = SubtensorModule::get_childkey_take(&hotkey, 1); + assert_eq!(new_take, 1100, "Childkey take not updated after rate limit"); + }); +} + +// 27: Test setting children with an empty list +// This test verifies the behavior of setting an empty children list: +// - Adds a network and registers a hotkey +// - Sets an empty children list for the hotkey +// - Verifies that the children assignment is empty +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_do_set_children_multiple_empty_list --exact --nocapture +#[test] +fn test_do_set_children_multiple_empty_list() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let netuid: u16 = 1; + + // Add network and register hotkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Set empty children list + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![] + )); + + // Verify children assignment is empty + let children = SubtensorModule::get_children(&hotkey, netuid); + assert!(children.is_empty()); + }); +} + +// 28: Test revoking multiple children successfully +// This test verifies the successful revocation of multiple children: +// - Adds a network and registers a hotkey +// - Sets multiple children for the hotkey +// - Revokes all children by setting an empty list +// - Verifies that the children list is empty +// - Verifies that the parent-child relationships are removed for both children +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_do_revoke_children_multiple_success --exact --nocapture +#[test] +fn test_do_revoke_children_multiple_success() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let child1 = U256::from(3); + let child2 = U256::from(4); + let netuid: u16 = 1; + let proportion1: u64 = 
1000; + let proportion2: u64 = 2000; + + // Add network and register hotkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Set multiple children + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(proportion1, child1), (proportion2, child2)] + )); + + // Revoke multiple children + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![] + )); + + // Verify children removal + let children = SubtensorModule::get_children(&hotkey, netuid); + assert!(children.is_empty()); + + // Verify parent removal for both children + let parents1 = SubtensorModule::get_parents(&child1, netuid); + assert!(parents1.is_empty()); + + let parents2 = SubtensorModule::get_parents(&child2, netuid); + assert!(parents2.is_empty()); + }); +} + +// 29: Test revoking children when network does not exist +// This test verifies the behavior when attempting to revoke children on a non-existent network: +// - Attempts to revoke children on a network that doesn't exist +// - Verifies that the operation fails with the correct error +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_do_revoke_children_multiple_network_does_not_exist --exact --nocapture +#[test] +fn test_do_revoke_children_multiple_network_does_not_exist() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let child1 = U256::from(3); + let child2 = U256::from(4); + let netuid: u16 = 999; // Non-existent network + // Attempt to revoke children + assert_err!( + SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(u64::MAX / 2, child1), (u64::MAX / 2, child2)] + ), + Error::<Test>::SubNetworkDoesNotExist + ); + }); +} + +// 30: Test revoking children with non-associated coldkey +// This test verifies the behavior when attempting to revoke children using a non-associated coldkey: +// - Adds a network and registers a hotkey with a different coldkey +// - Attempts to revoke children using an unassociated coldkey +// - Verifies that the operation fails with the correct error +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_do_revoke_children_multiple_non_associated_coldkey --exact --nocapture +#[test] +fn test_do_revoke_children_multiple_non_associated_coldkey() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let child1 = U256::from(3); + let child2 = U256::from(4); + let netuid: u16 = 1; + + // Add network and register hotkey with a different coldkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, U256::from(999), 0); + + // Attempt to revoke children + assert_err!( + SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(u64::MAX / 2, child1), (u64::MAX / 2, child2)] + ), + Error::<Test>::NonAssociatedColdKey + ); + }); +} + +// 31: Test partial revocation of children +// This test verifies the behavior when partially revoking children: +// - Adds a network and registers a hotkey +// - Sets multiple children for the hotkey +// - Revokes one of the children +// - Verifies that the correct children remain and the revoked child is removed +// - Checks the parent-child relationships after partial revocation +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_do_revoke_children_multiple_partial_revocation --exact --nocapture +#[test] +fn 
test_do_revoke_children_multiple_partial_revocation() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let child1 = U256::from(3); + let child2 = U256::from(4); + let child3 = U256::from(5); + let netuid: u16 = 1; + let proportion: u64 = 1000; + + // Add network and register hotkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Set multiple children + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![ + (proportion, child1), + (proportion, child2), + (proportion, child3) + ] + )); + + // Revoke only child3 + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(proportion, child1), (proportion, child2)] + )); + + // Verify children removal + let children = SubtensorModule::get_children(&hotkey, netuid); + assert_eq!(children, vec![(proportion, child1), (proportion, child2)]); + + // Verify parents. + let parents1 = SubtensorModule::get_parents(&child3, netuid); + assert!(parents1.is_empty()); + let parents1 = SubtensorModule::get_parents(&child1, netuid); + assert_eq!(parents1, vec![(proportion, hotkey)]); + let parents2 = SubtensorModule::get_parents(&child2, netuid); + assert_eq!(parents2, vec![(proportion, hotkey)]); + }); +} + +// 32: Test revoking non-existent children +// This test verifies the behavior when attempting to revoke non-existent children: +// - Adds a network and registers a hotkey +// - Sets one child for the hotkey +// - Attempts to revoke all children (including non-existent ones) +// - Verifies that all children are removed, including the existing one +// - Checks that the parent-child relationship is properly updated +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_do_revoke_children_multiple_non_existent_children --exact --nocapture +#[test] +fn test_do_revoke_children_multiple_non_existent_children() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let child1 = U256::from(3); + let netuid: u16 = 1; + let proportion: u64 = 1000; + + // Add network and register hotkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Set one child + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(proportion, child1)] + )); + + // Attempt to revoke existing and non-existent children + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![] + )); + + // Verify all children are removed + let children = SubtensorModule::get_children(&hotkey, netuid); + assert!(children.is_empty()); + + // Verify parent removal for the existing child + let parents1 = SubtensorModule::get_parents(&child1, netuid); + assert!(parents1.is_empty()); + }); +} + +// 33: Test revoking children with an empty list +// This test verifies the behavior when attempting to revoke children using an empty list: +// - Adds a network and registers a hotkey +// - Attempts to revoke children with an empty list +// - Verifies that no changes occur in the children list +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_do_revoke_children_multiple_empty_list --exact --nocapture +#[test] +fn test_do_revoke_children_multiple_empty_list() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let netuid: u16 = 1; + + // Add network and register hotkey 
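// A reminder of the semantics these revocation tests rely on: do_set_children replaces the
// entire child list for a (hotkey, netuid) pair, so "revoking" is simply a call with vec![],
// and a partial revoke is a call that omits the children to drop (as tests 28 and 31 show).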
+ add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Attempt to revoke with an empty list + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![] + )); + + // Verify no changes in children + let children = SubtensorModule::get_children(&hotkey, netuid); + assert!(children.is_empty()); + }); +} + +// 34: Test complex scenario for revoking multiple children +// This test verifies a complex scenario involving setting and revoking multiple children: +// - Adds a network and registers a hotkey +// - Sets multiple children with different proportions +// - Revokes one child and verifies the remaining children +// - Revokes all remaining children +// - Verifies that all parent-child relationships are properly updated +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_do_revoke_children_multiple_complex_scenario --exact --nocapture +#[test] +fn test_do_revoke_children_multiple_complex_scenario() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let child1 = U256::from(3); + let child2 = U256::from(4); + let child3 = U256::from(5); + let netuid: u16 = 1; + let proportion1: u64 = 1000; + let proportion2: u64 = 2000; + let proportion3: u64 = 3000; + + // Add network and register hotkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Set multiple children + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![ + (proportion1, child1), + (proportion2, child2), + (proportion3, child3) + ] + )); + + // Revoke child2 + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![(proportion1, child1), (proportion3, child3)] + )); + + // Verify remaining children + let children = SubtensorModule::get_children(&hotkey, netuid); + assert_eq!(children, vec![(proportion1, child1), (proportion3, child3)]); + + // Verify parent removal for child2 + let parents2 = SubtensorModule::get_parents(&child2, netuid); + assert!(parents2.is_empty()); + + // Revoke remaining children + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![] + )); + + // Verify all children are removed + let children = SubtensorModule::get_children(&hotkey, netuid); + assert!(children.is_empty()); + + // Verify parent removal for all children + let parents1 = SubtensorModule::get_parents(&child1, netuid); + assert!(parents1.is_empty()); + let parents3 = SubtensorModule::get_parents(&child3, netuid); + assert!(parents3.is_empty()); + }); +} + +// 35: Test getting network max stake +// This test verifies the functionality of getting the network max stake: +// - Checks the default max stake value +// - Sets a new max stake value +// - Verifies that the new value is retrieved correctly +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_get_network_max_stake --exact --nocapture +#[test] +fn test_get_network_max_stake() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let default_max_stake = SubtensorModule::get_network_max_stake(netuid); + + // Check that the default value is set correctly + assert_eq!(default_max_stake, u64::MAX); + + // Set a new max stake value + let new_max_stake: u64 = 1_000_000; + SubtensorModule::set_network_max_stake(netuid, new_max_stake); + + // Check that the new value is retrieved correctly + assert_eq!( + 
SubtensorModule::get_network_max_stake(netuid), + new_max_stake + ); + }); +} + +// 36: Test setting network max stake +// This test verifies the functionality of setting the network max stake: +// - Checks the initial max stake value +// - Sets a new max stake value +// - Verifies that the new value is set correctly +// - Checks that the appropriate event is emitted +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_set_network_max_stake --exact --nocapture +#[test] +fn test_set_network_max_stake() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let initial_max_stake = SubtensorModule::get_network_max_stake(netuid); + + // Set a new max stake value + let new_max_stake: u64 = 500_000; + SubtensorModule::set_network_max_stake(netuid, new_max_stake); + + // Check that the new value is set correctly + assert_eq!( + SubtensorModule::get_network_max_stake(netuid), + new_max_stake + ); + assert_ne!( + SubtensorModule::get_network_max_stake(netuid), + initial_max_stake + ); + + // Check that the event is emitted + System::assert_last_event(Event::NetworkMaxStakeSet(netuid, new_max_stake).into()); + }); +} + +// 37: Test setting network max stake for multiple networks +// This test verifies the functionality of setting different max stake values for multiple networks: +// - Sets different max stake values for two networks +// - Verifies that the values are set correctly for each network +// - Checks that the values are different between networks +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_set_network_max_stake_multiple_networks --exact --nocapture +#[test] +fn test_set_network_max_stake_multiple_networks() { + new_test_ext(1).execute_with(|| { + let netuid1: u16 = 1; + let netuid2: u16 = 2; + + // Set different max stake values for two networks + let max_stake1: u64 = 1_000_000; + let max_stake2: u64 = 2_000_000; + SubtensorModule::set_network_max_stake(netuid1, max_stake1); + SubtensorModule::set_network_max_stake(netuid2, max_stake2); + + // Check that the values are set correctly for each network + assert_eq!(SubtensorModule::get_network_max_stake(netuid1), max_stake1); + assert_eq!(SubtensorModule::get_network_max_stake(netuid2), max_stake2); + assert_ne!( + SubtensorModule::get_network_max_stake(netuid1), + SubtensorModule::get_network_max_stake(netuid2) + ); + }); +} + +// 38: Test updating network max stake +// This test verifies the functionality of updating an existing network max stake value: +// - Sets an initial max stake value +// - Updates the max stake value +// - Verifies that the value is updated correctly +// - Checks that the appropriate event is emitted for the update +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_set_network_max_stake_update --exact --nocapture +#[test] +fn test_set_network_max_stake_update() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + + // Set an initial max stake value + let initial_max_stake: u64 = 1_000_000; + SubtensorModule::set_network_max_stake(netuid, initial_max_stake); + + // Update the max stake value + let updated_max_stake: u64 = 1_500_000; + SubtensorModule::set_network_max_stake(netuid, updated_max_stake); + + // Check that the value is updated correctly + assert_eq!( + SubtensorModule::get_network_max_stake(netuid), + updated_max_stake + ); + assert_ne!( + SubtensorModule::get_network_max_stake(netuid), + initial_max_stake + ); + + // Check that the event is emitted for the update + 
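// System::assert_last_event only inspects the most recently deposited event, so the check
// below implicitly confirms that the second set_network_max_stake call above emitted a fresh
// NetworkMaxStakeSet event carrying the updated value (the event from the first call is older).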
System::assert_last_event(Event::NetworkMaxStakeSet(netuid, updated_max_stake).into()); + }); +} + +// 39: Test children stake values +// This test verifies the correct distribution of stake among parent and child neurons: +// - Sets up a network with a parent neuron and multiple child neurons +// - Assigns stake to the parent neuron +// - Sets child neurons with specific proportions +// - Verifies that the stake is correctly distributed among parent and child neurons +// - Checks that the total stake remains constant across all neurons +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_children_stake_values --exact --nocapture +#[test] +fn test_children_stake_values() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let child1 = U256::from(3); + let child2 = U256::from(4); + let child3 = U256::from(5); + let netuid: u16 = 1; + let proportion1: u64 = u64::MAX / 4; + let proportion2: u64 = u64::MAX / 4; + let proportion3: u64 = u64::MAX / 4; + + // Add network and register hotkey + add_network(netuid, 13, 0); + SubtensorModule::set_max_registrations_per_block(netuid, 4); + SubtensorModule::set_target_registrations_per_interval(netuid, 4); + register_ok_neuron(netuid, hotkey, coldkey, 0); + register_ok_neuron(netuid, child1, coldkey, 0); + register_ok_neuron(netuid, child2, coldkey, 0); + register_ok_neuron(netuid, child3, coldkey, 0); + SubtensorModule::increase_stake_on_coldkey_hotkey_account( + &coldkey, + &hotkey, + 100_000_000_000_000, + ); + + // Set multiple children with proportions. + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + vec![ + (proportion1, child1), + (proportion2, child2), + (proportion3, child3) + ] + )); + assert_eq!( + SubtensorModule::get_stake_for_hotkey_on_subnet(&hotkey, netuid), + 25_000_000_069_852 + ); + assert_eq!( + SubtensorModule::get_stake_for_hotkey_on_subnet(&child1, netuid), + 24_999_999_976_716 + ); + assert_eq!( + SubtensorModule::get_stake_for_hotkey_on_subnet(&child2, netuid), + 24_999_999_976_716 + ); + assert_eq!( + SubtensorModule::get_stake_for_hotkey_on_subnet(&child3, netuid), + 24_999_999_976_716 + ); + assert_eq!( + SubtensorModule::get_stake_for_hotkey_on_subnet(&child3, netuid) + + SubtensorModule::get_stake_for_hotkey_on_subnet(&child2, netuid) + + SubtensorModule::get_stake_for_hotkey_on_subnet(&child1, netuid) + + SubtensorModule::get_stake_for_hotkey_on_subnet(&hotkey, netuid), + 100_000_000_000_000 + ); + }); +} + +// 40: Test getting parents chain +// This test verifies the correct implementation of parent-child relationships and the get_parents function: +// - Sets up a network with multiple neurons in a chain of parent-child relationships +// - Verifies that each neuron has the correct parent +// - Tests that the root neuron has no parents +// - Tests a neuron with multiple parents +// - Verifies correct behavior when adding a new parent to an existing child +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test children -- test_get_parents_chain --exact --nocapture +#[test] +fn test_get_parents_chain() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let coldkey = U256::from(1); + let num_keys: usize = 5; + let proportion = u64::MAX / 2; // 50% stake allocation + + log::info!( + "Test setup: netuid={}, coldkey={}, num_keys={}, proportion={}", + netuid, + coldkey, + num_keys, + proportion + ); + + // Create a vector of hotkeys + let hotkeys: Vec<U256> = (0..num_keys).map(|i| U256::from(i as u64 + 
2)).collect(); + log::info!("Created hotkeys: {:?}", hotkeys); + + // Add network + add_network(netuid, 13, 0); + SubtensorModule::set_max_registrations_per_block(netuid, 1000); + SubtensorModule::set_target_registrations_per_interval(netuid, 1000); + log::info!("Network added and parameters set: netuid={}", netuid); + + // Register all neurons + for hotkey in &hotkeys { + register_ok_neuron(netuid, *hotkey, coldkey, 0); + log::info!( + "Registered neuron: hotkey={}, coldkey={}, netuid={}", + hotkey, + coldkey, + netuid + ); + } + + // Set up parent-child relationships + for i in 0..num_keys - 1 { + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + hotkeys[i], + netuid, + vec![(proportion, hotkeys[i + 1])] + )); + log::info!( + "Set parent-child relationship: parent={}, child={}, proportion={}", + hotkeys[i], + hotkeys[i + 1], + proportion + ); + } + + // Test get_parents for each hotkey + for i in 1..num_keys { + let parents = SubtensorModule::get_parents(&hotkeys[i], netuid); + log::info!( + "Testing get_parents for hotkey {}: {:?}", + hotkeys[i], + parents + ); + assert_eq!( + parents.len(), + 1, + "Hotkey {} should have exactly one parent", + i + ); + assert_eq!( + parents[0], + (proportion, hotkeys[i - 1]), + "Incorrect parent for hotkey {}", + i + ); + } + + // Test get_parents for the root (should be empty) + let root_parents = SubtensorModule::get_parents(&hotkeys[0], netuid); + log::info!( + "Testing get_parents for root hotkey {}: {:?}", + hotkeys[0], + root_parents + ); + assert!( + root_parents.is_empty(), + "Root hotkey should have no parents" + ); + + // Test multiple parents + let last_hotkey = hotkeys[num_keys - 1]; + let new_parent = U256::from(num_keys as u64 + 2); + register_ok_neuron(netuid, new_parent, coldkey, 0); + log::info!( + "Registered new parent neuron: new_parent={}, coldkey={}, netuid={}", + new_parent, + coldkey, + netuid + ); + + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + new_parent, + netuid, + vec![(proportion / 2, last_hotkey)] + )); + log::info!( + "Set additional parent-child relationship: parent={}, child={}, proportion={}", + new_parent, + last_hotkey, + proportion / 2 + ); + + let last_hotkey_parents = SubtensorModule::get_parents(&last_hotkey, netuid); + log::info!( + "Testing get_parents for last hotkey {} with multiple parents: {:?}", + last_hotkey, + last_hotkey_parents + ); + assert_eq!( + last_hotkey_parents.len(), + 2, + "Last hotkey should have two parents" + ); + assert!( + last_hotkey_parents.contains(&(proportion, hotkeys[num_keys - 2])), + "Last hotkey should still have its original parent" + ); + assert!( + last_hotkey_parents.contains(&(proportion / 2, new_parent)), + "Last hotkey should have the new parent" + ); + }); +} + +// 41: Test emission distribution between a childkey and a single parent +// This test verifies the correct distribution of emissions between a child and a single parent: +// - Sets up a network with a parent, child, and weight setter +// - Establishes a parent-child relationship +// - Sets weights on the child +// - Runs an epoch with a hardcoded emission value +// - Checks the emission distribution among parent, child, and weight setter +// - Verifies that all parties received emissions and the weight setter received the most +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test children test_childkey_single_parent_emission -- --nocapture +#[test] +fn test_childkey_single_parent_emission() { + new_test_ext(1).execute_with(|| { + let netuid: u16 
= 1; + add_network(netuid, 1, 0); + + // Define hotkeys + let parent: U256 = U256::from(1); + let child: U256 = U256::from(2); + let weight_setter: U256 = U256::from(3); + + // Define coldkeys with more readable names + let coldkey_parent: U256 = U256::from(100); + let coldkey_child: U256 = U256::from(101); + let coldkey_weight_setter: U256 = U256::from(102); + + // Register parent with minimal stake and child with high stake + SubtensorModule::add_balance_to_coldkey_account(&coldkey_parent, 1); + SubtensorModule::add_balance_to_coldkey_account(&coldkey_child, 109_999); + SubtensorModule::add_balance_to_coldkey_account(&coldkey_weight_setter, 1_000_000); + + // Add neurons for parent, child and weight_setter + register_ok_neuron(netuid, parent, coldkey_parent, 1); + register_ok_neuron(netuid, child, coldkey_child, 1); + register_ok_neuron(netuid, weight_setter, coldkey_weight_setter, 1); + + SubtensorModule::increase_stake_on_coldkey_hotkey_account( + &coldkey_parent, + &parent, + 109_999, + ); + SubtensorModule::increase_stake_on_coldkey_hotkey_account( + &coldkey_weight_setter, + &weight_setter, + 1_000_000, + ); + + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + + // Set parent-child relationship + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey_parent), + parent, + netuid, + vec![(u64::MAX, child)] + )); + step_block(7200 + 1); + // Set weights on the child using the weight_setter account + let origin = RuntimeOrigin::signed(weight_setter); + let uids: Vec<u16> = vec![1]; // Only set weight for the child (UID 1) + let values: Vec<u16> = vec![u16::MAX]; // Use maximum value for u16 + let version_key = SubtensorModule::get_weights_version_key(netuid); + assert_ok!(SubtensorModule::set_weights( + origin, + netuid, + uids, + values, + version_key + )); + + // Run epoch with a hardcoded emission value + let hardcoded_emission: u64 = 1_000_000_000; // 1 TAO + let hotkey_emission: Vec<(U256, u64, u64)> = + SubtensorModule::epoch(netuid, hardcoded_emission); + + // Process the hotkey emission results + for (hotkey, mining_emission, validator_emission) in hotkey_emission { + SubtensorModule::accumulate_hotkey_emission( + &hotkey, + netuid, + validator_emission, + mining_emission, + ); + log::debug!( + "Accumulated emissions on hotkey {:?} for netuid {:?}: mining {:?}, validator {:?}", + hotkey, + netuid, + mining_emission, + validator_emission + ); + } + step_block(7200 + 1); + // Check emission distribution + let parent_stake: u64 = + SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey_parent, &parent); + let parent_stake_on_subnet: u64 = + SubtensorModule::get_stake_for_hotkey_on_subnet(&parent, netuid); + + log::debug!( + "Parent stake: {:?}, Parent stake on subnet: {:?}", + parent_stake, + parent_stake_on_subnet + ); + + let child_stake: u64 = + SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey_child, &child); + let child_stake_on_subnet: u64 = + SubtensorModule::get_stake_for_hotkey_on_subnet(&child, netuid); + + log::debug!( + "Child stake: {:?}, Child stake on subnet: {:?}", + child_stake, + child_stake_on_subnet + ); + + let weight_setter_stake: u64 = SubtensorModule::get_stake_for_coldkey_and_hotkey( + &coldkey_weight_setter, + &weight_setter, + ); + let weight_setter_stake_on_subnet: u64 = + SubtensorModule::get_stake_for_hotkey_on_subnet(&weight_setter, netuid); + + log::debug!( + "Weight setter stake: {:?}, Weight setter stake on subnet: {:?}", + weight_setter_stake, + weight_setter_stake_on_subnet + ); + + assert!(parent_stake > 1, 
"Parent should have received emission"); + assert!(child_stake > 109_999, "Child should have received emission"); + assert!( + weight_setter_stake > 1_000_000, + "Weight setter should have received emission" + ); + + // Additional assertion to verify that the weight setter received the most emission + assert!( + weight_setter_stake > parent_stake && weight_setter_stake > child_stake, + "Weight setter should have received the most emission" + ); + }); +} + +// 43: Test emission distribution between a childkey and multiple parents +// This test verifies the correct distribution of emissions between a child and multiple parents: +// - Sets up a network with two parents, a child, and a weight setter +// - Establishes parent-child relationships with different stake proportions +// - Sets weights on the child and one parent +// - Runs an epoch with a hardcoded emission value +// - Checks the emission distribution among parents, child, and weight setter +// - Verifies that all parties received emissions and the total stake increased correctly +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test coinbase test_childkey_multiple_parents_emission -- --nocapture +#[test] +fn test_childkey_multiple_parents_emission() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + add_network(netuid, 1, 0); + + // Set registration parameters and emission tempo + SubtensorModule::set_max_registrations_per_block(netuid, 1000); + SubtensorModule::set_target_registrations_per_interval(netuid, 1000); + SubtensorModule::set_hotkey_emission_tempo(10); + + // Define hotkeys and coldkeys + let parent1: U256 = U256::from(1); + let parent2: U256 = U256::from(2); + let child: U256 = U256::from(3); + let weight_setter: U256 = U256::from(4); + let coldkey_parent1: U256 = U256::from(100); + let coldkey_parent2: U256 = U256::from(101); + let coldkey_child: U256 = U256::from(102); + let coldkey_weight_setter: U256 = U256::from(103); + + // Register neurons and add initial stakes + let initial_stakes: Vec<(U256, U256, u64)> = vec![ + (coldkey_parent1, parent1, 200_000), + (coldkey_parent2, parent2, 150_000), + (coldkey_child, child, 20_000), + (coldkey_weight_setter, weight_setter, 100_000), + ]; + + for (coldkey, hotkey, stake) in initial_stakes.iter() { + SubtensorModule::add_balance_to_coldkey_account(coldkey, *stake); + register_ok_neuron(netuid, *hotkey, *coldkey, 0); + SubtensorModule::increase_stake_on_coldkey_hotkey_account(coldkey, hotkey, *stake); + } + + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + step_block(2); + + // Set parent-child relationships + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey_parent1), + parent1, + netuid, + vec![(100_000, child)] + )); + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey_parent2), + parent2, + netuid, + vec![(75_000, child)] + )); + + // Set weights + let uids: Vec = vec![0, 1, 2]; + let values: Vec = vec![0, 65354, 65354]; + let version_key = SubtensorModule::get_weights_version_key(netuid); + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(weight_setter), + netuid, + uids, + values, + version_key + )); + + // Run epoch with a hardcoded emission value + let hardcoded_emission: u64 = 1_000_000_000; // 1 billion + let hotkey_emission: Vec<(U256, u64, u64)> = + SubtensorModule::epoch(netuid, hardcoded_emission); + + // Process the hotkey emission results + for (hotkey, mining_emission, validator_emission) in hotkey_emission { + SubtensorModule::accumulate_hotkey_emission( + &hotkey, + 
netuid, + validator_emission, + mining_emission, + ); + log::debug!( + "Accumulated emissions on hotkey {:?} for netuid {:?}: mining {:?}, validator {:?}", + hotkey, + netuid, + mining_emission, + validator_emission + ); + } + + step_block(11); + + // Check emission distribution + let stakes: Vec<(U256, U256, &str)> = vec![ + (coldkey_parent1, parent1, "Parent1"), + (coldkey_parent2, parent2, "Parent2"), + (coldkey_child, child, "Child"), + (coldkey_weight_setter, weight_setter, "Weight setter"), + ]; + + for (coldkey, hotkey, name) in stakes.iter() { + let stake = SubtensorModule::get_stake_for_coldkey_and_hotkey(coldkey, hotkey); + let stake_on_subnet = SubtensorModule::get_stake_for_hotkey_on_subnet(hotkey, netuid); + log::debug!( + "{} stake: {:?}, {} stake on subnet: {:?}", + name, + stake, + name, + stake_on_subnet + ); + } + + let parent1_stake = + SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey_parent1, &parent1); + let parent2_stake = + SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey_parent2, &parent2); + let child_stake = SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey_child, &child); + let weight_setter_stake = SubtensorModule::get_stake_for_coldkey_and_hotkey( + &coldkey_weight_setter, + &weight_setter, + ); + + assert!( + parent1_stake > 200_000, + "Parent1 should have received emission" + ); + assert!( + parent2_stake > 150_000, + "Parent2 should have received emission" + ); + assert!(child_stake > 20_000, "Child should have received emission"); + assert!( + weight_setter_stake > 100_000, + "Weight setter should have received emission" + ); + + // Check individual stake increases + let parent1_stake_increase = parent1_stake - 200_000; + let parent2_stake_increase = parent2_stake - 150_000; + let child_stake_increase = child_stake - 20_000; + + log::debug!( + "Stake increases - Parent1: {}, Parent2: {}, Child: {}", + parent1_stake_increase, + parent2_stake_increase, + child_stake_increase + ); + + // Assert that all neurons received some emission + assert!( + parent1_stake_increase > 0, + "Parent1 should have received some emission" + ); + assert!( + parent2_stake_increase > 0, + "Parent2 should have received some emission" + ); + assert!( + child_stake_increase > 0, + "Child should have received some emission" + ); + + // Check that the total stake has increased by the hardcoded emission amount + let total_stake = parent1_stake + parent2_stake + child_stake + weight_setter_stake; + let initial_total_stake: u64 = initial_stakes.iter().map(|(_, _, stake)| stake).sum(); + assert_eq!( + total_stake, + initial_total_stake + hardcoded_emission - 2, // U64::MAX normalization rounding error + "Total stake should have increased by the hardcoded emission amount" + ); + }); +} + +// 44: Test with a chain of parent-child relationships (e.g., A -> B -> C) +// This test verifies the correct distribution of emissions in a chain of parent-child relationships: +// - Sets up a network with three neurons A, B, and C in a chain (A -> B -> C) +// - Establishes parent-child relationships with different stake proportions +// - Sets weights for all neurons +// - Runs an epoch with a hardcoded emission value +// - Checks the emission distribution among A, B, and C +// - Verifies that all parties received emissions and the total stake increased correctly +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test coinbase test_parent_child_chain_emission -- --nocapture +#[test] +fn test_parent_child_chain_emission() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 
1; + add_network(netuid, 1, 0); + + // Define hotkeys and coldkeys + let hotkey_a: U256 = U256::from(1); + let hotkey_b: U256 = U256::from(2); + let hotkey_c: U256 = U256::from(3); + let coldkey_a: U256 = U256::from(100); + let coldkey_b: U256 = U256::from(101); + let coldkey_c: U256 = U256::from(102); + + // Register neurons with decreasing stakes + register_ok_neuron(netuid, hotkey_a, coldkey_a, 0); + register_ok_neuron(netuid, hotkey_b, coldkey_b, 0); + register_ok_neuron(netuid, hotkey_c, coldkey_c, 0); + + // Add initial stakes + SubtensorModule::add_balance_to_coldkey_account(&coldkey_a, 300_000); + SubtensorModule::add_balance_to_coldkey_account(&coldkey_b, 100_000); + SubtensorModule::add_balance_to_coldkey_account(&coldkey_c, 50_000); + + SubtensorModule::increase_stake_on_coldkey_hotkey_account(&coldkey_a, &hotkey_a, 300_000); + SubtensorModule::increase_stake_on_coldkey_hotkey_account(&coldkey_b, &hotkey_b, 100_000); + SubtensorModule::increase_stake_on_coldkey_hotkey_account(&coldkey_c, &hotkey_c, 50_000); + + // Set parent-child relationships + // A -> B (50% of A's stake) + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey_a), + hotkey_a, + netuid, + vec![(u64::MAX / 2, hotkey_b)] + )); + // B -> C (50% of B's stake) + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey_b), + hotkey_b, + netuid, + vec![(u64::MAX / 2, hotkey_c)] + )); + + step_block(2); + + // Set weights + let origin = RuntimeOrigin::signed(hotkey_a); + let uids: Vec<u16> = vec![0, 1, 2]; // UIDs for hotkey_a, hotkey_b, hotkey_c + let values: Vec<u16> = vec![65535, 65535, 65535]; // Set equal weights for all hotkeys + let version_key = SubtensorModule::get_weights_version_key(netuid); + + // Ensure we can set weights without rate limiting + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + + assert_ok!(SubtensorModule::set_weights( + origin, + netuid, + uids, + values, + version_key + )); + + // Run epoch with a hardcoded emission value + let hardcoded_emission: u64 = 1_000_000; // 1 million (adjust as needed) + let hotkey_emission: Vec<(U256, u64, u64)> = + SubtensorModule::epoch(netuid, hardcoded_emission); + + // Process the hotkey emission results + for (hotkey, mining_emission, validator_emission) in hotkey_emission { + SubtensorModule::accumulate_hotkey_emission( + &hotkey, + netuid, + validator_emission, + mining_emission, + ); + } + + // Log PendingEmission Tuple for a, b, c + let pending_emission_a = SubtensorModule::get_pending_hotkey_emission(&hotkey_a); + let pending_emission_b = SubtensorModule::get_pending_hotkey_emission(&hotkey_b); + let pending_emission_c = SubtensorModule::get_pending_hotkey_emission(&hotkey_c); + + log::info!("Pending Emission for A: {:?}", pending_emission_a); + log::info!("Pending Emission for B: {:?}", pending_emission_b); + log::info!("Pending Emission for C: {:?}", pending_emission_c); + + // Assert that pending emissions are non-zero + // A's pending emission: 2/3 of total emission (due to having 2/3 of total stake) + assert!( + pending_emission_a == 666667, + "A should have pending emission of 2/3 of total emission" + ); + // B's pending emission: 2/9 of total emission (1/3 of A's emission + 1/3 of total emission) + assert!( + pending_emission_b == 222222, + "B should have pending emission of 2/9 of total emission" + ); + // C's pending emission: 1/9 of total emission (1/2 of B's emission) + assert!( + pending_emission_c == 111109, + "C should have pending emission of 1/9 of total emission" + ); + 
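// Quick arithmetic check of the expected split above (2/3 + 2/9 + 1/9 of the emission):
//     666_667 + 222_222 + 111_109 = 999_998
// i.e. the full hardcoded_emission of 1_000_000 less roughly 2 units lost to integer
// rounding, which matches the "- 2" adjustment used in the total-stake assertion below.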
SubtensorModule::set_hotkey_emission_tempo(10); + + step_block(10 + 1); + // Retrieve the current stake for each hotkey on the subnet + let stake_a: u64 = SubtensorModule::get_stake_for_hotkey_on_subnet(&hotkey_a, netuid); + let stake_b: u64 = SubtensorModule::get_stake_for_hotkey_on_subnet(&hotkey_b, netuid); + let stake_c: u64 = SubtensorModule::get_stake_for_hotkey_on_subnet(&hotkey_c, netuid); + + // Log the current stakes for debugging purposes + log::info!("Stake for hotkey A: {:?}", stake_a); + log::info!("Stake for hotkey B: {:?}", stake_b); + log::info!("Stake for hotkey C: {:?}", stake_c); + + // Assert that the stakes have been updated correctly after emission distribution + assert_eq!( + stake_a, 483334, + "A's stake should be 483334 (initial 300_000 + 666667 emission - 483333 given to B)" + ); + assert_eq!( + stake_b, 644445, + "B's stake should be 644445 (initial 100_000 + 222222 emission + 483333 from A - 161110 given to C)" + ); + assert_eq!( + stake_c, 322219, + "C's stake should be 322219 (initial 50_000 + 111109 emission + 161110 from B)" + ); + + // Check that the total stake has increased by the hardcoded emission amount + let total_stake = stake_a + stake_b + stake_c; + let initial_total_stake = 300_000 + 100_000 + 50_000; + let hardcoded_emission = 1_000_000; // Define the hardcoded emission value + assert_eq!( + total_stake, + initial_total_stake + hardcoded_emission - 2, // U64::MAX normalization rounding error + "Total stake should have increased by the hardcoded emission amount" + ); + }); +} + +// 46: Test emission distribution when adding/removing parent-child relationships mid-epoch +// This test verifies the correct distribution of emissions when parent-child relationships change: +// - Sets up a network with three neurons: parent, child1, and child2 +// - Establishes initial parent-child relationship between parent and child1 +// - Runs first epoch and distributes emissions +// - Changes parent-child relationships to include both child1 and child2 +// - Runs second epoch and distributes emissions +// - Checks final emission distribution and stake updates +// - Verifies correct parent-child relationships and stake proportions +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test children -- test_dynamic_parent_child_relationships --exact --nocapture +#[test] +fn test_dynamic_parent_child_relationships() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + add_network(netuid, 1, 0); + + // Define hotkeys and coldkeys + let parent: U256 = U256::from(1); + let child1: U256 = U256::from(2); + let child2: U256 = U256::from(3); + let coldkey_parent: U256 = U256::from(100); + let coldkey_child1: U256 = U256::from(101); + let coldkey_child2: U256 = U256::from(102); + + // Register neurons with varying stakes + register_ok_neuron(netuid, parent, coldkey_parent, 0); + register_ok_neuron(netuid, child1, coldkey_child1, 0); + register_ok_neuron(netuid, child2, coldkey_child2, 0); + + // Add initial stakes + SubtensorModule::add_balance_to_coldkey_account(&coldkey_parent, 500_000); + SubtensorModule::add_balance_to_coldkey_account(&coldkey_child1, 50_000); + SubtensorModule::add_balance_to_coldkey_account(&coldkey_child2, 30_000); + + SubtensorModule::increase_stake_on_coldkey_hotkey_account(&coldkey_parent, &parent, 500_000); + SubtensorModule::increase_stake_on_coldkey_hotkey_account(&coldkey_child1, &child1, 50_000); + SubtensorModule::increase_stake_on_coldkey_hotkey_account(&coldkey_child2, &child2, 30_000); + + // Set initial parent-child relationship + 
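// Proportions here are fixed-point fractions of u64::MAX, so the value below routes roughly
// half of the parent's stake weight to child1 (u64::MAX / 2 ~= 50%); the second
// do_set_children call later in this test switches to u64::MAX / 4 (~25%) and
// u64::MAX / 3 (~33%), which is what the stake calculations in the final assertions assume.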
assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey_parent), + parent, + netuid, + vec![(u64::MAX / 2, child1)] + )); + + step_block(2); + + // Set weights + let origin = RuntimeOrigin::signed(parent); + let uids: Vec<u16> = vec![0, 1, 2]; // UIDs for parent, child1, child2 + let values: Vec<u16> = vec![65535, 65535, 65535]; // Set equal weights for all hotkeys + let version_key = SubtensorModule::get_weights_version_key(netuid); + + // Ensure we can set weights without rate limiting + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + + assert_ok!(SubtensorModule::set_weights( + origin, + netuid, + uids, + values, + version_key + )); + + // Set hotkey emission tempo + SubtensorModule::set_hotkey_emission_tempo(10); + + // Run first epoch + let hardcoded_emission: u64 = 1_000_000; // 1 million (adjust as needed) + let hotkey_emission: Vec<(U256, u64, u64)> = SubtensorModule::epoch(netuid, hardcoded_emission); + + // Process the hotkey emission results + for (hotkey, mining_emission, validator_emission) in hotkey_emission { + SubtensorModule::accumulate_hotkey_emission(&hotkey, netuid, validator_emission, mining_emission); + } + + // Step blocks to allow for emission distribution + step_block(11); + + // Change parent-child relationships + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey_parent), + parent, + netuid, + vec![(u64::MAX / 4, child1), (u64::MAX / 3, child2)] + )); + + // Run second epoch + let hotkey_emission: Vec<(U256, u64, u64)> = SubtensorModule::epoch(netuid, hardcoded_emission); + + // Process the hotkey emission results + for (hotkey, mining_emission, validator_emission) in hotkey_emission { + SubtensorModule::accumulate_hotkey_emission(&hotkey, netuid, validator_emission, mining_emission); + } + + // Step blocks again to allow for emission distribution + step_block(11); + + // Check final emission distribution + let parent_stake: u64 = SubtensorModule::get_stake_for_hotkey_on_subnet(&parent, netuid); + let child1_stake: u64 = SubtensorModule::get_stake_for_hotkey_on_subnet(&child1, netuid); + let child2_stake: u64 = SubtensorModule::get_stake_for_hotkey_on_subnet(&child2, netuid); + + log::info!("Final stakes:"); + log::info!("Parent stake: {}", parent_stake); + log::info!("Child1 stake: {}", child1_stake); + log::info!("Child2 stake: {}", child2_stake); + + const TOLERANCE: u64 = 5; // Allow for a small discrepancy due to potential rounding + + // Precise assertions with tolerance + assert!( + (parent_stake as i64 - 926725).abs() <= TOLERANCE as i64, + "Parent stake should be close to 926,725, but was {}", + parent_stake + ); + // Parent stake calculation: + // Initial stake: 500,000 + // First epoch: ~862,500 (500,000 + 725,000 * 1/2) + // Second epoch: ~926,725 (862,500 + 725,000 * 5/12) + + assert!( + (child1_stake as i64 - 778446).abs() <= TOLERANCE as i64, + "Child1 stake should be close to 778,446, but was {}", + child1_stake + ); + // Child1 stake calculation: + // Initial stake: 50,000 + // First epoch: ~412,500 (50,000 + 725,000 * 1/2) + // Second epoch: ~778,446 (412,500 + 725,000 * 1/2 * 1/4 + 137,500) + + assert!( + (child2_stake as i64 - 874826).abs() <= TOLERANCE as i64, + "Child2 stake should be close to 874,826, but was {}", + child2_stake + ); + // Child2 stake calculation: + // Initial stake: 30,000 + // First epoch: ~167,500 (30,000 + 137,500) + // Second epoch: ~874,826 (167,500 + 725,000 * 1/2 * 1/3 + 137,500) + + // Check that the total stake has increased by approximately twice the hardcoded 
emission amount + let total_stake: u64 = parent_stake + child1_stake + child2_stake; + let initial_total_stake: u64 = 500_000 + 50_000 + 30_000; + let total_emission: u64 = 2 * hardcoded_emission; + assert!( + (total_stake as i64 - (initial_total_stake + total_emission) as i64).abs() <= TOLERANCE as i64, + "Total stake should have increased by approximately twice the hardcoded emission amount" + ); + // Total stake calculation: + // Initial total stake: 500,000 + 50,000 + 30,000 = 580,000 + // Total emission: 2 * 1,000,000 = 2,000,000 + // Expected total stake: 580,000 + 2,000,000 = 2,580,000 + + // Additional checks for parent-child relationships + let parent_children: Vec<(u64, U256)> = SubtensorModule::get_children(&parent, netuid); + assert_eq!( + parent_children, + vec![(u64::MAX / 4, child1), (u64::MAX / 3, child2)], + "Parent should have both children with correct proportions" + ); + // Parent-child relationship: + // child1: 1/4 of parent's stake + // child2: 1/3 of parent's stake + + let child1_parents: Vec<(u64, U256)> = SubtensorModule::get_parents(&child1, netuid); + assert_eq!( + child1_parents, + vec![(u64::MAX / 4, parent)], + "Child1 should have parent as its parent with correct proportion" + ); + // Child1-parent relationship: + // parent: 1/4 of child1's stake + + let child2_parents: Vec<(u64, U256)> = SubtensorModule::get_parents(&child2, netuid); + assert_eq!( + child2_parents, + vec![(u64::MAX / 3, parent)], + "Child2 should have parent as its parent with correct proportion" + ); + // Child2-parent relationship: + // parent: 1/3 of child2's stake + + // Check that child2 has received more stake than child1 + assert!( + child2_stake > child1_stake, + "Child2 should have received more emission than Child1 due to higher proportion" + ); + // Child2 stake (874,826) > Child1 stake (778,446) + + // Check the approximate difference between child2 and child1 stakes + let stake_difference: u64 = child2_stake - child1_stake; + assert!( + (stake_difference as i64 - 96_380).abs() <= TOLERANCE as i64, + "The difference between Child2 and Child1 stakes should be close to 96,380, but was {}", + stake_difference + ); + // Stake difference calculation: + // Child2 stake: 874,826 + // Child1 stake: 778,446 + // Difference: 874,826 - 778,446 = 96,380 + }); +} + +// 47: Test basic stake retrieval for a single hotkey on a subnet +/// This test verifies the basic functionality of retrieving stake for a single hotkey on a subnet: +/// - Sets up a network with one neuron +/// - Increases stake for the neuron +/// - Checks if the retrieved stake matches the increased amount +/// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test children -- test_get_stake_for_hotkey_on_subnet_basic --exact --nocapture +#[test] +fn test_get_stake_for_hotkey_on_subnet_basic() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let hotkey = U256::from(1); + let coldkey = U256::from(2); + + add_network(netuid, 0, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + SubtensorModule::increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, 1000); + + assert_eq!( + SubtensorModule::get_stake_for_hotkey_on_subnet(&hotkey, netuid), + 1000 + ); + }); +} + +// 48: Test stake retrieval for a hotkey with multiple coldkeys on a subnet +/// This test verifies the functionality of retrieving stake for a hotkey with multiple coldkeys on a subnet: +/// - Sets up a network with one neuron and two coldkeys +/// - Increases stake from both coldkeys +/// - Checks if the retrieved stake matches the total increased 
amount +/// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test children -- test_get_stake_for_hotkey_on_subnet_multiple_coldkeys --exact --nocapture +#[test] +fn test_get_stake_for_hotkey_on_subnet_multiple_coldkeys() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let hotkey = U256::from(1); + let coldkey1 = U256::from(2); + let coldkey2 = U256::from(3); + + add_network(netuid, 0, 0); + register_ok_neuron(netuid, hotkey, coldkey1, 0); + + SubtensorModule::increase_stake_on_coldkey_hotkey_account(&coldkey1, &hotkey, 1000); + SubtensorModule::increase_stake_on_coldkey_hotkey_account(&coldkey2, &hotkey, 2000); + + assert_eq!( + SubtensorModule::get_stake_for_hotkey_on_subnet(&hotkey, netuid), + 3000 + ); + }); +} + +// 49: Test stake retrieval for a single parent-child relationship on a subnet +/// This test verifies the functionality of retrieving stake for a single parent-child relationship on a subnet: +/// - Sets up a network with a parent and child neuron +/// - Increases stake for the parent +/// - Sets the child as the parent's only child with 100% stake allocation +/// - Checks if the retrieved stake for both parent and child is correct +/// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test children -- test_get_stake_for_hotkey_on_subnet_single_parent_child --exact --nocapture +#[test] +fn test_get_stake_for_hotkey_on_subnet_single_parent_child() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let parent = U256::from(1); + let child = U256::from(2); + let coldkey = U256::from(3); + + add_network(netuid, 0, 0); + register_ok_neuron(netuid, parent, coldkey, 0); + register_ok_neuron(netuid, child, coldkey, 0); + + SubtensorModule::increase_stake_on_coldkey_hotkey_account(&coldkey, &parent, 1000); + + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + parent, + netuid, + vec![(u64::MAX, child)] + )); + + assert_eq!( + SubtensorModule::get_stake_for_hotkey_on_subnet(&parent, netuid), + 0 + ); + assert_eq!( + SubtensorModule::get_stake_for_hotkey_on_subnet(&child, netuid), + 1000 + ); + }); +} + +// 50: Test stake retrieval for multiple parents and a single child on a subnet +/// This test verifies the functionality of retrieving stake for multiple parents and a single child on a subnet: +/// - Sets up a network with two parents and one child neuron +/// - Increases stake for both parents +/// - Sets the child as a 50% stake recipient for both parents +/// - Checks if the retrieved stake for parents and child is correct +/// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test children -- test_get_stake_for_hotkey_on_subnet_multiple_parents_single_child --exact --nocapture +#[test] +fn test_get_stake_for_hotkey_on_subnet_multiple_parents_single_child() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let parent1 = U256::from(1); + let parent2 = U256::from(2); + let child = U256::from(3); + let coldkey = U256::from(4); + + add_network(netuid, 0, 0); + register_ok_neuron(netuid, parent1, coldkey, 0); + register_ok_neuron(netuid, parent2, coldkey, 0); + register_ok_neuron(netuid, child, coldkey, 0); + + SubtensorModule::increase_stake_on_coldkey_hotkey_account(&coldkey, &parent1, 1000); + SubtensorModule::increase_stake_on_coldkey_hotkey_account(&coldkey, &parent2, 2000); + + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + parent1, + netuid, + vec![(u64::MAX / 2, child)] + )); + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + parent2, + netuid, + 
vec![(u64::MAX / 2, child)] + )); + + assert_eq!( + SubtensorModule::get_stake_for_hotkey_on_subnet(&parent1, netuid), + 501 + ); + assert_eq!( + SubtensorModule::get_stake_for_hotkey_on_subnet(&parent2, netuid), + 1001 + ); + assert_eq!( + SubtensorModule::get_stake_for_hotkey_on_subnet(&child, netuid), + 1498 + ); + }); +} + +// 51: Test stake retrieval for a single parent with multiple children on a subnet +/// This test verifies the functionality of retrieving stake for a single parent with multiple children on a subnet: +/// - Sets up a network with one parent and two child neurons +/// - Increases stake for the parent +/// - Sets both children as 1/3 stake recipients of the parent +/// - Checks if the retrieved stake for parent and children is correct and preserves total stake +/// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test children -- test_get_stake_for_hotkey_on_subnet_single_parent_multiple_children --exact --nocapture +#[test] +fn test_get_stake_for_hotkey_on_subnet_single_parent_multiple_children() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let parent = U256::from(1); + let child1 = U256::from(2); + let child2 = U256::from(3); + let coldkey = U256::from(4); + + add_network(netuid, 0, 0); + register_ok_neuron(netuid, parent, coldkey, 0); + register_ok_neuron(netuid, child1, coldkey, 0); + register_ok_neuron(netuid, child2, coldkey, 0); + + let total_stake = 3000; + SubtensorModule::increase_stake_on_coldkey_hotkey_account(&coldkey, &parent, total_stake); + + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + parent, + netuid, + vec![(u64::MAX / 3, child1), (u64::MAX / 3, child2)] + )); + + let parent_stake = SubtensorModule::get_stake_for_hotkey_on_subnet(&parent, netuid); + let child1_stake = SubtensorModule::get_stake_for_hotkey_on_subnet(&child1, netuid); + let child2_stake = SubtensorModule::get_stake_for_hotkey_on_subnet(&child2, netuid); + + // Check that the total stake is preserved + assert_eq!(parent_stake + child1_stake + child2_stake, total_stake); + + // Check that the parent stake is slightly higher due to rounding + assert_eq!(parent_stake, 1002); + + // Check that each child gets an equal share of the remaining stake + assert_eq!(child1_stake, 999); + assert_eq!(child2_stake, 999); + + // Log the actual stake values + log::info!("Parent stake: {}", parent_stake); + log::info!("Child1 stake: {}", child1_stake); + log::info!("Child2 stake: {}", child2_stake); + }); +} + +// 52: Test stake retrieval for edge cases on a subnet +/// This test verifies the functionality of retrieving stake for edge cases on a subnet: +/// - Sets up a network with one parent and two child neurons +/// - Increases stake to the network maximum +/// - Sets children with 0% and 100% stake allocation +/// - Checks if the retrieved stake for parent and children is correct and preserves total stake +/// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test children -- test_get_stake_for_hotkey_on_subnet_edge_cases --exact --nocapture +#[test] +fn test_get_stake_for_hotkey_on_subnet_edge_cases() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let parent = U256::from(1); + let child1 = U256::from(2); + let child2 = U256::from(3); + let coldkey = U256::from(4); + + add_network(netuid, 0, 0); + register_ok_neuron(netuid, parent, coldkey, 0); + register_ok_neuron(netuid, child1, coldkey, 0); + register_ok_neuron(netuid, child2, coldkey, 0); + + // Set network max stake + let network_max_stake: u64 = 500_000_000_000_000; // 500_000 
TAO + SubtensorModule::set_network_max_stake(netuid, network_max_stake); + + // Increase stake to the network max + SubtensorModule::increase_stake_on_coldkey_hotkey_account( + &coldkey, + &parent, + network_max_stake, + ); + + // Test with 0% and 100% stake allocation + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey), + parent, + netuid, + vec![(0, child1), (u64::MAX, child2)] + )); + + let parent_stake = SubtensorModule::get_stake_for_hotkey_on_subnet(&parent, netuid); + let child1_stake = SubtensorModule::get_stake_for_hotkey_on_subnet(&child1, netuid); + let child2_stake = SubtensorModule::get_stake_for_hotkey_on_subnet(&child2, netuid); + + log::info!("Parent stake: {}", parent_stake); + log::info!("Child1 stake: {}", child1_stake); + log::info!("Child2 stake: {}", child2_stake); + + assert_eq!(parent_stake, 0, "Parent should have 0 stake"); + assert_eq!(child1_stake, 0, "Child1 should have 0 stake"); + assert_eq!( + child2_stake, network_max_stake, + "Child2 should have all the stake" + ); + + // Check that the total stake is preserved and equal to the network max stake + assert_eq!( + parent_stake + child1_stake + child2_stake, + network_max_stake, + "Total stake should equal the network max stake" + ); + }); +} + +// 53: Test stake distribution in a complex hierarchy of parent-child relationships +// This test verifies the correct distribution of stake in a multi-level parent-child hierarchy: +// - Sets up a network with four neurons: parent, child1, child2, and grandchild +// - Establishes parent-child relationships between parent and its children, and child1 and grandchild +// - Adds initial stake to the parent +// - Checks stake distribution after setting up the first level of relationships +// - Checks stake distribution after setting up the second level of relationships +// - Verifies correct stake calculations, parent-child relationships, and preservation of total stake +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test children -- test_get_stake_for_hotkey_on_subnet_complex_hierarchy --exact --nocapture + +#[test] +fn test_get_stake_for_hotkey_on_subnet_complex_hierarchy() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let parent = U256::from(1); + let child1 = U256::from(2); + let child2 = U256::from(3); + let grandchild = U256::from(4); + let coldkey_parent = U256::from(5); + let coldkey_child1 = U256::from(6); + let coldkey_child2 = U256::from(7); + let coldkey_grandchild = U256::from(8); + + add_network(netuid, 0, 0); + SubtensorModule::set_max_registrations_per_block(netuid, 1000); + SubtensorModule::set_target_registrations_per_interval(netuid, 1000); + register_ok_neuron(netuid, parent, coldkey_parent, 0); + register_ok_neuron(netuid, child1, coldkey_child1, 0); + register_ok_neuron(netuid, child2, coldkey_child2, 0); + register_ok_neuron(netuid, grandchild, coldkey_grandchild, 0); + + let total_stake = 1000; + SubtensorModule::increase_stake_on_coldkey_hotkey_account( + &coldkey_parent, + &parent, + total_stake, + ); + + log::info!("Initial stakes:"); + log::info!( + "Parent stake: {}", + SubtensorModule::get_stake_for_hotkey_on_subnet(&parent, netuid) + ); + log::info!( + "Child1 stake: {}", + SubtensorModule::get_stake_for_hotkey_on_subnet(&child1, netuid) + ); + log::info!( + "Child2 stake: {}", + SubtensorModule::get_stake_for_hotkey_on_subnet(&child2, netuid) + ); + log::info!( + "Grandchild stake: {}", + SubtensorModule::get_stake_for_hotkey_on_subnet(&grandchild, netuid) + ); + + // Step 1: Set children for 
parent + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey_parent), + parent, + netuid, + vec![(u64::MAX / 2, child1), (u64::MAX / 2, child2)] + )); + + log::info!("After setting parent's children:"); + log::info!( + "Parent's children: {:?}", + SubtensorModule::get_children(&parent, netuid) + ); + log::info!( + "Child1's parents: {:?}", + SubtensorModule::get_parents(&child1, netuid) + ); + log::info!( + "Child2's parents: {:?}", + SubtensorModule::get_parents(&child2, netuid) + ); + + let parent_stake_1 = SubtensorModule::get_stake_for_hotkey_on_subnet(&parent, netuid); + let child1_stake_1 = SubtensorModule::get_stake_for_hotkey_on_subnet(&child1, netuid); + let child2_stake_1 = SubtensorModule::get_stake_for_hotkey_on_subnet(&child2, netuid); + + log::info!("Parent stake: {}", parent_stake_1); + log::info!("Child1 stake: {}", child1_stake_1); + log::info!("Child2 stake: {}", child2_stake_1); + + assert_eq!( + parent_stake_1, 2, + "Parent should have 2 stake due to rounding" + ); + assert_eq!(child1_stake_1, 499, "Child1 should have 499 stake"); + assert_eq!(child2_stake_1, 499, "Child2 should have 499 stake"); + + // Step 2: Set children for child1 + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(coldkey_child1), + child1, + netuid, + vec![(u64::MAX, grandchild)] + )); + + log::info!("After setting child1's children:"); + log::info!( + "Child1's children: {:?}", + SubtensorModule::get_children(&child1, netuid) + ); + log::info!( + "Grandchild's parents: {:?}", + SubtensorModule::get_parents(&grandchild, netuid) + ); + + let parent_stake_2 = SubtensorModule::get_stake_for_hotkey_on_subnet(&parent, netuid); + let child1_stake_2 = SubtensorModule::get_stake_for_hotkey_on_subnet(&child1, netuid); + let child2_stake_2 = SubtensorModule::get_stake_for_hotkey_on_subnet(&child2, netuid); + let grandchild_stake = SubtensorModule::get_stake_for_hotkey_on_subnet(&grandchild, netuid); + + log::info!("Parent stake: {}", parent_stake_2); + log::info!("Child1 stake: {}", child1_stake_2); + log::info!("Child2 stake: {}", child2_stake_2); + log::info!("Grandchild stake: {}", grandchild_stake); + + assert_eq!(parent_stake_2, 2, "Parent stake should remain 2"); + assert_eq!( + child1_stake_2, 499, + "Child1 stake should remain the same, as it does not have any owned stake" + ); + assert_eq!(child2_stake_2, 499, "Child2 should still have 499 stake"); + assert_eq!( + grandchild_stake, 0, + "Grandchild should have 0, as child1 does not have any owned stake" + ); + + // Check that the total stake is preserved + assert_eq!( + parent_stake_2 + child1_stake_2 + child2_stake_2 + grandchild_stake, + total_stake, + "Total stake should equal the initial stake" + ); + + // Additional checks + log::info!("Final parent-child relationships:"); + log::info!( + "Parent's children: {:?}", + SubtensorModule::get_children(&parent, netuid) + ); + log::info!( + "Child1's parents: {:?}", + SubtensorModule::get_parents(&child1, netuid) + ); + log::info!( + "Child2's parents: {:?}", + SubtensorModule::get_parents(&child2, netuid) + ); + log::info!( + "Child1's children: {:?}", + SubtensorModule::get_children(&child1, netuid) + ); + log::info!( + "Grandchild's parents: {:?}", + SubtensorModule::get_parents(&grandchild, netuid) + ); + + // Check if the parent-child relationships are correct + assert_eq!( + SubtensorModule::get_children(&parent, netuid), + vec![(u64::MAX / 2, child1), (u64::MAX / 2, child2)], + "Parent should have both children" + ); + assert_eq!( + 
SubtensorModule::get_parents(&child1, netuid), + vec![(u64::MAX / 2, parent)], + "Child1 should have parent as its parent" + ); + assert_eq!( + SubtensorModule::get_parents(&child2, netuid), + vec![(u64::MAX / 2, parent)], + "Child2 should have parent as its parent" + ); + assert_eq!( + SubtensorModule::get_children(&child1, netuid), + vec![(u64::MAX, grandchild)], + "Child1 should have grandchild as its child" + ); + assert_eq!( + SubtensorModule::get_parents(&grandchild, netuid), + vec![(u64::MAX, child1)], + "Grandchild should have child1 as its parent" + ); + }); +} + +// 54: Test stake distribution across multiple networks +// This test verifies the correct distribution of stake for a single neuron across multiple networks: +// - Sets up two networks with a single neuron registered on both +// - Adds initial stake to the neuron +// - Checks that the stake is correctly reflected on both networks +// - Verifies that changes in stake are consistently applied across all networks +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test children -- test_get_stake_for_hotkey_on_subnet_multiple_networks --exact --nocapture + +#[test] +fn test_get_stake_for_hotkey_on_subnet_multiple_networks() { + new_test_ext(1).execute_with(|| { + let netuid1: u16 = 1; + let netuid2: u16 = 2; + let hotkey = U256::from(1); + let coldkey = U256::from(2); + + add_network(netuid1, 0, 0); + add_network(netuid2, 0, 0); + register_ok_neuron(netuid1, hotkey, coldkey, 0); + register_ok_neuron(netuid2, hotkey, coldkey, 0); + + SubtensorModule::increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, 1000); + + assert_eq!( + SubtensorModule::get_stake_for_hotkey_on_subnet(&hotkey, netuid1), + 1000 + ); + assert_eq!( + SubtensorModule::get_stake_for_hotkey_on_subnet(&hotkey, netuid2), + 1000 + ); + }); +} + +/// 55: Test rank, trust, and incentive calculation with parent-child relationships +/// +/// This test verifies the correct calculation and distribution of rank, trust, incentive, and dividends +/// in a network with parent-child relationships: +/// - Sets up a network with validators (including a parent-child pair) and miners +/// - Establishes initial stakes and weights for all validators +/// - Runs a first epoch to establish baseline metrics +/// - Sets up a parent-child relationship +/// - Runs a second epoch to observe changes in metrics +/// - Verifies that the child's metrics improve relative to its initial state and other validators +/// +/// # Test Steps: +/// 1. Initialize test environment with validators (including parent and child) and miners +/// 2. Set up network parameters and register all neurons +/// 3. Set initial stakes for validators +/// 4. Set initial weights for all validators +/// 5. Run first epoch and process emissions +/// 6. Record initial metrics for the child +/// 7. Establish parent-child relationship +/// 8. Run second epoch and process emissions +/// 9. Record final metrics for the child +/// 10. Compare child's initial and final metrics +/// 11. 
Compare child's final metrics with other validators +/// +/// # Expected Results: +/// - Child's rank should improve (decrease) +/// - Child's trust should increase or remain the same +/// - Child's dividends should increase +/// - Child's final metrics should be better than or equal to other validators' +/// +/// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test children -- test_rank_trust_incentive_calculation_with_parent_child --exact --nocapture +#[test] +fn test_rank_trust_incentive_calculation_with_parent_child() { + new_test_ext(1).execute_with(|| { + // Initialize test environment + let netuid: u16 = 1; + let parent_hotkey: U256 = U256::from(1); + let parent_coldkey: U256 = U256::from(101); + let child_hotkey: U256 = U256::from(2); + let child_coldkey: U256 = U256::from(102); + let other_validators: Vec<(U256, U256)> = (3..6) + .map(|i| (U256::from(i), U256::from(100 + i))) + .collect(); + let miners: Vec<(U256, U256)> = (6..16) + .map(|i| (U256::from(i), U256::from(100 + i))) + .collect(); // 10 miners + + // Setup network and set registration parameters + add_network(netuid, 1, 0); + SubtensorModule::set_max_registrations_per_block(netuid, 1000); + SubtensorModule::set_target_registrations_per_interval(netuid, 1000); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + SubtensorModule::set_hotkey_emission_tempo(10); + + // Register neurons (validators and miners) + register_ok_neuron(netuid, parent_hotkey, parent_coldkey, 0); + register_ok_neuron(netuid, child_hotkey, child_coldkey, 0); + for (hotkey, coldkey) in &other_validators { + register_ok_neuron(netuid, *hotkey, *coldkey, 0); + } + for (hotkey, coldkey) in &miners { + register_ok_neuron(netuid, *hotkey, *coldkey, 0); + } + + step_block(2); + + // Set initial stakes for validators only + let initial_stake: u64 = 1_000_000_000; // 1 TAO in RAO + SubtensorModule::add_balance_to_coldkey_account(&parent_coldkey, initial_stake); + SubtensorModule::increase_stake_on_coldkey_hotkey_account( + &parent_coldkey, + &parent_hotkey, + initial_stake, + ); + SubtensorModule::add_balance_to_coldkey_account(&child_coldkey, initial_stake); + SubtensorModule::increase_stake_on_coldkey_hotkey_account( + &child_coldkey, + &child_hotkey, + initial_stake, + ); + for (hotkey, coldkey) in &other_validators { + SubtensorModule::add_balance_to_coldkey_account(coldkey, initial_stake); + SubtensorModule::increase_stake_on_coldkey_hotkey_account( + coldkey, + hotkey, + initial_stake, + ); + } + + step_block(2); + + // Set initial weights for all validators + let all_uids: Vec<u16> = (0..15).collect(); // 0-4 are validators, 5-14 are miners + let validator_weights: Vec<u16> = vec![u16::MAX / 5; 5] // Equal weights for validators + .into_iter() + .chain(vec![u16::MAX / 10; 10]) // Equal weights for miners + .collect(); + + for hotkey in std::iter::once(&parent_hotkey) + .chain(other_validators.iter().map(|(h, _)| h)) + .chain(std::iter::once(&child_hotkey)) + { + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(*hotkey), + netuid, + all_uids.clone(), + validator_weights.clone(), + 0 + )); + } + + step_block(10); + + // Run first epoch + let rao_emission: u64 = 1_000_000_000; + let initial_emission = SubtensorModule::epoch(netuid, rao_emission); + + // Process initial emission + for (hotkey, mining_emission, validator_emission) in initial_emission { + SubtensorModule::accumulate_hotkey_emission( + &hotkey, + netuid, + validator_emission, + mining_emission, + ); + } + + step_block(11); + + // Get initial rank, trust, incentive, and dividends for 
the child + let initial_child_rank: u16 = SubtensorModule::get_rank_for_uid(netuid, 1); + let initial_child_trust: u16 = SubtensorModule::get_trust_for_uid(netuid, 1); + let initial_child_incentive: u16 = SubtensorModule::get_incentive_for_uid(netuid, 1); + let initial_child_dividends: u16 = SubtensorModule::get_dividends_for_uid(netuid, 1); + + log::debug!("Initial child rank: {:?}", initial_child_rank); + log::debug!("Initial child trust: {:?}", initial_child_trust); + log::debug!("Initial child incentive: {:?}", initial_child_incentive); + log::debug!("Initial child dividends: {:?}", initial_child_dividends); + + // Parent sets the child with 100% of its weight + assert_ok!(SubtensorModule::do_set_children( + RuntimeOrigin::signed(parent_coldkey), + parent_hotkey, + netuid, + vec![(u64::MAX, child_hotkey)] + )); + + // Child now sets weights as a validator + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(child_hotkey), + netuid, + all_uids.clone(), + validator_weights.clone(), + 1 + )); + + step_block(10); + + // Run second epoch + let final_emission = SubtensorModule::epoch(netuid, rao_emission); + + // Process final emission + for (hotkey, mining_emission, validator_emission) in final_emission { + SubtensorModule::accumulate_hotkey_emission( + &hotkey, + netuid, + validator_emission, + mining_emission, + ); + } + + step_block(11); + + // Get final rank, trust, incentive, and dividends for the child + let final_child_rank: u16 = SubtensorModule::get_rank_for_uid(netuid, 1); + let final_child_trust: u16 = SubtensorModule::get_trust_for_uid(netuid, 1); + let final_child_incentive: u16 = SubtensorModule::get_incentive_for_uid(netuid, 1); + let final_child_dividends: u16 = SubtensorModule::get_dividends_for_uid(netuid, 1); + + log::debug!("Final child rank: {:?}", final_child_rank); + log::debug!("Final child trust: {:?}", final_child_trust); + log::debug!("Final child incentive: {:?}", final_child_incentive); + log::debug!("Final child dividends: {:?}", final_child_dividends); + + // Print ranks for all validators + for i in 0..5 { + log::debug!( + "Validator {} rank: {:?}", + i, + SubtensorModule::get_rank_for_uid(netuid, i) + ); + } + + // Assert that rank has improved (decreased) for the child + assert!( + final_child_rank < initial_child_rank, + "Child rank should have improved (decreased). Initial: {}, Final: {}", + initial_child_rank, + final_child_rank + ); + + // Assert that trust has increased or remained the same for the child + assert!( + final_child_trust >= initial_child_trust, + "Child trust should have increased or remained the same. Initial: {}, Final: {}", + initial_child_trust, + final_child_trust + ); + + + // Assert that dividends have increased for the child + assert!( + final_child_dividends > initial_child_dividends, + "Child dividends should have increased. 
Initial: {}, Final: {}", + initial_child_dividends, + final_child_dividends + ); + + // Compare child's final values with other validators + for i in 2..5 { + let other_rank: u16 = SubtensorModule::get_rank_for_uid(netuid, i); + let other_trust: u16 = SubtensorModule::get_trust_for_uid(netuid, i); + let other_incentive: u16 = SubtensorModule::get_incentive_for_uid(netuid, i); + let other_dividends: u16 = SubtensorModule::get_dividends_for_uid(netuid, i); + + log::debug!( + "Validator {} - Rank: {}, Trust: {}, Incentive: {}, Dividends: {}", + i, other_rank, other_trust, other_incentive, other_dividends + ); + + assert!( + final_child_rank <= other_rank, + "Child rank should be better than or equal to other validators. Child: {}, Other: {}", + final_child_rank, + other_rank + ); + + assert!( + final_child_trust >= other_trust, + "Child trust should be greater than or equal to other validators. Child: {}, Other: {}", + final_child_trust, + other_trust + ); + + assert!( + final_child_dividends >= other_dividends, + "Child dividends should be greater than or equal to other validators. Child: {}, Other: {}", + final_child_dividends, + other_dividends + ); + } + + }); +} diff --git a/pallets/subtensor/tests/coinbase.rs b/pallets/subtensor/tests/coinbase.rs new file mode 100644 index 000000000..a6c1acde1 --- /dev/null +++ b/pallets/subtensor/tests/coinbase.rs @@ -0,0 +1,156 @@ +#![allow(unused, clippy::indexing_slicing, clippy::panic, clippy::unwrap_used)] +use crate::mock::*; +mod mock; +// use frame_support::{assert_err, assert_ok}; +use sp_core::U256; + +// Test the ability to hash all sorts of hotkeys. +#[test] + +fn test_hotkey_hashing() { + new_test_ext(1).execute_with(|| { + for i in 0..10000 { + SubtensorModule::hash_hotkey_to_u64(&U256::from(i)); + } + }); +} + +// Test drain tempo on hotkeys. 
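+// With an emission tempo of 1, the assertions below show each hotkey being drained on exactly one of +// every two blocks (hotkeys 1, 2, 3 and 5 on block 0; hotkeys 0, 4, 6 and 7 on block 1); which of the +// two blocks a hotkey lands on appears to be derived from its hash rather than its index.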
+// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test coinbase test_hotkey_drain_time -- --nocapture +#[test] + +fn test_hotkey_drain_time() { + new_test_ext(1).execute_with(|| { + // Block 0 + assert!(!SubtensorModule::should_drain_hotkey(&U256::from(0), 0, 1)); + assert!(SubtensorModule::should_drain_hotkey(&U256::from(1), 0, 1)); + assert!(SubtensorModule::should_drain_hotkey(&U256::from(2), 0, 1)); + assert!(SubtensorModule::should_drain_hotkey(&U256::from(3), 0, 1)); + assert!(!SubtensorModule::should_drain_hotkey(&U256::from(4), 0, 1)); + assert!(SubtensorModule::should_drain_hotkey(&U256::from(5), 0, 1)); + assert!(!SubtensorModule::should_drain_hotkey(&U256::from(6), 0, 1)); + assert!(!SubtensorModule::should_drain_hotkey(&U256::from(7), 0, 1)); + + // Block 1 + assert!(SubtensorModule::should_drain_hotkey(&U256::from(0), 1, 1)); + assert!(!SubtensorModule::should_drain_hotkey(&U256::from(1), 1, 1)); + assert!(!SubtensorModule::should_drain_hotkey(&U256::from(2), 1, 1)); + assert!(!SubtensorModule::should_drain_hotkey(&U256::from(3), 1, 1)); + assert!(SubtensorModule::should_drain_hotkey(&U256::from(4), 1, 1)); + assert!(!SubtensorModule::should_drain_hotkey(&U256::from(5), 1, 1)); + assert!(SubtensorModule::should_drain_hotkey(&U256::from(6), 1, 1)); + assert!(SubtensorModule::should_drain_hotkey(&U256::from(7), 1, 1)); + }); +} + +// To run this test specifically, use the following command: +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test coinbase test_coinbase_basic -- --nocapture +#[test] + +fn test_coinbase_basic() { + new_test_ext(1).execute_with(|| { + // Define network ID + let netuid: u16 = 1; + let hotkey = U256::from(0); + let coldkey = U256::from(3); + + // Create a network with a tempo of 1 + add_network(netuid, 1, 0); + register_ok_neuron(netuid, hotkey, coldkey, 100000); + SubtensorModule::create_account_if_non_existent(&coldkey, &hotkey); + SubtensorModule::increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, 1000); + + // Set the subnet emission value to 1. + SubtensorModule::set_emission_values(&[netuid], vec![1]).unwrap(); + assert_eq!(SubtensorModule::get_subnet_emission_value(netuid), 1); + + // Hotkey has no pending emission + assert_eq!(SubtensorModule::get_pending_hotkey_emission(&hotkey), 0); + + // Hotkey has same stake + assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey), 1000); + + // Subnet has no pending emission. + assert_eq!(SubtensorModule::get_pending_emission(netuid), 0); + + // Step block + next_block(); + + // Hotkey has no pending emission + assert_eq!(SubtensorModule::get_pending_hotkey_emission(&hotkey), 0); + + // Hotkey has same stake + assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey), 1000); + + // Subnet now has a pending emission of 1 (from coinbase) + assert_eq!(SubtensorModule::get_pending_emission(netuid), 1); + + // Step block releases + next_block(); + + // Subnet pending has been drained. + assert_eq!(SubtensorModule::get_pending_emission(netuid), 0); + + // Hotkey pending immediately drained. + assert_eq!(SubtensorModule::get_pending_hotkey_emission(&hotkey), 0); + + // Hotkey has NEW stake + assert_eq!( + SubtensorModule::get_total_stake_for_hotkey(&hotkey), + 1000 + 2 + ); + + // Set the hotkey drain time to 2 blocks. 
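+        // The assertions below walk one more accumulate/drain cycle: subnet pending rises back to 1 while +        // the hotkey's stake stays at 1000 + 2, then on the following block the pending emission is drained +        // and the stake becomes 1000 + 4.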
+ SubtensorModule::set_hotkey_emission_tempo(2); + + // Step block releases + next_block(); + + // Subnet pending increased by 1 + assert_eq!(SubtensorModule::get_pending_emission(netuid), 1); + + // Hotkey pending not increased (still on subnet) + assert_eq!(SubtensorModule::get_pending_hotkey_emission(&hotkey), 0); + + // Hotkey has same stake + assert_eq!( + SubtensorModule::get_total_stake_for_hotkey(&hotkey), + 1000 + 2 + ); + + // Step block releases + next_block(); + + // Subnet pending has been drained. + assert_eq!(SubtensorModule::get_pending_emission(netuid), 0); + + // Hotkey pending drained. + assert_eq!(SubtensorModule::get_pending_hotkey_emission(&hotkey), 0); + + // Hotkey has 2 new TAO. + assert_eq!( + SubtensorModule::get_total_stake_for_hotkey(&hotkey), + 1000 + 4 + ); + }); +} + +// Test getting and setting hotkey emission tempo +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test coinbase test_set_and_get_hotkey_emission_tempo -- --nocapture +#[test] + +fn test_set_and_get_hotkey_emission_tempo() { + new_test_ext(1).execute_with(|| { + // Get the default hotkey emission tempo + let default_tempo = SubtensorModule::get_hotkey_emission_tempo(); + assert_eq!(default_tempo, 0); // default is 0 in mock.rs + + // Set a new hotkey emission tempo + let new_tempo = 5; + SubtensorModule::set_hotkey_emission_tempo(new_tempo); + + // Get the updated hotkey emission tempo + let updated_tempo = SubtensorModule::get_hotkey_emission_tempo(); + assert_eq!(updated_tempo, new_tempo); + }); +} diff --git a/pallets/subtensor/tests/epoch.rs b/pallets/subtensor/tests/epoch.rs index e2b911525..9c4bf87cc 100644 --- a/pallets/subtensor/tests/epoch.rs +++ b/pallets/subtensor/tests/epoch.rs @@ -7,7 +7,7 @@ use crate::mock::*; use frame_support::{assert_err, assert_ok}; use frame_system::Config; -use pallet_subtensor::math::safe_exp; +use pallet_subtensor::epoch::math::safe_exp; use pallet_subtensor::*; use rand::{distributions::Uniform, rngs::StdRng, seq::SliceRandom, thread_rng, Rng, SeedableRng}; use sp_core::U256; @@ -1496,7 +1496,7 @@ fn test_set_alpha_disabled() { // Enable Liquid Alpha and setup SubtensorModule::set_liquid_alpha_enabled(netuid, true); - migration::migrate_create_root_network::(); + migrations::migrate_create_root_network::migrate_create_root_network::(); SubtensorModule::add_balance_to_coldkey_account(&coldkey, 1_000_000_000_000_000); assert_ok!(SubtensorModule::root_register(signer.clone(), hotkey,)); assert_ok!(SubtensorModule::add_stake(signer.clone(), hotkey, 1000)); @@ -2573,7 +2573,7 @@ fn test_get_set_alpha() { // Enable Liquid Alpha and setup SubtensorModule::set_liquid_alpha_enabled(netuid, true); - migration::migrate_create_root_network::(); + migrations::migrate_create_root_network::migrate_create_root_network::(); SubtensorModule::add_balance_to_coldkey_account(&coldkey, 1_000_000_000_000_000); assert_ok!(SubtensorModule::root_register(signer.clone(), hotkey,)); assert_ok!(SubtensorModule::add_stake(signer.clone(), hotkey, 1000)); @@ -2703,6 +2703,53 @@ fn test_get_set_alpha() { }); } +#[test] +fn test_blocks_since_last_step() { + new_test_ext(1).execute_with(|| { + System::set_block_number(0); + + let netuid: u16 = 1; + let tempo: u16 = 7200; + add_network(netuid, tempo, 0); + + let original_blocks: u64 = SubtensorModule::get_blocks_since_last_step(netuid); + + step_block(5); + + let new_blocks: u64 = SubtensorModule::get_blocks_since_last_step(netuid); + + assert!(new_blocks > original_blocks); + assert_eq!(new_blocks, 5); + + let blocks_to_step: u16 = 
SubtensorModule::blocks_until_next_epoch( + netuid, + tempo, + SubtensorModule::get_current_block_as_u64(), + ) as u16 + + 10; + step_block(blocks_to_step); + + let post_blocks: u64 = SubtensorModule::get_blocks_since_last_step(netuid); + + assert_eq!(post_blocks, 10); + + let blocks_to_step: u16 = SubtensorModule::blocks_until_next_epoch( + netuid, + tempo, + SubtensorModule::get_current_block_as_u64(), + ) as u16 + + 20; + step_block(blocks_to_step); + + let new_post_blocks: u64 = SubtensorModule::get_blocks_since_last_step(netuid); + + assert_eq!(new_post_blocks, 20); + + step_block(7); + + assert_eq!(SubtensorModule::get_blocks_since_last_step(netuid), 27); + }); +} // // Map the retention graph for consensus guarantees with an single epoch on a graph with 512 nodes, of which the first 64 are validators, the graph is split into a major and minor set, each setting specific weight on itself and the complement on the other. // // // // ```import torch diff --git a/pallets/subtensor/tests/math.rs b/pallets/subtensor/tests/math.rs index 35b383f68..7f70e89f6 100644 --- a/pallets/subtensor/tests/math.rs +++ b/pallets/subtensor/tests/math.rs @@ -5,7 +5,7 @@ )] use substrate_fixed::types::{I32F32, I64F64}; -use pallet_subtensor::math::*; +use pallet_subtensor::epoch::math::*; use rand::{seq::SliceRandom, thread_rng, Rng}; use substrate_fixed::{ transcendental::exp, diff --git a/pallets/subtensor/tests/migration.rs b/pallets/subtensor/tests/migration.rs index d47155862..6c40d7d78 100644 --- a/pallets/subtensor/tests/migration.rs +++ b/pallets/subtensor/tests/migration.rs @@ -1,5 +1,4 @@ -#![allow(clippy::unwrap_used)] - +#![allow(unused, clippy::indexing_slicing, clippy::panic, clippy::unwrap_used)] mod mock; use frame_support::{assert_ok, weights::Weight}; use frame_system::Config; @@ -7,6 +6,29 @@ use mock::*; use pallet_subtensor::*; use sp_core::U256; +#[test] +fn test_initialise_ti() { + use frame_support::traits::OnRuntimeUpgrade; + + new_test_ext(1).execute_with(|| { + pallet_subtensor::SubnetLocked::::insert(1, 100); + pallet_subtensor::SubnetLocked::::insert(2, 5); + pallet_balances::TotalIssuance::::put(1000); + pallet_subtensor::TotalStake::::put(25); + + // Ensure values are NOT initialized prior to running migration + assert!(pallet_subtensor::TotalIssuance::::get() == 0); + + pallet_subtensor::migrations::migrate_init_total_issuance::initialise_total_issuance::Migration::::on_runtime_upgrade(); + + // Ensure values were initialized correctly + assert!( + pallet_subtensor::TotalIssuance::::get() + == 105u64.saturating_add(1000).saturating_add(25) + ); + }); +} + #[test] fn test_migration_fix_total_stake_maps() { new_test_ext(1).execute_with(|| { @@ -67,7 +89,7 @@ fn test_migration_fix_total_stake_maps() { assert_ne!(SubtensorModule::get_total_stake(), total_stake_amount); // Run the migration to fix the total stake maps - pallet_subtensor::migration::migrate_to_v2_fixed_total_stake::(); + pallet_subtensor::migrations::migrate_to_v2_fixed_total_stake::migrate_to_v2_fixed_total_stake::(); // Verify that the total stake is now correct assert_eq!(SubtensorModule::get_total_stake(), total_stake_amount); @@ -107,19 +129,19 @@ fn test_migration_fix_total_stake_maps() { #[test] // To run this test with cargo, use the following command: -// cargo test --package pallet-subtensor --test migration test_migration5_total_issuance -fn test_migration5_total_issuance() { +// cargo test --package pallet-subtensor --test migration test_migrate_total_issuance +fn test_migrate_total_issuance() { 
new_test_ext(1).execute_with(|| { // Run the migration to check total issuance. let test: bool = true; assert_eq!(SubtensorModule::get_total_issuance(), 0); - pallet_subtensor::migration::migration5_total_issuance::(test); + pallet_subtensor::migrations::migrate_total_issuance::migrate_total_issuance::(test); assert_eq!(SubtensorModule::get_total_issuance(), 0); SubtensorModule::add_balance_to_coldkey_account(&U256::from(1), 10000); assert_eq!(SubtensorModule::get_total_issuance(), 0); - pallet_subtensor::migration::migration5_total_issuance::(test); + pallet_subtensor::migrations::migrate_total_issuance::migrate_total_issuance::(test); assert_eq!(SubtensorModule::get_total_issuance(), 10000); SubtensorModule::increase_stake_on_coldkey_hotkey_account( @@ -128,7 +150,7 @@ fn test_migration5_total_issuance() { 30000, ); assert_eq!(SubtensorModule::get_total_issuance(), 10000); - pallet_subtensor::migration::migration5_total_issuance::(test); + pallet_subtensor::migrations::migrate_total_issuance::migrate_total_issuance::(test); assert_eq!(SubtensorModule::get_total_issuance(), 10000 + 30000); }) } @@ -148,11 +170,11 @@ fn test_total_issuance_global() { SubtensorModule::add_balance_to_coldkey_account(&owner, lockcost); // Add a balance of 20000 to the coldkey account. assert_eq!(SubtensorModule::get_total_issuance(), 0); // initial is zero. assert_ok!(SubtensorModule::register_network( - <::RuntimeOrigin>::signed(owner) + <::RuntimeOrigin>::signed(owner), )); SubtensorModule::set_max_allowed_uids(netuid, 1); // Set the maximum allowed unique identifiers for the network to 1. assert_eq!(SubtensorModule::get_total_issuance(), 0); // initial is zero. - pallet_subtensor::migration::migration5_total_issuance::(true); // Pick up lock. + pallet_subtensor::migrations::migrate_total_issuance::migrate_total_issuance::(true); // Pick up lock. assert_eq!(SubtensorModule::get_total_issuance(), lockcost); // Verify the total issuance is updated to 20000 after migration. assert!(SubtensorModule::if_subnet_exist(netuid)); @@ -162,7 +184,7 @@ fn test_total_issuance_global() { let _coldkey_account_id_1 = U256::from(1); // Define a coldkey account ID for further operations. assert_eq!(SubtensorModule::get_total_issuance(), lockcost); // Ensure the total issuance starts at 0 before the migration. SubtensorModule::add_balance_to_coldkey_account(&coldkey, account_balance); // Add a balance of 20000 to the coldkey account. - pallet_subtensor::migration::migration5_total_issuance::(true); // Execute the migration to update total issuance. + pallet_subtensor::migrations::migrate_total_issuance::migrate_total_issuance::(true); // Execute the migration to update total issuance. assert_eq!( SubtensorModule::get_total_issuance(), account_balance + lockcost @@ -185,7 +207,7 @@ fn test_total_issuance_global() { SubtensorModule::get_total_issuance(), account_balance + lockcost - burn_cost ); // Verify the total issuance is reduced to 10000 after burning. - pallet_subtensor::migration::migration5_total_issuance::(true); // Execute the migration to update total issuance. + pallet_subtensor::migrations::migrate_total_issuance::migrate_total_issuance::(true); // Execute the migration to update total issuance. 
assert_eq!( SubtensorModule::get_total_issuance(), account_balance + lockcost - burn_cost @@ -202,7 +224,7 @@ fn test_total_issuance_global() { SubtensorModule::get_total_issuance(), account_balance + lockcost - burn_cost ); // Same - pallet_subtensor::migration::migration5_total_issuance::(true); // Fix issuance + pallet_subtensor::migrations::migrate_total_issuance::migrate_total_issuance::(true); // Fix issuance assert_eq!( SubtensorModule::get_total_issuance(), account_balance + lockcost - burn_cost + new_stake @@ -222,7 +244,7 @@ fn test_total_issuance_global() { SubtensorModule::get_total_issuance(), account_balance + lockcost - burn_cost + new_stake + emission ); // Verify the total issuance reflects the staked amount and emission value that has been put through the epoch. - pallet_subtensor::migration::migration5_total_issuance::(true); // Test migration does not change amount. + pallet_subtensor::migrations::migrate_total_issuance::migrate_total_issuance::(true); // Test migration does not change amount. assert_eq!( SubtensorModule::get_total_issuance(), account_balance + lockcost - burn_cost + new_stake + emission @@ -244,7 +266,7 @@ fn test_migration_transfer_nets_to_foundation() { // Run the migration to transfer ownership let hex = hex_literal::hex!["feabaafee293d3b76dae304e2f9d885f77d2b17adab9e17e921b321eccd61c77"]; - pallet_subtensor::migration::migrate_transfer_ownership_to_foundation::(hex); + pallet_subtensor::migrations::migrate_transfer_ownership_to_foundation::migrate_transfer_ownership_to_foundation::(hex); log::info!("new owner: {:?}", SubtensorModule::get_subnet_owner(1)); }) @@ -258,7 +280,7 @@ fn test_migration_delete_subnet_3() { assert!(SubtensorModule::if_subnet_exist(3)); // Run the migration to transfer ownership - pallet_subtensor::migration::migrate_delete_subnet_3::(); + pallet_subtensor::migrations::migrate_delete_subnet_3::migrate_delete_subnet_3::(); assert!(!SubtensorModule::if_subnet_exist(3)); }) @@ -272,7 +294,7 @@ fn test_migration_delete_subnet_21() { assert!(SubtensorModule::if_subnet_exist(21)); // Run the migration to transfer ownership - pallet_subtensor::migration::migrate_delete_subnet_21::(); + pallet_subtensor::migrations::migrate_delete_subnet_21::migrate_delete_subnet_21::(); assert!(!SubtensorModule::if_subnet_exist(21)); }) @@ -283,20 +305,14 @@ fn test_migration_delete_subnet_21() { #[test] fn test_migrate_fix_total_coldkey_stake() { new_test_ext(1).execute_with(|| { - let migration_name = "fix_total_coldkey_stake_v7"; + let _migration_name = "fix_total_coldkey_stake_v7"; let coldkey = U256::from(0); TotalColdkeyStake::::insert(coldkey, 0); StakingHotkeys::::insert(coldkey, vec![U256::from(1), U256::from(2), U256::from(3)]); Stake::::insert(U256::from(1), U256::from(0), 10000); Stake::::insert(U256::from(2), U256::from(0), 10000); Stake::::insert(U256::from(3), U256::from(0), 10000); - - let weight = run_migration_and_check(migration_name); - assert!(weight != Weight::zero()); - assert_eq!(TotalColdkeyStake::::get(coldkey), 30000); - - let second_weight = run_migration_and_check(migration_name); - assert_eq!(second_weight, Weight::zero()); + pallet_subtensor::migrations::migrate_fix_total_coldkey_stake::do_migrate_fix_total_coldkey_stake::(); assert_eq!(TotalColdkeyStake::::get(coldkey), 30000); }) } @@ -305,16 +321,14 @@ fn test_migrate_fix_total_coldkey_stake() { #[test] fn test_migrate_fix_total_coldkey_stake_value_already_in_total() { new_test_ext(1).execute_with(|| { - let migration_name = "fix_total_coldkey_stake_v7"; + let 
_migration_name = "fix_total_coldkey_stake_v7"; let coldkey = U256::from(0); TotalColdkeyStake::::insert(coldkey, 100000000); StakingHotkeys::::insert(coldkey, vec![U256::from(1), U256::from(2), U256::from(3)]); Stake::::insert(U256::from(1), U256::from(0), 10000); Stake::::insert(U256::from(2), U256::from(0), 10000); Stake::::insert(U256::from(3), U256::from(0), 10000); - - let weight = run_migration_and_check(migration_name); - assert!(weight != Weight::zero()); + pallet_subtensor::migrations::migrate_fix_total_coldkey_stake::do_migrate_fix_total_coldkey_stake::(); assert_eq!(TotalColdkeyStake::::get(coldkey), 30000); }) } @@ -323,15 +337,13 @@ fn test_migrate_fix_total_coldkey_stake_value_already_in_total() { #[test] fn test_migrate_fix_total_coldkey_stake_no_entry() { new_test_ext(1).execute_with(|| { - let migration_name = "fix_total_coldkey_stake_v7"; + let _migration_name = "fix_total_coldkey_stake_v7"; let coldkey = U256::from(0); StakingHotkeys::::insert(coldkey, vec![U256::from(1), U256::from(2), U256::from(3)]); Stake::::insert(U256::from(1), U256::from(0), 10000); Stake::::insert(U256::from(2), U256::from(0), 10000); Stake::::insert(U256::from(3), U256::from(0), 10000); - - let weight = run_migration_and_check(migration_name); - assert!(weight != Weight::zero()); + pallet_subtensor::migrations::migrate_fix_total_coldkey_stake::do_migrate_fix_total_coldkey_stake::(); assert_eq!(TotalColdkeyStake::::get(coldkey), 30000); }) } @@ -340,13 +352,11 @@ fn test_migrate_fix_total_coldkey_stake_no_entry() { #[test] fn test_migrate_fix_total_coldkey_stake_no_entry_in_hotkeys() { new_test_ext(1).execute_with(|| { - let migration_name = "fix_total_coldkey_stake_v7"; + let _migration_name = "fix_total_coldkey_stake_v7"; let coldkey = U256::from(0); TotalColdkeyStake::::insert(coldkey, 100000000); StakingHotkeys::::insert(coldkey, vec![U256::from(1), U256::from(2), U256::from(3)]); - - let weight = run_migration_and_check(migration_name); - assert!(weight != Weight::zero()); + pallet_subtensor::migrations::migrate_fix_total_coldkey_stake::do_migrate_fix_total_coldkey_stake::(); assert_eq!(TotalColdkeyStake::::get(coldkey), 0); }) } @@ -355,20 +365,19 @@ fn test_migrate_fix_total_coldkey_stake_no_entry_in_hotkeys() { #[test] fn test_migrate_fix_total_coldkey_stake_one_hotkey_stake_missing() { new_test_ext(1).execute_with(|| { - let migration_name = "fix_total_coldkey_stake_v7"; + let _migration_name = "fix_total_coldkey_stake_v7"; let coldkey = U256::from(0); TotalColdkeyStake::::insert(coldkey, 100000000); StakingHotkeys::::insert(coldkey, vec![U256::from(1), U256::from(2), U256::from(3)]); Stake::::insert(U256::from(1), U256::from(0), 10000); Stake::::insert(U256::from(2), U256::from(0), 10000); - - let weight = run_migration_and_check(migration_name); - assert!(weight != Weight::zero()); + pallet_subtensor::migrations::migrate_fix_total_coldkey_stake::do_migrate_fix_total_coldkey_stake::(); assert_eq!(TotalColdkeyStake::::get(coldkey), 20000); }) } // New test to check if migration runs only once +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test migration -- test_migrate_fix_total_coldkey_stake_runs_once --exact --nocapture #[test] fn test_migrate_fix_total_coldkey_stake_runs_once() { new_test_ext(1).execute_with(|| { @@ -376,9 +385,9 @@ fn test_migrate_fix_total_coldkey_stake_runs_once() { let coldkey = U256::from(0); TotalColdkeyStake::::insert(coldkey, 0); StakingHotkeys::::insert(coldkey, vec![U256::from(1), U256::from(2), U256::from(3)]); - Stake::::insert(U256::from(1), 
U256::from(0), 10000); - Stake::::insert(U256::from(2), U256::from(0), 10000); - Stake::::insert(U256::from(3), U256::from(0), 10000); + Stake::::insert(U256::from(1), coldkey, 10000); + Stake::::insert(U256::from(2), coldkey, 10000); + Stake::::insert(U256::from(3), coldkey, 10000); // First run let first_weight = run_migration_and_check(migration_name); @@ -405,14 +414,13 @@ fn test_migrate_fix_total_coldkey_stake_starts_with_value_no_stake_map_entries() let weight = run_migration_and_check(migration_name); assert!(weight != Weight::zero()); // Therefore 0 - assert_eq!(TotalColdkeyStake::::get(coldkey), 0); + assert_eq!(TotalColdkeyStake::::get(coldkey), 123_456_789); }) } fn run_migration_and_check(migration_name: &'static str) -> frame_support::weights::Weight { // Execute the migration and store its weight - let weight: frame_support::weights::Weight = - pallet_subtensor::migration::migrate_fix_total_coldkey_stake::(); + let weight: frame_support::weights::Weight = pallet_subtensor::migrations::migrate_fix_total_coldkey_stake::migrate_fix_total_coldkey_stake::(); // Check if the migration has been marked as completed assert!(HasMigrationRun::::get( diff --git a/pallets/subtensor/tests/mock.rs b/pallets/subtensor/tests/mock.rs index 06bca8aff..aa93c3531 100644 --- a/pallets/subtensor/tests/mock.rs +++ b/pallets/subtensor/tests/mock.rs @@ -2,11 +2,10 @@ use frame_support::derive_impl; use frame_support::dispatch::DispatchResultWithPostInfo; use frame_support::weights::constants::RocksDbWeight; -// use frame_support::weights::constants::WEIGHT_PER_SECOND; use frame_support::weights::Weight; use frame_support::{ assert_ok, parameter_types, - traits::{Everything, Hooks}, + traits::{Everything, Hooks, PrivilegeCmp}, }; use frame_system as system; use frame_system::{limits, EnsureNever, EnsureRoot, RawOrigin}; @@ -17,6 +16,7 @@ use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, BuildStorage, }; +use sp_std::cmp::Ordering; type Block = frame_system::mocking::MockBlock; @@ -32,6 +32,8 @@ frame_support::construct_runtime!( SenateMembers: pallet_membership::::{Pallet, Call, Storage, Event, Config}, SubtensorModule: pallet_subtensor::{Pallet, Call, Storage, Event}, Utility: pallet_utility::{Pallet, Call, Storage, Event}, + Scheduler: pallet_scheduler::{Pallet, Call, Storage, Event}, + Preimage: pallet_preimage::{Pallet, Call, Storage, Event}, } ); @@ -78,7 +80,6 @@ impl pallet_balances::Config for Test { type WeightInfo = (); type MaxReserves = (); type ReserveIdentifier = (); - type RuntimeHoldReason = (); type FreezeIdentifier = (); type MaxFreezes = (); @@ -131,12 +132,16 @@ parameter_types! { pub const InitialBondsMovingAverage: u64 = 900_000; pub const InitialStakePruningMin: u16 = 0; pub const InitialFoundationDistribution: u64 = 0; - pub const InitialDefaultTake: u16 = 11_796; // 18%, same as in production - pub const InitialMinTake: u16 =5_898; // 9%; + pub const InitialDefaultDelegateTake: u16 = 11_796; // 18%, same as in production + pub const InitialMinDelegateTake: u16 = 5_898; // 9%; + pub const InitialDefaultChildKeyTake: u16 = 0 ;// 0 % + pub const InitialMinChildKeyTake: u16 = 0; // 0 %; + pub const InitialMaxChildKeyTake: u16 = 11_796; // 18 %; pub const InitialWeightsVersionKey: u16 = 0; pub const InitialServingRateLimit: u64 = 0; // No limit. 
pub const InitialTxRateLimit: u64 = 0; // Disable rate limit for testing pub const InitialTxDelegateTakeRateLimit: u64 = 1; // 1 block take rate limit for testing + pub const InitialTxChildKeyTakeRateLimit: u64 = 1; // 1 block take rate limit for testing pub const InitialBurn: u64 = 0; pub const InitialMinBurn: u64 = 0; pub const InitialMaxBurn: u64 = 1_000_000_000; @@ -168,7 +173,10 @@ parameter_types! { pub const InitialAlphaHigh: u16 = 58982; // Represents 0.9 as per the production default pub const InitialAlphaLow: u16 = 45875; // Represents 0.7 as per the production default pub const InitialLiquidAlphaOn: bool = false; // Default value for LiquidAlphaOn - pub const SubtensorInitialBaseDifficulty: u64 = 10_000; // Base difficulty + pub const InitialHotkeyEmissionTempo: u64 = 0; // Defaults to draining every block. + pub const InitialNetworkMaxStake: u64 = u64::MAX; // Maximum possible value for u64 + pub const InitialColdkeySwapScheduleDuration: u64 = 5 * 24 * 60 * 60 / 12; // Default as 5 days + pub const InitialDissolveNetworkScheduleDuration: u64 = 5 * 24 * 60 * 60 / 12; // Default as 5 days } // Configure collective pallet for council @@ -262,7 +270,7 @@ impl CollectiveInterface for TriumvirateVotes { } // We call pallet_collective TriumvirateCollective -#[allow(unused)] +#[allow(dead_code)] type TriumvirateCollective = pallet_collective::Instance1; impl pallet_collective::Config for Test { type RuntimeOrigin = RuntimeOrigin; @@ -280,7 +288,7 @@ impl pallet_collective::Config for Test { } // We call council members Triumvirate -#[allow(unused)] +#[allow(dead_code)] type TriumvirateMembership = pallet_membership::Instance1; impl pallet_membership::Config for Test { type RuntimeEvent = RuntimeEvent; @@ -297,7 +305,7 @@ impl pallet_membership::Config for Test { // This is a dummy collective instance for managing senate members // Probably not the best solution, but fastest implementation -#[allow(unused)] +#[allow(dead_code)] type SenateCollective = pallet_collective::Instance2; impl pallet_collective::Config for Test { type RuntimeOrigin = RuntimeOrigin; @@ -315,7 +323,7 @@ impl pallet_collective::Config for Test { } // We call our top K delegates membership Senate -#[allow(unused)] +#[allow(dead_code)] type SenateMembership = pallet_membership::Instance2; impl pallet_membership::Config for Test { type RuntimeEvent = RuntimeEvent; @@ -332,13 +340,14 @@ impl pallet_membership::Config for Test { impl pallet_subtensor::Config for Test { type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; type Currency = Balances; type InitialIssuance = InitialIssuance; type SudoRuntimeCall = TestRuntimeCall; type CouncilOrigin = frame_system::EnsureSigned; type SenateMembers = ManageSenateMembers; type TriumvirateInterface = TriumvirateVotes; - + type Scheduler = Scheduler; type InitialMinAllowedWeights = InitialMinAllowedWeights; type InitialEmissionValue = InitialEmissionValue; type InitialMaxWeightsLimit = InitialMaxWeightsLimit; @@ -358,8 +367,12 @@ impl pallet_subtensor::Config for Test { type InitialPruningScore = InitialPruningScore; type InitialBondsMovingAverage = InitialBondsMovingAverage; type InitialMaxAllowedValidators = InitialMaxAllowedValidators; - type InitialDefaultTake = InitialDefaultTake; - type InitialMinTake = InitialMinTake; + type InitialDefaultDelegateTake = InitialDefaultDelegateTake; + type InitialMinDelegateTake = InitialMinDelegateTake; + type InitialDefaultChildKeyTake = InitialDefaultChildKeyTake; + type InitialMinChildKeyTake = InitialMinChildKeyTake; + 
type InitialMaxChildKeyTake = InitialMaxChildKeyTake; + type InitialTxChildKeyTakeRateLimit = InitialTxChildKeyTakeRateLimit; type InitialWeightsVersionKey = InitialWeightsVersionKey; type InitialMaxDifficulty = InitialMaxDifficulty; type InitialMinDifficulty = InitialMinDifficulty; @@ -383,7 +396,39 @@ impl pallet_subtensor::Config for Test { type AlphaHigh = InitialAlphaHigh; type AlphaLow = InitialAlphaLow; type LiquidAlphaOn = InitialLiquidAlphaOn; - type InitialBaseDifficulty = SubtensorInitialBaseDifficulty; + type InitialHotkeyEmissionTempo = InitialHotkeyEmissionTempo; + type InitialNetworkMaxStake = InitialNetworkMaxStake; + type Preimages = Preimage; + type InitialColdkeySwapScheduleDuration = InitialColdkeySwapScheduleDuration; + type InitialDissolveNetworkScheduleDuration = InitialDissolveNetworkScheduleDuration; +} + +pub struct OriginPrivilegeCmp; + +impl PrivilegeCmp for OriginPrivilegeCmp { + fn cmp_privilege(_left: &OriginCaller, _right: &OriginCaller) -> Option { + Some(Ordering::Less) + } +} + +parameter_types! { + pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * + BlockWeights::get().max_block; + pub const MaxScheduledPerBlock: u32 = 50; + pub const NoPreimagePostponement: Option = Some(10); +} + +impl pallet_scheduler::Config for Test { + type RuntimeOrigin = RuntimeOrigin; + type RuntimeEvent = RuntimeEvent; + type PalletsOrigin = OriginCaller; + type RuntimeCall = RuntimeCall; + type MaximumWeight = MaximumSchedulerWeight; + type ScheduleOrigin = EnsureRoot; + type MaxScheduledPerBlock = MaxScheduledPerBlock; + type WeightInfo = pallet_scheduler::weights::SubstrateWeight; + type OriginPrivilegeCmp = OriginPrivilegeCmp; + type Preimages = Preimage; } impl pallet_utility::Config for Test { @@ -393,6 +438,20 @@ impl pallet_utility::Config for Test { type WeightInfo = pallet_utility::weights::SubstrateWeight; } +parameter_types! { + pub const PreimageMaxSize: u32 = 4096 * 1024; + pub const PreimageBaseDeposit: Balance = 1; + pub const PreimageByteDeposit: Balance = 1; +} + +impl pallet_preimage::Config for Test { + type WeightInfo = pallet_preimage::weights::SubstrateWeight; + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type ManagerOrigin = EnsureRoot; + type Consideration = (); +} + #[allow(dead_code)] // Build genesis storage according to the mock runtime. 
pub fn new_test_ext(block_number: BlockNumber) -> sp_io::TestExternalities { @@ -427,22 +486,30 @@ pub fn test_ext_with_balances(balances: Vec<(U256, u128)>) -> sp_io::TestExterna #[allow(dead_code)] pub(crate) fn step_block(n: u16) { for _ in 0..n { + Scheduler::on_finalize(System::block_number()); SubtensorModule::on_finalize(System::block_number()); System::on_finalize(System::block_number()); System::set_block_number(System::block_number() + 1); System::on_initialize(System::block_number()); SubtensorModule::on_initialize(System::block_number()); + Scheduler::on_initialize(System::block_number()); } } #[allow(dead_code)] pub(crate) fn run_to_block(n: u64) { while System::block_number() < n { + Scheduler::on_finalize(System::block_number()); SubtensorModule::on_finalize(System::block_number()); System::on_finalize(System::block_number()); System::set_block_number(System::block_number() + 1); System::on_initialize(System::block_number()); + System::events().iter().for_each(|event| { + log::info!("Event: {:?}", event.event); + }); + System::reset_events(); SubtensorModule::on_initialize(System::block_number()); + Scheduler::on_initialize(System::block_number()); } } @@ -498,3 +565,21 @@ pub fn add_network(netuid: u16, tempo: u16, _modality: u16) { SubtensorModule::set_network_registration_allowed(netuid, true); SubtensorModule::set_network_pow_registration_allowed(netuid, true); } + +// Helper function to set up a neuron with stake +#[allow(dead_code)] +pub fn setup_neuron_with_stake(netuid: u16, hotkey: U256, coldkey: U256, stake: u64) { + register_ok_neuron(netuid, hotkey, coldkey, stake); + SubtensorModule::increase_stake_on_coldkey_hotkey_account(&coldkey, &hotkey, stake); +} + +// Helper function to check if a value is within tolerance of an expected value +#[allow(dead_code)] +pub fn is_within_tolerance(actual: u64, expected: u64, tolerance: u64) -> bool { + let difference = if actual > expected { + actual - expected + } else { + expected - actual + }; + difference <= tolerance +} diff --git a/pallets/subtensor/tests/networks.rs b/pallets/subtensor/tests/networks.rs index 93e563683..3d3644236 100644 --- a/pallets/subtensor/tests/networks.rs +++ b/pallets/subtensor/tests/networks.rs @@ -1,420 +1,284 @@ -// DEPRECATED mod mock; -// use frame_support::{ -// assert_ok, -// dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, Pays}, -// sp_std::vec, -// }; -// use frame_system::Config; -// use frame_system::{EventRecord, Phase}; -// use mock::*; -// use pallet_subtensor::Error; -// use sp_core::{H256, U256}; - -// #[allow(dead_code)] -// fn record(event: RuntimeEvent) -> EventRecord { -// EventRecord { -// phase: Phase::Initialization, -// event, -// topics: vec![], -// } -// } - -// /*TO DO SAM: write test for LatuUpdate after it is set */ -// // --- add network tests ---- -// #[test] -// fn test_add_network_dispatch_info_ok() { -// new_test_ext().execute_with(|| { -// let netuid: u16 = 1; -// let modality = 0; -// let tempo: u16 = 13; -// let call = RuntimeCall::SubtensorModule(SubtensorCall::sudo_add_network { -// netuid, -// tempo, -// modality, -// }); -// assert_eq!( -// call.get_dispatch_info(), -// DispatchInfo { -// weight: frame_support::weights::Weight::from_parts(50000000, 0), -// class: DispatchClass::Operational, -// pays_fee: Pays::No -// } -// ); -// }); -// } - -// #[test] -// fn test_add_network() { -// new_test_ext().execute_with(|| { -// let modality = 0; -// let tempo: u16 = 13; -// add_network(10, tempo, modality); -// 
assert_eq!(SubtensorModule::get_number_of_subnets(), 1); -// add_network(20, tempo, modality); -// assert_eq!(SubtensorModule::get_number_of_subnets(), 2); -// }); -// } - -// #[test] -// fn test_add_network_check_tempo() { -// new_test_ext().execute_with(|| { -// let modality = 0; -// let tempo: u16 = 13; -// assert_eq!(SubtensorModule::get_tempo(1), 0); -// add_network(1, tempo, modality); -// assert_eq!(SubtensorModule::get_tempo(1), 13); -// }); -// } - -// #[test] -// fn test_clear_min_allowed_weight_for_network() { -// new_test_ext().execute_with(|| { -// let netuid: u16 = 1; -// let min_allowed_weight = 2; -// let tempo: u16 = 13; -// add_network(netuid, tempo, 0); -// register_ok_neuron(1, U256::from(55), U256::from(66), 0); -// SubtensorModule::set_min_allowed_weights(netuid, min_allowed_weight); -// assert_eq!(SubtensorModule::get_min_allowed_weights(netuid), 2); -// assert_ok!(SubtensorModule::do_remove_network( -// <::RuntimeOrigin>::root(), -// netuid -// )); -// assert_eq!(SubtensorModule::get_min_allowed_weights(netuid), 0); -// }); -// } - -// #[test] -// fn test_remove_uid_for_network() { -// new_test_ext().execute_with(|| { -// let netuid: u16 = 1; -// let tempo: u16 = 13; -// add_network(netuid, tempo, 0); -// register_ok_neuron(1, U256::from(55), U256::from(66), 0); -// let neuron_id; -// match SubtensorModule::get_uid_for_net_and_hotkey(netuid, &U256::from(55)) { -// Ok(k) => neuron_id = k, -// Err(e) => panic!("Error: {:?}", e), -// } -// assert!(SubtensorModule::get_uid_for_net_and_hotkey(netuid, &U256::from(55)).is_ok()); -// assert_eq!(neuron_id, 0); -// register_ok_neuron(1, U256::from(56), U256::from(67), 300000); -// let neuron_uid = -// SubtensorModule::get_uid_for_net_and_hotkey(netuid, &U256::from(56)).unwrap(); -// assert_eq!(neuron_uid, 1); -// assert_ok!(SubtensorModule::do_remove_network( -// <::RuntimeOrigin>::root(), -// netuid -// )); -// assert!(SubtensorModule::get_uid_for_net_and_hotkey(netuid, &U256::from(55)).is_err()); -// }); -// } - -// #[test] -// fn test_remove_difficulty_for_network() { -// new_test_ext().execute_with(|| { -// let netuid: u16 = 1; -// let difficulty: u64 = 10; -// let tempo: u16 = 13; -// add_network(netuid, tempo, 0); -// register_ok_neuron(1, U256::from(55), U256::from(66), 0); -// assert_ok!(SubtensorModule::sudo_set_difficulty( -// <::RuntimeOrigin>::root(), -// netuid, -// difficulty -// )); -// assert_eq!(SubtensorModule::get_difficulty_as_u64(netuid), difficulty); -// assert_ok!(SubtensorModule::do_remove_network( -// <::RuntimeOrigin>::root(), -// netuid -// )); -// assert_eq!(SubtensorModule::get_difficulty_as_u64(netuid), 10000); -// }); -// } - -// #[test] -// fn test_remove_network_for_all_hotkeys() { -// new_test_ext().execute_with(|| { -// let netuid: u16 = 1; -// let tempo: u16 = 13; -// add_network(netuid, tempo, 0); -// register_ok_neuron(1, U256::from(55), U256::from(66), 0); -// register_ok_neuron(1, U256::from(77), U256::from(88), 65536); -// assert_eq!(SubtensorModule::get_subnetwork_n(netuid), 2); -// assert_ok!(SubtensorModule::do_remove_network( -// <::RuntimeOrigin>::root(), -// netuid -// )); -// assert_eq!(SubtensorModule::get_subnetwork_n(netuid), 0); -// }); -// } - -// #[test] -// fn test_network_set_default_value_for_other_parameters() { -// new_test_ext().execute_with(|| { -// let netuid: u16 = 1; -// let tempo: u16 = 13; -// add_network(netuid, tempo, 0); -// assert_eq!(SubtensorModule::get_min_allowed_weights(netuid), 0); -// assert_eq!(SubtensorModule::get_emission_value(netuid), 0); -// 
assert_eq!(SubtensorModule::get_max_weight_limit(netuid), u16::MAX); -// assert_eq!(SubtensorModule::get_difficulty_as_u64(netuid), 10000); -// assert_eq!(SubtensorModule::get_immunity_period(netuid), 2); -// }); -// } - -// // --- Set Emission Ratios Tests -// #[test] -// fn test_network_set_emission_ratios_dispatch_info_ok() { -// new_test_ext().execute_with(|| { -// let netuids: Vec = vec![1, 2]; -// let emission: Vec = vec![100000000, 900000000]; -// let call = RuntimeCall::SubtensorModule(SubtensorCall::sudo_set_emission_values { -// netuids, -// emission, -// }); -// assert_eq!( -// call.get_dispatch_info(), -// DispatchInfo { -// weight: frame_support::weights::Weight::from_parts(28000000, 0), -// class: DispatchClass::Operational, -// pays_fee: Pays::No -// } -// ); -// }); -// } - -// #[test] -// fn test_network_set_emission_ratios_ok() { -// new_test_ext().execute_with(|| { -// let netuids: Vec = vec![1, 2]; -// let emission: Vec = vec![100000000, 900000000]; -// add_network(1, 0, 0); -// add_network(2, 0, 0); -// assert_ok!(SubtensorModule::sudo_set_emission_values( -// <::RuntimeOrigin>::root(), -// netuids, -// emission -// )); -// }); -// } - -// #[test] -// fn test_network_set_emission_ratios_fail_summation() { -// new_test_ext().execute_with(|| { -// let netuids: Vec = vec![1, 2]; -// let emission: Vec = vec![100000000, 910000000]; -// add_network(1, 0, 0); -// add_network(2, 0, 0); -// assert_eq!( -// SubtensorModule::sudo_set_emission_values( -// <::RuntimeOrigin>::root(), -// netuids, -// emission -// ), -// Err(Error::::InvalidEmissionValues.into()) -// ); -// }); -// } - -// #[test] -// fn test_network_set_emission_invalid_netuids() { -// new_test_ext().execute_with(|| { -// let netuids: Vec = vec![1, 2]; -// let emission: Vec = vec![100000000, 900000000]; -// add_network(1, 0, 0); -// assert_eq!( -// SubtensorModule::sudo_set_emission_values( -// <::RuntimeOrigin>::root(), -// netuids, -// emission -// ), -// Err(Error::::IncorrectNetuidsLength.into()) -// ); -// }); -// } - -// #[test] -// fn test_network_set_emission_ratios_fail_net() { -// new_test_ext().execute_with(|| { -// let netuids: Vec = vec![1, 2]; -// let emission: Vec = vec![100000000, 900000000]; -// add_network(1, 0, 0); -// add_network(3, 0, 0); -// assert_eq!( -// SubtensorModule::sudo_set_emission_values( -// <::RuntimeOrigin>::root(), -// netuids, -// emission -// ), -// Err(Error::::UidVecContainInvalidOne.into()) -// ); -// }); -// } - -// #[test] -// fn test_add_difficulty_fail() { -// new_test_ext().execute_with(|| { -// let netuid: u16 = 1; -// assert_eq!( -// SubtensorModule::sudo_set_difficulty( -// <::RuntimeOrigin>::root(), -// netuid, -// 120000 -// ), -// Err(Error::::NetworkDoesNotExist.into()) -// ); -// }); -// } - -// #[test] -// fn test_multi_tempo_with_emission() { -// new_test_ext().execute_with(|| { -// let netuid: u16 = 1; -// assert_eq!( -// SubtensorModule::sudo_set_difficulty( -// <::RuntimeOrigin>::root(), -// netuid, -// 120000 -// ), -// Err(Error::::NetworkDoesNotExist.into()) -// ); -// }); -// } - -// #[test] -// // Required by the test otherwise it would panic if compiled in debug mode -// #[allow(arithmetic_overflow)] -// fn test_set_emission_values_errors_on_emission_sum_overflow() { -// new_test_ext().execute_with(|| { -// let netuids: Vec = vec![1, 2]; -// // u64(u64::MAX + 1..000..1) equals to 1_000_000_000 which is the same as -// // the value of Self::get_block_emission() expected by the extrinsic -// let emission: Vec = vec![u64::MAX, 1_000_000_001]; -// 
add_network(1, 0, 0); -// add_network(2, 0, 0); -// assert_eq!( -// SubtensorModule::sudo_set_emission_values( -// <::RuntimeOrigin>::root(), -// netuids, -// emission -// ), -// Err(Error::::InvalidEmissionValues.into()) -// ); -// }); -// } - -// #[test] -// #[allow(arithmetic_overflow)] -// fn test_set_emission_values_no_errors() { -// new_test_ext().execute_with(|| { -// let netuids: Vec = vec![1, 2]; -// let emission: Vec = vec![600_000_000, 400_000_000]; - -// add_network(1, 0, 0); -// add_network(2, 0, 0); -// assert_eq!( -// SubtensorModule::sudo_set_emission_values( -// <::RuntimeOrigin>::root(), -// netuids, -// emission -// ), -// Ok(()) -// ); -// }); -// } - -// #[test] -// // Required by the test otherwise it would panic if compiled in debug mode -// #[allow(arithmetic_overflow)] -// fn test_set_emission_values_sum_too_large() { -// new_test_ext().execute_with(|| { -// let netuids: Vec = vec![1, 2]; -// // u64(1_000_000_000 + 1) equals to 1_000_000_001 which is more than -// // the value of Self::get_block_emission() expected by the extrinsic -// let emission: Vec = vec![1_000_000_000, 1]; -// add_network(1, 0, 0); -// add_network(2, 0, 0); -// assert_eq!( -// SubtensorModule::sudo_set_emission_values( -// <::RuntimeOrigin>::root(), -// netuids, -// emission -// ), -// Err(Error::::InvalidEmissionValues.into()) -// ); -// }); -// } - -// #[test] -// // Required by the test otherwise it would panic if compiled in debug mode -// #[allow(arithmetic_overflow)] -// fn test_set_emission_values_sum_too_small() { -// new_test_ext().execute_with(|| { -// let netuids: Vec = vec![1, 2]; -// // u64(1 + 2_000) equals to 2_001 which is LESS than -// // the value of Self::get_block_emission() expected by the extrinsic -// let emission: Vec = vec![1, 2_000]; -// add_network(1, 0, 0); -// add_network(2, 0, 0); -// assert_eq!( -// SubtensorModule::sudo_set_emission_values( -// <::RuntimeOrigin>::root(), -// netuids, -// emission -// ), -// Err(Error::::InvalidEmissionValues.into()) -// ); -// }); -// } - -// #[test] -// fn test_set_emission_values_too_many_netuids() { -// new_test_ext().execute_with(|| { -// let netuids: Vec = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; - -// // Sums to 1_000_000_000 and has 10 elements -// let emission: Vec = vec![1_000_000_000, 0, 0, 0, 0, 0, 0, 0, 0, 0]; -// add_network(1, 0, 0); -// add_network(2, 0, 0); -// // We only add 2 networks, so this should fail -// assert_eq!( -// SubtensorModule::sudo_set_emission_values( -// <::RuntimeOrigin>::root(), -// netuids, -// emission -// ), -// Err(Error::::IncorrectNetuidsLength.into()) -// ); -// }); -// } - -// #[test] -// fn test_set_emission_values_over_u16_max_values() { -// new_test_ext().execute_with(|| { -// // Make vec of u16 with length 2^16 + 2 -// let netuids: Vec = vec![0; 0x10002]; -// // This is greater than u16::MAX -// assert!(netuids.len() > u16::MAX as usize); -// // On cast to u16, this will be 2 -// assert!(netuids.len() as u16 == 2); - -// // Sums to 1_000_000_000 and the length is 65536 -// let mut emission: Vec = vec![0; netuids.len()]; -// emission[0] = 1_000_000_000; - -// add_network(1, 0, 0); -// add_network(2, 0, 0); -// // We only add 2 networks, so this should fail -// // but if we cast to u16 during length comparison, -// // the length will be 2 and the check will pass -// assert_eq!( -// SubtensorModule::sudo_set_emission_values( -// <::RuntimeOrigin>::root(), -// netuids, -// emission -// ), -// Err(Error::::IncorrectNetuidsLength.into()) -// ); -// }); -// } +use crate::mock::*; +use 
frame_support::assert_ok; +use frame_system::Config; +use pallet_subtensor::{ColdkeySwapScheduleDuration, DissolveNetworkScheduleDuration, Event}; +use sp_core::U256; + +mod mock; + +#[test] +fn test_registration_ok() { + new_test_ext(1).execute_with(|| { + let block_number: u64 = 0; + let netuid: u16 = 2; + let tempo: u16 = 13; + let hotkey_account_id: U256 = U256::from(1); + let coldkey_account_id = U256::from(0); // Neighbour of the beast, har har + let (nonce, work): (u64, Vec<u8>) = SubtensorModule::create_work_for_block_number( + netuid, + block_number, + 129123813, + &hotkey_account_id, + ); + + // add network + add_network(netuid, tempo, 0); + + assert_ok!(SubtensorModule::register( + <<Test as Config>::RuntimeOrigin>::signed(hotkey_account_id), + netuid, + block_number, + nonce, + work.clone(), + hotkey_account_id, + coldkey_account_id + )); + + assert_ok!(SubtensorModule::user_remove_network( + coldkey_account_id, + netuid + )); + + assert!(!SubtensorModule::if_subnet_exist(netuid)) + }) +} + +#[test] +fn test_schedule_dissolve_network_execution() { + new_test_ext(1).execute_with(|| { + let block_number: u64 = 0; + let netuid: u16 = 2; + let tempo: u16 = 13; + let hotkey_account_id: U256 = U256::from(1); + let coldkey_account_id = U256::from(0); // Neighbour of the beast, har har + let (nonce, work): (u64, Vec<u8>) = SubtensorModule::create_work_for_block_number( + netuid, + block_number, + 129123813, + &hotkey_account_id, + ); + + // add network + add_network(netuid, tempo, 0); + + assert_ok!(SubtensorModule::register( + <<Test as Config>::RuntimeOrigin>::signed(hotkey_account_id), + netuid, + block_number, + nonce, + work.clone(), + hotkey_account_id, + coldkey_account_id + )); + + assert!(SubtensorModule::if_subnet_exist(netuid)); + + assert_ok!(SubtensorModule::schedule_dissolve_network( + <<Test as Config>::RuntimeOrigin>::signed(coldkey_account_id), + netuid + )); + + let current_block = System::block_number(); + let execution_block = current_block + DissolveNetworkScheduleDuration::<Test>::get(); + + System::assert_last_event( + Event::DissolveNetworkScheduled { + account: coldkey_account_id, + netuid, + execution_block, + } + .into(), + ); + + run_to_block(execution_block); + assert!(!SubtensorModule::if_subnet_exist(netuid)); + }) +} + +#[test] +fn test_non_owner_schedule_dissolve_network_execution() { + new_test_ext(1).execute_with(|| { + let block_number: u64 = 0; + let netuid: u16 = 2; + let tempo: u16 = 13; + let hotkey_account_id: U256 = U256::from(1); + let coldkey_account_id = U256::from(0); // Neighbour of the beast, har har + let non_network_owner_account_id = U256::from(2); // + let (nonce, work): (u64, Vec<u8>) = SubtensorModule::create_work_for_block_number( + netuid, + block_number, + 129123813, + &hotkey_account_id, + ); + + // add network + add_network(netuid, tempo, 0); + + assert_ok!(SubtensorModule::register( + <<Test as Config>::RuntimeOrigin>::signed(hotkey_account_id), + netuid, + block_number, + nonce, + work.clone(), + hotkey_account_id, + coldkey_account_id + )); + + assert!(SubtensorModule::if_subnet_exist(netuid)); + + assert_ok!(SubtensorModule::schedule_dissolve_network( + <<Test as Config>::RuntimeOrigin>::signed(non_network_owner_account_id), + netuid + )); + + let current_block = System::block_number(); + let execution_block = current_block + DissolveNetworkScheduleDuration::<Test>::get(); + + System::assert_last_event( + Event::DissolveNetworkScheduled { + account: non_network_owner_account_id, + netuid, + execution_block, + } + .into(), + ); + + run_to_block(execution_block); + // the network still exists since the caller is not the network owner +
assert!(SubtensorModule::if_subnet_exist(netuid)); + }) +} + +#[test] +fn test_new_owner_schedule_dissolve_network_execution() { + new_test_ext(1).execute_with(|| { + let block_number: u64 = 0; + let netuid: u16 = 2; + let tempo: u16 = 13; + let hotkey_account_id: U256 = U256::from(1); + let coldkey_account_id = U256::from(0); // Neighbour of the beast, har har + let new_network_owner_account_id = U256::from(2); // + let (nonce, work): (u64, Vec<u8>) = SubtensorModule::create_work_for_block_number( + netuid, + block_number, + 129123813, + &hotkey_account_id, + ); + + // add network + add_network(netuid, tempo, 0); + + assert_ok!(SubtensorModule::register( + <<Test as Config>::RuntimeOrigin>::signed(hotkey_account_id), + netuid, + block_number, + nonce, + work.clone(), + hotkey_account_id, + coldkey_account_id + )); + + assert!(SubtensorModule::if_subnet_exist(netuid)); + + // the account is not the network owner when the call is scheduled + assert_ok!(SubtensorModule::schedule_dissolve_network( + <<Test as Config>::RuntimeOrigin>::signed(new_network_owner_account_id), + netuid + )); + + let current_block = System::block_number(); + let execution_block = current_block + DissolveNetworkScheduleDuration::<Test>::get(); + + System::assert_last_event( + Event::DissolveNetworkScheduled { + account: new_network_owner_account_id, + netuid, + execution_block, + } + .into(), + ); + run_to_block(current_block + 1); + // become the network owner after the call is scheduled + pallet_subtensor::SubnetOwner::<Test>::insert(netuid, new_network_owner_account_id); + + run_to_block(execution_block); + // the network is removed since the caller became the network owner before execution + assert!(!SubtensorModule::if_subnet_exist(netuid)); + }) +} + +#[test] +fn test_schedule_dissolve_network_execution_with_coldkey_swap() { + new_test_ext(1).execute_with(|| { + let block_number: u64 = 0; + let netuid: u16 = 2; + let tempo: u16 = 13; + let hotkey_account_id: U256 = U256::from(1); + let coldkey_account_id = U256::from(0); // Neighbour of the beast, har har + let new_network_owner_account_id = U256::from(2); // + + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 1000000000000000); + + let (nonce, work): (u64, Vec<u8>) = SubtensorModule::create_work_for_block_number( + netuid, + block_number, + 129123813, + &hotkey_account_id, + ); + + // add network + add_network(netuid, tempo, 0); + + assert_ok!(SubtensorModule::register( + <<Test as Config>::RuntimeOrigin>::signed(hotkey_account_id), + netuid, + block_number, + nonce, + work.clone(), + hotkey_account_id, + coldkey_account_id + )); + + assert!(SubtensorModule::if_subnet_exist(netuid)); + + // schedule a coldkey swap from the current owner to the new owner account + assert_ok!(SubtensorModule::schedule_swap_coldkey( + <<Test as Config>::RuntimeOrigin>::signed(coldkey_account_id), + new_network_owner_account_id + )); + + let current_block = System::block_number(); + let execution_block = current_block + ColdkeySwapScheduleDuration::<Test>::get(); + + run_to_block(execution_block - 1); + + // the account is not yet the network owner when the call is scheduled + assert_ok!(SubtensorModule::schedule_dissolve_network( + <<Test as Config>::RuntimeOrigin>::signed(new_network_owner_account_id), + netuid + )); + + System::assert_last_event( + Event::DissolveNetworkScheduled { + account: new_network_owner_account_id, + netuid, + execution_block: DissolveNetworkScheduleDuration::<Test>::get() + execution_block + - 1, + } + .into(), + ); + + run_to_block(execution_block); + assert_eq!( + pallet_subtensor::SubnetOwner::<Test>::get(netuid), + new_network_owner_account_id + ); + + let current_block = System::block_number(); + let execution_block
= current_block + DissolveNetworkScheduleDuration::<Test>::get(); + + run_to_block(execution_block); + // the network is removed since the caller is now the network owner after the coldkey swap + assert!(!SubtensorModule::if_subnet_exist(netuid)); + }) +} diff --git a/pallets/subtensor/tests/registration.rs b/pallets/subtensor/tests/registration.rs index 7d6e8ea65..9cea45bc5 100644 --- a/pallets/subtensor/tests/registration.rs +++ b/pallets/subtensor/tests/registration.rs @@ -276,7 +276,7 @@ fn test_registration_rate_limit_exceeded() { let result = extension.validate(&who, &call.into(), &info, 10); // Expectation: The transaction should be rejected - assert_err!(result, InvalidTransaction::ExhaustsResources); + assert_err!(result, InvalidTransaction::Custom(5)); let current_registrants = SubtensorModule::get_registrations_this_interval(netuid); assert!(current_registrants <= max_registrants); @@ -360,10 +360,7 @@ fn test_burned_registration_rate_limit_exceeded() { extension.validate(&who, &call_burned_register.into(), &info, 10); // Expectation: The transaction should be rejected - assert_err!( - burned_register_result, - InvalidTransaction::ExhaustsResources - ); + assert_err!(burned_register_result, InvalidTransaction::Custom(5)); let current_registrants = SubtensorModule::get_registrations_this_interval(netuid); assert!(current_registrants <= max_registrants); @@ -538,6 +535,122 @@ fn test_burn_adjustment() { }); } +#[test] +fn test_burn_registration_pruning_scenarios() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let tempo: u16 = 13; + let burn_cost = 1000; + let coldkey_account_id = U256::from(667); + let max_allowed_uids = 6; + let immunity_period = 5000; + + const IS_IMMUNE: bool = true; + const NOT_IMMUNE: bool = false; + + // Initial setup + SubtensorModule::set_burn(netuid, burn_cost); + SubtensorModule::set_max_allowed_uids(netuid, max_allowed_uids); + SubtensorModule::set_target_registrations_per_interval(netuid, max_allowed_uids); + SubtensorModule::set_immunity_period(netuid, immunity_period); + + add_network(netuid, tempo, 0); + + let mint_balance = burn_cost * u64::from(max_allowed_uids) + 1_000_000_000; + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, mint_balance); + + // Register the first half of the neurons + for i in 0..3 { + assert_ok!(SubtensorModule::burned_register( + <<Test as Config>::RuntimeOrigin>::signed(coldkey_account_id), + netuid, + U256::from(i) + )); + step_block(1); + } + + // Note: the pruning score is reset to u16::MAX after a neuron is selected for pruning + + // 1. Test if all immune neurons + assert_eq!(SubtensorModule::get_neuron_is_immune(netuid, 0), IS_IMMUNE); + assert_eq!(SubtensorModule::get_neuron_is_immune(netuid, 1), IS_IMMUNE); + assert_eq!(SubtensorModule::get_neuron_is_immune(netuid, 2), IS_IMMUNE); + + SubtensorModule::set_pruning_score_for_uid(netuid, 0, 100); + SubtensorModule::set_pruning_score_for_uid(netuid, 1, 75); + SubtensorModule::set_pruning_score_for_uid(netuid, 2, 50); + + // The immune neuron with the lowest score should be pruned + assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), 2); + + // 2. Test tie-breaking for immune neurons + SubtensorModule::set_pruning_score_for_uid(netuid, 1, 50); + SubtensorModule::set_pruning_score_for_uid(netuid, 2, 50); + + // Should get the oldest neuron (i.e., neuron that was registered first) + assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), 1); + + // 3.
Test if no immune neurons + step_block(immunity_period); + + // ensure all neurons are non-immune + assert_eq!(SubtensorModule::get_neuron_is_immune(netuid, 0), NOT_IMMUNE); + assert_eq!(SubtensorModule::get_neuron_is_immune(netuid, 1), NOT_IMMUNE); + assert_eq!(SubtensorModule::get_neuron_is_immune(netuid, 2), NOT_IMMUNE); + + SubtensorModule::set_pruning_score_for_uid(netuid, 0, 100); + SubtensorModule::set_pruning_score_for_uid(netuid, 1, 50); + SubtensorModule::set_pruning_score_for_uid(netuid, 2, 75); + + // The non-immune neuron with the lowest score should be pruned + assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), 1); + + // 4. Test tie-breaking for non-immune neurons + SubtensorModule::set_pruning_score_for_uid(netuid, 1, 50); + SubtensorModule::set_pruning_score_for_uid(netuid, 2, 50); + + // Should get the oldest non-immune neuron + assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), 1); + + // 5. Test mixed immunity + // Register a second batch of neurons (these will be immune; the first batch is no longer immune) + for i in 3..6 { + assert_ok!(SubtensorModule::burned_register( + <<Test as Config>::RuntimeOrigin>::signed(coldkey_account_id), + netuid, + U256::from(i) + )); + step_block(1); + } + + // Ensure all new neurons are immune + assert_eq!(SubtensorModule::get_neuron_is_immune(netuid, 3), IS_IMMUNE); + assert_eq!(SubtensorModule::get_neuron_is_immune(netuid, 4), IS_IMMUNE); + assert_eq!(SubtensorModule::get_neuron_is_immune(netuid, 5), IS_IMMUNE); + + // Set pruning scores for all neurons + SubtensorModule::set_pruning_score_for_uid(netuid, 0, 75); // non-immune + SubtensorModule::set_pruning_score_for_uid(netuid, 1, 50); // non-immune + SubtensorModule::set_pruning_score_for_uid(netuid, 2, 60); // non-immune + SubtensorModule::set_pruning_score_for_uid(netuid, 3, 40); // immune + SubtensorModule::set_pruning_score_for_uid(netuid, 4, 55); // immune + SubtensorModule::set_pruning_score_for_uid(netuid, 5, 45); // immune + + // The non-immune neuron with the lowest score should be pruned + assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), 1); + + // If we remove the lowest non-immune neuron, it should choose the next lowest non-immune + SubtensorModule::set_pruning_score_for_uid(netuid, 1, u16::MAX); + assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), 2); + + // If we make all non-immune neurons have high scores, it should choose the oldest non-immune neuron + SubtensorModule::set_pruning_score_for_uid(netuid, 0, u16::MAX); + SubtensorModule::set_pruning_score_for_uid(netuid, 1, u16::MAX); + SubtensorModule::set_pruning_score_for_uid(netuid, 2, u16::MAX); + assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), 0); + }); +} + #[test] fn test_registration_too_many_registrations_per_block() { new_test_ext(1).execute_with(|| { diff --git a/pallets/subtensor/tests/root.rs b/pallets/subtensor/tests/root.rs index 7c6622670..caf1e5935 100644 --- a/pallets/subtensor/tests/root.rs +++ b/pallets/subtensor/tests/root.rs @@ -4,8 +4,9 @@ use crate::mock::*; use frame_support::{assert_err, assert_ok}; use frame_system::Config; use frame_system::{EventRecord, Phase}; -use pallet_subtensor::migration; use pallet_subtensor::Error; +use pallet_subtensor::{migrations, SubnetIdentity}; +use pallet_subtensor::{SubnetIdentities, SubnetIdentityOf}; use sp_core::{Get, H256, U256}; mod mock; @@ -22,7 +23,7 @@ fn record(event: RuntimeEvent) -> EventRecord { #[test] fn test_root_register_network_exist() { new_test_ext(1).execute_with(|| { - migration::migrate_create_root_network::(); +
migrations::migrate_create_root_network::migrate_create_root_network::(); let hotkey_account_id: U256 = U256::from(1); let coldkey_account_id = U256::from(667); assert_ok!(SubtensorModule::root_register( @@ -32,6 +33,7 @@ fn test_root_register_network_exist() { }); } +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test root -- test_set_weights_not_root_error --exact --nocapture #[test] fn test_set_weights_not_root_error() { new_test_ext(0).execute_with(|| { @@ -60,10 +62,11 @@ fn test_set_weights_not_root_error() { }); } +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test root -- test_root_register_normal_on_root_fails --exact --nocapture #[test] fn test_root_register_normal_on_root_fails() { new_test_ext(1).execute_with(|| { - migration::migrate_create_root_network::(); + migrations::migrate_create_root_network::migrate_create_root_network::(); // Test fails because normal registrations are not allowed // on the root network. let root_netuid: u16 = 0; @@ -104,10 +107,11 @@ fn test_root_register_normal_on_root_fails() { }); } +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test root -- test_root_register_stake_based_pruning_works --exact --nocapture #[test] fn test_root_register_stake_based_pruning_works() { new_test_ext(1).execute_with(|| { - migration::migrate_create_root_network::(); + migrations::migrate_create_root_network::migrate_create_root_network::(); // Add two networks. let root_netuid: u16 = 0; let other_netuid: u16 = 1; @@ -192,11 +196,12 @@ fn test_root_register_stake_based_pruning_works() { }); } +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test root -- test_root_set_weights --exact --nocapture #[test] fn test_root_set_weights() { new_test_ext(1).execute_with(|| { System::set_block_number(0); - migration::migrate_create_root_network::(); + migrations::migrate_create_root_network::migrate_create_root_network::(); let n: usize = 10; let root_netuid: u16 = 0; @@ -231,7 +236,7 @@ fn test_root_set_weights() { for netuid in 1..n { log::debug!("Adding network with netuid: {}", netuid); assert_ok!(SubtensorModule::register_network( - <::RuntimeOrigin>::signed(U256::from(netuid + 456)) + <::RuntimeOrigin>::signed(U256::from(netuid + 456)), )); } @@ -334,11 +339,12 @@ fn test_root_set_weights() { }); } +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test root -- test_root_set_weights --exact --nocapture #[test] fn test_root_set_weights_out_of_order_netuids() { new_test_ext(1).execute_with(|| { System::set_block_number(0); - migration::migrate_create_root_network::(); + migrations::migrate_create_root_network::migrate_create_root_network::(); let n: usize = 10; let root_netuid: u16 = 0; @@ -375,7 +381,7 @@ fn test_root_set_weights_out_of_order_netuids() { if netuid % 2 == 0 { assert_ok!(SubtensorModule::register_network( - <::RuntimeOrigin>::signed(U256::from(netuid)) + <::RuntimeOrigin>::signed(U256::from(netuid)), )); } else { add_network(netuid as u16 * 10, 1000, 0) @@ -458,7 +464,7 @@ fn test_root_set_weights_out_of_order_netuids() { fn test_root_subnet_creation_deletion() { new_test_ext(1).execute_with(|| { System::set_block_number(0); - migration::migrate_create_root_network::(); + migrations::migrate_create_root_network::migrate_create_root_network::(); // Owner of subnets. 
let owner: U256 = U256::from(0); @@ -466,14 +472,14 @@ fn test_root_subnet_creation_deletion() { SubtensorModule::add_balance_to_coldkey_account(&owner, 1_000_000_000_000_000); // last_lock: 100000000000, min_lock: 100000000000, last_lock_block: 0, lock_reduction_interval: 2, current_block: 0, mult: 1 lock_cost: 100000000000 assert_ok!(SubtensorModule::register_network( - <::RuntimeOrigin>::signed(owner) + <::RuntimeOrigin>::signed(owner), )); // last_lock: 100000000000, min_lock: 100000000000, last_lock_block: 0, lock_reduction_interval: 2, current_block: 0, mult: 1 lock_cost: 100000000000 assert_eq!(SubtensorModule::get_network_lock_cost(), 100_000_000_000); step_block(1); // last_lock: 100000000000, min_lock: 100000000000, last_lock_block: 0, lock_reduction_interval: 2, current_block: 1, mult: 1 lock_cost: 100000000000 assert_ok!(SubtensorModule::register_network( - <::RuntimeOrigin>::signed(owner) + <::RuntimeOrigin>::signed(owner), )); // last_lock: 100000000000, min_lock: 100000000000, last_lock_block: 1, lock_reduction_interval: 2, current_block: 1, mult: 2 lock_cost: 200000000000 assert_eq!(SubtensorModule::get_network_lock_cost(), 200_000_000_000); // Doubles from previous subnet creation @@ -487,38 +493,38 @@ fn test_root_subnet_creation_deletion() { // last_lock: 100000000000, min_lock: 100000000000, last_lock_block: 1, lock_reduction_interval: 2, current_block: 4, mult: 2 lock_cost: 100000000000 assert_eq!(SubtensorModule::get_network_lock_cost(), 100_000_000_000); // Reaches min value assert_ok!(SubtensorModule::register_network( - <::RuntimeOrigin>::signed(owner) + <::RuntimeOrigin>::signed(owner), )); // last_lock: 100000000000, min_lock: 100000000000, last_lock_block: 4, lock_reduction_interval: 2, current_block: 4, mult: 2 lock_cost: 200000000000 assert_eq!(SubtensorModule::get_network_lock_cost(), 200_000_000_000); // Doubles from previous subnet creation step_block(1); // last_lock: 100000000000, min_lock: 100000000000, last_lock_block: 4, lock_reduction_interval: 2, current_block: 5, mult: 2 lock_cost: 150000000000 assert_ok!(SubtensorModule::register_network( - <::RuntimeOrigin>::signed(owner) + <::RuntimeOrigin>::signed(owner), )); // last_lock: 150000000000, min_lock: 100000000000, last_lock_block: 5, lock_reduction_interval: 2, current_block: 5, mult: 2 lock_cost: 300000000000 assert_eq!(SubtensorModule::get_network_lock_cost(), 300_000_000_000); // Doubles from previous subnet creation step_block(1); // last_lock: 150000000000, min_lock: 100000000000, last_lock_block: 5, lock_reduction_interval: 2, current_block: 6, mult: 2 lock_cost: 225000000000 assert_ok!(SubtensorModule::register_network( - <::RuntimeOrigin>::signed(owner) + <::RuntimeOrigin>::signed(owner), )); // last_lock: 225000000000, min_lock: 100000000000, last_lock_block: 6, lock_reduction_interval: 2, current_block: 6, mult: 2 lock_cost: 450000000000 assert_eq!(SubtensorModule::get_network_lock_cost(), 450_000_000_000); // Increasing step_block(1); // last_lock: 225000000000, min_lock: 100000000000, last_lock_block: 6, lock_reduction_interval: 2, current_block: 7, mult: 2 lock_cost: 337500000000 assert_ok!(SubtensorModule::register_network( - <::RuntimeOrigin>::signed(owner) + <::RuntimeOrigin>::signed(owner), )); // last_lock: 337500000000, min_lock: 100000000000, last_lock_block: 7, lock_reduction_interval: 2, current_block: 7, mult: 2 lock_cost: 675000000000 assert_eq!(SubtensorModule::get_network_lock_cost(), 675_000_000_000); // Increasing. 
assert_ok!(SubtensorModule::register_network( - <::RuntimeOrigin>::signed(owner) + <::RuntimeOrigin>::signed(owner), )); // last_lock: 337500000000, min_lock: 100000000000, last_lock_block: 7, lock_reduction_interval: 2, current_block: 7, mult: 2 lock_cost: 675000000000 assert_eq!(SubtensorModule::get_network_lock_cost(), 1_350_000_000_000); // Double increasing. assert_ok!(SubtensorModule::register_network( - <::RuntimeOrigin>::signed(owner) + <::RuntimeOrigin>::signed(owner), )); assert_eq!(SubtensorModule::get_network_lock_cost(), 2_700_000_000_000); // Double increasing again. @@ -538,7 +544,7 @@ fn test_root_subnet_creation_deletion() { fn test_network_pruning() { new_test_ext(1).execute_with(|| { System::set_block_number(0); - migration::migrate_create_root_network::(); + migrations::migrate_create_root_network::migrate_create_root_network::(); assert_eq!(SubtensorModule::get_total_issuance(), 0); @@ -567,7 +573,7 @@ fn test_network_pruning() { 1_000 )); assert_ok!(SubtensorModule::register_network( - <::RuntimeOrigin>::signed(cold) + <::RuntimeOrigin>::signed(cold), )); log::debug!("Adding network with netuid: {}", (i as u16) + 1); assert!(SubtensorModule::if_subnet_exist((i as u16) + 1)); @@ -630,7 +636,7 @@ fn test_network_pruning() { #[test] fn test_network_prune_results() { new_test_ext(1).execute_with(|| { - migration::migrate_create_root_network::(); + migrations::migrate_create_root_network::migrate_create_root_network::(); SubtensorModule::set_network_immunity_period(3); SubtensorModule::set_network_min_lock(0); @@ -640,17 +646,17 @@ fn test_network_prune_results() { SubtensorModule::add_balance_to_coldkey_account(&owner, 1_000_000_000_000_000); assert_ok!(SubtensorModule::register_network( - <::RuntimeOrigin>::signed(owner) + <::RuntimeOrigin>::signed(owner), )); step_block(3); assert_ok!(SubtensorModule::register_network( - <::RuntimeOrigin>::signed(owner) + <::RuntimeOrigin>::signed(owner), )); step_block(3); assert_ok!(SubtensorModule::register_network( - <::RuntimeOrigin>::signed(owner) + <::RuntimeOrigin>::signed(owner), )); step_block(3); @@ -671,7 +677,7 @@ fn test_network_prune_results() { #[test] fn test_weights_after_network_pruning() { new_test_ext(1).execute_with(|| { - migration::migrate_create_root_network::(); + migrations::migrate_create_root_network::migrate_create_root_network::(); assert_eq!(SubtensorModule::get_total_issuance(), 0); @@ -694,7 +700,7 @@ fn test_weights_after_network_pruning() { // Register a network assert_ok!(SubtensorModule::register_network( - <::RuntimeOrigin>::signed(cold) + <::RuntimeOrigin>::signed(cold), )); log::debug!("Adding network with netuid: {}", (i as u16) + 1); @@ -754,7 +760,7 @@ fn test_weights_after_network_pruning() { assert_eq!(latest_weights[0][1], 21845); assert_ok!(SubtensorModule::register_network( - <::RuntimeOrigin>::signed(cold) + <::RuntimeOrigin>::signed(cold), )); // Subnet should not exist, as it would replace a previous subnet. 
@@ -909,7 +915,8 @@ fn test_dissolve_network_ok() { assert!(SubtensorModule::if_subnet_exist(netuid)); assert_ok!(SubtensorModule::dissolve_network( - RuntimeOrigin::signed(owner_coldkey), + RuntimeOrigin::root(), + owner_coldkey, netuid )); assert!(!SubtensorModule::if_subnet_exist(netuid)) @@ -932,7 +939,8 @@ fn test_dissolve_network_refund_coldkey_ok() { assert!(SubtensorModule::if_subnet_exist(netuid)); assert_ok!(SubtensorModule::dissolve_network( - RuntimeOrigin::signed(owner_coldkey), + RuntimeOrigin::root(), + owner_coldkey, netuid )); assert!(!SubtensorModule::if_subnet_exist(netuid)); @@ -956,7 +964,7 @@ fn test_dissolve_network_not_owner_err() { register_ok_neuron(netuid, hotkey, owner_coldkey, 3); assert_err!( - SubtensorModule::dissolve_network(RuntimeOrigin::signed(random_coldkey), netuid), + SubtensorModule::dissolve_network(RuntimeOrigin::root(), random_coldkey, netuid), Error::::NotSubnetOwner ); }); @@ -969,8 +977,78 @@ fn test_dissolve_network_does_not_exist_err() { let coldkey = U256::from(2); assert_err!( - SubtensorModule::dissolve_network(RuntimeOrigin::signed(coldkey), netuid), + SubtensorModule::dissolve_network(RuntimeOrigin::root(), coldkey, netuid), Error::::SubNetworkDoesNotExist ); }); } + +#[test] +fn test_user_add_network_with_identity_fields_ok() { + new_test_ext(1).execute_with(|| { + let coldkey_1 = U256::from(1); + let coldkey_2 = U256::from(2); + let balance_1 = SubtensorModule::get_network_lock_cost() + 10_000; + + let subnet_name_1: Vec = b"GenericSubnet1".to_vec(); + let github_repo_1: Vec = b"GenericSubnet1.com".to_vec(); + let subnet_contact_1: Vec = b"https://www.GenericSubnet1.co".to_vec(); + + let identity_value_1: SubnetIdentity = SubnetIdentityOf { + subnet_name: subnet_name_1.clone(), + github_repo: github_repo_1.clone(), + subnet_contact: subnet_contact_1.clone(), + }; + + let subnet_name_2: Vec = b"DistinctSubnet2".to_vec(); + let github_repo_2: Vec = b"https://github.com/DistinctRepo2".to_vec(); + let subnet_contact_2: Vec = b"https://contact2.example.com".to_vec(); + + let identity_value_2: SubnetIdentity = SubnetIdentityOf { + subnet_name: subnet_name_2.clone(), + github_repo: github_repo_2.clone(), + subnet_contact: subnet_contact_2.clone(), + }; + + SubtensorModule::add_balance_to_coldkey_account(&coldkey_1, balance_1); + + assert_ok!(SubtensorModule::user_add_network( + RuntimeOrigin::signed(coldkey_1), + Some(identity_value_1.clone()) + )); + + let balance_2 = SubtensorModule::get_network_lock_cost() + 10_000; + SubtensorModule::add_balance_to_coldkey_account(&coldkey_2, balance_2); + + assert_ok!(SubtensorModule::user_add_network( + RuntimeOrigin::signed(coldkey_2), + Some(identity_value_2.clone()) + )); + + let stored_identity_1: SubnetIdentity = SubnetIdentities::::get(1).unwrap(); + assert_eq!(stored_identity_1.subnet_name, subnet_name_1); + assert_eq!(stored_identity_1.github_repo, github_repo_1); + assert_eq!(stored_identity_1.subnet_contact, subnet_contact_1); + + let stored_identity_2: SubnetIdentity = SubnetIdentities::::get(2).unwrap(); + assert_eq!(stored_identity_2.subnet_name, subnet_name_2); + assert_eq!(stored_identity_2.github_repo, github_repo_2); + assert_eq!(stored_identity_2.subnet_contact, subnet_contact_2); + + // Now remove the first network. + assert_ok!(SubtensorModule::user_remove_network(coldkey_1, 1)); + + // Verify that the first network and identity have been removed. + assert!(SubnetIdentities::::get(1).is_none()); + + // Ensure the second network and identity are still intact. 
+ let stored_identity_2_after_removal: SubnetIdentity = + SubnetIdentities::::get(2).unwrap(); + assert_eq!(stored_identity_2_after_removal.subnet_name, subnet_name_2); + assert_eq!(stored_identity_2_after_removal.github_repo, github_repo_2); + assert_eq!( + stored_identity_2_after_removal.subnet_contact, + subnet_contact_2 + ); + }); +} diff --git a/pallets/subtensor/tests/senate.rs b/pallets/subtensor/tests/senate.rs index bcec1a63a..e1f33db5e 100644 --- a/pallets/subtensor/tests/senate.rs +++ b/pallets/subtensor/tests/senate.rs @@ -15,7 +15,7 @@ use sp_runtime::{ use frame_system::pallet_prelude::*; use frame_system::Config; use pallet_collective::Event as CollectiveEvent; -use pallet_subtensor::migration; +use pallet_subtensor::migrations; use pallet_subtensor::Error; pub fn new_test_ext() -> sp_io::TestExternalities { @@ -57,7 +57,7 @@ fn record(event: RuntimeEvent) -> EventRecord { #[test] fn test_senate_join_works() { new_test_ext().execute_with(|| { - migration::migrate_create_root_network::(); + migrations::migrate_create_root_network::migrate_create_root_network::(); let netuid: u16 = 1; let tempo: u16 = 13; @@ -125,7 +125,7 @@ fn test_senate_join_works() { #[test] fn test_senate_vote_works() { new_test_ext().execute_with(|| { - migration::migrate_create_root_network::(); + migrations::migrate_create_root_network::migrate_create_root_network::(); let netuid: u16 = 1; let tempo: u16 = 13; @@ -233,7 +233,7 @@ fn test_senate_vote_works() { #[test] fn test_senate_vote_not_member() { new_test_ext().execute_with(|| { - migration::migrate_create_root_network::(); + migrations::migrate_create_root_network::migrate_create_root_network::(); let netuid: u16 = 1; let tempo: u16 = 13; @@ -294,7 +294,7 @@ fn test_senate_vote_not_member() { #[test] fn test_senate_leave_works() { new_test_ext().execute_with(|| { - migration::migrate_create_root_network::(); + migrations::migrate_create_root_network::migrate_create_root_network::(); let netuid: u16 = 1; let tempo: u16 = 13; @@ -362,7 +362,7 @@ fn test_senate_leave_works() { #[test] fn test_senate_leave_vote_removal() { new_test_ext().execute_with(|| { - migration::migrate_create_root_network::(); + migrations::migrate_create_root_network::migrate_create_root_network::(); let netuid: u16 = 1; let tempo: u16 = 13; @@ -501,7 +501,7 @@ fn test_senate_leave_vote_removal() { #[test] fn test_senate_not_leave_when_stake_removed() { new_test_ext().execute_with(|| { - migration::migrate_create_root_network::(); + migrations::migrate_create_root_network::migrate_create_root_network::(); let netuid: u16 = 1; let tempo: u16 = 13; @@ -582,7 +582,7 @@ fn test_senate_not_leave_when_stake_removed() { fn test_senate_join_current_delegate() { // Test that a current delegate can join the senate new_test_ext().execute_with(|| { - migration::migrate_create_root_network::(); + migrations::migrate_create_root_network::migrate_create_root_network::(); let netuid: u16 = 1; let tempo: u16 = 13; @@ -656,7 +656,7 @@ fn test_senate_join_current_delegate() { fn test_adjust_senate_events() { // Test the events emitted after adjusting the senate successfully new_test_ext().execute_with(|| { - migration::migrate_create_root_network::(); + migrations::migrate_create_root_network::migrate_create_root_network::(); let netuid: u16 = 1; let tempo: u16 = 13; diff --git a/pallets/subtensor/tests/serving.rs b/pallets/subtensor/tests/serving.rs index b87b7fd10..49a963951 100644 --- a/pallets/subtensor/tests/serving.rs +++ b/pallets/subtensor/tests/serving.rs @@ -1,11 +1,14 @@ use 
crate::mock::*; mod mock; +use frame_support::assert_noop; +use frame_support::pallet_prelude::Weight; use frame_support::{ assert_ok, dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, Pays}, }; use frame_system::Config; use pallet_subtensor::Error; +use pallet_subtensor::*; use sp_core::U256; mod test { @@ -548,3 +551,466 @@ fn test_serving_is_invalid_ipv6_address() { )); }); } + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test serving -- test_do_set_identity --exact --nocapture +#[test] +fn test_do_set_identity() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let netuid = 1; + + // Register a hotkey for the coldkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Prepare identity data + let name = b"Alice".to_vec(); + let url = b"https://alice.com".to_vec(); + let image = b"alice.jpg".to_vec(); + let discord = b"alice#1234".to_vec(); + let description = b"Alice's identity".to_vec(); + let additional = b"Additional info".to_vec(); + + // Set identity + assert_ok!(SubtensorModule::do_set_identity( + <::RuntimeOrigin>::signed(coldkey), + name.clone(), + url.clone(), + image.clone(), + discord.clone(), + description.clone(), + additional.clone() + )); + + // Check if identity is set correctly + let stored_identity = Identities::::get(coldkey).expect("Identity should be set"); + assert_eq!(stored_identity.name, name); + assert_eq!(stored_identity.url, url); + assert_eq!(stored_identity.image, image); + assert_eq!(stored_identity.discord, discord); + assert_eq!(stored_identity.description, description); + assert_eq!(stored_identity.additional, additional); + + // Test setting identity with no registered hotkey + let coldkey_without_hotkey = U256::from(3); + assert_noop!( + SubtensorModule::do_set_identity( + <::RuntimeOrigin>::signed(coldkey_without_hotkey), + name.clone(), + url.clone(), + image.clone(), + discord.clone(), + description.clone(), + additional.clone() + ), + Error::::HotKeyNotRegisteredInNetwork + ); + + // Test updating an existing identity + let new_name = b"Alice Updated".to_vec(); + let new_url = b"https://alice-updated.com".to_vec(); + assert_ok!(SubtensorModule::do_set_identity( + <::RuntimeOrigin>::signed(coldkey), + new_name.clone(), + new_url.clone(), + image.clone(), + discord.clone(), + description.clone(), + additional.clone() + )); + + let updated_identity = + Identities::::get(coldkey).expect("Updated identity should be set"); + assert_eq!(updated_identity.name, new_name); + assert_eq!(updated_identity.url, new_url); + + // Test setting identity with invalid data (exceeding 512 bytes total) + let long_data = vec![0; 513]; + assert_noop!( + SubtensorModule::do_set_identity( + <::RuntimeOrigin>::signed(coldkey), + long_data.clone(), + long_data.clone(), + long_data.clone(), + long_data.clone(), + long_data.clone(), + long_data.clone() + ), + Error::::InvalidIdentity + ); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test serving -- test_is_valid_identity --exact --nocapture +#[test] +fn test_is_valid_identity() { + new_test_ext(1).execute_with(|| { + // Test valid identity + let valid_identity = ChainIdentity { + name: vec![0; 256], + url: vec![0; 256], + image: vec![0; 1024], + discord: vec![0; 256], + description: vec![0; 1024], + additional: vec![0; 1024], + }; + assert!(SubtensorModule::is_valid_identity(&valid_identity)); + + // Test identity with total length exactly at the maximum + let max_length_identity = ChainIdentity { + name: vec![0; 
256], + url: vec![0; 256], + image: vec![0; 1024], + discord: vec![0; 256], + description: vec![0; 1024], + additional: vec![0; 1024], + }; + assert!(SubtensorModule::is_valid_identity(&max_length_identity)); + + // Test identity with total length exceeding the maximum + let invalid_length_identity = ChainIdentity { + name: vec![0; 257], + url: vec![0; 256], + image: vec![0; 1024], + discord: vec![0; 256], + description: vec![0; 1024], + additional: vec![0; 1024], + }; + assert!(!SubtensorModule::is_valid_identity( + &invalid_length_identity + )); + + // Test identity with one field exceeding its maximum + let invalid_field_identity = ChainIdentity { + name: vec![0; 257], + url: vec![0; 256], + image: vec![0; 1024], + discord: vec![0; 256], + description: vec![0; 1024], + additional: vec![0; 1024], + }; + assert!(!SubtensorModule::is_valid_identity(&invalid_field_identity)); + + // Test identity with empty fields + let empty_identity = ChainIdentity { + name: vec![], + url: vec![], + image: vec![], + discord: vec![], + description: vec![], + additional: vec![], + }; + assert!(SubtensorModule::is_valid_identity(&empty_identity)); + + // Test identity with some empty and some filled fields + let mixed_identity = ChainIdentity { + name: b"Alice".to_vec(), + url: b"https://alice.com".to_vec(), + image: vec![], + discord: b"alice#1234".to_vec(), + description: vec![], + additional: b"Additional info".to_vec(), + }; + assert!(SubtensorModule::is_valid_identity(&mixed_identity)); + + // Test identity with all fields at maximum allowed length + let max_field_identity = ChainIdentity { + name: vec![0; 256], + url: vec![0; 256], + image: vec![0; 1024], + discord: vec![0; 256], + description: vec![0; 1024], + additional: vec![0; 1024], + }; + assert!(SubtensorModule::is_valid_identity(&max_field_identity)); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test serving -- test_set_and_get_identity --exact --nocapture +#[test] +fn test_set_and_get_identity() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let netuid = 1; + + // Register a hotkey for the coldkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Prepare identity data + let name = b"Bob".to_vec(); + let url = b"https://bob.com".to_vec(); + let image = b"bob.jpg".to_vec(); + let discord = b"bob#5678".to_vec(); + let description = b"Bob's identity".to_vec(); + let additional = b"More about Bob".to_vec(); + + // Set identity + assert_ok!(SubtensorModule::do_set_identity( + <::RuntimeOrigin>::signed(coldkey), + name.clone(), + url.clone(), + image.clone(), + discord.clone(), + description.clone(), + additional.clone() + )); + + // Get and verify identity + let stored_identity = Identities::::get(coldkey).expect("Identity should be set"); + assert_eq!(stored_identity.name, name); + assert_eq!(stored_identity.url, url); + assert_eq!(stored_identity.image, image); + assert_eq!(stored_identity.discord, discord); + assert_eq!(stored_identity.description, description); + assert_eq!(stored_identity.additional, additional); + + // Update identity + let new_name = b"Bobby".to_vec(); + let new_url = b"https://bobby.com".to_vec(); + assert_ok!(SubtensorModule::do_set_identity( + <::RuntimeOrigin>::signed(coldkey), + new_name.clone(), + new_url.clone(), + image.clone(), + discord.clone(), + description.clone(), + additional.clone() + )); + + // Get and verify updated identity + let updated_identity = + Identities::::get(coldkey).expect("Updated identity 
should be set"); + assert_eq!(updated_identity.name, new_name); + assert_eq!(updated_identity.url, new_url); + assert_eq!(updated_identity.image, image); + assert_eq!(updated_identity.discord, discord); + assert_eq!(updated_identity.description, description); + assert_eq!(updated_identity.additional, additional); + + // Verify non-existent identity + let non_existent_coldkey = U256::from(999); + assert!(Identities::::get(non_existent_coldkey).is_none()); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test serving -- test_migrate_set_hotkey_identities --exact --nocapture +#[test] +fn test_migrate_set_hotkey_identities() { + new_test_ext(1).execute_with(|| { + // Run the migration + let weight = + pallet_subtensor::migrations::migrate_chain_identity::migrate_set_hotkey_identities::< + Test, + >(); + + // Assert that the migration has run + assert!(HasMigrationRun::::get(b"migrate_identities".to_vec())); + + // Verify that some identities were set + // Note: This assumes that at least one valid identity was in the JSON file + let mut identity_count = 0; + for (_, _) in Identities::::iter() { + identity_count += 1; + } + assert!( + identity_count > 0, + "No identities were set during migration" + ); + + // Verify that the weight is non-zero + assert!( + weight != Weight::zero(), + "Migration weight should be non-zero" + ); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test serving -- test_do_set_subnet_identity --exact --nocapture +#[test] +fn test_do_set_subnet_identity() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let netuid = 1; + + // Register a hotkey for the coldkey + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + + // Set coldkey as the owner of the subnet + SubnetOwner::::insert(netuid, coldkey); + + // Prepare subnet identity data + let subnet_name = b"Test Subnet".to_vec(); + let github_repo = b"https://github.com/test/subnet".to_vec(); + let subnet_contact = b"contact@testsubnet.com".to_vec(); + + // Set subnet identity + assert_ok!(SubtensorModule::do_set_subnet_identity( + <::RuntimeOrigin>::signed(coldkey), + netuid, + subnet_name.clone(), + github_repo.clone(), + subnet_contact.clone() + )); + + // Check if subnet identity is set correctly + let stored_identity = + SubnetIdentities::::get(netuid).expect("Subnet identity should be set"); + assert_eq!(stored_identity.subnet_name, subnet_name); + assert_eq!(stored_identity.github_repo, github_repo); + assert_eq!(stored_identity.subnet_contact, subnet_contact); + + // Test setting subnet identity by non-owner + let non_owner_coldkey = U256::from(2); + assert_noop!( + SubtensorModule::do_set_subnet_identity( + <::RuntimeOrigin>::signed(non_owner_coldkey), + netuid, + subnet_name.clone(), + github_repo.clone(), + subnet_contact.clone() + ), + Error::::NotSubnetOwner + ); + + // Test updating an existing subnet identity + let new_subnet_name = b"Updated Subnet".to_vec(); + let new_github_repo = b"https://github.com/test/subnet-updated".to_vec(); + assert_ok!(SubtensorModule::do_set_subnet_identity( + <::RuntimeOrigin>::signed(coldkey), + netuid, + new_subnet_name.clone(), + new_github_repo.clone(), + subnet_contact.clone() + )); + + let updated_identity = + SubnetIdentities::::get(netuid).expect("Updated subnet identity should be set"); + assert_eq!(updated_identity.subnet_name, new_subnet_name); + assert_eq!(updated_identity.github_repo, new_github_repo); + + // Test setting subnet identity with invalid data 
(exceeding 1024 bytes total) + let long_data = vec![0; 1025]; + assert_noop!( + SubtensorModule::do_set_subnet_identity( + <::RuntimeOrigin>::signed(coldkey), + netuid, + long_data.clone(), + long_data.clone(), + long_data.clone() + ), + Error::::InvalidIdentity + ); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test serving -- test_is_valid_subnet_identity --exact --nocapture +#[test] +fn test_is_valid_subnet_identity() { + new_test_ext(1).execute_with(|| { + // Test valid subnet identity + let valid_identity = SubnetIdentity { + subnet_name: vec![0; 256], + github_repo: vec![0; 1024], + subnet_contact: vec![0; 1024], + }; + assert!(SubtensorModule::is_valid_subnet_identity(&valid_identity)); + + // Test subnet identity with total length exactly at the maximum + let max_length_identity = SubnetIdentity { + subnet_name: vec![0; 256], + github_repo: vec![0; 1024], + subnet_contact: vec![0; 1024], + }; + assert!(SubtensorModule::is_valid_subnet_identity( + &max_length_identity + )); + + // Test subnet identity with total length exceeding the maximum + let invalid_length_identity = SubnetIdentity { + subnet_name: vec![0; 257], + github_repo: vec![0; 1024], + subnet_contact: vec![0; 1024], + }; + assert!(!SubtensorModule::is_valid_subnet_identity( + &invalid_length_identity + )); + + // Test subnet identity with one field exceeding its maximum + let invalid_field_identity = SubnetIdentity { + subnet_name: vec![0; 257], + github_repo: vec![0; 1024], + subnet_contact: vec![0; 1024], + }; + assert!(!SubtensorModule::is_valid_subnet_identity( + &invalid_field_identity + )); + + // Test subnet identity with empty fields + let empty_identity = SubnetIdentity { + subnet_name: vec![], + github_repo: vec![], + subnet_contact: vec![], + }; + assert!(SubtensorModule::is_valid_subnet_identity(&empty_identity)); + + // Test subnet identity with some empty and some filled fields + let mixed_identity = SubnetIdentity { + subnet_name: b"Test Subnet".to_vec(), + github_repo: vec![], + subnet_contact: b"contact@testsubnet.com".to_vec(), + }; + assert!(SubtensorModule::is_valid_subnet_identity(&mixed_identity)); + }); +} + +#[test] +fn test_set_identity_for_non_existent_subnet() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let netuid = 999; // Non-existent subnet ID + + // Subnet identity data + let subnet_name = b"Non-existent Subnet".to_vec(); + let github_repo = b"https://github.com/test/nonexistent".to_vec(); + let subnet_contact = b"contact@nonexistent.com".to_vec(); + + // Attempt to set identity for a non-existent subnet + assert_noop!( + SubtensorModule::do_set_subnet_identity( + <::RuntimeOrigin>::signed(coldkey), + netuid, + subnet_name.clone(), + github_repo.clone(), + subnet_contact.clone() + ), + Error::::NotSubnetOwner // Since there's no owner, it should fail + ); + }); +} + +#[test] +fn test_set_subnet_identity_dispatch_info_ok() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let subnet_name: Vec = b"JesusSubnet".to_vec(); + let github_repo: Vec = b"bible.com".to_vec(); + let subnet_contact: Vec = b"https://www.vatican.va".to_vec(); + + let call: RuntimeCall = RuntimeCall::SubtensorModule(SubtensorCall::set_subnet_identity { + netuid, + subnet_name, + github_repo, + subnet_contact, + }); + + let dispatch_info: DispatchInfo = call.get_dispatch_info(); + + assert_eq!(dispatch_info.class, DispatchClass::Normal); + assert_eq!(dispatch_info.pays_fee, Pays::Yes); + }); +} diff --git a/pallets/subtensor/tests/staking.rs 
b/pallets/subtensor/tests/staking.rs index 12d299d8f..f053c7ca6 100644 --- a/pallets/subtensor/tests/staking.rs +++ b/pallets/subtensor/tests/staking.rs @@ -1,21 +1,14 @@ #![allow(clippy::unwrap_used)] #![allow(clippy::arithmetic_side_effects)] -use frame_support::pallet_prelude::{ - InvalidTransaction, TransactionValidity, TransactionValidityError, -}; -use frame_support::traits::{OnFinalize, OnIdle, OnInitialize}; -use frame_support::weights::Weight; use frame_support::{assert_err, assert_noop, assert_ok, traits::Currency}; use frame_system::Config; mod mock; use frame_support::dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, Pays}; use frame_support::sp_runtime::DispatchError; use mock::*; -use pallet_balances::Call as BalancesCall; use pallet_subtensor::*; use sp_core::{H256, U256}; -use sp_runtime::traits::SignedExtension; /*********************************************************** staking::add_stake() tests @@ -1194,3003 +1187,867 @@ fn test_delegate_stake_division_by_zero_check() { <::RuntimeOrigin>::signed(coldkey), hotkey )); - SubtensorModule::emit_inflation_through_hotkey_account(&hotkey, 0, 1000); }); } -#[test] -fn test_full_with_delegating() { - new_test_ext(1).execute_with(|| { - let netuid = 1; - // Make two accounts. - let hotkey0 = U256::from(1); - let hotkey1 = U256::from(2); - - let coldkey0 = U256::from(3); - let coldkey1 = U256::from(4); - add_network(netuid, 0, 0); - SubtensorModule::set_max_registrations_per_block(netuid, 4); - SubtensorModule::set_target_registrations_per_interval(netuid, 4); - SubtensorModule::set_max_allowed_uids(netuid, 4); // Allow all 4 to be registered at once - SubtensorModule::set_target_stakes_per_interval(10); // Increase max stakes per interval - - // Neither key can add stake because they dont have fundss. - assert_eq!( - SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - 60000 - ), - Err(Error::::NotEnoughBalanceToStake.into()) - ); - assert_eq!( - SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey1), - hotkey1, - 60000 - ), - Err(Error::::NotEnoughBalanceToStake.into()) - ); - - // Add balances. - SubtensorModule::add_balance_to_coldkey_account(&coldkey0, 60000); - SubtensorModule::add_balance_to_coldkey_account(&coldkey1, 60000); - - // We have enough, but the keys are not registered. - assert_eq!( - SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - 100 - ), - Err(Error::::HotKeyAccountNotExists.into()) - ); - assert_eq!( - SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - 100 - ), - Err(Error::::HotKeyAccountNotExists.into()) - ); - - // Cant remove either. - assert_eq!( - SubtensorModule::remove_stake( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - 10 - ), - Err(Error::::HotKeyAccountNotExists.into()) - ); - assert_eq!( - SubtensorModule::remove_stake( - <::RuntimeOrigin>::signed(coldkey1), - hotkey1, - 10 - ), - Err(Error::::HotKeyAccountNotExists.into()) - ); - assert_eq!( - SubtensorModule::remove_stake( - <::RuntimeOrigin>::signed(coldkey0), - hotkey1, - 10 - ), - Err(Error::::HotKeyAccountNotExists.into()) - ); - assert_eq!( - SubtensorModule::remove_stake( - <::RuntimeOrigin>::signed(coldkey1), - hotkey0, - 10 - ), - Err(Error::::HotKeyAccountNotExists.into()) - ); - - // Neither key can become a delegate either because we are not registered. 
- assert_eq!( - SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - 100 - ), - Err(Error::::HotKeyAccountNotExists.into()) - ); - assert_eq!( - SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - 100 - ), - Err(Error::::HotKeyAccountNotExists.into()) - ); - - // Register the 2 neurons to a new network. - register_ok_neuron(netuid, hotkey0, coldkey0, 124124); - register_ok_neuron(netuid, hotkey1, coldkey1, 987907); - assert_eq!( - SubtensorModule::get_owning_coldkey_for_hotkey(&hotkey0), - coldkey0 - ); - assert_eq!( - SubtensorModule::get_owning_coldkey_for_hotkey(&hotkey1), - coldkey1 - ); - assert!(SubtensorModule::coldkey_owns_hotkey(&coldkey0, &hotkey0)); - assert!(SubtensorModule::coldkey_owns_hotkey(&coldkey1, &hotkey1)); - - // We try to delegate stake but niether are allowing delegation. - assert!(!SubtensorModule::hotkey_is_delegate(&hotkey0)); - assert!(!SubtensorModule::hotkey_is_delegate(&hotkey1)); - assert_eq!( - SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey0), - hotkey1, - 100 - ), - Err(Error::::HotKeyNotDelegateAndSignerNotOwnHotKey.into()) - ); - assert_eq!( - SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey1), - hotkey0, - 100 - ), - Err(Error::::HotKeyNotDelegateAndSignerNotOwnHotKey.into()) - ); - - // We stake and all is ok. - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey0), - 0 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey0), - 0 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey0), - 0 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey0), - 0 - ); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - 100 - )); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey1), - hotkey1, - 100 - )); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey0), - 100 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey1), - 0 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey0), - 0 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey1), - 100 - ); - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey0), 100); - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey1), 100); - //assert_eq!( SubtensorModule::get_total_stake_for_coldkey( &coldkey0 ), 100 ); - //assert_eq!( SubtensorModule::get_total_stake_for_coldkey( &coldkey1 ), 100 ); - assert_eq!(SubtensorModule::get_total_stake(), 200); - - // Cant remove these funds because we are not delegating. - assert_eq!( - SubtensorModule::remove_stake( - <::RuntimeOrigin>::signed(coldkey0), - hotkey1, - 10 - ), - Err(Error::::HotKeyNotDelegateAndSignerNotOwnHotKey.into()) - ); - assert_eq!( - SubtensorModule::remove_stake( - <::RuntimeOrigin>::signed(coldkey1), - hotkey0, - 10 - ), - Err(Error::::HotKeyNotDelegateAndSignerNotOwnHotKey.into()) - ); - - // Emit inflation through non delegates. 
- SubtensorModule::emit_inflation_through_hotkey_account(&hotkey0, 0, 100); - SubtensorModule::emit_inflation_through_hotkey_account(&hotkey1, 0, 100); - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey0), 200); - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey1), 200); - - // Try allowing the keys to become delegates, fails because of incorrect coldkeys. - // Set take to be 0. - assert_eq!( - SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(coldkey0), - hotkey1, - 0 - ), - Err(Error::::NonAssociatedColdKey.into()) - ); - assert_eq!( - SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(coldkey1), - hotkey0, - 0 - ), - Err(Error::::NonAssociatedColdKey.into()) - ); - - // Become delegates all is ok. - assert_ok!(SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - SubtensorModule::get_min_take() - )); - assert_ok!(SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(coldkey1), - hotkey1, - SubtensorModule::get_min_take() - )); - assert!(SubtensorModule::hotkey_is_delegate(&hotkey0)); - assert!(SubtensorModule::hotkey_is_delegate(&hotkey1)); - - // Cant become a delegate twice. - assert_eq!( - SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - SubtensorModule::get_min_take() - ), - Err(Error::::HotKeyAlreadyDelegate.into()) - ); - assert_eq!( - SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(coldkey1), - hotkey1, - u16::MAX / 10 - ), - Err(Error::::HotKeyAlreadyDelegate.into()) - ); - - // This add stake works for delegates. - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey0), - 200 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey1), - 0 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey0), - 0 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey1), - 200 - ); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey0), - hotkey1, - 200 - )); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey1), - hotkey0, - 300 - )); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey0), - 200 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey1), - 200 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey0), - 300 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey1), - 200 - ); - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey0), 500); - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey1), 400); - //assert_eq!( SubtensorModule::get_total_stake_for_coldkey( &coldkey0 ), 400 ); - //assert_eq!( SubtensorModule::get_total_stake_for_coldkey( &coldkey1 ), 500 ); - assert_eq!(SubtensorModule::get_total_stake(), 900); - - // Lets emit inflation through the hot and coldkeys. 
- SubtensorModule::emit_inflation_through_hotkey_account(&hotkey0, 0, 1000); - SubtensorModule::emit_inflation_through_hotkey_account(&hotkey1, 0, 1000); - - // validator_take = take * validator_emission = 10% * 1000 = 100 - // old_stake + (validator_emission - validator_take) * stake_for_coldkey_and_hotkey / total_stake_for_hotkey + validator_take - // = - // 200 + 900 * 200 / 500 + 100 = 660 - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey0), - 654 - ); - // validator_take = take * validator_emission = 9% * 1000 = 90 - // old_stake + (validator_emission - validator_take) * stake_for_coldkey_and_hotkey / total_stake_for_hotkey - // = - // 200 + (1000 - 90) * 200 / 400 = 655 ~ 654 - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey1), - 655 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey0), - 846 - ); // 300 + 910 x ( 300 / 500 ) = 300 + 546 = 846 - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey1), - 745 - ); // 200 + 1090 x ( 200 / 400 ) = 300 + 545 = 745 - assert_eq!(SubtensorModule::get_total_stake(), 2900); // 600 + 700 + 900 + 750 = 2900 - - // // Try unstaking too much. - assert_eq!( - SubtensorModule::remove_stake( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - 100000 - ), - Err(Error::::NotEnoughStakeToWithdraw.into()) - ); - assert_eq!( - SubtensorModule::remove_stake( - <::RuntimeOrigin>::signed(coldkey1), - hotkey1, - 100000 - ), - Err(Error::::NotEnoughStakeToWithdraw.into()) - ); - assert_eq!( - SubtensorModule::remove_stake( - <::RuntimeOrigin>::signed(coldkey0), - hotkey1, - 100000 - ), - Err(Error::::NotEnoughStakeToWithdraw.into()) - ); - assert_eq!( - SubtensorModule::remove_stake( - <::RuntimeOrigin>::signed(coldkey1), - hotkey0, - 100000 - ), - Err(Error::::NotEnoughStakeToWithdraw.into()) - ); - - // unstaking is ok. - assert_ok!(SubtensorModule::remove_stake( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - 100 - )); - assert_ok!(SubtensorModule::remove_stake( - <::RuntimeOrigin>::signed(coldkey1), - hotkey1, - 100 - )); - assert_ok!(SubtensorModule::remove_stake( - <::RuntimeOrigin>::signed(coldkey0), - hotkey1, - 100 - )); - assert_ok!(SubtensorModule::remove_stake( - <::RuntimeOrigin>::signed(coldkey1), - hotkey0, - 100 - )); - - // All the amounts have been decreased. - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey0), - 554 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey1), - 555 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey0), - 746 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey1), - 645 - ); - - // Lets register and stake a new key. 
- let hotkey2 = U256::from(5); - let coldkey2 = U256::from(6); - register_ok_neuron(netuid, hotkey2, coldkey2, 248_123); - assert!(SubtensorModule::is_hotkey_registered_on_any_network( - &hotkey0 - )); - assert!(SubtensorModule::is_hotkey_registered_on_any_network( - &hotkey1 - )); - - SubtensorModule::add_balance_to_coldkey_account(&coldkey2, 60_000); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey2), - hotkey2, - 1000 - )); - assert_ok!(SubtensorModule::remove_stake( - <::RuntimeOrigin>::signed(coldkey2), - hotkey2, - 100 - )); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey2, &hotkey2), - 900 - ); - assert_eq!( - SubtensorModule::remove_stake( - <::RuntimeOrigin>::signed(coldkey0), - hotkey2, - 10 - ), - Err(Error::::HotKeyNotDelegateAndSignerNotOwnHotKey.into()) - ); - assert_eq!( - SubtensorModule::remove_stake( - <::RuntimeOrigin>::signed(coldkey1), - hotkey2, - 10 - ), - Err(Error::::HotKeyNotDelegateAndSignerNotOwnHotKey.into()) - ); - - // Lets make this new key a delegate with a 10% take. - assert_ok!(SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(coldkey2), - hotkey2, - SubtensorModule::get_min_take() - )); - - // Add nominate some stake. - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey0), - hotkey2, - 1_000 - )); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey1), - hotkey2, - 1_000 - )); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey2), - hotkey2, - 100 - )); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey2, &hotkey2), - 1_000 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey2), - 1_000 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey2), - 1_000 - ); - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey2), 3_000); - assert_eq!(SubtensorModule::get_total_stake(), 5_500); - - // Lets emit inflation through this new key with distributed ownership. - SubtensorModule::emit_inflation_through_hotkey_account(&hotkey2, 0, 1000); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey2, &hotkey2), - 1_394 - ); // 1000 + 94 + 900 * (1000/3000) = 1400 - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey2), - 1_303 - ); // 1000 + 900 * (1000/3000) = 1300 - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey2), - 1_303 - ); // 1000 + 900 * (1000/3000) = 1300 - assert_eq!(SubtensorModule::get_total_stake(), 6_500); // before + 1_000 = 5_500 + 1_000 = 6_500 - - step_block(1); - - // Lets register and stake a new key. - let hotkey3 = U256::from(7); - let coldkey3 = U256::from(8); - register_ok_neuron(netuid, hotkey3, coldkey3, 4124124); - SubtensorModule::add_balance_to_coldkey_account(&coldkey3, 60000); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey3), - hotkey3, - 1000 - )); - - step_block(3); - - assert_ok!(SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(coldkey3), - hotkey3, - SubtensorModule::get_min_take() - )); // Full take. 
- assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey0), - hotkey3, - 1000 - )); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey1), - hotkey3, - 1000 - )); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey2), - hotkey3, - 1000 - )); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey3), - 1000 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey3), - 1000 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey2, &hotkey3), - 1000 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey3, &hotkey3), - 1000 - ); - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey3), 4000); - assert_eq!(SubtensorModule::get_total_stake(), 10_500); - SubtensorModule::emit_inflation_through_hotkey_account(&hotkey3, 0, 1000); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey3), - 1227 - ); // 1000 + 90% * 1000 * 1000/4000 = 1225 - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey3), - 1227 - ); // 1000 + 90% * 1000 * 1000/4000 = 1225 - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey2, &hotkey3), - 1227 - ); // 1000 + 90% * 1000 * 1000/4000 = 1225 - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey3, &hotkey3), - 1319 - ); // 1000 + 25 * 3 + 1000 * 1000/4000 = 1325 - assert_eq!(SubtensorModule::get_total_stake(), 11_500); // before + 1_000 = 10_500 + 1_000 = 11_500 - }); -} - -// Verify delegates with servers get the full server inflation. -#[test] -fn test_full_with_delegating_some_servers() { - new_test_ext(1).execute_with(|| { - let netuid = 1; - // Make two accounts. - let hotkey0 = U256::from(1); - let hotkey1 = U256::from(2); - - let coldkey0 = U256::from(3); - let coldkey1 = U256::from(4); - SubtensorModule::set_max_registrations_per_block(netuid, 4); - SubtensorModule::set_max_allowed_uids(netuid, 10); // Allow at least 10 to be registered at once, so no unstaking occurs - SubtensorModule::set_target_stakes_per_interval(10); // Increase max stakes per interval - - // Neither key can add stake because they dont have fundss. - assert_eq!( - SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - 60000 - ), - Err(Error::::NotEnoughBalanceToStake.into()) - ); - assert_eq!( - SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey1), - hotkey1, - 60000 - ), - Err(Error::::NotEnoughBalanceToStake.into()) - ); - - // Add balances. - SubtensorModule::add_balance_to_coldkey_account(&coldkey0, 60000); - SubtensorModule::add_balance_to_coldkey_account(&coldkey1, 60000); - - // Register the 2 neurons to a new network. - let netuid = 1; - add_network(netuid, 0, 0); - register_ok_neuron(netuid, hotkey0, coldkey0, 124124); - register_ok_neuron(netuid, hotkey1, coldkey1, 987907); - assert_eq!( - SubtensorModule::get_owning_coldkey_for_hotkey(&hotkey0), - coldkey0 - ); - assert_eq!( - SubtensorModule::get_owning_coldkey_for_hotkey(&hotkey1), - coldkey1 - ); - assert!(SubtensorModule::coldkey_owns_hotkey(&coldkey0, &hotkey0)); - assert!(SubtensorModule::coldkey_owns_hotkey(&coldkey1, &hotkey1)); - - // We stake and all is ok. 
- assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey0), - 0 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey0), - 0 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey0), - 0 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey0), - 0 - ); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - 100 - )); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey1), - hotkey1, - 100 - )); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey0), - 100 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey1), - 0 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey0), - 0 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey1), - 100 - ); - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey0), 100); - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey1), 100); - assert_eq!(SubtensorModule::get_total_stake(), 200); - - // Emit inflation through non delegates. - SubtensorModule::emit_inflation_through_hotkey_account(&hotkey0, 0, 100); - SubtensorModule::emit_inflation_through_hotkey_account(&hotkey1, 0, 100); - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey0), 200); - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey1), 200); - - // Become delegates all is ok. - assert_ok!(SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - SubtensorModule::get_min_take() - )); - assert_ok!(SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(coldkey1), - hotkey1, - SubtensorModule::get_min_take() - )); - assert!(SubtensorModule::hotkey_is_delegate(&hotkey0)); - assert!(SubtensorModule::hotkey_is_delegate(&hotkey1)); - - // This add stake works for delegates. - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey0), - 200 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey1), - 0 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey0), - 0 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey1), - 200 - ); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey0), - hotkey1, - 200 - )); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey1), - hotkey0, - 300 - )); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey0), - 200 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey1), - 200 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey0), - 300 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey1), - 200 - ); - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey0), 500); - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey1), 400); - assert_eq!(SubtensorModule::get_total_stake(), 900); - - // Lets emit inflation through the hot and coldkeys. - // fist emission arg is for a server. This should only go to the owner of the hotkey. - SubtensorModule::emit_inflation_through_hotkey_account(&hotkey0, 200, 1_000); // 1_200 total emission. 
- SubtensorModule::emit_inflation_through_hotkey_account(&hotkey1, 123, 2_000); // 2_123 total emission. - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey0), - 854 - ); // 200 + (200 + 910 x ( 200 / 500 )) = 200 + (200 + 400) + 60 = 854 - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey0), - 846 - ); // 300 + 910 x ( 300 / 500 ) = 300 + 546 = 846 - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey0), 1_700); // initial + server emission + validator emission = 799 + 899 = 1_698 - - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey1), - 1_110 - ); // 200 + (0 + 2000 x ( 200 / 400 )) - 100 = 200 + (1000) - 100= 1_110 - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey1), - 1_413 - ); // 200 + (123 + 2000 x ( 200 / 400 )) + 100 = 200 + (1_200)+ 100 = 1_423 - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey1), 2_523); // 400 + 2_123 - assert_eq!(SubtensorModule::get_total_stake(), 4_223); // 2_100 + 2_123 = 4_223 - - // Lets emit MORE inflation through the hot and coldkeys. - // This time only server emission. This should go to the owner of the hotkey. - SubtensorModule::emit_inflation_through_hotkey_account(&hotkey0, 350, 0); - SubtensorModule::emit_inflation_through_hotkey_account(&hotkey1, 150, 0); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey0), - 1_204 - ); // + 350 + 54 = 1_204 - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey1), - 1_110 - ); // No change. - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey0), - 846 - ); // No change. - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey1), - 1_563 - ); // 1_323 + 150 + 90 = 1_573 - assert_eq!(SubtensorModule::get_total_stake(), 4_723); // 4_223 + 500 = 4_823 - - // Lets register and stake a new key. - let hotkey2 = U256::from(5); - let coldkey2 = U256::from(6); - register_ok_neuron(netuid, hotkey2, coldkey2, 248123); - SubtensorModule::add_balance_to_coldkey_account(&coldkey2, 60_000); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey2), - hotkey2, - 1_000 - )); - assert_ok!(SubtensorModule::remove_stake( - <::RuntimeOrigin>::signed(coldkey2), - hotkey2, - 100 - )); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey2, &hotkey2), - 900 - ); - assert_eq!( - SubtensorModule::remove_stake( - <::RuntimeOrigin>::signed(coldkey0), - hotkey2, - 10 - ), - Err(Error::::HotKeyNotDelegateAndSignerNotOwnHotKey.into()) - ); - assert_eq!( - SubtensorModule::remove_stake( - <::RuntimeOrigin>::signed(coldkey1), - hotkey2, - 10 - ), - Err(Error::::HotKeyNotDelegateAndSignerNotOwnHotKey.into()) - ); - - assert_eq!(SubtensorModule::get_total_stake(), 5_623); // 4_723 + 900 = 5_623 - - // Lets make this new key a delegate with a 9% take. - assert_ok!(SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(coldkey2), - hotkey2, - SubtensorModule::get_min_take() - )); - - // Add nominate some stake. 
- assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey0), - hotkey2, - 1000 - )); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey1), - hotkey2, - 1000 - )); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey2), - hotkey2, - 100 - )); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey2, &hotkey2), - 1000 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey2), - 1000 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey2), - 1000 - ); - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey2), 3_000); - assert_eq!(SubtensorModule::get_total_stake(), 7_723); // 5_623 + (1_000 + 1_000 + 100) = 7_723 - - // Lets emit inflation through this new key with distributed ownership. - // We will emit 100 server emission, which should go in-full to the owner of the hotkey. - // We will emit 1000 validator emission, which should be distributed in-part to the nominators. - SubtensorModule::emit_inflation_through_hotkey_account(&hotkey2, 100, 1000); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey2, &hotkey2), - 1_494 - ); // 1000 + 100 + 94 + 900 * (1000/3000) = 1000 + 200 + 300 = 1_494 - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey2), - 1_303 - ); // 1000 + 900 * (1000/3000) = 1000 + 300 = 1_303 - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey2), - 1_303 - ); // 1000 + 900 * (1000/3000) = 1000 + 300 = 1300 - assert_eq!(SubtensorModule::get_total_stake(), 8_823); // 7_723 + 1_100 = 8_823 - - // Lets emit MORE inflation through this new key with distributed ownership. - // This time we do ONLY server emission - // We will emit 123 server emission, which should go in-full to the owner of the hotkey. - // We will emit *0* validator emission. - SubtensorModule::emit_inflation_through_hotkey_account(&hotkey2, 123, 0); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey2, &hotkey2), - 1_617 - ); // 1_500 + 117 = 1_617 - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey2), - 1_303 - ); // No change. - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey2), - 1_303 - ); // No change. - assert_eq!(SubtensorModule::get_total_stake(), 8_946); // 8_823 + 123 = 8_946 - }); -} - -#[test] -fn test_full_block_emission_occurs() { - new_test_ext(1).execute_with(|| { - let netuid = 1; - // Make two accounts. - let hotkey0 = U256::from(1); - let hotkey1 = U256::from(2); - - let coldkey0 = U256::from(3); - let coldkey1 = U256::from(4); - SubtensorModule::set_max_registrations_per_block(netuid, 4); - SubtensorModule::set_max_allowed_uids(netuid, 10); // Allow at least 10 to be registered at once, so no unstaking occurs - SubtensorModule::set_target_stakes_per_interval(10); // Increase max stakes per interval - - // Neither key can add stake because they dont have fundss. - assert_eq!( - SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - 60000 - ), - Err(Error::::NotEnoughBalanceToStake.into()) - ); - assert_eq!( - SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey1), - hotkey1, - 60000 - ), - Err(Error::::NotEnoughBalanceToStake.into()) - ); - - // Add balances. 
-        SubtensorModule::add_balance_to_coldkey_account(&coldkey0, 60000);
-        SubtensorModule::add_balance_to_coldkey_account(&coldkey1, 60000);
-
-        // Register the 2 neurons to a new network.
-        let netuid = 1;
-        add_network(netuid, 0, 0);
-        register_ok_neuron(netuid, hotkey0, coldkey0, 124124);
-        register_ok_neuron(netuid, hotkey1, coldkey1, 987907);
-        assert_eq!(
-            SubtensorModule::get_owning_coldkey_for_hotkey(&hotkey0),
-            coldkey0
-        );
-        assert_eq!(
-            SubtensorModule::get_owning_coldkey_for_hotkey(&hotkey1),
-            coldkey1
-        );
-        assert!(SubtensorModule::coldkey_owns_hotkey(&coldkey0, &hotkey0));
-        assert!(SubtensorModule::coldkey_owns_hotkey(&coldkey1, &hotkey1));
-
-        // We stake and all is ok.
-        assert_eq!(
-            SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey0),
-            0
-        );
-        assert_eq!(
-            SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey0),
-            0
-        );
-        assert_eq!(
-            SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey0),
-            0
-        );
-        assert_eq!(
-            SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey0),
-            0
-        );
-        assert_ok!(SubtensorModule::add_stake(
-            <<Test as Config>::RuntimeOrigin>::signed(coldkey0),
-            hotkey0,
-            100
-        ));
-        assert_ok!(SubtensorModule::add_stake(
-            <<Test as Config>::RuntimeOrigin>::signed(coldkey1),
-            hotkey1,
-            100
-        ));
-        assert_eq!(
-            SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey0),
-            100
-        );
-        assert_eq!(
-            SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0, &hotkey1),
-            0
-        );
-        assert_eq!(
-            SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey0),
-            0
-        );
-        assert_eq!(
-            SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1, &hotkey1),
-            100
-        );
-        assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey0), 100);
-        assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey1), 100);
-        assert_eq!(SubtensorModule::get_total_stake(), 200);
-
-        // Emit inflation through non delegates.
-        SubtensorModule::emit_inflation_through_hotkey_account(&hotkey0, 0, 111);
-        SubtensorModule::emit_inflation_through_hotkey_account(&hotkey1, 0, 234);
-        // Verify the full emission occurs.
-        assert_eq!(SubtensorModule::get_total_stake(), 200 + 111 + 234); // 200 + 111 + 234 = 545
-
-        // Become delegates all is ok.
-        assert_ok!(SubtensorModule::do_become_delegate(
-            <<Test as Config>::RuntimeOrigin>::signed(coldkey0),
-            hotkey0,
-            SubtensorModule::get_min_take()
-        ));
-        assert_ok!(SubtensorModule::do_become_delegate(
-            <<Test as Config>::RuntimeOrigin>::signed(coldkey1),
-            hotkey1,
-            SubtensorModule::get_min_take()
-        ));
-        assert!(SubtensorModule::hotkey_is_delegate(&hotkey0));
-        assert!(SubtensorModule::hotkey_is_delegate(&hotkey1));
-
-        // Add some delegate stake
-        assert_ok!(SubtensorModule::add_stake(
-            <<Test as Config>::RuntimeOrigin>::signed(coldkey0),
-            hotkey1,
-            200
-        ));
-        assert_ok!(SubtensorModule::add_stake(
-            <<Test as Config>::RuntimeOrigin>::signed(coldkey1),
-            hotkey0,
-            300
-        ));
-
-        assert_eq!(SubtensorModule::get_total_stake(), 545 + 500); // 545 + 500 = 1045
-
-        // Lets emit inflation with delegatees, with both validator and server emission
-        SubtensorModule::emit_inflation_through_hotkey_account(&hotkey0, 200, 1_000); // 1_200 total emission.
-        SubtensorModule::emit_inflation_through_hotkey_account(&hotkey1, 123, 2_000); // 2_123 total emission.
-
-        assert_eq!(SubtensorModule::get_total_stake(), 1045 + 1_200 + 2_123); // before + 1_200 + 2_123 = 4_368
-
-        // Lets emit MORE inflation through the hot and coldkeys.
- // This time JUSt server emission - SubtensorModule::emit_inflation_through_hotkey_account(&hotkey0, 350, 0); - SubtensorModule::emit_inflation_through_hotkey_account(&hotkey1, 150, 0); - - assert_eq!(SubtensorModule::get_total_stake(), 4_368 + 350 + 150); // before + 350 + 150 = 4_868 - - // Lastly, do only validator emission - - SubtensorModule::emit_inflation_through_hotkey_account(&hotkey0, 0, 12_948); - SubtensorModule::emit_inflation_through_hotkey_account(&hotkey1, 0, 1_874); - - assert_eq!(SubtensorModule::get_total_stake(), 4_868 + 12_948 + 1_874); // before + 12_948 + 1_874 = 19_690 - }); -} - -/************************************************************ - staking::unstake_all_coldkeys_from_hotkey_account() tests -************************************************************/ - -#[test] -fn test_unstake_all_coldkeys_from_hotkey_account() { - new_test_ext(1).execute_with(|| { - let hotkey_id = U256::from(123570); - let coldkey0_id = U256::from(123560); - - let coldkey1_id = U256::from(123561); - let coldkey2_id = U256::from(123562); - let coldkey3_id = U256::from(123563); - - let amount: u64 = 10000; - - let netuid: u16 = 1; - let tempo: u16 = 13; - let start_nonce: u64 = 0; - - // Make subnet - add_network(netuid, tempo, 0); - // Register delegate - register_ok_neuron(netuid, hotkey_id, coldkey0_id, start_nonce); - - match SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey_id) { - Ok(_k) => (), - Err(e) => panic!("Error: {:?}", e), - } - - //Add some stake that can be removed - SubtensorModule::increase_stake_on_coldkey_hotkey_account(&coldkey0_id, &hotkey_id, amount); - SubtensorModule::increase_stake_on_coldkey_hotkey_account( - &coldkey1_id, - &hotkey_id, - amount + 2, - ); - SubtensorModule::increase_stake_on_coldkey_hotkey_account( - &coldkey2_id, - &hotkey_id, - amount + 3, - ); - SubtensorModule::increase_stake_on_coldkey_hotkey_account( - &coldkey3_id, - &hotkey_id, - amount + 4, - ); - - // Verify free balance is 0 for all coldkeys - assert_eq!(Balances::free_balance(coldkey0_id), 0); - assert_eq!(Balances::free_balance(coldkey1_id), 0); - assert_eq!(Balances::free_balance(coldkey2_id), 0); - assert_eq!(Balances::free_balance(coldkey3_id), 0); - - // Verify total stake is correct - assert_eq!( - SubtensorModule::get_total_stake_for_hotkey(&hotkey_id), - amount * 4 + (2 + 3 + 4) - ); - - // Run unstake_all_coldkeys_from_hotkey_account - SubtensorModule::unstake_all_coldkeys_from_hotkey_account(&hotkey_id); - - // Verify total stake is 0 - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey_id), 0); - - // Vefify stake for all coldkeys is 0 - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0_id, &hotkey_id), - 0 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1_id, &hotkey_id), - 0 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey2_id, &hotkey_id), - 0 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey3_id, &hotkey_id), - 0 - ); - - // Verify free balance is correct for all coldkeys - assert_eq!(Balances::free_balance(coldkey0_id), amount); - assert_eq!(Balances::free_balance(coldkey1_id), amount + 2); - assert_eq!(Balances::free_balance(coldkey2_id), amount + 3); - assert_eq!(Balances::free_balance(coldkey3_id), amount + 4); - }); -} - -#[test] -fn test_unstake_all_coldkeys_from_hotkey_account_single_staker() { - new_test_ext(1).execute_with(|| { - let hotkey_id = U256::from(123570); - let coldkey0_id = U256::from(123560); - - let amount: 
u64 = 891011; - - let netuid: u16 = 1; - let tempo: u16 = 13; - let start_nonce: u64 = 0; - - // Make subnet - add_network(netuid, tempo, 0); - // Register delegate - register_ok_neuron(netuid, hotkey_id, coldkey0_id, start_nonce); - - match SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey_id) { - Ok(_) => (), - Err(e) => panic!("Error: {:?}", e), - } - - //Add some stake that can be removed - SubtensorModule::increase_stake_on_coldkey_hotkey_account(&coldkey0_id, &hotkey_id, amount); - - // Verify free balance is 0 for coldkey - assert_eq!(Balances::free_balance(coldkey0_id), 0); - - // Verify total stake is correct - assert_eq!( - SubtensorModule::get_total_stake_for_hotkey(&hotkey_id), - amount - ); - - // Run unstake_all_coldkeys_from_hotkey_account - SubtensorModule::unstake_all_coldkeys_from_hotkey_account(&hotkey_id); - - // Verify total stake is 0 - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey_id), 0); - - // Vefify stake for single coldkey is 0 - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0_id, &hotkey_id), - 0 - ); - - // Verify free balance is correct for single coldkey - assert_eq!(Balances::free_balance(coldkey0_id), amount); - }); -} - -#[test] -fn test_faucet_ok() { - new_test_ext(1).execute_with(|| { - let coldkey = U256::from(123560); - - log::info!("Creating work for submission to faucet..."); - - let block_number = SubtensorModule::get_current_block_as_u64(); - let difficulty: U256 = U256::from(10_000_000); - let mut nonce: u64 = 0; - let mut work: H256 = SubtensorModule::create_seal_hash(block_number, nonce, &coldkey); - while !SubtensorModule::hash_meets_difficulty(&work, difficulty) { - nonce += 1; - work = SubtensorModule::create_seal_hash(block_number, nonce, &coldkey); - } - let vec_work: Vec = SubtensorModule::hash_to_vec(work); - - log::info!("Faucet state: {}", cfg!(feature = "pow-faucet")); - - #[cfg(feature = "pow-faucet")] - assert_ok!(SubtensorModule::do_faucet( - <::RuntimeOrigin>::signed(coldkey), - block_number, - nonce, - vec_work - )); - - #[cfg(not(feature = "pow-faucet"))] - assert_ok!(SubtensorModule::do_faucet( - <::RuntimeOrigin>::signed(coldkey), - block_number, - nonce, - vec_work - )); - }); -} - -/// This test ensures that the clear_small_nominations function works as expected. -/// It creates a network with two hotkeys and two coldkeys, and then registers a nominator account for each hotkey. -/// When we call set_nominator_min_required_stake, it should clear all small nominations that are below the minimum required stake. -/// Run this test using: cargo test --package pallet-subtensor --test staking test_clear_small_nominations -#[test] -fn test_clear_small_nominations() { - new_test_ext(0).execute_with(|| { - System::set_block_number(1); - - // Create accounts. - let netuid = 1; - let hot1 = U256::from(1); - let hot2 = U256::from(2); - let cold1 = U256::from(3); - let cold2 = U256::from(4); - - SubtensorModule::set_target_stakes_per_interval(10); - // Register hot1 and hot2 . - add_network(netuid, 0, 0); - - // Register hot1. - register_ok_neuron(netuid, hot1, cold1, 0); - assert_ok!(SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(cold1), - hot1, - SubtensorModule::get_min_take() - )); - assert_eq!(SubtensorModule::get_owning_coldkey_for_hotkey(&hot1), cold1); - - // Register hot2. 
- register_ok_neuron(netuid, hot2, cold2, 0); - assert_ok!(SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(cold2), - hot2, - SubtensorModule::get_min_take() - )); - assert_eq!(SubtensorModule::get_owning_coldkey_for_hotkey(&hot2), cold2); - - // Add stake cold1 --> hot1 (non delegation.) - SubtensorModule::add_balance_to_coldkey_account(&cold1, 5); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(cold1), - hot1, - 1 - )); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&cold1, &hot1), - 1 - ); - assert_eq!(Balances::free_balance(cold1), 4); - - // Add stake cold2 --> hot1 (is delegation.) - SubtensorModule::add_balance_to_coldkey_account(&cold2, 5); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(cold2), - hot1, - 1 - )); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&cold2, &hot1), - 1 - ); - assert_eq!(Balances::free_balance(cold2), 4); - - // Add stake cold1 --> hot2 (non delegation.) - SubtensorModule::add_balance_to_coldkey_account(&cold1, 5); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(cold1), - hot2, - 1 - )); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&cold1, &hot2), - 1 - ); - assert_eq!(Balances::free_balance(cold1), 8); - - // Add stake cold2 --> hot2 (is delegation.) - SubtensorModule::add_balance_to_coldkey_account(&cold2, 5); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(cold2), - hot2, - 1 - )); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&cold2, &hot2), - 1 - ); - assert_eq!(Balances::free_balance(cold2), 8); - - // Run clear all small nominations when min stake is zero (noop) - SubtensorModule::set_nominator_min_required_stake(0); - SubtensorModule::clear_small_nominations(); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&cold1, &hot1), - 1 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&cold1, &hot2), - 1 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&cold2, &hot1), - 1 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&cold2, &hot2), - 1 - ); - - // Set min nomination to 10 - let total_cold1_stake_before = TotalColdkeyStake::::get(cold1); - let total_cold2_stake_before = TotalColdkeyStake::::get(cold2); - let total_hot1_stake_before = TotalHotkeyStake::::get(hot1); - let total_hot2_stake_before = TotalHotkeyStake::::get(hot2); - let _ = Stake::::try_get(hot2, cold1).unwrap(); // ensure exists before - let _ = Stake::::try_get(hot1, cold2).unwrap(); // ensure exists before - let total_stake_before = TotalStake::::get(); - SubtensorModule::set_nominator_min_required_stake(10); - - // Run clear all small nominations (removes delegations under 10) - SubtensorModule::clear_small_nominations(); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&cold1, &hot1), - 1 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&cold1, &hot2), - 0 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&cold2, &hot1), - 0 - ); - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&cold2, &hot2), - 1 - ); - - // Balances have been added back into accounts. 
- assert_eq!(Balances::free_balance(cold1), 9); - assert_eq!(Balances::free_balance(cold2), 9); - - // Internal storage is updated - assert_eq!( - TotalColdkeyStake::::get(cold2), - total_cold2_stake_before - 1 - ); - assert_eq!( - TotalHotkeyStake::::get(hot2), - total_hot2_stake_before - 1 - ); - Stake::::try_get(hot2, cold1).unwrap_err(); - Stake::::try_get(hot1, cold2).unwrap_err(); - assert_eq!( - TotalColdkeyStake::::get(cold1), - total_cold1_stake_before - 1 - ); - assert_eq!( - TotalHotkeyStake::::get(hot1), - total_hot1_stake_before - 1 - ); - Stake::::try_get(hot2, cold1).unwrap_err(); - assert_eq!(TotalStake::::get(), total_stake_before - 2); - }); -} - -/// Test that the nominator minimum staking threshold is enforced when stake is added. -#[test] -fn test_add_stake_below_minimum_threshold() { - new_test_ext(0).execute_with(|| { - let netuid: u16 = 1; - let coldkey1 = U256::from(0); - let hotkey1 = U256::from(1); - let coldkey2 = U256::from(2); - let minimum_threshold = 10_000_000; - let amount_below = 50_000; - - // Add balances. - SubtensorModule::add_balance_to_coldkey_account(&coldkey1, 100_000); - SubtensorModule::add_balance_to_coldkey_account(&coldkey2, 100_000); - SubtensorModule::set_nominator_min_required_stake(minimum_threshold); - SubtensorModule::set_target_stakes_per_interval(10); - - // Create network - add_network(netuid, 0, 0); - - // Register the neuron to a new network. - register_ok_neuron(netuid, hotkey1, coldkey1, 0); - assert_ok!(SubtensorModule::become_delegate( - <::RuntimeOrigin>::signed(coldkey1), - hotkey1 - )); - - // Coldkey staking on its own hotkey can stake below min threshold. - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey1), - hotkey1, - amount_below - )); - - // Nomination stake cannot stake below min threshold. - assert_noop!( - SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey2), - hotkey1, - amount_below - ), - pallet_subtensor::Error::::NomStakeBelowMinimumThreshold - ); - }); -} - -/// Test that the nominator minimum staking threshold is enforced when stake is removed. -#[test] -fn test_remove_stake_below_minimum_threshold() { - new_test_ext(0).execute_with(|| { - let netuid: u16 = 1; - let coldkey1 = U256::from(0); - let hotkey1 = U256::from(1); - let coldkey2 = U256::from(2); - let initial_balance = 200_000_000; - let initial_stake = 100_000; - let minimum_threshold = 50_000; - let stake_amount_to_remove = 51_000; - - // Add balances. - SubtensorModule::add_balance_to_coldkey_account(&coldkey1, initial_balance); - SubtensorModule::add_balance_to_coldkey_account(&coldkey2, initial_balance); - SubtensorModule::set_nominator_min_required_stake(minimum_threshold); - SubtensorModule::set_target_stakes_per_interval(10); - - // Create network - add_network(netuid, 0, 0); - - // Register the neuron to a new network. - register_ok_neuron(netuid, hotkey1, coldkey1, 0); - assert_ok!(SubtensorModule::become_delegate( - <::RuntimeOrigin>::signed(coldkey1), - hotkey1 - )); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey1), - hotkey1, - initial_stake - )); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey2), - hotkey1, - initial_stake - )); - - // Coldkey staking on its own hotkey can unstake below min threshold. - assert_ok!(SubtensorModule::remove_stake( - <::RuntimeOrigin>::signed(coldkey1), - hotkey1, - stake_amount_to_remove - )); - - // Nomination stake cannot unstake below min threshold, - // without unstaking all and removing the nomination. 
- let total_hotkey_stake_before = SubtensorModule::get_total_stake_for_hotkey(&hotkey1); - let bal_before = Balances::free_balance(coldkey2); - let staked_before = SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey2, &hotkey1); - let total_network_stake_before = SubtensorModule::get_total_stake(); - let total_issuance_before = SubtensorModule::get_total_issuance(); - // check the premise of the test is correct - assert!(initial_stake - stake_amount_to_remove < minimum_threshold); - assert_ok!(SubtensorModule::remove_stake( - <::RuntimeOrigin>::signed(coldkey2), - hotkey1, - stake_amount_to_remove - )); - - // Has no stake now - assert_eq!( - SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey2, &hotkey1), - 0 - ); - let stake_removed = staked_before; // All stake was removed - // Has the full balance - assert_eq!(Balances::free_balance(coldkey2), bal_before + stake_removed); - - // Stake map entry is removed - assert!(Stake::::try_get(hotkey1, coldkey2).is_err(),); - // Stake tracking is updated - assert_eq!( - TotalColdkeyStake::::try_get(coldkey2).unwrap(), - 0 // Did not have any stake before; Entry is NOT removed - ); - assert_eq!( - TotalHotkeyStake::::try_get(hotkey1).unwrap(), - total_hotkey_stake_before - stake_removed // Stake was removed from hotkey1 tracker - ); - assert_eq!( - TotalStake::::try_get().unwrap(), - total_network_stake_before - stake_removed - ); - - // Total issuance is the same - assert_eq!( - SubtensorModule::get_total_issuance(), - total_issuance_before // Nothing was issued - ); - }); -} - -// Verify delegate take can be decreased -#[test] -fn test_delegate_take_can_be_decreased() { - new_test_ext(1).execute_with(|| { - // Make account - let hotkey0 = U256::from(1); - let coldkey0 = U256::from(3); - - // Add balance - SubtensorModule::add_balance_to_coldkey_account(&coldkey0, 100000); - - // Register the neuron to a new network - let netuid = 1; - add_network(netuid, 0, 0); - register_ok_neuron(netuid, hotkey0, coldkey0, 124124); - - // Coldkey / hotkey 0 become delegates with 9% take - assert_ok!(SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - SubtensorModule::get_min_take() - )); - assert_eq!( - SubtensorModule::get_hotkey_take(&hotkey0), - SubtensorModule::get_min_take() - ); - - // Coldkey / hotkey 0 decreases take to 5%. 
This should fail as the minimum take is 9% - assert_err!( - SubtensorModule::do_decrease_take( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - u16::MAX / 20 - ), - Error::::DelegateTakeTooLow - ); - }); -} +/************************************************************ + staking::unstake_all_coldkeys_from_hotkey_account() tests +************************************************************/ -// Verify delegate take can be decreased #[test] -fn test_can_set_min_take_ok() { +fn test_unstake_all_coldkeys_from_hotkey_account() { new_test_ext(1).execute_with(|| { - // Make account - let hotkey0 = U256::from(1); - let coldkey0 = U256::from(3); - - // Add balance - SubtensorModule::add_balance_to_coldkey_account(&coldkey0, 100000); - - // Register the neuron to a new network - let netuid = 1; - add_network(netuid, 0, 0); - register_ok_neuron(netuid, hotkey0, coldkey0, 124124); - - // Coldkey / hotkey 0 become delegates - assert_ok!(SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - u16::MAX / 10 - )); + let hotkey_id = U256::from(123570); + let coldkey0_id = U256::from(123560); - // Coldkey / hotkey 0 decreases take to min - assert_ok!(SubtensorModule::do_decrease_take( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - SubtensorModule::get_min_take() - )); - assert_eq!( - SubtensorModule::get_hotkey_take(&hotkey0), - SubtensorModule::get_min_take() - ); - }); -} + let coldkey1_id = U256::from(123561); + let coldkey2_id = U256::from(123562); + let coldkey3_id = U256::from(123563); -// Verify delegate take can not be increased with do_decrease_take -#[test] -fn test_delegate_take_can_not_be_increased_with_decrease_take() { - new_test_ext(1).execute_with(|| { - // Make account - let hotkey0 = U256::from(1); - let coldkey0 = U256::from(3); + let amount: u64 = 10000; - // Add balance - SubtensorModule::add_balance_to_coldkey_account(&coldkey0, 100000); + let netuid: u16 = 1; + let tempo: u16 = 13; + let start_nonce: u64 = 0; - // Register the neuron to a new network - let netuid = 1; - add_network(netuid, 0, 0); - register_ok_neuron(netuid, hotkey0, coldkey0, 124124); + // Make subnet + add_network(netuid, tempo, 0); + // Register delegate + register_ok_neuron(netuid, hotkey_id, coldkey0_id, start_nonce); - // Coldkey / hotkey 0 become delegates with 10% take - assert_ok!(SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - SubtensorModule::get_min_take() - )); - assert_eq!( - SubtensorModule::get_hotkey_take(&hotkey0), - SubtensorModule::get_min_take() - ); + match SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey_id) { + Ok(_k) => (), + Err(e) => panic!("Error: {:?}", e), + } - // Coldkey / hotkey 0 tries to increase take to 12.5% - assert_eq!( - SubtensorModule::do_decrease_take( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - u16::MAX / 8 - ), - Err(Error::::DelegateTakeTooLow.into()) + //Add some stake that can be removed + SubtensorModule::increase_stake_on_coldkey_hotkey_account(&coldkey0_id, &hotkey_id, amount); + SubtensorModule::increase_stake_on_coldkey_hotkey_account( + &coldkey1_id, + &hotkey_id, + amount + 2, ); - assert_eq!( - SubtensorModule::get_hotkey_take(&hotkey0), - SubtensorModule::get_min_take() + SubtensorModule::increase_stake_on_coldkey_hotkey_account( + &coldkey2_id, + &hotkey_id, + amount + 3, ); - }); -} - -// Verify delegate take can be increased -#[test] -fn test_delegate_take_can_be_increased() { - new_test_ext(1).execute_with(|| { - // Make account - let hotkey0 = U256::from(1); 
- let coldkey0 = U256::from(3); - - // Add balance - SubtensorModule::add_balance_to_coldkey_account(&coldkey0, 100000); - - // Register the neuron to a new network - let netuid = 1; - add_network(netuid, 0, 0); - register_ok_neuron(netuid, hotkey0, coldkey0, 124124); - - // Coldkey / hotkey 0 become delegates with 9% take - assert_ok!(SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - SubtensorModule::get_min_take() - )); - assert_eq!( - SubtensorModule::get_hotkey_take(&hotkey0), - SubtensorModule::get_min_take() + SubtensorModule::increase_stake_on_coldkey_hotkey_account( + &coldkey3_id, + &hotkey_id, + amount + 4, ); - step_block(1 + InitialTxDelegateTakeRateLimit::get() as u16); - - // Coldkey / hotkey 0 decreases take to 12.5% - assert_ok!(SubtensorModule::do_increase_take( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - u16::MAX / 8 - )); - assert_eq!(SubtensorModule::get_hotkey_take(&hotkey0), u16::MAX / 8); - }); -} + // Verify free balance is 0 for all coldkeys + assert_eq!(Balances::free_balance(coldkey0_id), 0); + assert_eq!(Balances::free_balance(coldkey1_id), 0); + assert_eq!(Balances::free_balance(coldkey2_id), 0); + assert_eq!(Balances::free_balance(coldkey3_id), 0); -// Verify delegate take can not be decreased with increase_take -#[test] -fn test_delegate_take_can_not_be_decreased_with_increase_take() { - new_test_ext(1).execute_with(|| { - // Make account - let hotkey0 = U256::from(1); - let coldkey0 = U256::from(3); + // Verify total stake is correct + assert_eq!( + SubtensorModule::get_total_stake_for_hotkey(&hotkey_id), + amount * 4 + (2 + 3 + 4) + ); - // Add balance - SubtensorModule::add_balance_to_coldkey_account(&coldkey0, 100000); + // Run unstake_all_coldkeys_from_hotkey_account + SubtensorModule::unstake_all_coldkeys_from_hotkey_account(&hotkey_id); - // Register the neuron to a new network - let netuid = 1; - add_network(netuid, 0, 0); - register_ok_neuron(netuid, hotkey0, coldkey0, 124124); + // Verify total stake is 0 + assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey_id), 0); - // Coldkey / hotkey 0 become delegates with 9% take - assert_ok!(SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - SubtensorModule::get_min_take() - )); + // Vefify stake for all coldkeys is 0 assert_eq!( - SubtensorModule::get_hotkey_take(&hotkey0), - SubtensorModule::get_min_take() + SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0_id, &hotkey_id), + 0 ); - - // Coldkey / hotkey 0 tries to decrease take to 5% assert_eq!( - SubtensorModule::do_increase_take( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - u16::MAX / 20 - ), - Err(Error::::DelegateTakeTooLow.into()) + SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey1_id, &hotkey_id), + 0 ); assert_eq!( - SubtensorModule::get_hotkey_take(&hotkey0), - SubtensorModule::get_min_take() + SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey2_id, &hotkey_id), + 0 ); + assert_eq!( + SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey3_id, &hotkey_id), + 0 + ); + + // Verify free balance is correct for all coldkeys + assert_eq!(Balances::free_balance(coldkey0_id), amount); + assert_eq!(Balances::free_balance(coldkey1_id), amount + 2); + assert_eq!(Balances::free_balance(coldkey2_id), amount + 3); + assert_eq!(Balances::free_balance(coldkey3_id), amount + 4); }); } -// Verify delegate take can be increased up to InitialDefaultTake (18%) #[test] -fn test_delegate_take_can_be_increased_to_limit() { +fn 
test_unstake_all_coldkeys_from_hotkey_account_single_staker() { new_test_ext(1).execute_with(|| { - // Make account - let hotkey0 = U256::from(1); - let coldkey0 = U256::from(3); + let hotkey_id = U256::from(123570); + let coldkey0_id = U256::from(123560); - // Add balance - SubtensorModule::add_balance_to_coldkey_account(&coldkey0, 100000); + let amount: u64 = 891011; - // Register the neuron to a new network - let netuid = 1; - add_network(netuid, 0, 0); - register_ok_neuron(netuid, hotkey0, coldkey0, 124124); + let netuid: u16 = 1; + let tempo: u16 = 13; + let start_nonce: u64 = 0; - // Coldkey / hotkey 0 become delegates with 9% take - assert_ok!(SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - SubtensorModule::get_min_take() - )); - assert_eq!( - SubtensorModule::get_hotkey_take(&hotkey0), - SubtensorModule::get_min_take() - ); + // Make subnet + add_network(netuid, tempo, 0); + // Register delegate + register_ok_neuron(netuid, hotkey_id, coldkey0_id, start_nonce); - step_block(1 + InitialTxDelegateTakeRateLimit::get() as u16); + match SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey_id) { + Ok(_) => (), + Err(e) => panic!("Error: {:?}", e), + } - // Coldkey / hotkey 0 tries to increase take to InitialDefaultTake+1 - assert_ok!(SubtensorModule::do_increase_take( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - InitialDefaultTake::get() - )); + //Add some stake that can be removed + SubtensorModule::increase_stake_on_coldkey_hotkey_account(&coldkey0_id, &hotkey_id, amount); + + // Verify free balance is 0 for coldkey + assert_eq!(Balances::free_balance(coldkey0_id), 0); + + // Verify total stake is correct assert_eq!( - SubtensorModule::get_hotkey_take(&hotkey0), - InitialDefaultTake::get() + SubtensorModule::get_total_stake_for_hotkey(&hotkey_id), + amount ); - }); -} -// Verify delegate take can not be set above InitialDefaultTake -#[test] -fn test_delegate_take_can_not_be_set_beyond_limit() { - new_test_ext(1).execute_with(|| { - // Make account - let hotkey0 = U256::from(1); - let coldkey0 = U256::from(3); + // Run unstake_all_coldkeys_from_hotkey_account + SubtensorModule::unstake_all_coldkeys_from_hotkey_account(&hotkey_id); - // Add balance - SubtensorModule::add_balance_to_coldkey_account(&coldkey0, 100000); + // Verify total stake is 0 + assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey_id), 0); - // Register the neuron to a new network - let netuid = 1; - add_network(netuid, 0, 0); - register_ok_neuron(netuid, hotkey0, coldkey0, 124124); - let before = SubtensorModule::get_hotkey_take(&hotkey0); + // Vefify stake for single coldkey is 0 + assert_eq!( + SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey0_id, &hotkey_id), + 0 + ); - // Coldkey / hotkey 0 attempt to become delegates with take above maximum - // (Disable this check if InitialDefaultTake is u16::MAX) - if InitialDefaultTake::get() != u16::MAX { - assert_eq!( - SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - InitialDefaultTake::get() + 1 - ), - Err(Error::::DelegateTakeTooHigh.into()) - ); - } - assert_eq!(SubtensorModule::get_hotkey_take(&hotkey0), before); + // Verify free balance is correct for single coldkey + assert_eq!(Balances::free_balance(coldkey0_id), amount); }); } -// Verify delegate take can not be increased above InitialDefaultTake (18%) #[test] -fn test_delegate_take_can_not_be_increased_beyond_limit() { +fn test_faucet_ok() { new_test_ext(1).execute_with(|| { - // Make account - let 
hotkey0 = U256::from(1); - let coldkey0 = U256::from(3); + let coldkey = U256::from(123560); - // Add balance - SubtensorModule::add_balance_to_coldkey_account(&coldkey0, 100000); + log::info!("Creating work for submission to faucet..."); - // Register the neuron to a new network - let netuid = 1; - add_network(netuid, 0, 0); - register_ok_neuron(netuid, hotkey0, coldkey0, 124124); + let block_number = SubtensorModule::get_current_block_as_u64(); + let difficulty: U256 = U256::from(10_000_000); + let mut nonce: u64 = 0; + let mut work: H256 = SubtensorModule::create_seal_hash(block_number, nonce, &coldkey); + while !SubtensorModule::hash_meets_difficulty(&work, difficulty) { + nonce += 1; + work = SubtensorModule::create_seal_hash(block_number, nonce, &coldkey); + } + let vec_work: Vec = SubtensorModule::hash_to_vec(work); - // Coldkey / hotkey 0 become delegates with 9% take - assert_ok!(SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - SubtensorModule::get_min_take() + log::info!("Faucet state: {}", cfg!(feature = "pow-faucet")); + + #[cfg(feature = "pow-faucet")] + assert_ok!(SubtensorModule::do_faucet( + <::RuntimeOrigin>::signed(coldkey), + block_number, + nonce, + vec_work )); - assert_eq!( - SubtensorModule::get_hotkey_take(&hotkey0), - SubtensorModule::get_min_take() - ); - // Coldkey / hotkey 0 tries to increase take to InitialDefaultTake+1 - // (Disable this check if InitialDefaultTake is u16::MAX) - if InitialDefaultTake::get() != u16::MAX { - assert_eq!( - SubtensorModule::do_increase_take( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - InitialDefaultTake::get() + 1 - ), - Err(Error::::DelegateTakeTooHigh.into()) - ); - } - assert_eq!( - SubtensorModule::get_hotkey_take(&hotkey0), - SubtensorModule::get_min_take() - ); + #[cfg(not(feature = "pow-faucet"))] + assert_ok!(SubtensorModule::do_faucet( + <::RuntimeOrigin>::signed(coldkey), + block_number, + nonce, + vec_work + )); }); } -// Test rate-limiting on increase_take +/// This test ensures that the clear_small_nominations function works as expected. +/// It creates a network with two hotkeys and two coldkeys, and then registers a nominator account for each hotkey. +/// When we call set_nominator_min_required_stake, it should clear all small nominations that are below the minimum required stake. +/// Run this test using: cargo test --package pallet-subtensor --test staking test_clear_small_nominations #[test] -fn test_rate_limits_enforced_on_increase_take() { - new_test_ext(1).execute_with(|| { - // Make account - let hotkey0 = U256::from(1); - let coldkey0 = U256::from(3); - - // Add balance - SubtensorModule::add_balance_to_coldkey_account(&coldkey0, 100000); +fn test_clear_small_nominations() { + new_test_ext(0).execute_with(|| { + System::set_block_number(1); - // Register the neuron to a new network + // Create accounts. let netuid = 1; + let hot1 = U256::from(1); + let hot2 = U256::from(2); + let cold1 = U256::from(3); + let cold2 = U256::from(4); + + SubtensorModule::set_target_stakes_per_interval(10); + // Register hot1 and hot2 . add_network(netuid, 0, 0); - register_ok_neuron(netuid, hotkey0, coldkey0, 124124); - // Coldkey / hotkey 0 become delegates with 9% take + // Register hot1. 
+ register_ok_neuron(netuid, hot1, cold1, 0); assert_ok!(SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - SubtensorModule::get_min_take() + <::RuntimeOrigin>::signed(cold1), + hot1, + SubtensorModule::get_min_delegate_take() + )); + assert_eq!(SubtensorModule::get_owning_coldkey_for_hotkey(&hot1), cold1); + + // Register hot2. + register_ok_neuron(netuid, hot2, cold2, 0); + assert_ok!(SubtensorModule::do_become_delegate( + <::RuntimeOrigin>::signed(cold2), + hot2, + SubtensorModule::get_min_delegate_take() + )); + assert_eq!(SubtensorModule::get_owning_coldkey_for_hotkey(&hot2), cold2); + + // Add stake cold1 --> hot1 (non delegation.) + SubtensorModule::add_balance_to_coldkey_account(&cold1, 5); + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(cold1), + hot1, + 1 )); assert_eq!( - SubtensorModule::get_hotkey_take(&hotkey0), - SubtensorModule::get_min_take() + SubtensorModule::get_stake_for_coldkey_and_hotkey(&cold1, &hot1), + 1 ); + assert_eq!(Balances::free_balance(cold1), 4); - // Coldkey / hotkey 0 increases take to 12.5% + // Add stake cold2 --> hot1 (is delegation.) + SubtensorModule::add_balance_to_coldkey_account(&cold2, 5); + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(cold2), + hot1, + 1 + )); assert_eq!( - SubtensorModule::do_increase_take( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - u16::MAX / 8 - ), - Err(Error::::DelegateTxRateLimitExceeded.into()) + SubtensorModule::get_stake_for_coldkey_and_hotkey(&cold2, &hot1), + 1 ); + assert_eq!(Balances::free_balance(cold2), 4); + + // Add stake cold1 --> hot2 (non delegation.) + SubtensorModule::add_balance_to_coldkey_account(&cold1, 5); + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(cold1), + hot2, + 1 + )); assert_eq!( - SubtensorModule::get_hotkey_take(&hotkey0), - SubtensorModule::get_min_take() + SubtensorModule::get_stake_for_coldkey_and_hotkey(&cold1, &hot2), + 1 ); + assert_eq!(Balances::free_balance(cold1), 8); - step_block(1 + InitialTxDelegateTakeRateLimit::get() as u16); - - // Can increase after waiting - assert_ok!(SubtensorModule::do_increase_take( - <::RuntimeOrigin>::signed(coldkey0), - hotkey0, - u16::MAX / 8 + // Add stake cold2 --> hot2 (is delegation.) 
+ SubtensorModule::add_balance_to_coldkey_account(&cold2, 5); + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(cold2), + hot2, + 1 )); - assert_eq!(SubtensorModule::get_hotkey_take(&hotkey0), u16::MAX / 8); - }); -} - -// Helper function to set up a test environment -fn setup_test_environment() -> (AccountId, AccountId, AccountId) { - let current_coldkey = U256::from(1); - let hotkey = U256::from(2); - let new_coldkey = U256::from(3); - // Register the neuron to a new network - let netuid = 1; - add_network(netuid, 0, 0); - - // Register the hotkey and associate it with the current coldkey - register_ok_neuron(1, hotkey, current_coldkey, 0); - - // Add some balance to the hotkey - SubtensorModule::add_balance_to_coldkey_account(¤t_coldkey, 1000); - - // Stake some amount - assert_ok!(SubtensorModule::add_stake( - RuntimeOrigin::signed(current_coldkey), - hotkey, - 500 - )); - - (current_coldkey, hotkey, new_coldkey) -} - -/// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test staking -- test_arbitrated_coldkey_swap_success --exact --nocapture -#[test] -fn test_arbitrated_coldkey_swap_success() { - new_test_ext(1).execute_with(|| { - let (current_coldkey, hotkey, new_coldkey) = setup_test_environment(); + assert_eq!( + SubtensorModule::get_stake_for_coldkey_and_hotkey(&cold2, &hot2), + 1 + ); + assert_eq!(Balances::free_balance(cold2), 8); - let current_block = SubtensorModule::get_current_block_as_u64(); - let (work, nonce) = generate_valid_pow( - ¤t_coldkey, - current_block, - U256::from(BaseDifficulty::::get()), + // Run clear all small nominations when min stake is zero (noop) + SubtensorModule::set_nominator_min_required_stake(0); + SubtensorModule::clear_small_nominations(); + assert_eq!( + SubtensorModule::get_stake_for_coldkey_and_hotkey(&cold1, &hot1), + 1 ); - SubtensorModule::add_balance_to_coldkey_account( - ¤t_coldkey, - MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP, + assert_eq!( + SubtensorModule::get_stake_for_coldkey_and_hotkey(&cold1, &hot2), + 1 + ); + assert_eq!( + SubtensorModule::get_stake_for_coldkey_and_hotkey(&cold2, &hot1), + 1 ); - assert_ok!(SubtensorModule::do_schedule_coldkey_swap( - ¤t_coldkey.clone(), - &new_coldkey, - work.to_fixed_bytes().to_vec(), - current_block, - nonce - )); - - // Check that ColdkeySwapDestinations is populated correctly assert_eq!( - pallet_subtensor::ColdkeySwapDestinations::::get(current_coldkey), - vec![new_coldkey] + SubtensorModule::get_stake_for_coldkey_and_hotkey(&cold2, &hot2), + 1 ); - // Check that drain block is set correctly - let drain_block: u64 = 7200 * 3 + 1; + // Set min nomination to 10 + let total_cold1_stake_before = TotalColdkeyStake::::get(cold1); + let total_cold2_stake_before = TotalColdkeyStake::::get(cold2); + let total_hot1_stake_before = TotalHotkeyStake::::get(hot1); + let total_hot2_stake_before = TotalHotkeyStake::::get(hot2); + let _ = Stake::::try_get(hot2, cold1).unwrap(); // ensure exists before + let _ = Stake::::try_get(hot1, cold2).unwrap(); // ensure exists before + let total_stake_before = TotalStake::::get(); + SubtensorModule::set_nominator_min_required_stake(10); - log::info!( - "ColdkeysToSwapAtBlock before scheduling: {:?}", - pallet_subtensor::ColdkeysToSwapAtBlock::::get(drain_block) + // Run clear all small nominations (removes delegations under 10) + SubtensorModule::clear_small_nominations(); + assert_eq!( + SubtensorModule::get_stake_for_coldkey_and_hotkey(&cold1, &hot1), + 1 ); - assert_eq!( - pallet_subtensor::ColdkeysToSwapAtBlock::::get(drain_block), - 
vec![current_coldkey] + SubtensorModule::get_stake_for_coldkey_and_hotkey(&cold1, &hot2), + 0 ); - log::info!("Drain block set correctly: {:?}", drain_block); - log::info!( - "Drain block {:?}", - pallet_subtensor::ColdkeysToSwapAtBlock::::get(drain_block) + assert_eq!( + SubtensorModule::get_stake_for_coldkey_and_hotkey(&cold2, &hot1), + 0 ); - - // Make 5400 blocks pass - run_to_block(drain_block); - - // Run unstaking - SubtensorModule::swap_coldkeys_this_block(&BlockWeights::get().max_block).unwrap(); - log::info!( - "Arbitrated coldkeys for block: {:?}", - SubtensorModule::get_current_block_as_u64() + assert_eq!( + SubtensorModule::get_stake_for_coldkey_and_hotkey(&cold2, &hot2), + 1 ); - // Check the hotkey stake. - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey), 500); + // Balances have been added back into accounts. + assert_eq!(Balances::free_balance(cold1), 9); + assert_eq!(Balances::free_balance(cold2), 9); - // Get the owner of the hotkey now new key. + // Internal storage is updated assert_eq!( - SubtensorModule::get_owning_coldkey_for_hotkey(&hotkey), - new_coldkey + TotalColdkeyStake::::get(cold2), + total_cold2_stake_before - 1 + ); + assert_eq!( + TotalHotkeyStake::::get(hot2), + total_hot2_stake_before - 1 + ); + Stake::::try_get(hot2, cold1).unwrap_err(); + Stake::::try_get(hot1, cold2).unwrap_err(); + assert_eq!( + TotalColdkeyStake::::get(cold1), + total_cold1_stake_before - 1 ); - - // Check that the balance has been transferred to the new coldkey assert_eq!( - SubtensorModule::get_coldkey_balance(&new_coldkey), - MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP + 500 - ); // The new key as the 500 + TotalHotkeyStake::::get(hot1), + total_hot1_stake_before - 1 + ); + Stake::::try_get(hot2, cold1).unwrap_err(); + assert_eq!(TotalStake::::get(), total_stake_before - 2); }); } -/// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test staking -- test_arbitrated_coldkey_swap_same_coldkey --exact --nocapture +/// Test that the nominator minimum staking threshold is enforced when stake is added. #[test] -fn test_arbitrated_coldkey_swap_same_coldkey() { - new_test_ext(1).execute_with(|| { - let (current_coldkey, _hotkey, _) = setup_test_environment(); +fn test_add_stake_below_minimum_threshold() { + new_test_ext(0).execute_with(|| { + let netuid: u16 = 1; + let coldkey1 = U256::from(0); + let hotkey1 = U256::from(1); + let coldkey2 = U256::from(2); + let minimum_threshold = 10_000_000; + let amount_below = 50_000; - let current_block = SubtensorModule::get_current_block_as_u64(); - let (work, nonce) = generate_valid_pow( - ¤t_coldkey, - current_block, - U256::from(BaseDifficulty::::get()), - ); + // Add balances. + SubtensorModule::add_balance_to_coldkey_account(&coldkey1, 100_000); + SubtensorModule::add_balance_to_coldkey_account(&coldkey2, 100_000); + SubtensorModule::set_nominator_min_required_stake(minimum_threshold); + SubtensorModule::set_target_stakes_per_interval(10); + + // Create network + add_network(netuid, 0, 0); + + // Register the neuron to a new network. + register_ok_neuron(netuid, hotkey1, coldkey1, 0); + assert_ok!(SubtensorModule::become_delegate( + <::RuntimeOrigin>::signed(coldkey1), + hotkey1 + )); + + // Coldkey staking on its own hotkey can stake below min threshold. + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(coldkey1), + hotkey1, + amount_below + )); + // Nomination stake cannot stake below min threshold. 
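// A condensed sketch of the behaviour the clear_small_nominations assertions above
// exercise, using plain HashMaps in place of the pallet's storage maps: once the
// nominator minimum is raised, every nomination (a stake whose coldkey does not own
// the hotkey) below the minimum is deleted, the stake is credited back to the
// nominator's free balance, and the running total is reduced. Self-stake is never swept.
use std::collections::HashMap;

type Account = u64;

#[derive(Default)]
struct Ledger {
    owner: HashMap<Account, Account>,        // hotkey -> owning coldkey
    stake: HashMap<(Account, Account), u64>, // (hotkey, coldkey) -> stake
    free_balance: HashMap<Account, u64>,     // coldkey -> balance
    total_stake: u64,
}

impl Ledger {
    fn clear_small_nominations(&mut self, min_required: u64) {
        // Collect the (hotkey, coldkey) pairs that are nominations below the minimum.
        let mut small: Vec<(Account, Account)> = Vec::new();
        for (&(hot, cold), &amount) in self.stake.iter() {
            let is_self_stake = self.owner.get(&hot) == Some(&cold);
            if !is_self_stake && amount < min_required {
                small.push((hot, cold));
            }
        }
        // Remove each one, crediting the stake back to the nominator's balance.
        for (hot, cold) in small {
            if let Some(amount) = self.stake.remove(&(hot, cold)) {
                *self.free_balance.entry(cold).or_default() += amount;
                self.total_stake -= amount;
            }
        }
    }
}

fn main() {
    let mut ledger = Ledger::default();
    ledger.owner.insert(1, 3);      // hot1 is owned by cold1
    ledger.stake.insert((1, 3), 1); // self-stake: kept
    ledger.stake.insert((1, 4), 1); // 1-unit nomination: swept once the minimum is 10
    ledger.total_stake = 2;
    ledger.clear_small_nominations(10);
    assert_eq!(ledger.stake.get(&(1, 3)), Some(&1));
    assert_eq!(ledger.stake.get(&(1, 4)), None);
    assert_eq!(ledger.free_balance[&4], 1);
    assert_eq!(ledger.total_stake, 1);
}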
assert_noop!( - SubtensorModule::do_schedule_coldkey_swap( - ¤t_coldkey.clone(), - ¤t_coldkey, - work.to_fixed_bytes().to_vec(), - current_block, - nonce + SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(coldkey2), + hotkey1, + amount_below ), - Error::::SameColdkey + pallet_subtensor::Error::::NomStakeBelowMinimumThreshold ); }); } -/// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test staking -- test_arbitrated_coldkey_swap_no_balance --exact --nocapture +/// Test that the nominator minimum staking threshold is enforced when stake is removed. #[test] -fn test_arbitrated_coldkey_swap_no_balance() { - new_test_ext(1).execute_with(|| { - // Create accounts manually - let current_coldkey: AccountId = U256::from(1); - let hotkey: AccountId = U256::from(2); - let new_coldkey: AccountId = U256::from(3); - - add_network(1, 0, 0); +fn test_remove_stake_below_minimum_threshold() { + new_test_ext(0).execute_with(|| { + let netuid: u16 = 1; + let coldkey1 = U256::from(0); + let hotkey1 = U256::from(1); + let coldkey2 = U256::from(2); + let initial_balance = 200_000_000; + let initial_stake = 100_000; + let minimum_threshold = 50_000; + let stake_amount_to_remove = 51_000; - // Register the hotkey and associate it with the current coldkey - register_ok_neuron(1, hotkey, current_coldkey, 0); + // Add balances. + SubtensorModule::add_balance_to_coldkey_account(&coldkey1, initial_balance); + SubtensorModule::add_balance_to_coldkey_account(&coldkey2, initial_balance); + SubtensorModule::set_nominator_min_required_stake(minimum_threshold); + SubtensorModule::set_target_stakes_per_interval(10); - // Print initial balances - log::info!( - "Initial current_coldkey balance: {:?}", - Balances::total_balance(¤t_coldkey) - ); - log::info!( - "Initial hotkey balance: {:?}", - Balances::total_balance(&hotkey) - ); - log::info!( - "Initial new_coldkey balance: {:?}", - Balances::total_balance(&new_coldkey) - ); + // Create network + add_network(netuid, 0, 0); - // Ensure there's no balance in any of the accounts - assert_eq!(Balances::total_balance(¤t_coldkey), 0); - assert_eq!(Balances::total_balance(&hotkey), 0); - assert_eq!(Balances::total_balance(&new_coldkey), 0); - - // Generate valid PoW - let current_block = SubtensorModule::get_current_block_as_u64(); - let (work, nonce) = generate_valid_pow( - ¤t_coldkey, - current_block, - U256::from(BaseDifficulty::::get()), - ); + // Register the neuron to a new network. + register_ok_neuron(netuid, hotkey1, coldkey1, 0); + assert_ok!(SubtensorModule::become_delegate( + <::RuntimeOrigin>::signed(coldkey1), + hotkey1 + )); + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(coldkey1), + hotkey1, + initial_stake + )); + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(coldkey2), + hotkey1, + initial_stake + )); - // Try to schedule coldkey swap - let result = SubtensorModule::do_schedule_coldkey_swap( - ¤t_coldkey.clone(), - &new_coldkey, - work.to_fixed_bytes().to_vec(), - current_block, - nonce, - ); + // Coldkey staking on its own hotkey can unstake below min threshold. + assert_ok!(SubtensorModule::remove_stake( + <::RuntimeOrigin>::signed(coldkey1), + hotkey1, + stake_amount_to_remove + )); - // Print the result - log::info!("Result of arbitrated_coldkey_swap: {:?}", result); + // Nomination stake cannot unstake below min threshold, + // without unstaking all and removing the nomination. 
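// The rule that the two minimum-threshold tests in this hunk encode, written out as a
// single decision function (a sketch, not pallet code): self-stake ignores the nominator
// minimum entirely, adding nomination stake that would land below the minimum is rejected
// (NomStakeBelowMinimumThreshold above), and removing nomination stake that would leave
// the remainder below the minimum unstakes the nomination completely rather than leaving dust.
#[derive(Debug, PartialEq)]
enum StakeChange {
    Rejected,   // what add_stake reports as NomStakeBelowMinimumThreshold
    Apply(u64), // stake left on the (hotkey, coldkey) pair afterwards
}

fn apply_nomination_rules(
    is_self_stake: bool,
    current_stake: u64,
    delta: i64, // positive = add_stake, negative = remove_stake
    min_required: u64,
) -> StakeChange {
    let new_stake = (current_stake as i64 + delta).max(0) as u64;
    if is_self_stake {
        return StakeChange::Apply(new_stake);
    }
    if delta > 0 && new_stake < min_required {
        return StakeChange::Rejected;
    }
    if delta < 0 && new_stake < min_required {
        // Below the threshold after a removal: clear the nomination entirely.
        return StakeChange::Apply(0);
    }
    StakeChange::Apply(new_stake)
}

fn main() {
    // Mirrors the tests: a 50_000 nomination against a 10_000_000 minimum is rejected,
    // while the owning coldkey may stake the same amount on its own hotkey.
    assert_eq!(apply_nomination_rules(false, 0, 50_000, 10_000_000), StakeChange::Rejected);
    assert_eq!(apply_nomination_rules(true, 0, 50_000, 10_000_000), StakeChange::Apply(50_000));
    // Removing 51_000 from a 100_000 nomination with a 50_000 minimum clears it to zero.
    assert_eq!(apply_nomination_rules(false, 100_000, -51_000, 50_000), StakeChange::Apply(0));
}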
+ let total_hotkey_stake_before = SubtensorModule::get_total_stake_for_hotkey(&hotkey1); + let bal_before = Balances::free_balance(coldkey2); + let staked_before = SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey2, &hotkey1); + let total_network_stake_before = SubtensorModule::get_total_stake(); + let total_issuance_before = SubtensorModule::get_total_issuance(); + // check the premise of the test is correct + assert!(initial_stake - stake_amount_to_remove < minimum_threshold); + assert_ok!(SubtensorModule::remove_stake( + <::RuntimeOrigin>::signed(coldkey2), + hotkey1, + stake_amount_to_remove + )); - // Verify that the operation failed due to insufficient balance - assert_noop!( - result, - Error::::InsufficientBalanceToPerformColdkeySwap + // Has no stake now + assert_eq!( + SubtensorModule::get_stake_for_coldkey_and_hotkey(&coldkey2, &hotkey1), + 0 ); + let stake_removed = staked_before; // All stake was removed + // Has the full balance + assert_eq!(Balances::free_balance(coldkey2), bal_before + stake_removed); - // Print final balances - log::info!( - "Final current_coldkey balance: {:?}", - Balances::total_balance(¤t_coldkey) + // Stake map entry is removed + assert!(Stake::::try_get(hotkey1, coldkey2).is_err(),); + // Stake tracking is updated + assert_eq!( + TotalColdkeyStake::::try_get(coldkey2).unwrap(), + 0 // Did not have any stake before; Entry is NOT removed + ); + assert_eq!( + TotalHotkeyStake::::try_get(hotkey1).unwrap(), + total_hotkey_stake_before - stake_removed // Stake was removed from hotkey1 tracker ); - log::info!( - "Final hotkey balance: {:?}", - Balances::total_balance(&hotkey) + assert_eq!( + TotalStake::::try_get().unwrap(), + total_network_stake_before - stake_removed ); - log::info!( - "Final new_coldkey balance: {:?}", - Balances::total_balance(&new_coldkey) + + // Total issuance is the same + assert_eq!( + SubtensorModule::get_total_issuance(), + total_issuance_before // Nothing was issued ); - - // Verify that no balance was transferred - assert_eq!(Balances::total_balance(¤t_coldkey), 0); - assert_eq!(Balances::total_balance(&hotkey), 0); - assert_eq!(Balances::total_balance(&new_coldkey), 0); }); } -// To run this test, use the following command: -// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test staking -- test_arbitrated_coldkey_swap_with_no_stake --exact --nocapture +// Verify delegate take can be decreased #[test] -fn test_arbitrated_coldkey_swap_with_no_stake() { +fn test_delegate_take_can_be_decreased() { new_test_ext(1).execute_with(|| { - // Create accounts manually - let current_coldkey: AccountId = U256::from(1); - let hotkey: AccountId = U256::from(2); - let new_coldkey: AccountId = U256::from(3); - - add_network(1, 0, 0); - - // Register the hotkey and associate it with the current coldkey - register_ok_neuron(1, hotkey, current_coldkey, 0); - - // Add balance to the current coldkey without staking - Balances::make_free_balance_be(¤t_coldkey, MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP); + // Make account + let hotkey0 = U256::from(1); + let coldkey0 = U256::from(3); - // Print initial balances - log::info!( - "Initial current_coldkey balance: {:?}", - Balances::total_balance(¤t_coldkey) - ); - log::info!( - "Initial hotkey balance: {:?}", - Balances::total_balance(&hotkey) - ); - log::info!( - "Initial new_coldkey balance: {:?}", - Balances::total_balance(&new_coldkey) - ); + // Add balance + SubtensorModule::add_balance_to_coldkey_account(&coldkey0, 100000); - // Ensure initial balances are correct - assert_eq!( - 
Balances::total_balance(¤t_coldkey), - MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP - ); - assert_eq!(Balances::total_balance(&hotkey), 0); - assert_eq!(Balances::total_balance(&new_coldkey), 0); - - let current_block = SubtensorModule::get_current_block_as_u64(); - let (work, nonce) = generate_valid_pow( - ¤t_coldkey, - current_block, - U256::from(BaseDifficulty::::get()), - ); + // Register the neuron to a new network + let netuid = 1; + add_network(netuid, 0, 0); + register_ok_neuron(netuid, hotkey0, coldkey0, 124124); - // Schedule coldkey swap - assert_ok!(SubtensorModule::do_schedule_coldkey_swap( - ¤t_coldkey.clone(), - &new_coldkey, - work.to_fixed_bytes().to_vec(), - current_block, - nonce + // Coldkey / hotkey 0 become delegates with 9% take + assert_ok!(SubtensorModule::do_become_delegate( + <::RuntimeOrigin>::signed(coldkey0), + hotkey0, + SubtensorModule::get_min_delegate_take() )); - - // Make 5400 blocks pass, simulating on_idle for each block - let drain_block: u64 = 7200 * 3 + 1; - for _ in 0..drain_block { - next_block(); - SubtensorModule::on_idle(System::block_number(), Weight::MAX); - } - - // Print final balances - log::info!( - "Final current_coldkey balance: {:?}", - Balances::total_balance(¤t_coldkey) - ); - log::info!( - "Final hotkey balance: {:?}", - Balances::total_balance(&hotkey) - ); - log::info!( - "Final new_coldkey balance: {:?}", - Balances::total_balance(&new_coldkey) + assert_eq!( + SubtensorModule::get_hotkey_take(&hotkey0), + SubtensorModule::get_min_delegate_take() ); - // Check that the balance has been transferred to the new coldkey - assert_eq!( - Balances::total_balance(&new_coldkey), - MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP + // Coldkey / hotkey 0 decreases take to 5%. This should fail as the minimum take is 9% + assert_err!( + SubtensorModule::do_decrease_take( + <::RuntimeOrigin>::signed(coldkey0), + hotkey0, + u16::MAX / 20 + ), + Error::::DelegateTakeTooLow ); - assert_eq!(Balances::total_balance(¤t_coldkey), 0); }); } -// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test staking -- test_arbitrated_coldkey_swap_with_multiple_stakes --exact --nocapture +// Verify delegate take can be decreased #[test] -fn test_arbitrated_coldkey_swap_with_multiple_stakes() { +fn test_can_set_min_take_ok() { new_test_ext(1).execute_with(|| { - let (current_coldkey, hotkey, new_coldkey) = setup_test_environment(); - - SubtensorModule::set_target_stakes_per_interval(10); - SubtensorModule::add_balance_to_coldkey_account( - ¤t_coldkey, - MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP, - ); + // Make account + let hotkey0 = U256::from(1); + let coldkey0 = U256::from(3); - // Add more stake - assert_ok!(SubtensorModule::add_stake( - RuntimeOrigin::signed(current_coldkey), - hotkey, - 300 - )); + // Add balance + SubtensorModule::add_balance_to_coldkey_account(&coldkey0, 100000); - let current_block = SubtensorModule::get_current_block_as_u64(); - let (work, nonce) = generate_valid_pow( - ¤t_coldkey, - current_block, - U256::from(BaseDifficulty::::get()), - ); + // Register the neuron to a new network + let netuid = 1; + add_network(netuid, 0, 0); + register_ok_neuron(netuid, hotkey0, coldkey0, 124124); - assert_ok!(SubtensorModule::do_schedule_coldkey_swap( - ¤t_coldkey.clone(), - &new_coldkey, - work.to_fixed_bytes().to_vec(), - current_block, - nonce + // Coldkey / hotkey 0 become delegates + assert_ok!(SubtensorModule::do_become_delegate( + <::RuntimeOrigin>::signed(coldkey0), + hotkey0, + u16::MAX / 10 )); - // Make 5400 blocks pass, simulating on_idle for each block - let 
drain_block: u64 = 7200 * 3 + 1; - for _ in 0..drain_block { - next_block(); - SubtensorModule::on_idle(System::block_number(), Weight::MAX); - } - - // Check that all stake has been removed - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey), 800); - - // Owner has changed - assert_eq!( - SubtensorModule::get_owning_coldkey_for_hotkey(&hotkey), - new_coldkey - ); - - // Check that the full balance has been transferred to the new coldkey + // Coldkey / hotkey 0 decreases take to min + assert_ok!(SubtensorModule::do_decrease_take( + <::RuntimeOrigin>::signed(coldkey0), + hotkey0, + SubtensorModule::get_min_delegate_take() + )); assert_eq!( - SubtensorModule::get_coldkey_balance(&new_coldkey), - MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP + 200 + SubtensorModule::get_hotkey_take(&hotkey0), + SubtensorModule::get_min_delegate_take() ); - - // Check that the full balance has been transferred to the new coldkey - assert_eq!(SubtensorModule::get_coldkey_balance(¤t_coldkey), 0); }); } -// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test staking -- test_arbitrated_coldkey_swap_multiple_arbitrations --exact --nocapture + +// Verify delegate take can not be increased with do_decrease_take #[test] -fn test_arbitrated_coldkey_swap_multiple_arbitrations() { +fn test_delegate_take_can_not_be_increased_with_decrease_take() { new_test_ext(1).execute_with(|| { - // Set a very low base difficulty for testing - BaseDifficulty::::put(1); - - // Create coldkey with three choices. - let coldkey: AccountId = U256::from(1); - let new_coldkey1: AccountId = U256::from(2); - let new_coldkey2: AccountId = U256::from(3); - let new_coldkey3: AccountId = U256::from(4); - let hotkey: AccountId = U256::from(5); - - // Setup network state. - add_network(1, 0, 0); - SubtensorModule::add_balance_to_coldkey_account( - &coldkey, - MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP, - ); - ArbitrationPeriod::::put(5); // Set arbitration period to 5 blocks - register_ok_neuron(1, hotkey, coldkey, 0); - - let current_block = SubtensorModule::get_current_block_as_u64(); - - // Generate valid PoW for each swap attempt - let (work1, nonce1) = generate_valid_pow(&coldkey, current_block, U256::from(1)); - let (work2, nonce2) = generate_valid_pow(&coldkey, current_block, U256::from(2)); - let (work3, nonce3) = generate_valid_pow(&coldkey, current_block, U256::from(4)); - - // Schedule three swaps - assert_ok!(SubtensorModule::do_schedule_coldkey_swap( - &coldkey.clone(), - &new_coldkey1, - work1.to_fixed_bytes().to_vec(), - current_block, - nonce1 - )); - assert_ok!(SubtensorModule::do_schedule_coldkey_swap( - &coldkey.clone(), - &new_coldkey2, - work2.to_fixed_bytes().to_vec(), - current_block, - nonce2 - )); - assert_ok!(SubtensorModule::do_schedule_coldkey_swap( - &coldkey.clone(), - &new_coldkey3, - work3.to_fixed_bytes().to_vec(), - current_block, - nonce3 - )); - - // All three keys are added in swap destinations. 
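// The delegate-take tests in this hunk juggle raw u16 values: take is a fixed-point
// fraction of u16::MAX, so u16::MAX / 8 is 12.5%, u16::MAX / 10 is 10% and u16::MAX / 20
// is 5%, while the "9%" minimum and "18%" ceiling mentioned in the comments are the u16
// values behind get_min_delegate_take and InitialDefaultDelegateTake. A sketch of the
// direction and bound checks those tests exercise (error names taken from the tests,
// the exact pallet logic may differ):
fn take_to_percent(take: u16) -> f64 {
    100.0 * take as f64 / u16::MAX as f64
}

#[derive(Debug, PartialEq)]
enum TakeError {
    DelegateTakeTooLow,
    DelegateTakeTooHigh,
}

// do_increase_take must move the take upwards and may not exceed the ceiling.
fn increase_take(current: u16, new: u16, max: u16) -> Result<u16, TakeError> {
    if new <= current {
        return Err(TakeError::DelegateTakeTooLow);
    }
    if new > max {
        return Err(TakeError::DelegateTakeTooHigh);
    }
    Ok(new)
}

// do_decrease_take must move the take downwards and may not go under the minimum.
fn decrease_take(current: u16, new: u16, min: u16) -> Result<u16, TakeError> {
    if new >= current || new < min {
        return Err(TakeError::DelegateTakeTooLow);
    }
    Ok(new)
}

fn main() {
    assert!((take_to_percent(u16::MAX / 8) - 12.5).abs() < 0.01);
    assert!((take_to_percent(u16::MAX / 20) - 5.0).abs() < 0.01);
    // From a 9%-equivalent take, decreasing to 5% is DelegateTakeTooLow, exactly as
    // test_delegate_take_can_be_decreased asserts above.
    let min = (0.09 * u16::MAX as f64) as u16;
    assert_eq!(decrease_take(min, u16::MAX / 20, min), Err(TakeError::DelegateTakeTooLow));
}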
- assert_eq!( - pallet_subtensor::ColdkeySwapDestinations::::get(coldkey), - vec![new_coldkey1, new_coldkey2, new_coldkey3] - ); + // Make account + let hotkey0 = U256::from(1); + let coldkey0 = U256::from(3); - // Simulate the passage of blocks and on_idle calls - for i in 0..(7200 * 3 + 1) { - next_block(); - SubtensorModule::on_idle(System::block_number(), Weight::MAX); + // Add balance + SubtensorModule::add_balance_to_coldkey_account(&coldkey0, 100000); - log::info!( - "Block {}: Coldkey in arbitration: {}, Swap destinations: {:?}", - i + 1, - SubtensorModule::coldkey_in_arbitration(&coldkey), - pallet_subtensor::ColdkeySwapDestinations::::get(coldkey) - ); - } + // Register the neuron to a new network + let netuid = 1; + add_network(netuid, 0, 0); + register_ok_neuron(netuid, hotkey0, coldkey0, 124124); - // Check that the swap destinations remain unchanged due to multiple (>2) swap calls + // Coldkey / hotkey 0 become delegates with 10% take + assert_ok!(SubtensorModule::do_become_delegate( + <::RuntimeOrigin>::signed(coldkey0), + hotkey0, + SubtensorModule::get_min_delegate_take() + )); assert_eq!( - pallet_subtensor::ColdkeySwapDestinations::::get(coldkey), - vec![new_coldkey1, new_coldkey2, new_coldkey3], - "ColdkeySwapDestinations should remain unchanged with more than two swap calls" - ); - - // Key remains in arbitration due to multiple (>2) swap calls - assert!( - SubtensorModule::coldkey_in_arbitration(&coldkey), - "Coldkey should remain in arbitration with more than two swap calls" + SubtensorModule::get_hotkey_take(&hotkey0), + SubtensorModule::get_min_delegate_take() ); - // Check that no balance has been transferred - assert_eq!( - SubtensorModule::get_coldkey_balance(&coldkey), - MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP, - "Original coldkey balance should remain unchanged" - ); - assert_eq!( - SubtensorModule::get_coldkey_balance(&new_coldkey1), - 0, - "New coldkey1 should not receive any balance" - ); + // Coldkey / hotkey 0 tries to increase take to 12.5% assert_eq!( - SubtensorModule::get_coldkey_balance(&new_coldkey2), - 0, - "New coldkey2 should not receive any balance" + SubtensorModule::do_decrease_take( + <::RuntimeOrigin>::signed(coldkey0), + hotkey0, + u16::MAX / 8 + ), + Err(Error::::DelegateTakeTooLow.into()) ); assert_eq!( - SubtensorModule::get_coldkey_balance(&new_coldkey3), - 0, - "New coldkey3 should not receive any balance" + SubtensorModule::get_hotkey_take(&hotkey0), + SubtensorModule::get_min_delegate_take() ); }); } -// TODO: Verify that we never want more than 2 destinations for a coldkey +// Verify delegate take can be increased #[test] -fn test_arbitrated_coldkey_swap_existing_destination() { +fn test_delegate_take_can_be_increased() { new_test_ext(1).execute_with(|| { - let (current_coldkey, _hotkey, new_coldkey) = setup_test_environment(); - let another_coldkey = U256::from(4); - let third_coldkey = U256::from(5); + // Make account + let hotkey0 = U256::from(1); + let coldkey0 = U256::from(3); - let current_block = SubtensorModule::get_current_block_as_u64(); + // Add balance + SubtensorModule::add_balance_to_coldkey_account(&coldkey0, 100000); - SubtensorModule::add_balance_to_coldkey_account( - ¤t_coldkey, - MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP, - ); + // Register the neuron to a new network + let netuid = 1; + add_network(netuid, 0, 0); + register_ok_neuron(netuid, hotkey0, coldkey0, 124124); - // First swap attempt (0 existing destinations) - let difficulty1 = SubtensorModule::calculate_pow_difficulty(0); - let (work1, nonce1) = 
generate_valid_pow(¤t_coldkey, current_block, difficulty1); - - // Schedule a swap to new_coldkey - assert_ok!(SubtensorModule::do_schedule_coldkey_swap( - ¤t_coldkey, - &new_coldkey, - work1.to_fixed_bytes().to_vec(), - current_block, - nonce1 + // Coldkey / hotkey 0 become delegates with 9% take + assert_ok!(SubtensorModule::do_become_delegate( + <::RuntimeOrigin>::signed(coldkey0), + hotkey0, + SubtensorModule::get_min_delegate_take() )); - - // Second swap attempt (1 existing destination) - let difficulty2 = SubtensorModule::calculate_pow_difficulty(1); - let (work2, nonce2) = generate_valid_pow(¤t_coldkey, current_block, difficulty2); - - // Attempt to schedule a swap to the same new_coldkey again - assert_noop!( - SubtensorModule::do_schedule_coldkey_swap( - ¤t_coldkey.clone(), - &new_coldkey, - work2.to_fixed_bytes().to_vec(), - current_block, - nonce2 - ), - Error::::DuplicateColdkey + assert_eq!( + SubtensorModule::get_hotkey_take(&hotkey0), + SubtensorModule::get_min_delegate_take() ); - // Schedule a swap to another_coldkey (still 1 existing destination) - assert_ok!(SubtensorModule::do_schedule_coldkey_swap( - ¤t_coldkey.clone(), - &another_coldkey, - work2.to_fixed_bytes().to_vec(), - current_block, - nonce2 - )); + step_block(1 + InitialTxDelegateTakeRateLimit::get() as u16); - // Third swap attempt (2 existing destinations) - let difficulty3 = SubtensorModule::calculate_pow_difficulty(2); - let (work3, nonce3) = generate_valid_pow(¤t_coldkey, current_block, difficulty3); - - // Attempt to schedule a third swap - assert_ok!(SubtensorModule::do_schedule_coldkey_swap( - ¤t_coldkey.clone(), - &third_coldkey, - work3.to_fixed_bytes().to_vec(), - current_block, - nonce3 + // Coldkey / hotkey 0 decreases take to 12.5% + assert_ok!(SubtensorModule::do_increase_take( + <::RuntimeOrigin>::signed(coldkey0), + hotkey0, + u16::MAX / 8 )); + assert_eq!(SubtensorModule::get_hotkey_take(&hotkey0), u16::MAX / 8); }); } +// Verify delegate take can not be decreased with increase_take #[test] -fn test_arbitration_period_extension() { +fn test_delegate_take_can_not_be_decreased_with_increase_take() { new_test_ext(1).execute_with(|| { - let (current_coldkey, _hotkey, new_coldkey) = setup_test_environment(); - let another_coldkey = U256::from(4); - - let current_block = SubtensorModule::get_current_block_as_u64(); - let (work1, nonce1) = generate_valid_pow( - ¤t_coldkey, - current_block, - U256::from(BaseDifficulty::::get()), - ); - let (work2, nonce2) = - generate_valid_pow(¤t_coldkey, current_block, U256::from(20_000_000u64)); - SubtensorModule::add_balance_to_coldkey_account( - ¤t_coldkey, - MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP, - ); + // Make account + let hotkey0 = U256::from(1); + let coldkey0 = U256::from(3); - // Schedule a swap to new_coldkey - assert_ok!(SubtensorModule::do_schedule_coldkey_swap( - ¤t_coldkey.clone(), - &new_coldkey, - work1.to_fixed_bytes().to_vec(), - current_block, - nonce1 - )); + // Add balance + SubtensorModule::add_balance_to_coldkey_account(&coldkey0, 100000); - // Schedule a swap to another_coldkey - assert_ok!(SubtensorModule::do_schedule_coldkey_swap( - ¤t_coldkey.clone(), - &another_coldkey, - work2.to_fixed_bytes().to_vec(), - current_block, - nonce2 - )); + // Register the neuron to a new network + let netuid = 1; + add_network(netuid, 0, 0); + register_ok_neuron(netuid, hotkey0, coldkey0, 124124); - // Check that the arbitration period is extended - let arbitration_block = - SubtensorModule::get_current_block_as_u64() + ArbitrationPeriod::::get(); + // 
Coldkey / hotkey 0 become delegates with 9% take + assert_ok!(SubtensorModule::do_become_delegate( + <::RuntimeOrigin>::signed(coldkey0), + hotkey0, + SubtensorModule::get_min_delegate_take() + )); assert_eq!( - pallet_subtensor::ColdkeyArbitrationBlock::::get(current_coldkey), - arbitration_block - ); - }); -} - -#[test] -fn test_concurrent_arbitrated_coldkey_swaps() { - new_test_ext(1).execute_with(|| { - // Manually create accounts - let coldkey1: AccountId = U256::from(1); - let hotkey1: AccountId = U256::from(2); - let new_coldkey1: AccountId = U256::from(3); - - let coldkey2: AccountId = U256::from(4); - let hotkey2: AccountId = U256::from(5); - let new_coldkey2: AccountId = U256::from(6); - - // Add networks - let netuid1: u16 = 1; - let netuid2: u16 = 2; - add_network(netuid1, 13, 0); - add_network(netuid2, 13, 0); - - // Register neurons in different networks - register_ok_neuron(netuid1, hotkey1, coldkey1, 0); - register_ok_neuron(netuid2, hotkey2, coldkey2, 0); - - // Add balance to coldkeys - SubtensorModule::add_balance_to_coldkey_account( - &coldkey1, - MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP, - ); - SubtensorModule::add_balance_to_coldkey_account( - &coldkey2, - MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP, - ); - - let current_block = SubtensorModule::get_current_block_as_u64(); - let (work1, nonce1) = generate_valid_pow( - &coldkey1, - current_block, - U256::from(BaseDifficulty::::get()), - ); - let (work2, nonce2) = generate_valid_pow( - &coldkey2, - current_block, - U256::from(BaseDifficulty::::get()), + SubtensorModule::get_hotkey_take(&hotkey0), + SubtensorModule::get_min_delegate_take() ); - // Schedule swaps for both coldkeys - assert_ok!(SubtensorModule::do_schedule_coldkey_swap( - &coldkey1.clone(), - &new_coldkey1, - work1.to_fixed_bytes().to_vec(), - current_block, - nonce1 - )); - assert_ok!(SubtensorModule::do_schedule_coldkey_swap( - &coldkey2.clone(), - &new_coldkey2, - work2.to_fixed_bytes().to_vec(), - current_block, - nonce2 - )); - // Make 5400 blocks pass - let drain_block: u64 = 7200 * 3 + 1; - run_to_block(drain_block); - - // Run arbitration - SubtensorModule::swap_coldkeys_this_block(&BlockWeights::get().max_block).unwrap(); - // Check that the balances have been transferred correctly + // Coldkey / hotkey 0 tries to decrease take to 5% assert_eq!( - SubtensorModule::get_coldkey_balance(&new_coldkey1), - MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP + SubtensorModule::do_increase_take( + <::RuntimeOrigin>::signed(coldkey0), + hotkey0, + u16::MAX / 20 + ), + Err(Error::::DelegateTakeTooLow.into()) ); assert_eq!( - SubtensorModule::get_coldkey_balance(&new_coldkey2), - MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP + SubtensorModule::get_hotkey_take(&hotkey0), + SubtensorModule::get_min_delegate_take() ); }); } -// #[test] -// fn test_get_remaining_arbitration_period() { -// new_test_ext(1).execute_with(|| { -// let coldkey_account_id = U256::from(12345); // arbitrary coldkey -// let new_coldkey_account_id = U256::from(54321); // arbitrary new coldkey - -// let current_block = SubtensorModule::get_current_block_as_u64(); -// let (work, nonce) = generate_valid_pow( -// &coldkey_account_id, -// current_block, -// U256::from(BaseDifficulty::::get()), -// ); - -// SubtensorModule::add_balance_to_coldkey_account( -// &coldkey_account_id, -// MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP, -// ); - -// // Schedule a coldkey swap to set the arbitration block -// assert_ok!(SubtensorModule::do_schedule_coldkey_swap( -// &coldkey_account_id.clone(), -// &new_coldkey_account_id, -// 
work.to_fixed_bytes().to_vec(), -// current_block, -// nonce -// )); - -// // Get the current block number and arbitration period -// let current_block: u64 = SubtensorModule::get_current_block_as_u64(); -// let arbitration_period: u64 = ArbitrationPeriod::::get(); -// log::info!("arbitration_period: {:?}", arbitration_period); -// let arbitration_block: u64 = current_block + arbitration_period; -// log::info!("arbitration_block: {:?}", arbitration_block); - -// // Check if the remaining arbitration period is correct -// let remaining_period = -// SubtensorModule::get_remaining_arbitration_period(&coldkey_account_id); -// assert_eq!(remaining_period, arbitration_period); - -// // Move the current block forward and check again -// step_block(50); -// let remaining_period = -// SubtensorModule::get_remaining_arbitration_period(&coldkey_account_id); -// assert_eq!(remaining_period, arbitration_period - 50); - -// // Move the current block beyond the arbitration block and check again -// step_block((arbitration_period as u16) - 50 + 1); -// let remaining_period = -// SubtensorModule::get_remaining_arbitration_period(&coldkey_account_id); -// assert_eq!(remaining_period, 0); -// }); -// } - +// Verify delegate take can be increased up to InitialDefaultDelegateTake (18%) #[test] -fn test_transfer_coldkey_in_arbitration() { +fn test_delegate_take_can_be_increased_to_limit() { new_test_ext(1).execute_with(|| { - let coldkey_account_id = U256::from(1); - let recipient_account_id = U256::from(2); - let new_coldkey_account_id = U256::from(3); + // Make account + let hotkey0 = U256::from(1); + let coldkey0 = U256::from(3); - // Add balance to coldkey - SubtensorModule::add_balance_to_coldkey_account( - &coldkey_account_id, - MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP, - ); + // Add balance + SubtensorModule::add_balance_to_coldkey_account(&coldkey0, 100000); - let current_block = SubtensorModule::get_current_block_as_u64(); - let (work, nonce) = generate_valid_pow( - &coldkey_account_id, - current_block, - U256::from(BaseDifficulty::::get()), - ); + // Register the neuron to a new network + let netuid = 1; + add_network(netuid, 0, 0); + register_ok_neuron(netuid, hotkey0, coldkey0, 124124); - // Schedule a coldkey swap to put the coldkey in arbitration - assert_ok!(SubtensorModule::do_schedule_coldkey_swap( - &coldkey_account_id.clone(), - &new_coldkey_account_id, - work.to_fixed_bytes().to_vec(), - current_block, - nonce + // Coldkey / hotkey 0 become delegates with 9% take + assert_ok!(SubtensorModule::do_become_delegate( + <::RuntimeOrigin>::signed(coldkey0), + hotkey0, + SubtensorModule::get_min_delegate_take() )); - - // Try to transfer balance - let call = RuntimeCall::Balances(BalancesCall::transfer_allow_death { - dest: recipient_account_id, - value: 1000, - }); - assert_eq!( - validate_transaction(&coldkey_account_id, &call), - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) - ); - }); -} - -#[test] -fn test_add_stake_coldkey_in_arbitration() { - new_test_ext(1).execute_with(|| { - let hotkey_account_id = U256::from(561337); - let coldkey_account_id = U256::from(61337); - let new_coldkey_account_id = U256::from(71337); - let netuid: u16 = 1; - let start_nonce: u64 = 0; - let tempo: u16 = 13; - - add_network(netuid, tempo, 0); - register_ok_neuron(netuid, hotkey_account_id, coldkey_account_id, start_nonce); - SubtensorModule::add_balance_to_coldkey_account( - &coldkey_account_id, - MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP, + SubtensorModule::get_hotkey_take(&hotkey0), + 
SubtensorModule::get_min_delegate_take() ); - let current_block = SubtensorModule::get_current_block_as_u64(); - let (work, nonce) = generate_valid_pow( - &coldkey_account_id, - current_block, - U256::from(BaseDifficulty::::get()), - ); + step_block(1 + InitialTxDelegateTakeRateLimit::get() as u16); - // Schedule a coldkey swap to put the coldkey in arbitration - assert_ok!(SubtensorModule::do_schedule_coldkey_swap( - &coldkey_account_id.clone(), - &new_coldkey_account_id, - work.to_fixed_bytes().to_vec(), - current_block, - nonce + // Coldkey / hotkey 0 tries to increase take to InitialDefaultDelegateTake+1 + assert_ok!(SubtensorModule::do_increase_take( + <::RuntimeOrigin>::signed(coldkey0), + hotkey0, + InitialDefaultDelegateTake::get() )); - let call = RuntimeCall::SubtensorModule(crate::Call::add_stake { - hotkey: hotkey_account_id, - amount_staked: 1000, - }); - - // This should now be Ok - assert!(validate_transaction(&coldkey_account_id, &call).is_ok()); - }) -} - -#[test] -fn test_remove_stake_coldkey_in_arbitration() { - new_test_ext(1).execute_with(|| { - let hotkey_account_id = U256::from(561337); - let coldkey_account_id = U256::from(61337); - let new_coldkey_account_id = U256::from(71337); - let netuid: u16 = 1; - let start_nonce: u64 = 0; - let tempo: u16 = 13; - - add_network(netuid, tempo, 0); - register_ok_neuron(netuid, hotkey_account_id, coldkey_account_id, start_nonce); - SubtensorModule::add_balance_to_coldkey_account( - &coldkey_account_id, - MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP, - ); - SubtensorModule::increase_stake_on_hotkey_account(&hotkey_account_id, 1000); - - let current_block = SubtensorModule::get_current_block_as_u64(); - let (work, nonce) = generate_valid_pow( - &coldkey_account_id, - current_block, - U256::from(BaseDifficulty::::get()), + assert_eq!( + SubtensorModule::get_hotkey_take(&hotkey0), + InitialDefaultDelegateTake::get() ); - - // Schedule a coldkey swap to put the coldkey in arbitration - assert_ok!(SubtensorModule::do_schedule_coldkey_swap( - &coldkey_account_id.clone(), - &new_coldkey_account_id, - work.to_fixed_bytes().to_vec(), - current_block, - nonce - )); - - let call = RuntimeCall::SubtensorModule(crate::Call::remove_stake { - hotkey: hotkey_account_id, - amount_unstaked: 500, - }); - - // This should now be Ok - assert!(validate_transaction(&coldkey_account_id, &call).is_ok()); }); } +// Verify delegate take can not be set above InitialDefaultDelegateTake #[test] -fn test_transfer_coldkey_not_in_arbitration() { +fn test_delegate_take_can_not_be_set_beyond_limit() { new_test_ext(1).execute_with(|| { - let coldkey_account_id = U256::from(61337); - let recipient_account_id = U256::from(71337); + // Make account + let hotkey0 = U256::from(1); + let coldkey0 = U256::from(3); - SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 60000); + // Add balance + SubtensorModule::add_balance_to_coldkey_account(&coldkey0, 100000); - let call = RuntimeCall::Balances(BalancesCall::transfer_allow_death { - dest: recipient_account_id, - value: 1000, - }); + // Register the neuron to a new network + let netuid = 1; + add_network(netuid, 0, 0); + register_ok_neuron(netuid, hotkey0, coldkey0, 124124); + let before = SubtensorModule::get_hotkey_take(&hotkey0); - // This should be Ok - assert!(validate_transaction(&coldkey_account_id, &call).is_ok()); + // Coldkey / hotkey 0 attempt to become delegates with take above maximum + // (Disable this check if InitialDefaultDelegateTake is u16::MAX) + if InitialDefaultDelegateTake::get() != u16::MAX 
{ + assert_eq!( + SubtensorModule::do_become_delegate( + <::RuntimeOrigin>::signed(coldkey0), + hotkey0, + InitialDefaultDelegateTake::get() + 1 + ), + Err(Error::::DelegateTakeTooHigh.into()) + ); + } + assert_eq!(SubtensorModule::get_hotkey_take(&hotkey0), before); }); } -fn validate_transaction(who: &AccountId, call: &RuntimeCall) -> TransactionValidity { - SubtensorSignedExtension::::new().validate(who, call, &DispatchInfo::default(), 0) -} +// Verify delegate take can not be increased above InitialDefaultDelegateTake (18%) +#[test] +fn test_delegate_take_can_not_be_increased_beyond_limit() { + new_test_ext(1).execute_with(|| { + // Make account + let hotkey0 = U256::from(1); + let coldkey0 = U256::from(3); -// Helper function to generate valid PoW -fn generate_valid_pow(coldkey: &U256, block_number: u64, difficulty: U256) -> (H256, u64) { - let mut nonce: u64 = 0; - loop { - let work = SubtensorModule::create_seal_hash(block_number, nonce, coldkey); - if SubtensorModule::hash_meets_difficulty(&work, difficulty) { - return (work, nonce); - } - nonce += 1; - } -} + // Add balance + SubtensorModule::add_balance_to_coldkey_account(&coldkey0, 100000); -// Helper function to advance to the next block and run hooks -fn next_block() { - let current_block = System::block_number(); - System::on_finalize(current_block); - System::set_block_number(current_block + 1); - System::on_initialize(System::block_number()); - SubtensorModule::on_initialize(System::block_number()); -} + // Register the neuron to a new network + let netuid = 1; + add_network(netuid, 0, 0); + register_ok_neuron(netuid, hotkey0, coldkey0, 124124); -// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test staking -- test_coldkey_meets_enough --exact --nocapture -#[test] -fn test_coldkey_meets_enough() { - new_test_ext(1).execute_with(|| { - let coldkey = U256::from(1); - let new_coldkey = U256::from(2); - let hotkey = U256::from(2); - let netuid = 1u16; - add_network(netuid, 13, 0); - register_ok_neuron(netuid, hotkey, coldkey, 0); - let current_block = SubtensorModule::get_current_block_as_u64(); - let (work1, nonce1) = generate_valid_pow( - &coldkey, - current_block, - U256::from(BaseDifficulty::::get()), - ); - assert_err!( - SubtensorModule::do_schedule_coldkey_swap( - &coldkey.clone(), - &new_coldkey, - work1.to_fixed_bytes().to_vec(), - current_block, - nonce1 - ), - Error::::InsufficientBalanceToPerformColdkeySwap + // Coldkey / hotkey 0 become delegates with 9% take + assert_ok!(SubtensorModule::do_become_delegate( + <::RuntimeOrigin>::signed(coldkey0), + hotkey0, + SubtensorModule::get_min_delegate_take() + )); + assert_eq!( + SubtensorModule::get_hotkey_take(&hotkey0), + SubtensorModule::get_min_delegate_take() ); - SubtensorModule::add_balance_to_coldkey_account( - &coldkey, - MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP, + + // Coldkey / hotkey 0 tries to increase take to InitialDefaultDelegateTake+1 + // (Disable this check if InitialDefaultDelegateTake is u16::MAX) + if InitialDefaultDelegateTake::get() != u16::MAX { + assert_eq!( + SubtensorModule::do_increase_take( + <::RuntimeOrigin>::signed(coldkey0), + hotkey0, + InitialDefaultDelegateTake::get() + 1 + ), + Err(Error::::DelegateTakeTooHigh.into()) + ); + } + assert_eq!( + SubtensorModule::get_hotkey_take(&hotkey0), + SubtensorModule::get_min_delegate_take() ); - assert_ok!(SubtensorModule::do_schedule_coldkey_swap( - &coldkey.clone(), - &new_coldkey, - work1.to_fixed_bytes().to_vec(), - current_block, - nonce1 - )); }); } +// Test rate-limiting on increase_take #[test] 
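// test_rate_limits_enforced_on_increase_take, which follows, leans on a per-hotkey
// transaction rate limit: a take increase inside the limit window fails with
// DelegateTxRateLimitExceeded and succeeds once enough blocks have passed (the test
// steps 1 + InitialTxDelegateTakeRateLimit blocks). A sketch of that window check,
// with plain u64 block numbers standing in for the pallet's storage:
struct TakeRateLimiter {
    last_change_block: u64,
    rate_limit_blocks: u64,
}

impl TakeRateLimiter {
    // True when a take change is allowed at `current_block`.
    fn allowed(&self, current_block: u64) -> bool {
        current_block.saturating_sub(self.last_change_block) > self.rate_limit_blocks
    }
}

fn main() {
    let limiter = TakeRateLimiter { last_change_block: 1, rate_limit_blocks: 1000 };
    assert!(!limiter.allowed(2));   // immediate retry: DelegateTxRateLimitExceeded
    assert!(limiter.allowed(1002)); // after stepping 1 + rate_limit blocks
}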
-fn test_comprehensive_coldkey_swap_scenarios() { +fn test_rate_limits_enforced_on_increase_take() { new_test_ext(1).execute_with(|| { - // Set arbitration period to 5 blocks - ArbitrationPeriod::::put(5); - - let subnet_owner1 = U256::from(1); - let subnet_owner2 = U256::from(2); - let regular_user = U256::from(3); - let new_coldkey1 = U256::from(4); - let new_coldkey2 = U256::from(5); - let new_coldkey3 = U256::from(6); - let netuid1 = 1; - let netuid2 = 2; - - // Add networks and register subnet owners - add_network(netuid1, 13, 0); - add_network(netuid2, 13, 0); - SubnetOwner::::insert(netuid1, subnet_owner1); - SubnetOwner::::insert(netuid2, subnet_owner2); - - // Add balance to subnet owners and regular user - SubtensorModule::add_balance_to_coldkey_account( - &subnet_owner1, - MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP, - ); - SubtensorModule::add_balance_to_coldkey_account( - &subnet_owner2, - MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP, - ); - SubtensorModule::add_balance_to_coldkey_account( - ®ular_user, - MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP * 2, - ); - - // Set a very low base difficulty for testing - BaseDifficulty::::put(1); - - let current_block = SubtensorModule::get_current_block_as_u64(); - - // Schedule swaps for subnet owners and regular user - let (work1, nonce1) = generate_valid_pow(&subnet_owner1, current_block, U256::from(BaseDifficulty::::get())); - let (work2, nonce2) = generate_valid_pow(&subnet_owner2, current_block, U256::from(BaseDifficulty::::get())); - let (work3, nonce3) = generate_valid_pow(®ular_user, current_block, U256::from(BaseDifficulty::::get())); + // Make account + let hotkey0 = U256::from(1); + let coldkey0 = U256::from(3); - assert_ok!(SubtensorModule::do_schedule_coldkey_swap( - &subnet_owner1, - &new_coldkey1, - work1.to_fixed_bytes().to_vec(), - current_block, - nonce1 - )); + // Add balance + SubtensorModule::add_balance_to_coldkey_account(&coldkey0, 100000); - assert_ok!(SubtensorModule::do_schedule_coldkey_swap( - &subnet_owner2, - &new_coldkey2, - work2.to_fixed_bytes().to_vec(), - current_block, - nonce2 - )); + // Register the neuron to a new network + let netuid = 1; + add_network(netuid, 0, 0); + register_ok_neuron(netuid, hotkey0, coldkey0, 124124); - assert_ok!(SubtensorModule::do_schedule_coldkey_swap( - ®ular_user, - &new_coldkey3, - work3.to_fixed_bytes().to_vec(), - current_block, - nonce3 + // Coldkey / hotkey 0 become delegates with 9% take + assert_ok!(SubtensorModule::do_become_delegate( + <::RuntimeOrigin>::signed(coldkey0), + hotkey0, + SubtensorModule::get_min_delegate_take() )); - - // Check if swaps were scheduled correctly - assert_eq!( - ColdkeySwapDestinations::::get(subnet_owner1), - vec![new_coldkey1] - ); - assert_eq!( - ColdkeySwapDestinations::::get(subnet_owner2), - vec![new_coldkey2] - ); assert_eq!( - ColdkeySwapDestinations::::get(regular_user), - vec![new_coldkey3] + SubtensorModule::get_hotkey_take(&hotkey0), + SubtensorModule::get_min_delegate_take() ); - // Run through the arbitration period plus one block - for i in 0..6 { - next_block(); - SubtensorModule::on_idle(System::block_number(), Weight::MAX); - - log::info!( - "Block {}: Coldkey in arbitration: {}, Swap destinations: {:?}", - i + 1, - SubtensorModule::coldkey_in_arbitration(&subnet_owner1), - ColdkeySwapDestinations::::get(subnet_owner1) - ); - - // Test edge case: try to schedule another swap during arbitration - if i == 2 { - let (work4, nonce4) = generate_valid_pow( - &subnet_owner1, - current_block + i as u64, - U256::from(4) * 
U256::from(BaseDifficulty::::get()), - ); - assert_ok!(SubtensorModule::do_schedule_coldkey_swap( - &subnet_owner1, - &new_coldkey2, - work4.to_fixed_bytes().to_vec(), - current_block + i as u64, - nonce4 - )); - // This should add new_coldkey2 to subnet_owner1's destinations - assert_eq!( - ColdkeySwapDestinations::::get(subnet_owner1), - vec![new_coldkey1, new_coldkey2] - ); - } - } - - // Check if swaps have been executed - log::info!( - "After arbitration period - Swap destinations for subnet_owner1: {:?}", - ColdkeySwapDestinations::::get(subnet_owner1) + // Coldkey / hotkey 0 increases take to 12.5% + assert_eq!( + SubtensorModule::do_increase_take( + <::RuntimeOrigin>::signed(coldkey0), + hotkey0, + u16::MAX / 8 + ), + Err(Error::::DelegateTxRateLimitExceeded.into()) ); assert_eq!( - ColdkeySwapDestinations::::get(subnet_owner1), - vec![new_coldkey1, new_coldkey2], - "ColdkeySwapDestinations for subnet_owner1 should still contain two destinations after arbitration period" + SubtensorModule::get_hotkey_take(&hotkey0), + SubtensorModule::get_min_delegate_take() ); - assert!(ColdkeySwapDestinations::::get(subnet_owner2).is_empty()); - assert!(ColdkeySwapDestinations::::get(regular_user).is_empty()); - // Verify that subnet ownerships have NOT been transferred for subnet_owner1 - assert_eq!(SubnetOwner::::get(netuid1), subnet_owner1); - // But subnet_owner2's ownership should have been transferred - assert_eq!(SubnetOwner::::get(netuid2), new_coldkey2); + step_block(1 + InitialTxDelegateTakeRateLimit::get() as u16); - // Verify regular user's balance has been transferred - assert_eq!( - SubtensorModule::get_coldkey_balance(&new_coldkey3), - MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP * 2 - ); - assert_eq!(SubtensorModule::get_coldkey_balance(®ular_user), 0); + // Can increase after waiting + assert_ok!(SubtensorModule::do_increase_take( + <::RuntimeOrigin>::signed(coldkey0), + hotkey0, + u16::MAX / 8 + )); + assert_eq!(SubtensorModule::get_hotkey_take(&hotkey0), u16::MAX / 8); }); } @@ -4449,302 +2306,3 @@ fn test_get_total_delegated_stake_exclude_owner_stake() { ); }); } - -#[test] -fn test_do_schedule_coldkey_swap_subnet_owner_skips_min_balance() { - new_test_ext(1).execute_with(|| { - let netuid = 1u16; - let subnet_owner = U256::from(1); - let new_coldkey = U256::from(2); - let hotkey = U256::from(3); - let current_block = 0u64; - - add_network(netuid, 0, 0); - register_ok_neuron(netuid, hotkey, subnet_owner, 0); - - // Make subnet_owner the owner of the subnet - SubnetOwner::::insert(netuid, subnet_owner); - - // Ensure subnet_owner has less than minimum balance - assert!( - SubtensorModule::get_coldkey_balance(&subnet_owner) - < MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP - ); - - // Generate valid PoW - let difficulty = U256::from(4) * U256::from(BaseDifficulty::::get()); - let (work, nonce) = generate_valid_pow(&subnet_owner, current_block, difficulty); - - // Debug prints - println!("Subnet owner: {:?}", subnet_owner); - println!("New coldkey: {:?}", new_coldkey); - println!("Current block: {}", current_block); - println!("Difficulty: {:?}", difficulty); - println!("Work: {:?}", work); - println!("Nonce: {}", nonce); - - // Verify the PoW - let seal = SubtensorModule::create_seal_hash(current_block, nonce, &subnet_owner); - println!("Calculated seal: {:?}", seal); - println!("Work matches seal: {}", work == seal); - println!( - "Seal meets difficulty: {}", - SubtensorModule::hash_meets_difficulty(&seal, difficulty) - ); - - // Attempt to schedule coldkey swap - let result = 
SubtensorModule::do_schedule_coldkey_swap( - &subnet_owner, - &new_coldkey, - work.to_fixed_bytes().to_vec(), - current_block, - nonce, - ); - - // Print the result - println!("Swap result: {:?}", result); - - assert_ok!(result); - - // Verify that the swap was scheduled - assert_eq!( - ColdkeySwapDestinations::::get(subnet_owner), - vec![new_coldkey] - ); - }); -} - -#[test] -fn test_do_schedule_coldkey_swap_delegate_with_500_tao_skips_min_balance() { - new_test_ext(1).execute_with(|| { - let netuid = 1u16; - let delegate_coldkey = U256::from(1); - let delegate_hotkey = U256::from(2); - let new_coldkey = U256::from(3); - let delegator = U256::from(4); - let current_block = 0u64; - - add_network(netuid, 0, 0); - register_ok_neuron(netuid, delegate_hotkey, delegate_coldkey, 0); - - // Make delegate a delegate - assert_ok!(SubtensorModule::become_delegate( - RuntimeOrigin::signed(delegate_coldkey), - delegate_hotkey - )); - - // Add more than 500 TAO of stake to the delegate's hotkey - let stake_amount = 501_000_000_000; // 501 TAO in RAO - SubtensorModule::add_balance_to_coldkey_account(&delegator, stake_amount); - assert_ok!(SubtensorModule::add_stake( - RuntimeOrigin::signed(delegator), - delegate_hotkey, - stake_amount - )); - - // Debug prints - println!( - "Delegator balance: {}", - SubtensorModule::get_coldkey_balance(&delegator) - ); - println!( - "Delegate coldkey balance: {}", - SubtensorModule::get_coldkey_balance(&delegate_coldkey) - ); - println!("Stake amount: {}", stake_amount); - println!( - "Delegate hotkey total stake: {}", - SubtensorModule::get_total_stake_for_hotkey(&delegate_hotkey) - ); - println!( - "Delegate coldkey delegated stake: {}", - SubtensorModule::get_total_delegated_stake(&delegate_coldkey) - ); - - // Ensure delegate's coldkey has less than minimum balance - assert!( - SubtensorModule::get_coldkey_balance(&delegate_coldkey) - < MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP, - "Delegate coldkey balance should be less than minimum required" - ); - - // Ensure the delegate's hotkey has more than 500 TAO delegated - assert!( - SubtensorModule::get_total_delegated_stake(&delegate_coldkey) >= 500_000_000_000, - "Delegate hotkey should have at least 500 TAO delegated" - ); - - // Generate valid PoW - let (work, nonce) = generate_valid_pow( - &delegate_coldkey, - current_block, - U256::from(4) * U256::from(BaseDifficulty::::get()), - ); - - // Debug prints - println!("Work: {:?}", work); - println!("Nonce: {}", nonce); - - // Attempt to schedule coldkey swap - let result = SubtensorModule::do_schedule_coldkey_swap( - &delegate_coldkey, - &new_coldkey, - work.to_fixed_bytes().to_vec(), - current_block, - nonce, - ); - - // Print the result - println!("Swap result: {:?}", result); - - assert_ok!(result); - - // Verify that the swap was scheduled - assert_eq!( - ColdkeySwapDestinations::::get(delegate_coldkey), - vec![new_coldkey] - ); - - // Additional debug prints after swap - println!( - "Coldkey swap destinations: {:?}", - ColdkeySwapDestinations::::get(delegate_coldkey) - ); - println!( - "Is coldkey in arbitration: {}", - SubtensorModule::coldkey_in_arbitration(&delegate_coldkey) - ); - }); -} - -#[test] -fn test_do_schedule_coldkey_swap_regular_user_fails_min_balance() { - new_test_ext(1).execute_with(|| { - let netuid = 1u16; - let regular_user = U256::from(1); - let new_coldkey = U256::from(2); - let hotkey = U256::from(3); - let current_block = 0u64; - let nonce = 0u64; - - add_network(netuid, 0, 0); - register_ok_neuron(netuid, hotkey, regular_user, 0); - - // 
Ensure regular_user has less than minimum balance - assert!( - SubtensorModule::get_coldkey_balance(®ular_user) - < MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP - ); - - let (work, _) = generate_valid_pow( - ®ular_user, - current_block, - U256::from(4) * U256::from(BaseDifficulty::::get()), - ); - - // Attempt to schedule coldkey swap - assert_noop!( - SubtensorModule::do_schedule_coldkey_swap( - ®ular_user, - &new_coldkey, - work.to_fixed_bytes().to_vec(), - current_block, - nonce - ), - Error::::InsufficientBalanceToPerformColdkeySwap - ); - - // Verify that the swap was not scheduled - assert!(ColdkeySwapDestinations::::get(regular_user).is_empty()); - }); -} - -#[test] -fn test_do_schedule_coldkey_swap_regular_user_passes_min_balance() { - new_test_ext(1).execute_with(|| { - let netuid = 1u16; - let regular_user = U256::from(1); - let new_coldkey = U256::from(2); - let hotkey = U256::from(3); - let current_block = 0u64; - - add_network(netuid, 0, 0); - register_ok_neuron(netuid, hotkey, regular_user, 0); - - // Ensure regular_user has more than minimum balance - SubtensorModule::add_balance_to_coldkey_account( - ®ular_user, - MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP + 1, - ); - assert!( - SubtensorModule::get_coldkey_balance(®ular_user) - > MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP - ); - - // Generate valid PoW - let (work, nonce) = generate_valid_pow( - ®ular_user, - current_block, - U256::from(4) * U256::from(BaseDifficulty::::get()), - ); - - // Debug prints - println!("Regular user: {:?}", regular_user); - println!("New coldkey: {:?}", new_coldkey); - println!("Current block: {}", current_block); - println!("Work: {:?}", work); - println!("Nonce: {}", nonce); - - // Attempt to schedule coldkey swap - let result = SubtensorModule::do_schedule_coldkey_swap( - ®ular_user, - &new_coldkey, - work.to_fixed_bytes().to_vec(), - current_block, - nonce, - ); - - // Print the result - println!("Swap result: {:?}", result); - - assert_ok!(result); - - // Verify that the swap was scheduled - assert_eq!( - ColdkeySwapDestinations::::get(regular_user), - vec![new_coldkey] - ); - }); -} - -// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test staking -- test_emission_creates_staking_hotkeys_entry --exact --nocapture -#[test] -fn test_emission_creates_staking_hotkeys_entry() { - new_test_ext(1).execute_with(|| { - let hotkey0 = U256::from(1); - let hotkey1 = U256::from(2); - - let coldkey = U256::from(3); - - // Add to Owner map - Owner::::insert(hotkey0, coldkey); - Owner::::insert(hotkey1, coldkey); - OwnedHotkeys::::insert(coldkey, vec![hotkey0, hotkey1]); - - // Emit through hotkey - SubtensorModule::emit_inflation_through_hotkey_account(&hotkey0, 0, 1_000); - - // Verify StakingHotkeys has an entry - assert_eq!(StakingHotkeys::::get(coldkey).len(), 1); - assert!(StakingHotkeys::::get(coldkey).contains(&hotkey0)); - - // Try again with another emission on hotkey1 - SubtensorModule::emit_inflation_through_hotkey_account(&hotkey1, 0, 2_000); - - // Verify both hotkeys are now in the map - assert_eq!(StakingHotkeys::::get(coldkey).len(), 2); - let final_map = StakingHotkeys::::get(coldkey); - assert!(final_map.contains(&hotkey0)); - assert!(final_map.contains(&hotkey1)); - }) -} diff --git a/pallets/subtensor/tests/swap.rs b/pallets/subtensor/tests/swap.rs deleted file mode 100644 index 21c3a983a..000000000 --- a/pallets/subtensor/tests/swap.rs +++ /dev/null @@ -1,1889 +0,0 @@ -#![allow(unused, clippy::indexing_slicing, clippy::panic, clippy::unwrap_used)] - -use codec::Encode; -use frame_support::weights::Weight; 
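// The deleted swap.rs tests that follow were checking one pattern repeatedly:
// do_swap_hotkey must re-key every hotkey-indexed storage map (Owner, Stake,
// TotalHotkeyStake, Delegates, Uids, and so on) from the old hotkey to the new one,
// leaving nothing behind under the old key. A compressed sketch of that re-keying over
// plain HashMaps, with u64 accounts standing in for the real key types:
use std::collections::HashMap;

type Account = u64;

fn rekey<V>(map: &mut HashMap<Account, V>, old: Account, new: Account) {
    if let Some(value) = map.remove(&old) {
        map.insert(new, value);
    }
}

fn main() {
    let mut owner: HashMap<Account, Account> = HashMap::new();
    let mut total_hotkey_stake: HashMap<Account, u64> = HashMap::new();
    owner.insert(1, 3); // old_hotkey -> coldkey
    total_hotkey_stake.insert(1, 500);

    rekey(&mut owner, 1, 2); // old_hotkey = 1, new_hotkey = 2
    rekey(&mut total_hotkey_stake, 1, 2);

    // Mirrors the assertions in test_do_swap_hotkey_ok: the new hotkey carries the
    // coldkey association and the stake, and the old hotkey has no entries left.
    assert_eq!(owner.get(&2), Some(&3));
    assert!(!owner.contains_key(&1));
    assert_eq!(total_hotkey_stake.get(&2), Some(&500));
}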
-use frame_support::{assert_err, assert_noop, assert_ok}; -use frame_system::{Config, RawOrigin}; -mod mock; -use mock::*; -use pallet_subtensor::*; -use sp_core::U256; - -#[test] -fn test_do_swap_hotkey_ok() { - new_test_ext(1).execute_with(|| { - let netuid: u16 = 1; - let tempo: u16 = 13; - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let coldkey = U256::from(3); - let swap_cost = 1_000_000_000u64; - - // Setup initial state - add_network(netuid, tempo, 0); - register_ok_neuron(netuid, old_hotkey, coldkey, 0); - SubtensorModule::add_balance_to_coldkey_account(&coldkey, swap_cost); - - // Perform the swap - assert_ok!(SubtensorModule::do_swap_hotkey( - <::RuntimeOrigin>::signed(coldkey), - &old_hotkey, - &new_hotkey - )); - - // Verify the swap - assert_eq!( - SubtensorModule::get_owning_coldkey_for_hotkey(&new_hotkey), - coldkey - ); - assert_ne!( - SubtensorModule::get_owning_coldkey_for_hotkey(&old_hotkey), - coldkey - ); - - // Verify other storage changes - assert_eq!( - SubtensorModule::get_total_stake_for_hotkey(&new_hotkey), - SubtensorModule::get_total_stake_for_hotkey(&old_hotkey) - ); - assert_eq!( - SubtensorModule::get_delegate(new_hotkey.encode()), - SubtensorModule::get_delegate(old_hotkey.encode()) - ); - assert_eq!( - SubtensorModule::get_last_tx_block(&new_hotkey), - SubtensorModule::get_last_tx_block(&old_hotkey) - ); - - // Verify raw storage maps - // Stake - for (coldkey, stake_amount) in Stake::::iter_prefix(old_hotkey) { - assert_eq!(Stake::::get(new_hotkey, coldkey), stake_amount); - } - - let mut weight = Weight::zero(); - // UIDs - for netuid in SubtensorModule::get_netuid_is_member(&old_hotkey, &mut weight) { - assert_eq!( - Uids::::get(netuid, new_hotkey), - Uids::::get(netuid, old_hotkey) - ); - } - - // Prometheus - for netuid in SubtensorModule::get_netuid_is_member(&old_hotkey, &mut weight) { - assert_eq!( - Prometheus::::get(netuid, new_hotkey), - Prometheus::::get(netuid, old_hotkey) - ); - } - - // LoadedEmission - for netuid in SubtensorModule::get_netuid_is_member(&old_hotkey, &mut weight) { - assert_eq!( - LoadedEmission::::get(netuid).unwrap(), - LoadedEmission::::get(netuid).unwrap() - ); - } - - // IsNetworkMember - for netuid in SubtensorModule::get_netuid_is_member(&old_hotkey, &mut weight) { - assert!(IsNetworkMember::::contains_key(new_hotkey, netuid)); - assert!(!IsNetworkMember::::contains_key(old_hotkey, netuid)); - } - - // Owner - assert_eq!(Owner::::get(new_hotkey), coldkey); - - // TotalHotkeyStake - assert_eq!( - TotalHotkeyStake::::get(new_hotkey), - TotalHotkeyStake::::get(old_hotkey) - ); - - // Delegates - assert_eq!( - Delegates::::get(new_hotkey), - Delegates::::get(old_hotkey) - ); - - // LastTxBlock - assert_eq!( - LastTxBlock::::get(new_hotkey), - LastTxBlock::::get(old_hotkey) - ); - - // Axons - for netuid in SubtensorModule::get_netuid_is_member(&old_hotkey, &mut weight) { - assert_eq!( - Axons::::get(netuid, new_hotkey), - Axons::::get(netuid, old_hotkey) - ); - } - - // TotalHotkeyColdkeyStakesThisInterval - assert_eq!( - TotalHotkeyColdkeyStakesThisInterval::::get(new_hotkey, coldkey), - TotalHotkeyColdkeyStakesThisInterval::::get(old_hotkey, coldkey) - ); - }); -} - -#[test] -fn test_do_swap_hotkey_ok_robust() { - new_test_ext(1).execute_with(|| { - let num_subnets: u16 = 10; - let tempo: u16 = 13; - let swap_cost = 1_000_000_000u64; - - // Create 10 sets of keys - let mut old_hotkeys = vec![]; - let mut new_hotkeys = vec![]; - let mut coldkeys = vec![]; - - for i in 0..10 { - 
old_hotkeys.push(U256::from(i * 2 + 1)); - new_hotkeys.push(U256::from(i * 2 + 2)); - coldkeys.push(U256::from(i * 2 + 11)); - } - - // Setup initial state - for netuid in 1..=num_subnets { - add_network(netuid, tempo, 0); - SubtensorModule::set_max_registrations_per_block(netuid, 20); - SubtensorModule::set_target_registrations_per_interval(netuid, 1000); - log::info!( - "Registrations this interval for netuid {:?} is {:?}", - netuid, - SubtensorModule::get_target_registrations_per_interval(netuid) - ); - for i in 0..10 { - register_ok_neuron(netuid, old_hotkeys[i], coldkeys[i], 0); - } - } - - // Add balance to coldkeys for swap cost - for coldkey in coldkeys.iter().take(10) { - SubtensorModule::add_balance_to_coldkey_account(coldkey, swap_cost); - } - - // Add old_hotkeys[0] and old_hotkeys[1] to Senate - assert_ok!(SenateMembers::add_member( - RawOrigin::Root.into(), - old_hotkeys[0] - )); - assert_ok!(SenateMembers::add_member( - RawOrigin::Root.into(), - old_hotkeys[1] - )); - - // Verify initial Senate membership - assert!(Senate::is_member(&old_hotkeys[0])); - assert!(Senate::is_member(&old_hotkeys[1])); - assert!(!Senate::is_member(&new_hotkeys[0])); - assert!(!Senate::is_member(&new_hotkeys[1])); - - // Perform the swaps for only two hotkeys - assert_ok!(SubtensorModule::do_swap_hotkey( - <::RuntimeOrigin>::signed(coldkeys[0]), - &old_hotkeys[0], - &new_hotkeys[0] - )); - assert_ok!(SubtensorModule::do_swap_hotkey( - <::RuntimeOrigin>::signed(coldkeys[1]), - &old_hotkeys[1], - &new_hotkeys[1] - )); - - // Verify the swaps - for netuid in 1..=num_subnets { - for i in 0..10 { - if i == 0 || i == 1 { - assert_eq!( - SubtensorModule::get_owning_coldkey_for_hotkey(&new_hotkeys[i]), - coldkeys[i] - ); - assert_ne!( - SubtensorModule::get_owning_coldkey_for_hotkey(&old_hotkeys[i]), - coldkeys[i] - ); - - // Verify other storage changes - assert_eq!( - SubtensorModule::get_total_stake_for_hotkey(&new_hotkeys[i]), - SubtensorModule::get_total_stake_for_hotkey(&old_hotkeys[i]) - ); - - assert_eq!( - SubtensorModule::get_delegate(new_hotkeys[i].encode()), - SubtensorModule::get_delegate(old_hotkeys[i].encode()) - ); - - assert_eq!( - SubtensorModule::get_last_tx_block(&new_hotkeys[i]), - SubtensorModule::get_last_tx_block(&old_hotkeys[i]) - ); - - // Verify raw storage maps - // Stake - for (coldkey, stake_amount) in Stake::::iter_prefix(old_hotkeys[i]) { - assert_eq!(Stake::::get(new_hotkeys[i], coldkey), stake_amount); - } - - let mut weight = Weight::zero(); - // UIDs - for netuid in - SubtensorModule::get_netuid_is_member(&old_hotkeys[i], &mut weight) - { - assert_eq!( - Uids::::get(netuid, new_hotkeys[i]), - Uids::::get(netuid, old_hotkeys[i]) - ); - } - - // Prometheus - for netuid in - SubtensorModule::get_netuid_is_member(&old_hotkeys[i], &mut weight) - { - assert_eq!( - Prometheus::::get(netuid, new_hotkeys[i]), - Prometheus::::get(netuid, old_hotkeys[i]) - ); - } - - // LoadedEmission - for netuid in - SubtensorModule::get_netuid_is_member(&old_hotkeys[i], &mut weight) - { - assert_eq!( - LoadedEmission::::get(netuid).unwrap(), - LoadedEmission::::get(netuid).unwrap() - ); - } - - // IsNetworkMember - for netuid in - SubtensorModule::get_netuid_is_member(&old_hotkeys[i], &mut weight) - { - assert!(IsNetworkMember::::contains_key( - new_hotkeys[i], - netuid - )); - assert!(!IsNetworkMember::::contains_key( - old_hotkeys[i], - netuid - )); - } - - // Owner - assert_eq!(Owner::::get(new_hotkeys[i]), coldkeys[i]); - - // Keys - for (uid, hotkey) in Keys::::iter_prefix(netuid) { - if 
hotkey == old_hotkeys[i] { - assert_eq!(Keys::::get(netuid, uid), new_hotkeys[i]); - } - } - - // Verify Senate membership swap - assert!(!Senate::is_member(&old_hotkeys[i])); - assert!(Senate::is_member(&new_hotkeys[i])); - } else { - // Ensure other hotkeys remain unchanged - assert_eq!( - SubtensorModule::get_owning_coldkey_for_hotkey(&old_hotkeys[i]), - coldkeys[i] - ); - assert_ne!( - SubtensorModule::get_owning_coldkey_for_hotkey(&new_hotkeys[i]), - coldkeys[i] - ); - - // Verify Senate membership remains unchanged for other hotkeys - assert!(!Senate::is_member(&old_hotkeys[i])); - assert!(!Senate::is_member(&new_hotkeys[i])); - } - } - } - }); -} - -#[test] -fn test_swap_hotkey_tx_rate_limit_exceeded() { - new_test_ext(1).execute_with(|| { - let netuid: u16 = 1; - let tempo: u16 = 13; - let old_hotkey = U256::from(1); - let new_hotkey_1 = U256::from(2); - let new_hotkey_2 = U256::from(4); - let coldkey = U256::from(3); - let swap_cost = 1_000_000_000u64 * 2; - - let tx_rate_limit = 1; - - // Get the current transaction rate limit - let current_tx_rate_limit = SubtensorModule::get_tx_rate_limit(); - log::info!("current_tx_rate_limit: {:?}", current_tx_rate_limit); - - // Set the transaction rate limit - SubtensorModule::set_tx_rate_limit(tx_rate_limit); - // assert the rate limit is set to 1000 blocks - assert_eq!(SubtensorModule::get_tx_rate_limit(), tx_rate_limit); - - // Setup initial state - add_network(netuid, tempo, 0); - register_ok_neuron(netuid, old_hotkey, coldkey, 0); - SubtensorModule::add_balance_to_coldkey_account(&coldkey, swap_cost); - - // Perform the first swap - assert_ok!(SubtensorModule::do_swap_hotkey( - <::RuntimeOrigin>::signed(coldkey), - &old_hotkey, - &new_hotkey_1 - )); - - // Attempt to perform another swap immediately, which should fail due to rate limit - assert_err!( - SubtensorModule::do_swap_hotkey( - <::RuntimeOrigin>::signed(coldkey), - &new_hotkey_1, - &new_hotkey_2 - ), - Error::::HotKeySetTxRateLimitExceeded - ); - - // move in time past the rate limit - step_block(1001); - assert_ok!(SubtensorModule::do_swap_hotkey( - <::RuntimeOrigin>::signed(coldkey), - &new_hotkey_1, - &new_hotkey_2 - )); - }); -} - -#[test] -fn test_do_swap_hotkey_err_not_owner() { - new_test_ext(1).execute_with(|| { - let netuid: u16 = 1; - let tempo: u16 = 13; - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let coldkey = U256::from(3); - let not_owner_coldkey = U256::from(4); - let swap_cost = 1_000_000_000u64; - - // Setup initial state - add_network(netuid, tempo, 0); - register_ok_neuron(netuid, old_hotkey, coldkey, 0); - SubtensorModule::add_balance_to_coldkey_account(¬_owner_coldkey, swap_cost); - - // Attempt the swap with a non-owner coldkey - assert_err!( - SubtensorModule::do_swap_hotkey( - <::RuntimeOrigin>::signed(not_owner_coldkey), - &old_hotkey, - &new_hotkey - ), - Error::::NonAssociatedColdKey - ); - }); -} - -#[test] -fn test_swap_owner_success() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let coldkey = U256::from(3); - let mut weight = Weight::zero(); - - // Initialize Owner for old_hotkey - Owner::::insert(old_hotkey, coldkey); - - // Perform the swap - SubtensorModule::swap_owner(&old_hotkey, &new_hotkey, &coldkey, &mut weight); - - // Verify the swap - assert_eq!(Owner::::get(new_hotkey), coldkey); - assert!(!Owner::::contains_key(old_hotkey)); - }); -} - -#[test] -fn test_swap_owner_old_hotkey_not_exist() { - new_test_ext(1).execute_with(|| { - let old_hotkey = 
U256::from(1); - let new_hotkey = U256::from(2); - let coldkey = U256::from(3); - let mut weight = Weight::zero(); - - // Ensure old_hotkey does not exist - assert!(!Owner::::contains_key(old_hotkey)); - - // Perform the swap - SubtensorModule::swap_owner(&old_hotkey, &new_hotkey, &coldkey, &mut weight); - - // Verify the swap - assert_eq!(Owner::::get(new_hotkey), coldkey); - assert!(!Owner::::contains_key(old_hotkey)); - }); -} - -#[test] -fn test_swap_owner_new_hotkey_already_exists() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let coldkey = U256::from(3); - let another_coldkey = U256::from(4); - let mut weight = Weight::zero(); - - // Initialize Owner for old_hotkey and new_hotkey - Owner::::insert(old_hotkey, coldkey); - Owner::::insert(new_hotkey, another_coldkey); - - // Perform the swap - SubtensorModule::swap_owner(&old_hotkey, &new_hotkey, &coldkey, &mut weight); - - // Verify the swap - assert_eq!(Owner::::get(new_hotkey), coldkey); - assert!(!Owner::::contains_key(old_hotkey)); - }); -} - -#[test] -fn test_swap_owner_weight_update() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let coldkey = U256::from(3); - let mut weight = Weight::zero(); - - // Initialize Owner for old_hotkey - Owner::::insert(old_hotkey, coldkey); - - // Perform the swap - SubtensorModule::swap_owner(&old_hotkey, &new_hotkey, &coldkey, &mut weight); - - // Verify the weight update - let expected_weight = ::DbWeight::get().writes(2); - assert_eq!(weight, expected_weight); - }); -} - -#[test] -fn test_swap_total_hotkey_stake_success() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let total_stake = 1000u64; - let mut weight = Weight::zero(); - - // Initialize TotalHotkeyStake for old_hotkey - TotalHotkeyStake::::insert(old_hotkey, total_stake); - - // Perform the swap - SubtensorModule::swap_total_hotkey_stake(&old_hotkey, &new_hotkey, &mut weight); - - // Verify the swap - assert_eq!(TotalHotkeyStake::::get(new_hotkey), total_stake); - assert!(!TotalHotkeyStake::::contains_key(old_hotkey)); - }); -} - -#[test] -fn test_swap_total_hotkey_stake_old_hotkey_not_exist() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let mut weight = Weight::zero(); - - // Ensure old_hotkey does not exist - assert!(!TotalHotkeyStake::::contains_key(old_hotkey)); - - // Perform the swap - SubtensorModule::swap_total_hotkey_stake(&old_hotkey, &new_hotkey, &mut weight); - - // Verify that new_hotkey does not have a stake - assert!(!TotalHotkeyStake::::contains_key(new_hotkey)); - }); -} - -#[test] -fn test_swap_total_hotkey_stake_weight_update() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let total_stake = 1000u64; - let mut weight = Weight::zero(); - - // Initialize TotalHotkeyStake for old_hotkey - TotalHotkeyStake::::insert(old_hotkey, total_stake); - - // Perform the swap - SubtensorModule::swap_total_hotkey_stake(&old_hotkey, &new_hotkey, &mut weight); - - // Verify the weight update - let expected_weight = ::DbWeight::get().reads_writes(1, 2); - assert_eq!(weight, expected_weight); - }); -} - -#[test] -fn test_swap_delegates_success() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let delegate_take = 10u16; - let mut weight = Weight::zero(); - - // Initialize 
Delegates for old_hotkey - Delegates::::insert(old_hotkey, delegate_take); - - // Perform the swap - SubtensorModule::swap_delegates(&old_hotkey, &new_hotkey, &mut weight); - - // Verify the swap - assert_eq!(Delegates::::get(new_hotkey), delegate_take); - assert!(!Delegates::::contains_key(old_hotkey)); - }); -} - -#[test] -fn test_swap_delegates_old_hotkey_not_exist() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let mut weight = Weight::zero(); - - // Ensure old_hotkey does not exist - assert!(!Delegates::::contains_key(old_hotkey)); - - // Perform the swap - SubtensorModule::swap_delegates(&old_hotkey, &new_hotkey, &mut weight); - - // Verify that new_hotkey does not have a delegate - assert!(!Delegates::::contains_key(new_hotkey)); - }); -} - -#[test] -fn test_swap_delegates_weight_update() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let delegate_take = 10u16; - let mut weight = Weight::zero(); - - // Initialize Delegates for old_hotkey - Delegates::::insert(old_hotkey, delegate_take); - - // Perform the swap - SubtensorModule::swap_delegates(&old_hotkey, &new_hotkey, &mut weight); - - // Verify the weight update - let expected_weight = ::DbWeight::get().reads_writes(1, 2); - assert_eq!(weight, expected_weight); - }); -} - -#[test] -fn test_swap_stake_success() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let coldkey = U256::from(3); - let stake_amount = 1000u64; - let mut weight = Weight::zero(); - - // Initialize Stake for old_hotkey - Stake::::insert(old_hotkey, coldkey, stake_amount); - - // Perform the swap - SubtensorModule::swap_stake(&old_hotkey, &new_hotkey, &mut weight); - - // Verify the swap - assert_eq!(Stake::::get(new_hotkey, coldkey), stake_amount); - assert!(!Stake::::contains_key(old_hotkey, coldkey)); - }); -} - -#[test] -fn test_swap_stake_old_hotkey_not_exist() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let coldkey = U256::from(3); - let stake_amount = 1000u64; - let mut weight = Weight::zero(); - - // Initialize Stake for old_hotkey - Stake::::insert(old_hotkey, coldkey, stake_amount); - - // Ensure old_hotkey has a stake - assert!(Stake::::contains_key(old_hotkey, coldkey)); - - // Perform the swap - SubtensorModule::swap_stake(&old_hotkey, &new_hotkey, &mut weight); - - // Verify that new_hotkey has the stake and old_hotkey does not - assert!(Stake::::contains_key(new_hotkey, coldkey)); - assert!(!Stake::::contains_key(old_hotkey, coldkey)); - }); -} - -#[test] -fn test_swap_stake_weight_update() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let coldkey = U256::from(3); - let stake_amount = 1000u64; - let mut weight = Weight::zero(); - - // Initialize Stake for old_hotkey - Stake::::insert(old_hotkey, coldkey, stake_amount); - - // Perform the swap - SubtensorModule::swap_stake(&old_hotkey, &new_hotkey, &mut weight); - - // Verify the weight update - let expected_weight = ::DbWeight::get().writes(4); - assert_eq!(weight, expected_weight); - }); -} - -#[test] -fn test_swap_is_network_member_success() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let netuid_is_member = vec![1u16, 2u16]; - let mut weight = Weight::zero(); - - // Initialize IsNetworkMember for old_hotkey - for netuid in 
&netuid_is_member { - IsNetworkMember::::insert(old_hotkey, netuid, true); - } - - // Perform the swap - SubtensorModule::swap_is_network_member( - &old_hotkey, - &new_hotkey, - &netuid_is_member, - &mut weight, - ); - - // Verify the swap - for netuid in &netuid_is_member { - assert!(IsNetworkMember::::contains_key(new_hotkey, netuid)); - assert!(!IsNetworkMember::::contains_key(old_hotkey, netuid)); - } - }); -} - -#[test] -fn test_swap_is_network_member_weight_update() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let netuid_is_member = vec![1u16, 2u16]; - let mut weight = Weight::zero(); - - // Initialize IsNetworkMember for old_hotkey - for netuid in &netuid_is_member { - IsNetworkMember::::insert(old_hotkey, netuid, true); - } - - // Perform the swap - SubtensorModule::swap_is_network_member( - &old_hotkey, - &new_hotkey, - &netuid_is_member, - &mut weight, - ); - - // Verify the weight update - let expected_weight = ::DbWeight::get().writes(4); - assert_eq!(weight, expected_weight); - }); -} - -#[test] -fn test_swap_axons_success() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let netuid_is_member = vec![1u16, 2u16]; - let axon_info = AxonInfo { - block: 100, - version: 1, - ip: 0x1234567890abcdef, - port: 8080, - ip_type: 4, - protocol: 1, - placeholder1: 0, - placeholder2: 0, - }; - let mut weight = Weight::zero(); - - // Initialize Axons for old_hotkey - for netuid in &netuid_is_member { - Axons::::insert(netuid, old_hotkey, axon_info.clone()); - } - - // Perform the swap - SubtensorModule::swap_axons(&old_hotkey, &new_hotkey, &netuid_is_member, &mut weight); - - // Verify the swap - for netuid in &netuid_is_member { - assert_eq!(Axons::::get(netuid, new_hotkey).unwrap(), axon_info); - assert!(!Axons::::contains_key(netuid, old_hotkey)); - } - }); -} - -#[test] -fn test_swap_axons_weight_update() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let netuid_is_member = vec![1u16, 2u16]; - let axon_info = AxonInfo { - block: 100, - version: 1, - ip: 0x1234567890abcdef, - port: 8080, - ip_type: 4, - protocol: 1, - placeholder1: 0, - placeholder2: 0, - }; - let mut weight = Weight::zero(); - - // Initialize Axons for old_hotkey - for netuid in &netuid_is_member { - Axons::::insert(netuid, old_hotkey, axon_info.clone()); - } - - // Perform the swap - SubtensorModule::swap_axons(&old_hotkey, &new_hotkey, &netuid_is_member, &mut weight); - - // Verify the weight update - let expected_weight = netuid_is_member.len() as u64 - * ::DbWeight::get().reads_writes(1, 2); - assert_eq!(weight, expected_weight); - }); -} - -#[test] -fn test_swap_keys_success() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let netuid_is_member = vec![1u16, 2u16]; - let uid = 42u16; - let mut weight = Weight::zero(); - - // Initialize Keys for old_hotkey - for netuid in &netuid_is_member { - log::info!("Inserting old_hotkey:{:?} netuid:{:?}", old_hotkey, netuid); - Keys::::insert(*netuid, uid, old_hotkey); - } - - // Perform the swap - SubtensorModule::swap_keys(&old_hotkey, &new_hotkey, &netuid_is_member, &mut weight); - - // Verify the swap - for netuid in &netuid_is_member { - log::info!( - "neutuid, uid, hotkey: {:?}, {:?}, {:?}", - netuid, - uid, - new_hotkey - ); - assert_eq!(Keys::::get(netuid, uid), new_hotkey); - } - }); -} - -#[test] -fn 
test_swap_keys_weight_update() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let netuid_is_member = vec![1u16, 2u16]; - let uid = 42u16; - let mut weight = Weight::zero(); - - // Initialize Keys for old_hotkey - for netuid in &netuid_is_member { - Keys::::insert(*netuid, uid, old_hotkey); - } - - // Perform the swap - SubtensorModule::swap_keys(&old_hotkey, &new_hotkey, &netuid_is_member, &mut weight); - - // Verify the weight update - let expected_weight = ::DbWeight::get().writes(4); - assert_eq!(weight, expected_weight); - }); -} - -#[test] -fn test_swap_loaded_emission_success() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let netuid_is_member = vec![1u16, 2u16]; - let se = 100u64; - let ve = 200u64; - let mut weight = Weight::zero(); - - // Initialize LoadedEmission for old_hotkey - for netuid in &netuid_is_member { - LoadedEmission::::mutate(netuid, |emission_exists| { - if let Some(emissions) = emission_exists { - emissions.push((old_hotkey, se, ve)); - } else { - *emission_exists = Some(vec![(old_hotkey, se, ve)]); - } - }); - } - - // Perform the swap - SubtensorModule::swap_loaded_emission( - &old_hotkey, - &new_hotkey, - &netuid_is_member, - &mut weight, - ); - - // Verify the swap - for netuid in &netuid_is_member { - let emissions = LoadedEmission::::get(netuid).unwrap(); - assert!(emissions.iter().any(|(hk, _, _)| hk == &new_hotkey)); - assert!(!emissions.iter().any(|(hk, _, _)| hk == &old_hotkey)); - } - }); -} - -#[test] -fn test_swap_loaded_emission_weight_update() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let netuid_is_member = vec![1u16, 2u16]; - // let uid = 42u64; - let se = 100u64; - let ve = 200u64; - let mut weight = Weight::zero(); - - // Initialize LoadedEmission for old_hotkey - for netuid in &netuid_is_member { - LoadedEmission::::mutate(netuid, |emission_exists| { - if let Some(emissions) = emission_exists { - emissions.push((old_hotkey, se, ve)); - } else { - *emission_exists = Some(vec![(old_hotkey, se, ve)]); - } - }); - } - - // Perform the swap - SubtensorModule::swap_loaded_emission( - &old_hotkey, - &new_hotkey, - &netuid_is_member, - &mut weight, - ); - - // Verify the weight update - let expected_weight = ::DbWeight::get().writes(2); - assert_eq!(weight, expected_weight); - }); -} - -#[test] -fn test_swap_uids_success() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let netuid_is_member = vec![1u16, 2u16]; - let uid = 42u16; - let mut weight = Weight::zero(); - - // Initialize Uids for old_hotkey - for netuid in &netuid_is_member { - Uids::::insert(netuid, old_hotkey, uid); - } - - // Perform the swap - SubtensorModule::swap_uids(&old_hotkey, &new_hotkey, &netuid_is_member, &mut weight); - - // Verify the swap - for netuid in &netuid_is_member { - assert_eq!(Uids::::get(netuid, new_hotkey).unwrap(), uid); - assert!(!Uids::::contains_key(netuid, old_hotkey)); - } - }); -} - -#[test] -fn test_swap_uids_weight_update() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let netuid_is_member = vec![1u16, 2u16]; - let uid = 42u16; - let mut weight = Weight::zero(); - - // Initialize Uids for old_hotkey - for netuid in &netuid_is_member { - Uids::::insert(netuid, old_hotkey, uid); - } - - // Perform the swap - SubtensorModule::swap_uids(&old_hotkey, 
&new_hotkey, &netuid_is_member, &mut weight); - - // Verify the weight update - let expected_weight = ::DbWeight::get().writes(4); - assert_eq!(weight, expected_weight); - }); -} - -#[test] -fn test_swap_prometheus_success() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let netuid_is_member = vec![1u16, 2u16]; - let prometheus_info = PrometheusInfo { - block: 100, - version: 1, - ip: 0x1234567890abcdef, - port: 8080, - ip_type: 4, - }; - let mut weight = Weight::zero(); - - // Initialize Prometheus for old_hotkey - for netuid in &netuid_is_member { - Prometheus::::insert(netuid, old_hotkey, prometheus_info.clone()); - } - - // Perform the swap - SubtensorModule::swap_prometheus(&old_hotkey, &new_hotkey, &netuid_is_member, &mut weight); - - // Verify the swap - for netuid in &netuid_is_member { - assert_eq!( - Prometheus::::get(netuid, new_hotkey).unwrap(), - prometheus_info - ); - assert!(!Prometheus::::contains_key(netuid, old_hotkey)); - } - }); -} - -#[test] -fn test_swap_prometheus_weight_update() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let netuid_is_member = vec![1u16, 2u16]; - let prometheus_info = PrometheusInfo { - block: 100, - version: 1, - ip: 0x1234567890abcdef, - port: 8080, - ip_type: 4, - }; - let mut weight = Weight::zero(); - - // Initialize Prometheus for old_hotkey - for netuid in &netuid_is_member { - Prometheus::::insert(netuid, old_hotkey, prometheus_info.clone()); - } - - // Perform the swap - SubtensorModule::swap_prometheus(&old_hotkey, &new_hotkey, &netuid_is_member, &mut weight); - - // Verify the weight update - let expected_weight = netuid_is_member.len() as u64 - * ::DbWeight::get().reads_writes(1, 2); - assert_eq!(weight, expected_weight); - }); -} - -#[test] -fn test_swap_total_hotkey_coldkey_stakes_this_interval_success() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let coldkey = U256::from(3); - let stake = (1000u64, 42u64); // Example tuple value - let mut weight = Weight::zero(); - - // Initialize TotalHotkeyColdkeyStakesThisInterval for old_hotkey - TotalHotkeyColdkeyStakesThisInterval::::insert(old_hotkey, coldkey, stake); - - // Perform the swap - SubtensorModule::swap_total_hotkey_coldkey_stakes_this_interval( - &old_hotkey, - &new_hotkey, - &mut weight, - ); - - // Verify the swap - assert_eq!( - TotalHotkeyColdkeyStakesThisInterval::::get(new_hotkey, coldkey), - stake - ); - assert!(!TotalHotkeyColdkeyStakesThisInterval::::contains_key( - old_hotkey, coldkey - )); - }); -} - -#[test] -fn test_swap_total_hotkey_coldkey_stakes_this_interval_weight_update() { - new_test_ext(1).execute_with(|| { - let old_hotkey = U256::from(1); - let new_hotkey = U256::from(2); - let coldkey = U256::from(3); - let stake = (1000u64, 42u64); - let mut weight = Weight::zero(); - - // Initialize TotalHotkeyColdkeyStakesThisInterval for old_hotkey - TotalHotkeyColdkeyStakesThisInterval::::insert(old_hotkey, coldkey, stake); - - // Perform the swap - - SubtensorModule::swap_total_hotkey_coldkey_stakes_this_interval( - &old_hotkey, - &new_hotkey, - &mut weight, - ); - - // Verify the weight update - let expected_weight = ::DbWeight::get().writes(2); - assert_eq!(weight, expected_weight); - }); -} - -#[test] -fn test_do_swap_coldkey_success() { - new_test_ext(1).execute_with(|| { - let old_coldkey = U256::from(1); - let new_coldkey = U256::from(2); - let hotkey1 = U256::from(3); - let 
hotkey2 = U256::from(4); - let netuid = 1u16; - let stake_amount1 = 1000u64; - let stake_amount2 = 2000u64; - let swap_cost = SubtensorModule::get_key_swap_cost(); - let free_balance_old = 12345u64 + swap_cost; - - // Setup initial state - add_network(netuid, 13, 0); - register_ok_neuron(netuid, hotkey1, old_coldkey, 0); - register_ok_neuron(netuid, hotkey2, old_coldkey, 0); - - // Add balance to old coldkey - SubtensorModule::add_balance_to_coldkey_account( - &old_coldkey, - stake_amount1 + stake_amount2 + free_balance_old, - ); - - // Log initial state - log::info!( - "Initial total stake: {}", - SubtensorModule::get_total_stake() - ); - log::info!( - "Initial old coldkey stake: {}", - SubtensorModule::get_total_stake_for_coldkey(&old_coldkey) - ); - log::info!( - "Initial new coldkey stake: {}", - SubtensorModule::get_total_stake_for_coldkey(&new_coldkey) - ); - - // Add stake to the neurons - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(old_coldkey), - hotkey1, - stake_amount1 - )); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(old_coldkey), - hotkey2, - stake_amount2 - )); - - // Log state after adding stake - log::info!( - "Total stake after adding: {}", - SubtensorModule::get_total_stake() - ); - log::info!( - "Old coldkey stake after adding: {}", - SubtensorModule::get_total_stake_for_coldkey(&old_coldkey) - ); - log::info!( - "New coldkey stake after adding: {}", - SubtensorModule::get_total_stake_for_coldkey(&new_coldkey) - ); - - // Record total stake before swap - let total_stake_before_swap = SubtensorModule::get_total_stake(); - - // Perform the swap - assert_ok!(SubtensorModule::do_swap_coldkey( - <::RuntimeOrigin>::signed(old_coldkey), - &new_coldkey - )); - - // Log state after swap - log::info!( - "Total stake after swap: {}", - SubtensorModule::get_total_stake() - ); - log::info!( - "Old coldkey stake after swap: {}", - SubtensorModule::get_total_stake_for_coldkey(&old_coldkey) - ); - log::info!( - "New coldkey stake after swap: {}", - SubtensorModule::get_total_stake_for_coldkey(&new_coldkey) - ); - - // Verify the swap - assert_eq!(Owner::::get(hotkey1), new_coldkey); - assert_eq!(Owner::::get(hotkey2), new_coldkey); - assert_eq!( - TotalColdkeyStake::::get(new_coldkey), - stake_amount1 + stake_amount2 - ); - assert_eq!(TotalColdkeyStake::::get(old_coldkey), 0); - assert_eq!(Stake::::get(hotkey1, new_coldkey), stake_amount1); - assert_eq!(Stake::::get(hotkey2, new_coldkey), stake_amount2); - assert!(!Stake::::contains_key(hotkey1, old_coldkey)); - assert!(!Stake::::contains_key(hotkey2, old_coldkey)); - - // Verify OwnedHotkeys - let new_owned_hotkeys = OwnedHotkeys::::get(new_coldkey); - assert!(new_owned_hotkeys.contains(&hotkey1)); - assert!(new_owned_hotkeys.contains(&hotkey2)); - assert_eq!(new_owned_hotkeys.len(), 2); - assert!(!OwnedHotkeys::::contains_key(old_coldkey)); - - // Verify balance transfer - assert_eq!( - SubtensorModule::get_coldkey_balance(&new_coldkey), - free_balance_old - swap_cost - ); - assert_eq!(SubtensorModule::get_coldkey_balance(&old_coldkey), 0); - - // Verify total stake remains unchanged - assert_eq!( - SubtensorModule::get_total_stake(), - total_stake_before_swap, - "Total stake changed unexpectedly" - ); - - // Verify event emission - System::assert_last_event( - Event::ColdkeySwapped { - old_coldkey, - new_coldkey, - } - .into(), - ); - }); -} - -// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test swap -- test_swap_stake_for_coldkey --exact --nocaptur -#[test] -fn 
test_swap_stake_for_coldkey() { - new_test_ext(1).execute_with(|| { - let old_coldkey = U256::from(1); - let new_coldkey = U256::from(2); - let hotkey1 = U256::from(3); - let hotkey2 = U256::from(4); - let stake_amount1 = 1000u64; - let stake_amount2 = 2000u64; - let stake_amount3 = 3000u64; - let total_stake = stake_amount1 + stake_amount2; - let mut weight = Weight::zero(); - - // Setup initial state - OwnedHotkeys::::insert(old_coldkey, vec![hotkey1, hotkey2]); - StakingHotkeys::::insert(old_coldkey, vec![hotkey1, hotkey2]); - Stake::::insert(hotkey1, old_coldkey, stake_amount1); - Stake::::insert(hotkey2, old_coldkey, stake_amount2); - assert_eq!(Stake::::get(hotkey1, old_coldkey), stake_amount1); - assert_eq!(Stake::::get(hotkey1, old_coldkey), stake_amount1); - - // Insert existing for same hotkey1 - Stake::::insert(hotkey1, new_coldkey, stake_amount3); - StakingHotkeys::::insert(new_coldkey, vec![hotkey1]); - - TotalHotkeyStake::::insert(hotkey1, stake_amount1); - TotalHotkeyStake::::insert(hotkey2, stake_amount2); - TotalColdkeyStake::::insert(old_coldkey, total_stake); - - // Set up total issuance - TotalIssuance::::put(total_stake); - TotalStake::::put(total_stake); - - // Record initial values - let initial_total_issuance = SubtensorModule::get_total_issuance(); - let initial_total_stake = SubtensorModule::get_total_stake(); - - // Perform the swap - SubtensorModule::swap_stake_for_coldkey(&old_coldkey, &new_coldkey, &mut weight); - - // Verify stake is additive, not replaced - assert_eq!( - Stake::::get(hotkey1, new_coldkey), - stake_amount1 + stake_amount3 - ); - - // Verify ownership transfer - assert_eq!( - SubtensorModule::get_owned_hotkeys(&new_coldkey), - vec![hotkey1, hotkey2] - ); - assert_eq!(SubtensorModule::get_owned_hotkeys(&old_coldkey), vec![]); - - // Verify stake transfer - assert_eq!(Stake::::get(hotkey2, new_coldkey), stake_amount2); - assert_eq!(Stake::::get(hotkey1, old_coldkey), 0); - assert_eq!(Stake::::get(hotkey2, old_coldkey), 0); - - // Verify TotalColdkeyStake - assert_eq!(TotalColdkeyStake::::get(new_coldkey), total_stake); - assert_eq!(TotalColdkeyStake::::get(old_coldkey), 0); - - // Verify TotalHotkeyStake remains unchanged - assert_eq!(TotalHotkeyStake::::get(hotkey1), stake_amount1); - assert_eq!(TotalHotkeyStake::::get(hotkey2), stake_amount2); - - // Verify total stake and issuance remain unchanged - assert_eq!( - SubtensorModule::get_total_stake(), - initial_total_stake, - "Total stake changed unexpectedly" - ); - assert_eq!( - SubtensorModule::get_total_issuance(), - initial_total_issuance, - "Total issuance changed unexpectedly" - ); - }); -} - -#[test] -fn test_swap_staking_hotkeys_for_coldkey() { - new_test_ext(1).execute_with(|| { - let old_coldkey = U256::from(1); - let new_coldkey = U256::from(2); - let hotkey1 = U256::from(3); - let hotkey2 = U256::from(4); - let stake_amount1 = 1000u64; - let stake_amount2 = 2000u64; - let total_stake = stake_amount1 + stake_amount2; - let mut weight = Weight::zero(); - - // Setup initial state - OwnedHotkeys::::insert(old_coldkey, vec![hotkey1, hotkey2]); - Stake::::insert(hotkey1, old_coldkey, stake_amount1); - Stake::::insert(hotkey2, old_coldkey, stake_amount2); - StakingHotkeys::::insert(old_coldkey, vec![hotkey1, hotkey2]); - TotalHotkeyStake::::insert(hotkey1, stake_amount1); - TotalHotkeyStake::::insert(hotkey2, stake_amount2); - TotalColdkeyStake::::insert(old_coldkey, total_stake); - - // Set up total issuance - TotalIssuance::::put(total_stake); - TotalStake::::put(total_stake); - - // 
Perform the swap - SubtensorModule::swap_stake_for_coldkey(&old_coldkey, &new_coldkey, &mut weight); - - // Verify StakingHotkeys transfer - assert_eq!( - StakingHotkeys::::get(new_coldkey), - vec![hotkey1, hotkey2] - ); - assert_eq!(StakingHotkeys::::get(old_coldkey), vec![]); - }); -} - -#[test] -fn test_swap_delegated_stake_for_coldkey() { - new_test_ext(1).execute_with(|| { - let old_coldkey = U256::from(1); - let new_coldkey = U256::from(2); - let hotkey1 = U256::from(3); - let hotkey2 = U256::from(4); - let stake_amount1 = 1000u64; - let stake_amount2 = 2000u64; - let total_stake = stake_amount1 + stake_amount2; - let mut weight = Weight::zero(); - - // Notice hotkey1 and hotkey2 are not in OwnedHotkeys - // coldkey therefore delegates stake to them - - // Setup initial state - StakingHotkeys::::insert(old_coldkey, vec![hotkey1, hotkey2]); - Stake::::insert(hotkey1, old_coldkey, stake_amount1); - Stake::::insert(hotkey2, old_coldkey, stake_amount2); - TotalHotkeyStake::::insert(hotkey1, stake_amount1); - TotalHotkeyStake::::insert(hotkey2, stake_amount2); - TotalColdkeyStake::::insert(old_coldkey, total_stake); - - // Set up total issuance - TotalIssuance::::put(total_stake); - TotalStake::::put(total_stake); - - // Record initial values - let initial_total_issuance = SubtensorModule::get_total_issuance(); - let initial_total_stake = SubtensorModule::get_total_stake(); - - // Perform the swap - SubtensorModule::swap_stake_for_coldkey(&old_coldkey, &new_coldkey, &mut weight); - - // Verify stake transfer - assert_eq!(Stake::::get(hotkey1, new_coldkey), stake_amount1); - assert_eq!(Stake::::get(hotkey2, new_coldkey), stake_amount2); - assert_eq!(Stake::::get(hotkey1, old_coldkey), 0); - assert_eq!(Stake::::get(hotkey2, old_coldkey), 0); - - // Verify TotalColdkeyStake - assert_eq!(TotalColdkeyStake::::get(new_coldkey), total_stake); - assert_eq!(TotalColdkeyStake::::get(old_coldkey), 0); - - // Verify TotalHotkeyStake remains unchanged - assert_eq!(TotalHotkeyStake::::get(hotkey1), stake_amount1); - assert_eq!(TotalHotkeyStake::::get(hotkey2), stake_amount2); - - // Verify total stake and issuance remain unchanged - assert_eq!( - SubtensorModule::get_total_stake(), - initial_total_stake, - "Total stake changed unexpectedly" - ); - assert_eq!( - SubtensorModule::get_total_issuance(), - initial_total_issuance, - "Total issuance changed unexpectedly" - ); - }); -} - -#[test] -fn test_swap_total_hotkey_coldkey_stakes_this_interval_for_coldkey() { - new_test_ext(1).execute_with(|| { - let old_coldkey = U256::from(1); - let new_coldkey = U256::from(2); - let hotkey1 = U256::from(3); - let hotkey2 = U256::from(4); - let stake1 = (1000u64, 100u64); - let stake2 = (2000u64, 200u64); - let mut weight = Weight::zero(); - - // Initialize TotalHotkeyColdkeyStakesThisInterval for old_coldkey - TotalHotkeyColdkeyStakesThisInterval::::insert(hotkey1, old_coldkey, stake1); - TotalHotkeyColdkeyStakesThisInterval::::insert(hotkey2, old_coldkey, stake2); - - // Populate OwnedHotkeys map - OwnedHotkeys::::insert(old_coldkey, vec![hotkey1, hotkey2]); - - // Perform the swap - SubtensorModule::swap_total_hotkey_coldkey_stakes_this_interval_for_coldkey( - &old_coldkey, - &new_coldkey, - &mut weight, - ); - - // Verify the swap - assert_eq!( - TotalHotkeyColdkeyStakesThisInterval::::get(hotkey1, new_coldkey), - stake1 - ); - assert_eq!( - TotalHotkeyColdkeyStakesThisInterval::::get(hotkey2, new_coldkey), - stake2 - ); - assert!(!TotalHotkeyColdkeyStakesThisInterval::::contains_key( - old_coldkey, - hotkey1 - 
)); - assert!(!TotalHotkeyColdkeyStakesThisInterval::::contains_key( - old_coldkey, - hotkey2 - )); - - // Verify weight update - let expected_weight = ::DbWeight::get().reads_writes(5, 4); - assert_eq!(weight, expected_weight); - }); -} - -#[test] -fn test_swap_subnet_owner_for_coldkey() { - new_test_ext(1).execute_with(|| { - let old_coldkey = U256::from(1); - let new_coldkey = U256::from(2); - let netuid1 = 1u16; - let netuid2 = 2u16; - let mut weight = Weight::zero(); - - // Initialize SubnetOwner for old_coldkey - SubnetOwner::::insert(netuid1, old_coldkey); - SubnetOwner::::insert(netuid2, old_coldkey); - - // Set up TotalNetworks - TotalNetworks::::put(3); - - // Perform the swap - SubtensorModule::swap_subnet_owner_for_coldkey(&old_coldkey, &new_coldkey, &mut weight); - - // Verify the swap - assert_eq!(SubnetOwner::::get(netuid1), new_coldkey); - assert_eq!(SubnetOwner::::get(netuid2), new_coldkey); - - // Verify weight update - let expected_weight = ::DbWeight::get().reads_writes(3, 2); - assert_eq!(weight, expected_weight); - }); -} - -#[test] -fn test_do_swap_coldkey_with_subnet_ownership() { - new_test_ext(1).execute_with(|| { - let old_coldkey = U256::from(1); - let new_coldkey = U256::from(2); - let hotkey = U256::from(3); - let netuid = 1u16; - let stake_amount: u64 = 1000u64; - let swap_cost = SubtensorModule::get_key_swap_cost(); - - // Setup initial state - add_network(netuid, 13, 0); - register_ok_neuron(netuid, hotkey, old_coldkey, 0); - - // Set TotalNetworks because swap relies on it - pallet_subtensor::TotalNetworks::::set(1); - - SubtensorModule::add_balance_to_coldkey_account(&old_coldkey, stake_amount + swap_cost); - SubnetOwner::::insert(netuid, old_coldkey); - - // Populate OwnedHotkeys map - OwnedHotkeys::::insert(old_coldkey, vec![hotkey]); - - // Perform the swap - assert_ok!(SubtensorModule::do_swap_coldkey( - <::RuntimeOrigin>::signed(old_coldkey), - &new_coldkey - )); - - // Verify subnet ownership transfer - assert_eq!(SubnetOwner::::get(netuid), new_coldkey); - }); -} - -#[test] -fn test_coldkey_has_associated_hotkeys() { - new_test_ext(1).execute_with(|| { - let coldkey = U256::from(1); - let hotkey = U256::from(2); - let netuid = 1u16; - - // Setup initial state - add_network(netuid, 13, 0); - register_ok_neuron(netuid, hotkey, coldkey, 0); - SubtensorModule::add_balance_to_coldkey_account(&coldkey, 1000); - }); -} - -// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test swap -- test_coldkey_swap_total --exact --nocapture -#[test] -fn test_coldkey_swap_total() { - new_test_ext(1).execute_with(|| { - let coldkey = U256::from(1); - let nominator1 = U256::from(2); - let nominator2 = U256::from(3); - let nominator3 = U256::from(4); - let delegate1 = U256::from(5); - let delegate2 = U256::from(6); - let delegate3 = U256::from(7); - let hotkey1 = U256::from(2); - let hotkey2 = U256::from(3); - let hotkey3 = U256::from(4); - let netuid1 = 1u16; - let netuid2 = 2u16; - let netuid3 = 3u16; - SubtensorModule::add_balance_to_coldkey_account(&coldkey, 1000); - SubtensorModule::add_balance_to_coldkey_account(&delegate1, 1000); - SubtensorModule::add_balance_to_coldkey_account(&delegate2, 1000); - SubtensorModule::add_balance_to_coldkey_account(&delegate3, 1000); - SubtensorModule::add_balance_to_coldkey_account(&nominator1, 1000); - SubtensorModule::add_balance_to_coldkey_account(&nominator2, 1000); - SubtensorModule::add_balance_to_coldkey_account(&nominator3, 1000); - - // Setup initial state - add_network(netuid1, 13, 0); - add_network(netuid2, 14, 0); - 
add_network(netuid3, 15, 0); - register_ok_neuron(netuid1, hotkey1, coldkey, 0); - register_ok_neuron(netuid2, hotkey2, coldkey, 0); - register_ok_neuron(netuid3, hotkey3, coldkey, 0); - register_ok_neuron(netuid1, delegate1, delegate1, 0); - register_ok_neuron(netuid2, delegate2, delegate2, 0); - register_ok_neuron(netuid3, delegate3, delegate3, 0); - assert_ok!(SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(coldkey), - hotkey1, - u16::MAX / 10 - )); - assert_ok!(SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(coldkey), - hotkey2, - u16::MAX / 10 - )); - assert_ok!(SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(coldkey), - hotkey3, - u16::MAX / 10 - )); - assert_ok!(SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(delegate1), - delegate1, - u16::MAX / 10 - )); - assert_ok!(SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(delegate2), - delegate2, - u16::MAX / 10 - )); - assert_ok!(SubtensorModule::do_become_delegate( - <::RuntimeOrigin>::signed(delegate3), - delegate3, - u16::MAX / 10 - )); - - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey), - hotkey1, - 100 - )); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey), - hotkey2, - 100 - )); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey), - hotkey3, - 100 - )); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey), - delegate1, - 100 - )); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey), - delegate2, - 100 - )); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(coldkey), - delegate3, - 100 - )); - - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(delegate1), - hotkey1, - 100 - )); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(delegate2), - hotkey2, - 100 - )); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(delegate3), - hotkey3, - 100 - )); - - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(delegate1), - delegate1, - 100 - )); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(delegate2), - delegate2, - 100 - )); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(delegate3), - delegate3, - 100 - )); - - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(nominator1), - hotkey1, - 100 - )); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(nominator2), - hotkey2, - 100 - )); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(nominator3), - hotkey3, - 100 - )); - - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(nominator1), - delegate1, - 100 - )); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(nominator2), - delegate2, - 100 - )); - assert_ok!(SubtensorModule::add_stake( - <::RuntimeOrigin>::signed(nominator3), - delegate3, - 100 - )); - - assert_eq!( - SubtensorModule::get_owned_hotkeys(&coldkey), - vec![hotkey1, hotkey2, hotkey3] - ); - assert_eq!( - SubtensorModule::get_all_staked_hotkeys(&coldkey), - vec![hotkey1, hotkey2, hotkey3, delegate1, delegate2, delegate3] - ); - assert_eq!(SubtensorModule::get_total_stake_for_coldkey(&coldkey), 600); - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey1), 300); - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey2), 300); - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey3), 300); - 
assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&delegate1), 300); - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&delegate2), 300); - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&delegate3), 300); - - assert_eq!( - SubtensorModule::get_owned_hotkeys(&delegate1), - vec![delegate1] - ); - assert_eq!( - SubtensorModule::get_owned_hotkeys(&delegate2), - vec![delegate2] - ); - assert_eq!( - SubtensorModule::get_owned_hotkeys(&delegate3), - vec![delegate3] - ); - assert_eq!( - SubtensorModule::get_all_staked_hotkeys(&delegate1), - vec![delegate1, hotkey1] - ); - assert_eq!( - SubtensorModule::get_all_staked_hotkeys(&delegate2), - vec![delegate2, hotkey2] - ); - assert_eq!( - SubtensorModule::get_all_staked_hotkeys(&delegate3), - vec![delegate3, hotkey3] - ); - - assert_eq!(SubtensorModule::get_owned_hotkeys(&nominator1), vec![]); - assert_eq!(SubtensorModule::get_owned_hotkeys(&nominator2), vec![]); - assert_eq!(SubtensorModule::get_owned_hotkeys(&nominator3), vec![]); - - assert_eq!( - SubtensorModule::get_all_staked_hotkeys(&nominator1), - vec![hotkey1, delegate1] - ); - assert_eq!( - SubtensorModule::get_all_staked_hotkeys(&nominator2), - vec![hotkey2, delegate2] - ); - assert_eq!( - SubtensorModule::get_all_staked_hotkeys(&nominator3), - vec![hotkey3, delegate3] - ); - - // Perform the swap - let new_coldkey = U256::from(1100); - assert_eq!(SubtensorModule::get_total_stake_for_coldkey(&coldkey), 600); - assert_ok!(SubtensorModule::perform_swap_coldkey( - &coldkey, - &new_coldkey - )); - assert_eq!( - SubtensorModule::get_total_stake_for_coldkey(&new_coldkey), - 600 - ); - - // Check everything is swapped. - assert_eq!( - SubtensorModule::get_owned_hotkeys(&new_coldkey), - vec![hotkey1, hotkey2, hotkey3] - ); - assert_eq!( - SubtensorModule::get_all_staked_hotkeys(&new_coldkey), - vec![hotkey1, hotkey2, hotkey3, delegate1, delegate2, delegate3] - ); - assert_eq!( - SubtensorModule::get_total_stake_for_coldkey(&new_coldkey), - 600 - ); - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey1), 300); - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey2), 300); - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey3), 300); - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&delegate1), 300); - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&delegate2), 300); - assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&delegate3), 300); - - assert_eq!( - SubtensorModule::get_owned_hotkeys(&delegate1), - vec![delegate1] - ); - assert_eq!( - SubtensorModule::get_owned_hotkeys(&delegate2), - vec![delegate2] - ); - assert_eq!( - SubtensorModule::get_owned_hotkeys(&delegate3), - vec![delegate3] - ); - assert_eq!( - SubtensorModule::get_all_staked_hotkeys(&delegate1), - vec![delegate1, hotkey1] - ); - assert_eq!( - SubtensorModule::get_all_staked_hotkeys(&delegate2), - vec![delegate2, hotkey2] - ); - assert_eq!( - SubtensorModule::get_all_staked_hotkeys(&delegate3), - vec![delegate3, hotkey3] - ); - - assert_eq!(SubtensorModule::get_owned_hotkeys(&nominator1), vec![]); - assert_eq!(SubtensorModule::get_owned_hotkeys(&nominator2), vec![]); - assert_eq!(SubtensorModule::get_owned_hotkeys(&nominator3), vec![]); - - assert_eq!( - SubtensorModule::get_all_staked_hotkeys(&nominator1), - vec![hotkey1, delegate1] - ); - assert_eq!( - SubtensorModule::get_all_staked_hotkeys(&nominator2), - vec![hotkey2, delegate2] - ); - assert_eq!( - SubtensorModule::get_all_staked_hotkeys(&nominator3), - vec![hotkey3, delegate3] - ); - }); -} - -#[test] 
-fn test_swap_senate_member() {
-    new_test_ext(1).execute_with(|| {
-        let old_hotkey = U256::from(1);
-        let new_hotkey = U256::from(2);
-        let non_member_hotkey = U256::from(3);
-        let mut weight = Weight::zero();
-
-        // Setup: Add old_hotkey as a Senate member
-        assert_ok!(SenateMembers::add_member(
-            RawOrigin::Root.into(),
-            old_hotkey
-        ));
-
-        // Test 1: Successful swap
-        assert_ok!(SubtensorModule::swap_senate_member(
-            &old_hotkey,
-            &new_hotkey,
-            &mut weight
-        ));
-        assert!(Senate::is_member(&new_hotkey));
-        assert!(!Senate::is_member(&old_hotkey));
-
-        // Verify weight update
-        let expected_weight = <Test as frame_system::Config>::DbWeight::get().reads_writes(2, 2);
-        assert_eq!(weight, expected_weight);
-
-        // Reset weight for next test
-        weight = Weight::zero();
-
-        // Test 2: Swap with non-member (should not change anything)
-        assert_ok!(SubtensorModule::swap_senate_member(
-            &non_member_hotkey,
-            &new_hotkey,
-            &mut weight
-        ));
-        assert!(Senate::is_member(&new_hotkey));
-        assert!(!Senate::is_member(&non_member_hotkey));
-
-        // Verify weight update (should only have read operations)
-        let expected_weight = <Test as frame_system::Config>::DbWeight::get().reads(1);
-        assert_eq!(weight, expected_weight);
-    });
-}
-
-// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test swap -- test_coldkey_delegations --exact --nocapture
-#[test]
-fn test_coldkey_delegations() {
-    new_test_ext(1).execute_with(|| {
-        let new_coldkey = U256::from(0);
-        let owner = U256::from(1);
-        let coldkey = U256::from(4);
-        let delegate = U256::from(2);
-        let netuid = 1u16;
-        add_network(netuid, 13, 0);
-        register_ok_neuron(netuid, delegate, owner, 0);
-        SubtensorModule::add_balance_to_coldkey_account(&coldkey, 1000);
-        assert_ok!(SubtensorModule::do_become_delegate(
-            <<Test as Config>::RuntimeOrigin>::signed(owner),
-            delegate,
-            u16::MAX / 10
-        ));
-        assert_ok!(SubtensorModule::add_stake(
-            <<Test as Config>::RuntimeOrigin>::signed(coldkey),
-            delegate,
-            100
-        ));
-        assert_ok!(SubtensorModule::perform_swap_coldkey(
-            &coldkey,
-            &new_coldkey
-        ));
-        assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&delegate), 100);
-        assert_eq!(SubtensorModule::get_total_stake_for_coldkey(&coldkey), 0);
-        assert_eq!(
-            SubtensorModule::get_total_stake_for_coldkey(&new_coldkey),
-            100
-        );
-        assert_eq!(Stake::<Test>::get(delegate, new_coldkey), 100);
-        assert_eq!(Stake::<Test>::get(delegate, coldkey), 0);
-    });
-}
diff --git a/pallets/subtensor/tests/swap_coldkey.rs b/pallets/subtensor/tests/swap_coldkey.rs
new file mode 100644
index 000000000..0fe601cab
--- /dev/null
+++ b/pallets/subtensor/tests/swap_coldkey.rs
@@ -0,0 +1,1630 @@
+#![allow(unused, clippy::indexing_slicing, clippy::panic, clippy::unwrap_used)]
+use codec::Encode;
+use frame_support::weights::Weight;
+use frame_support::{assert_err, assert_noop, assert_ok};
+use frame_system::{Config, RawOrigin};
+mod mock;
+use frame_support::error::BadOrigin;
+use frame_support::traits::schedule::v3::Named as ScheduleNamed;
+use frame_support::traits::schedule::DispatchTime;
+use frame_support::traits::OnInitialize;
+use mock::*;
+use pallet_subtensor::*;
+use pallet_subtensor::{Call, ColdkeySwapScheduleDuration, Error};
+use sp_core::H256;
+use sp_core::U256;
+use sp_runtime::DispatchError;
+
+// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_coldkey -- test_swap_total_hotkey_coldkey_stakes_this_interval --exact --nocapture
+#[test]
+fn test_swap_total_hotkey_coldkey_stakes_this_interval() {
+    new_test_ext(1).execute_with(|| {
+        let old_coldkey = U256::from(1);
+        let new_coldkey = U256::from(2);
+        let hotkey = U256::from(3);
+        let stake = 100;
+        let block = 42;
+
+        OwnedHotkeys::<Test>::insert(old_coldkey, vec![hotkey]);
+        TotalHotkeyColdkeyStakesThisInterval::<Test>::insert(hotkey, old_coldkey, (stake, block));
+
+        let mut weight = Weight::zero();
+        assert_ok!(SubtensorModule::perform_swap_coldkey(
+            &old_coldkey,
+            &new_coldkey,
+            &mut weight
+        ));
+
+        assert!(!TotalHotkeyColdkeyStakesThisInterval::<Test>::contains_key(
+            hotkey,
+            old_coldkey
+        ));
+        assert_eq!(
+            TotalHotkeyColdkeyStakesThisInterval::<Test>::get(hotkey, new_coldkey),
+            (stake, block)
+        );
+    });
+}
+
+// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_coldkey -- test_swap_subnet_owner --exact --nocapture
+#[test]
+fn test_swap_subnet_owner() {
+    new_test_ext(1).execute_with(|| {
+        let old_coldkey = U256::from(1);
+        let new_coldkey = U256::from(2);
+        let netuid = 1u16;
+
+        add_network(netuid, 1, 0);
+        SubnetOwner::<Test>::insert(netuid, old_coldkey);
+
+        let mut weight = Weight::zero();
+        assert_ok!(SubtensorModule::perform_swap_coldkey(
+            &old_coldkey,
+            &new_coldkey,
+            &mut weight
+        ));
+
+        assert_eq!(SubnetOwner::<Test>::get(netuid), new_coldkey);
+    });
+}
+
+// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_coldkey -- test_swap_stake --exact --nocapture
+#[test]
+fn test_swap_stake() {
+    new_test_ext(1).execute_with(|| {
+        let old_coldkey = U256::from(1);
+        let new_coldkey = U256::from(2);
+        let hotkey = U256::from(3);
+        let stake = 100;
+
+        StakingHotkeys::<Test>::insert(old_coldkey, vec![hotkey]);
+        Stake::<Test>::insert(hotkey, old_coldkey, stake);
+        let mut weight = Weight::zero();
+        assert_ok!(SubtensorModule::perform_swap_coldkey(
+            &old_coldkey,
+            &new_coldkey,
+            &mut weight
+        ));
+
+        assert!(!Stake::<Test>::contains_key(hotkey, old_coldkey));
+        assert_eq!(Stake::<Test>::get(hotkey, new_coldkey), stake);
+    });
+}
+
+// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_coldkey -- test_swap_total_coldkey_stake --exact --nocapture
+#[test]
+fn test_swap_total_coldkey_stake() {
+    new_test_ext(1).execute_with(|| {
+        let old_coldkey = U256::from(1);
+        let new_coldkey = U256::from(2);
+        let stake = 100;
+
+        TotalColdkeyStake::<Test>::insert(old_coldkey, stake);
+
+        let mut weight = Weight::zero();
+        assert_ok!(SubtensorModule::perform_swap_coldkey(
+            &old_coldkey,
+            &new_coldkey,
+            &mut weight
+        ));
+
+        assert_eq!(TotalColdkeyStake::<Test>::get(old_coldkey), 0);
+        assert_eq!(TotalColdkeyStake::<Test>::get(new_coldkey), stake);
+    });
+}
+
+// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_coldkey -- test_swap_staking_hotkeys --exact --nocapture
+#[test]
+fn test_swap_staking_hotkeys() {
+    new_test_ext(1).execute_with(|| {
+        let old_coldkey = U256::from(1);
+        let new_coldkey = U256::from(2);
+        let hotkey = U256::from(3);
+
+        StakingHotkeys::<Test>::insert(old_coldkey, vec![hotkey]);
+
+        let mut weight = Weight::zero();
+        assert_ok!(SubtensorModule::perform_swap_coldkey(
+            &old_coldkey,
+            &new_coldkey,
+            &mut weight
+        ));
+
+        assert!(StakingHotkeys::<Test>::get(old_coldkey).is_empty());
+        assert_eq!(StakingHotkeys::<Test>::get(new_coldkey), vec![hotkey]);
+    });
+}
+
+// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_coldkey -- test_swap_hotkey_owners --exact --nocapture
+#[test]
+fn test_swap_hotkey_owners() {
+    new_test_ext(1).execute_with(|| {
+        let old_coldkey = U256::from(1);
+        let new_coldkey = U256::from(2);
+        let hotkey = U256::from(3);
+
+        Owner::<Test>::insert(hotkey, old_coldkey);
+        OwnedHotkeys::<Test>::insert(old_coldkey, vec![hotkey]);
+
+        let mut weight = Weight::zero();
+        assert_ok!(SubtensorModule::perform_swap_coldkey(
+            &old_coldkey,
+            &new_coldkey,
+            &mut weight
+        ));
+
+        assert_eq!(Owner::<Test>::get(hotkey), new_coldkey);
+
assert!(OwnedHotkeys::::get(old_coldkey).is_empty()); + assert_eq!(OwnedHotkeys::::get(new_coldkey), vec![hotkey]); + }); +} +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_coldkey -- test_transfer_remaining_balance --exact --nocapture +#[test] +fn test_transfer_remaining_balance() { + new_test_ext(1).execute_with(|| { + let old_coldkey = U256::from(1); + let new_coldkey = U256::from(2); + let balance = 100; + + SubtensorModule::add_balance_to_coldkey_account(&old_coldkey, balance); + + let mut weight = Weight::zero(); + assert_ok!(SubtensorModule::perform_swap_coldkey( + &old_coldkey, + &new_coldkey, + &mut weight + )); + + assert_eq!(SubtensorModule::get_coldkey_balance(&old_coldkey), 0); + assert_eq!(SubtensorModule::get_coldkey_balance(&new_coldkey), balance); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_coldkey -- test_swap_with_no_stake --exact --nocapture +#[test] +fn test_swap_with_no_stake() { + new_test_ext(1).execute_with(|| { + let old_coldkey = U256::from(1); + let new_coldkey = U256::from(2); + + let mut weight = Weight::zero(); + assert_ok!(SubtensorModule::perform_swap_coldkey( + &old_coldkey, + &new_coldkey, + &mut weight + )); + + assert_eq!(TotalColdkeyStake::::get(old_coldkey), 0); + assert_eq!(TotalColdkeyStake::::get(new_coldkey), 0); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_coldkey -- test_swap_with_multiple_hotkeys --exact --nocapture +#[test] +fn test_swap_with_multiple_hotkeys() { + new_test_ext(1).execute_with(|| { + let old_coldkey = U256::from(1); + let new_coldkey = U256::from(2); + let hotkey1 = U256::from(3); + let hotkey2 = U256::from(4); + + OwnedHotkeys::::insert(old_coldkey, vec![hotkey1, hotkey2]); + + let mut weight = Weight::zero(); + assert_ok!(SubtensorModule::perform_swap_coldkey( + &old_coldkey, + &new_coldkey, + &mut weight + )); + + assert!(OwnedHotkeys::::get(old_coldkey).is_empty()); + assert_eq!( + OwnedHotkeys::::get(new_coldkey), + vec![hotkey1, hotkey2] + ); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_coldkey -- test_swap_with_multiple_subnets --exact --nocapture +#[test] +fn test_swap_with_multiple_subnets() { + new_test_ext(1).execute_with(|| { + let old_coldkey = U256::from(1); + let new_coldkey = U256::from(2); + let netuid1 = 1u16; + let netuid2 = 2u16; + + add_network(netuid1, 1, 0); + add_network(netuid2, 1, 0); + SubnetOwner::::insert(netuid1, old_coldkey); + SubnetOwner::::insert(netuid2, old_coldkey); + + let mut weight = Weight::zero(); + assert_ok!(SubtensorModule::perform_swap_coldkey( + &old_coldkey, + &new_coldkey, + &mut weight + )); + + assert_eq!(SubnetOwner::::get(netuid1), new_coldkey); + assert_eq!(SubnetOwner::::get(netuid2), new_coldkey); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_coldkey -- test_swap_with_zero_balance --exact --nocapture +#[test] +fn test_swap_with_zero_balance() { + new_test_ext(1).execute_with(|| { + let old_coldkey = U256::from(1); + let new_coldkey = U256::from(2); + + let mut weight = Weight::zero(); + assert_ok!(SubtensorModule::perform_swap_coldkey( + &old_coldkey, + &new_coldkey, + &mut weight + )); + + assert_eq!(Balances::free_balance(old_coldkey), 0); + assert_eq!(Balances::free_balance(new_coldkey), 0); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_coldkey -- test_swap_idempotency --exact --nocapture +#[test] +fn test_swap_idempotency() { + new_test_ext(1).execute_with(|| { + let old_coldkey = U256::from(1); + let new_coldkey = U256::from(2); 
+ let stake = 100; + + TotalColdkeyStake::::insert(old_coldkey, stake); + + let mut weight = Weight::zero(); + assert_ok!(SubtensorModule::perform_swap_coldkey( + &old_coldkey, + &new_coldkey, + &mut weight + )); + assert_ok!(SubtensorModule::perform_swap_coldkey( + &old_coldkey, + &new_coldkey, + &mut weight + )); + + assert_eq!(TotalColdkeyStake::::get(old_coldkey), 0); + assert_eq!(TotalColdkeyStake::::get(new_coldkey), stake); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_coldkey -- test_swap_with_max_values --exact --nocapture +#[test] +fn test_swap_with_max_values() { + new_test_ext(1).execute_with(|| { + let old_coldkey = U256::from(1); + let new_coldkey = U256::from(2); + let max_stake = u64::MAX; + + TotalColdkeyStake::::insert(old_coldkey, max_stake); + + let mut weight = Weight::zero(); + assert_ok!(SubtensorModule::perform_swap_coldkey( + &old_coldkey, + &new_coldkey, + &mut weight + )); + + assert_eq!(TotalColdkeyStake::::get(old_coldkey), 0); + assert_eq!(TotalColdkeyStake::::get(new_coldkey), max_stake); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_coldkey -- test_swap_with_non_existent_new_coldkey --exact --nocapture +#[test] +fn test_swap_with_non_existent_new_coldkey() { + new_test_ext(1).execute_with(|| { + let old_coldkey = U256::from(1); + let new_coldkey = U256::from(2); + let stake = 100; + + TotalColdkeyStake::::insert(old_coldkey, stake); + + let mut weight = Weight::zero(); + assert_ok!(SubtensorModule::perform_swap_coldkey( + &old_coldkey, + &new_coldkey, + &mut weight + )); + + assert_eq!(TotalColdkeyStake::::get(old_coldkey), 0); + assert_eq!(TotalColdkeyStake::::get(new_coldkey), stake); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_coldkey -- test_swap_with_overflow_in_stake_addition --exact --nocapture +#[test] +fn test_swap_with_overflow_in_stake_addition() { + new_test_ext(1).execute_with(|| { + let old_coldkey = U256::from(1); + let new_coldkey = U256::from(2); + let max_stake = u64::MAX; + + TotalColdkeyStake::::insert(old_coldkey, max_stake); + TotalColdkeyStake::::insert(new_coldkey, 1); + + let mut weight = Weight::zero(); + assert_ok!(SubtensorModule::perform_swap_coldkey( + &old_coldkey, + &new_coldkey, + &mut weight + )); + + assert_eq!(TotalColdkeyStake::::get(old_coldkey), 0); + assert_eq!(TotalColdkeyStake::::get(new_coldkey), max_stake); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_coldkey -- test_swap_with_max_hotkeys --exact --nocapture +#[test] +fn test_swap_with_max_hotkeys() { + new_test_ext(1).execute_with(|| { + let old_coldkey = U256::from(1); + let new_coldkey = U256::from(2); + let max_hotkeys = 1000; + let hotkeys: Vec = (0..max_hotkeys).map(U256::from).collect(); + + OwnedHotkeys::::insert(old_coldkey, hotkeys.clone()); + + let mut weight = Weight::zero(); + assert_ok!(SubtensorModule::perform_swap_coldkey( + &old_coldkey, + &new_coldkey, + &mut weight + )); + + assert!(OwnedHotkeys::::get(old_coldkey).is_empty()); + assert_eq!(OwnedHotkeys::::get(new_coldkey), hotkeys); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_coldkey -- test_swap_effect_on_delegated_stake --exact --nocapture +#[test] +fn test_swap_effect_on_delegated_stake() { + new_test_ext(1).execute_with(|| { + let old_coldkey = U256::from(1); + let new_coldkey = U256::from(2); + let delegator = U256::from(3); + let hotkey = U256::from(4); + let stake = 100; + + StakingHotkeys::::insert(old_coldkey, vec![hotkey]); + 
StakingHotkeys::::insert(delegator, vec![hotkey]); + Stake::::insert(hotkey, old_coldkey, stake); + Stake::::insert(hotkey, delegator, stake); + + let mut weight = Weight::zero(); + assert_ok!(SubtensorModule::perform_swap_coldkey( + &old_coldkey, + &new_coldkey, + &mut weight + )); + + assert_eq!(Stake::::get(hotkey, new_coldkey), stake); + assert_eq!(Stake::::get(hotkey, delegator), stake); + assert_eq!(Stake::::get(hotkey, old_coldkey), 0); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_coldkey -- test_swap_concurrent_modifications --exact --nocapture +#[test] +fn test_swap_concurrent_modifications() { + new_test_ext(1).execute_with(|| { + let old_coldkey = U256::from(1); + let new_coldkey = U256::from(2); + let hotkey = U256::from(3); + let netuid: u16 = 1; + let initial_stake = 100; + let additional_stake = 50; + + StakingHotkeys::::insert(old_coldkey, vec![hotkey]); + Stake::::insert(hotkey, old_coldkey, initial_stake); + + // Simulate concurrent stake addition + add_network(netuid, 1, 1); + SubtensorModule::add_balance_to_coldkey_account(&new_coldkey, additional_stake); + register_ok_neuron(netuid, hotkey, new_coldkey, 1001000); + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(new_coldkey), + hotkey, + additional_stake + )); + + let mut weight = Weight::zero(); + assert_ok!(SubtensorModule::perform_swap_coldkey( + &old_coldkey, + &new_coldkey, + &mut weight + )); + + assert_eq!( + Stake::::get(hotkey, new_coldkey), + initial_stake + additional_stake - 1 + ); + assert!(!Stake::::contains_key(hotkey, old_coldkey)); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_coldkey -- test_swap_with_invalid_subnet_ownership --exact --nocapture +#[test] +fn test_swap_with_invalid_subnet_ownership() { + new_test_ext(1).execute_with(|| { + let old_coldkey = U256::from(1); + let new_coldkey = U256::from(2); + let netuid = 1u16; + + SubnetOwner::::insert(netuid, old_coldkey); + + // Simulate an invalid state where the subnet owner doesn't match the old_coldkey + SubnetOwner::::insert(netuid, U256::from(3)); + + let mut weight = Weight::zero(); + assert_ok!(SubtensorModule::perform_swap_coldkey( + &old_coldkey, + &new_coldkey, + &mut weight + )); + + // The swap should not affect the mismatched subnet ownership + assert_eq!(SubnetOwner::::get(netuid), U256::from(3)); + }); +} + +#[test] +fn test_do_swap_coldkey_success() { + new_test_ext(1).execute_with(|| { + let old_coldkey = U256::from(1); + let new_coldkey = U256::from(2); + let hotkey1 = U256::from(3); + let hotkey2 = U256::from(4); + let netuid = 1u16; + let stake_amount1 = 1000u64; + let stake_amount2 = 2000u64; + let swap_cost = SubtensorModule::get_key_swap_cost(); + let free_balance_old = 12345u64 + swap_cost; + + // Setup initial state + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey1, old_coldkey, 0); + register_ok_neuron(netuid, hotkey2, old_coldkey, 0); + + // Add balance to old coldkey + SubtensorModule::add_balance_to_coldkey_account( + &old_coldkey, + stake_amount1 + stake_amount2 + free_balance_old, + ); + + // Log initial state + log::info!( + "Initial total stake: {}", + SubtensorModule::get_total_stake() + ); + log::info!( + "Initial old coldkey stake: {}", + SubtensorModule::get_total_stake_for_coldkey(&old_coldkey) + ); + log::info!( + "Initial new coldkey stake: {}", + SubtensorModule::get_total_stake_for_coldkey(&new_coldkey) + ); + + // Add stake to the neurons + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(old_coldkey), + 
hotkey1, + stake_amount1 + )); + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(old_coldkey), + hotkey2, + stake_amount2 + )); + + // Insert an Identity + let name: Vec = b"The fourth Coolest Identity".to_vec(); + let identity: ChainIdentity = ChainIdentity { + name: name.clone(), + url: vec![], + image: vec![], + discord: vec![], + description: vec![], + additional: vec![], + }; + + Identities::::insert(old_coldkey, identity.clone()); + + assert!(Identities::::get(old_coldkey).is_some()); + assert!(Identities::::get(new_coldkey).is_none()); + + // Log state after adding stake + log::info!( + "Total stake after adding: {}", + SubtensorModule::get_total_stake() + ); + log::info!( + "Old coldkey stake after adding: {}", + SubtensorModule::get_total_stake_for_coldkey(&old_coldkey) + ); + log::info!( + "New coldkey stake after adding: {}", + SubtensorModule::get_total_stake_for_coldkey(&new_coldkey) + ); + + // Record total stake before swap + let total_stake_before_swap = SubtensorModule::get_total_stake(); + + // Perform the swap + assert_ok!(SubtensorModule::do_swap_coldkey( + // <::RuntimeOrigin>::signed(old_coldkey), + &old_coldkey, + &new_coldkey + )); + + // Log state after swap + log::info!( + "Total stake after swap: {}", + SubtensorModule::get_total_stake() + ); + log::info!( + "Old coldkey stake after swap: {}", + SubtensorModule::get_total_stake_for_coldkey(&old_coldkey) + ); + log::info!( + "New coldkey stake after swap: {}", + SubtensorModule::get_total_stake_for_coldkey(&new_coldkey) + ); + + // Verify the swap + assert_eq!(Owner::::get(hotkey1), new_coldkey); + assert_eq!(Owner::::get(hotkey2), new_coldkey); + assert_eq!( + TotalColdkeyStake::::get(new_coldkey), + stake_amount1 + stake_amount2 + ); + assert_eq!(TotalColdkeyStake::::get(old_coldkey), 0); + assert_eq!(Stake::::get(hotkey1, new_coldkey), stake_amount1); + assert_eq!(Stake::::get(hotkey2, new_coldkey), stake_amount2); + assert!(!Stake::::contains_key(hotkey1, old_coldkey)); + assert!(!Stake::::contains_key(hotkey2, old_coldkey)); + + // Verify OwnedHotkeys + let new_owned_hotkeys = OwnedHotkeys::::get(new_coldkey); + assert!(new_owned_hotkeys.contains(&hotkey1)); + assert!(new_owned_hotkeys.contains(&hotkey2)); + assert_eq!(new_owned_hotkeys.len(), 2); + assert!(!OwnedHotkeys::::contains_key(old_coldkey)); + + // Verify balance transfer + assert_eq!( + SubtensorModule::get_coldkey_balance(&new_coldkey), + free_balance_old - swap_cost + ); + assert_eq!(SubtensorModule::get_coldkey_balance(&old_coldkey), 0); + + // Verify total stake remains unchanged + assert_eq!( + SubtensorModule::get_total_stake(), + total_stake_before_swap, + "Total stake changed unexpectedly" + ); + + // Verify identities were swapped + assert!(Identities::::get(old_coldkey).is_none()); + assert!(Identities::::get(new_coldkey).is_some()); + assert_eq!( + Identities::::get(new_coldkey).expect("Expected an Identity"), + identity + ); + + // Verify event emission + System::assert_last_event( + Event::ColdkeySwapped { + old_coldkey, + new_coldkey, + } + .into(), + ); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test swap -- test_swap_stake_for_coldkey --exact --nocaptur +#[test] +fn test_swap_stake_for_coldkey() { + new_test_ext(1).execute_with(|| { + let old_coldkey = U256::from(1); + let new_coldkey = U256::from(2); + let hotkey1 = U256::from(3); + let hotkey2 = U256::from(4); + let stake_amount1 = 1000u64; + let stake_amount2 = 2000u64; + let stake_amount3 = 3000u64; + let total_stake = stake_amount1 + 
stake_amount2; + let mut weight = Weight::zero(); + + // Setup initial state + OwnedHotkeys::::insert(old_coldkey, vec![hotkey1, hotkey2]); + StakingHotkeys::::insert(old_coldkey, vec![hotkey1, hotkey2]); + Stake::::insert(hotkey1, old_coldkey, stake_amount1); + Stake::::insert(hotkey2, old_coldkey, stake_amount2); + assert_eq!(Stake::::get(hotkey1, old_coldkey), stake_amount1); + assert_eq!(Stake::::get(hotkey1, old_coldkey), stake_amount1); + + // Insert existing for same hotkey1 + Stake::::insert(hotkey1, new_coldkey, stake_amount3); + StakingHotkeys::::insert(new_coldkey, vec![hotkey1]); + + TotalHotkeyStake::::insert(hotkey1, stake_amount1); + TotalHotkeyStake::::insert(hotkey2, stake_amount2); + TotalColdkeyStake::::insert(old_coldkey, total_stake); + + // Set up total issuance + TotalIssuance::::put(total_stake); + TotalStake::::put(total_stake); + + // Record initial values + let initial_total_issuance = SubtensorModule::get_total_issuance(); + let initial_total_stake = SubtensorModule::get_total_stake(); + + // Perform the swap + SubtensorModule::perform_swap_coldkey(&old_coldkey, &new_coldkey, &mut weight); + + // Verify stake is additive, not replaced + assert_eq!( + Stake::::get(hotkey1, new_coldkey), + stake_amount1 + stake_amount3 + ); + + // Verify ownership transfer + assert_eq!( + SubtensorModule::get_owned_hotkeys(&new_coldkey), + vec![hotkey1, hotkey2] + ); + assert_eq!(SubtensorModule::get_owned_hotkeys(&old_coldkey), vec![]); + + // Verify stake transfer + assert_eq!(Stake::::get(hotkey2, new_coldkey), stake_amount2); + assert_eq!(Stake::::get(hotkey1, old_coldkey), 0); + assert_eq!(Stake::::get(hotkey2, old_coldkey), 0); + + // Verify TotalColdkeyStake + assert_eq!(TotalColdkeyStake::::get(new_coldkey), total_stake); + assert_eq!(TotalColdkeyStake::::get(old_coldkey), 0); + + // Verify TotalHotkeyStake remains unchanged + assert_eq!(TotalHotkeyStake::::get(hotkey1), stake_amount1); + assert_eq!(TotalHotkeyStake::::get(hotkey2), stake_amount2); + + // Verify total stake and issuance remain unchanged + assert_eq!( + SubtensorModule::get_total_stake(), + initial_total_stake, + "Total stake changed unexpectedly" + ); + assert_eq!( + SubtensorModule::get_total_issuance(), + initial_total_issuance, + "Total issuance changed unexpectedly" + ); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test swap_coldkey -- test_swap_staking_hotkeys_for_coldkey --exact --nocapture +#[test] +fn test_swap_staking_hotkeys_for_coldkey() { + new_test_ext(1).execute_with(|| { + let old_coldkey = U256::from(1); + let new_coldkey = U256::from(2); + let hotkey1 = U256::from(3); + let hotkey2 = U256::from(4); + let stake_amount1 = 1000u64; + let stake_amount2 = 2000u64; + let total_stake = stake_amount1 + stake_amount2; + let mut weight = Weight::zero(); + + // Setup initial state + OwnedHotkeys::::insert(old_coldkey, vec![hotkey1, hotkey2]); + Stake::::insert(hotkey1, old_coldkey, stake_amount1); + Stake::::insert(hotkey2, old_coldkey, stake_amount2); + StakingHotkeys::::insert(old_coldkey, vec![hotkey1, hotkey2]); + TotalHotkeyStake::::insert(hotkey1, stake_amount1); + TotalHotkeyStake::::insert(hotkey2, stake_amount2); + TotalColdkeyStake::::insert(old_coldkey, total_stake); + + // Set up total issuance + TotalIssuance::::put(total_stake); + TotalStake::::put(total_stake); + + // Perform the swap + SubtensorModule::perform_swap_coldkey(&old_coldkey, &new_coldkey, &mut weight); + + // Verify StakingHotkeys transfer + assert_eq!( + StakingHotkeys::::get(new_coldkey), + vec![hotkey1, 
hotkey2] + ); + assert_eq!(StakingHotkeys::::get(old_coldkey), vec![]); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test swap_coldkey -- test_swap_delegated_stake_for_coldkey --exact --nocapture +#[test] +fn test_swap_delegated_stake_for_coldkey() { + new_test_ext(1).execute_with(|| { + let old_coldkey = U256::from(1); + let new_coldkey = U256::from(2); + let hotkey1 = U256::from(3); + let hotkey2 = U256::from(4); + let stake_amount1 = 1000u64; + let stake_amount2 = 2000u64; + let total_stake = stake_amount1 + stake_amount2; + let mut weight = Weight::zero(); + + // Notice hotkey1 and hotkey2 are not in OwnedHotkeys + // coldkey therefore delegates stake to them + + // Setup initial state + StakingHotkeys::::insert(old_coldkey, vec![hotkey1, hotkey2]); + Stake::::insert(hotkey1, old_coldkey, stake_amount1); + Stake::::insert(hotkey2, old_coldkey, stake_amount2); + TotalHotkeyStake::::insert(hotkey1, stake_amount1); + TotalHotkeyStake::::insert(hotkey2, stake_amount2); + TotalColdkeyStake::::insert(old_coldkey, total_stake); + + // Set up total issuance + TotalIssuance::::put(total_stake); + TotalStake::::put(total_stake); + + // Record initial values + let initial_total_issuance = SubtensorModule::get_total_issuance(); + let initial_total_stake = SubtensorModule::get_total_stake(); + + // Perform the swap + SubtensorModule::perform_swap_coldkey(&old_coldkey, &new_coldkey, &mut weight); + + // Verify stake transfer + assert_eq!(Stake::::get(hotkey1, new_coldkey), stake_amount1); + assert_eq!(Stake::::get(hotkey2, new_coldkey), stake_amount2); + assert_eq!(Stake::::get(hotkey1, old_coldkey), 0); + assert_eq!(Stake::::get(hotkey2, old_coldkey), 0); + + // Verify TotalColdkeyStake + assert_eq!(TotalColdkeyStake::::get(new_coldkey), total_stake); + assert_eq!(TotalColdkeyStake::::get(old_coldkey), 0); + + // Verify TotalHotkeyStake remains unchanged + assert_eq!(TotalHotkeyStake::::get(hotkey1), stake_amount1); + assert_eq!(TotalHotkeyStake::::get(hotkey2), stake_amount2); + + // Verify total stake and issuance remain unchanged + assert_eq!( + SubtensorModule::get_total_stake(), + initial_total_stake, + "Total stake changed unexpectedly" + ); + assert_eq!( + SubtensorModule::get_total_issuance(), + initial_total_issuance, + "Total issuance changed unexpectedly" + ); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test swap_coldkey -- test_swap_total_hotkey_coldkey_stakes_this_interval_for_coldkey --exact --nocapture +#[test] +fn test_swap_total_hotkey_coldkey_stakes_this_interval_for_coldkey() { + new_test_ext(1).execute_with(|| { + let old_coldkey = U256::from(1); + let new_coldkey = U256::from(2); + let hotkey1 = U256::from(3); + let hotkey2 = U256::from(4); + let stake1 = (1000u64, 100u64); + let stake2 = (2000u64, 200u64); + let mut weight = Weight::zero(); + + // Initialize TotalHotkeyColdkeyStakesThisInterval for old_coldkey + TotalHotkeyColdkeyStakesThisInterval::::insert(hotkey1, old_coldkey, stake1); + TotalHotkeyColdkeyStakesThisInterval::::insert(hotkey2, old_coldkey, stake2); + + // Populate OwnedHotkeys map + OwnedHotkeys::::insert(old_coldkey, vec![hotkey1, hotkey2]); + + // Perform the swap + SubtensorModule::perform_swap_coldkey(&old_coldkey, &new_coldkey, &mut weight); + + // Verify the swap + assert_eq!( + TotalHotkeyColdkeyStakesThisInterval::::get(hotkey1, new_coldkey), + stake1 + ); + assert_eq!( + TotalHotkeyColdkeyStakesThisInterval::::get(hotkey2, new_coldkey), + stake2 + ); + assert!(!TotalHotkeyColdkeyStakesThisInterval::::contains_key( + 
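+ // (this storage map is keyed (hotkey, coldkey); note the arguments below are passed in the reverse order)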
old_coldkey, + hotkey1 + )); + assert!(!TotalHotkeyColdkeyStakesThisInterval::::contains_key( + old_coldkey, + hotkey2 + )); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test swap_coldkey -- test_swap_subnet_owner_for_coldkey --exact --nocapture +#[test] +fn test_swap_subnet_owner_for_coldkey() { + new_test_ext(1).execute_with(|| { + let old_coldkey = U256::from(1); + let new_coldkey = U256::from(2); + let netuid1 = 1u16; + let netuid2 = 2u16; + let mut weight = Weight::zero(); + + // Initialize SubnetOwner for old_coldkey + add_network(netuid1, 13, 0); + add_network(netuid2, 14, 0); + SubnetOwner::::insert(netuid1, old_coldkey); + SubnetOwner::::insert(netuid2, old_coldkey); + + // Set up TotalNetworks + TotalNetworks::::put(3); + + // Perform the swap + SubtensorModule::perform_swap_coldkey(&old_coldkey, &new_coldkey, &mut weight); + + // Verify the swap + assert_eq!(SubnetOwner::::get(netuid1), new_coldkey); + assert_eq!(SubnetOwner::::get(netuid2), new_coldkey); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test swap_coldkey -- test_do_swap_coldkey_with_subnet_ownership --exact --nocapture +#[test] +fn test_do_swap_coldkey_with_subnet_ownership() { + new_test_ext(1).execute_with(|| { + let old_coldkey = U256::from(1); + let new_coldkey = U256::from(2); + let hotkey = U256::from(3); + let netuid = 1u16; + let stake_amount: u64 = 1000u64; + let swap_cost = SubtensorModule::get_key_swap_cost(); + + // Setup initial state + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, old_coldkey, 0); + + // Set TotalNetworks because swap relies on it + pallet_subtensor::TotalNetworks::::set(1); + + SubtensorModule::add_balance_to_coldkey_account(&old_coldkey, stake_amount + swap_cost); + SubnetOwner::::insert(netuid, old_coldkey); + + // Populate OwnedHotkeys map + OwnedHotkeys::::insert(old_coldkey, vec![hotkey]); + + // Perform the swap + assert_ok!(SubtensorModule::do_swap_coldkey(&old_coldkey, &new_coldkey)); + + // Verify subnet ownership transfer + assert_eq!(SubnetOwner::::get(netuid), new_coldkey); + }); +} +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test swap_coldkey -- test_coldkey_has_associated_hotkeys --exact --nocapture +#[test] +fn test_coldkey_has_associated_hotkeys() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let hotkey = U256::from(2); + let netuid = 1u16; + + // Setup initial state + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, coldkey, 0); + SubtensorModule::add_balance_to_coldkey_account(&coldkey, 1000); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test swap_coldkey -- test_coldkey_swap_total --exact --nocapture +#[test] +fn test_coldkey_swap_total() { + new_test_ext(1).execute_with(|| { + let coldkey = U256::from(1); + let nominator1 = U256::from(2); + let nominator2 = U256::from(3); + let nominator3 = U256::from(4); + let delegate1 = U256::from(5); + let delegate2 = U256::from(6); + let delegate3 = U256::from(7); + let hotkey1 = U256::from(2); + let hotkey2 = U256::from(3); + let hotkey3 = U256::from(4); + let netuid1 = 1u16; + let netuid2 = 2u16; + let netuid3 = 3u16; + SubtensorModule::add_balance_to_coldkey_account(&coldkey, 1000); + SubtensorModule::add_balance_to_coldkey_account(&delegate1, 1000); + SubtensorModule::add_balance_to_coldkey_account(&delegate2, 1000); + SubtensorModule::add_balance_to_coldkey_account(&delegate3, 1000); + SubtensorModule::add_balance_to_coldkey_account(&nominator1, 1000); + SubtensorModule::add_balance_to_coldkey_account(&nominator2, 
1000); + SubtensorModule::add_balance_to_coldkey_account(&nominator3, 1000); + + // Setup initial state + add_network(netuid1, 13, 0); + add_network(netuid2, 14, 0); + add_network(netuid3, 15, 0); + register_ok_neuron(netuid1, hotkey1, coldkey, 0); + register_ok_neuron(netuid2, hotkey2, coldkey, 0); + register_ok_neuron(netuid3, hotkey3, coldkey, 0); + register_ok_neuron(netuid1, delegate1, delegate1, 0); + register_ok_neuron(netuid2, delegate2, delegate2, 0); + register_ok_neuron(netuid3, delegate3, delegate3, 0); + assert_ok!(SubtensorModule::do_become_delegate( + <::RuntimeOrigin>::signed(coldkey), + hotkey1, + u16::MAX / 10 + )); + assert_ok!(SubtensorModule::do_become_delegate( + <::RuntimeOrigin>::signed(coldkey), + hotkey2, + u16::MAX / 10 + )); + assert_ok!(SubtensorModule::do_become_delegate( + <::RuntimeOrigin>::signed(coldkey), + hotkey3, + u16::MAX / 10 + )); + assert_ok!(SubtensorModule::do_become_delegate( + <::RuntimeOrigin>::signed(delegate1), + delegate1, + u16::MAX / 10 + )); + assert_ok!(SubtensorModule::do_become_delegate( + <::RuntimeOrigin>::signed(delegate2), + delegate2, + u16::MAX / 10 + )); + assert_ok!(SubtensorModule::do_become_delegate( + <::RuntimeOrigin>::signed(delegate3), + delegate3, + u16::MAX / 10 + )); + + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(coldkey), + hotkey1, + 100 + )); + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(coldkey), + hotkey2, + 100 + )); + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(coldkey), + hotkey3, + 100 + )); + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(coldkey), + delegate1, + 100 + )); + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(coldkey), + delegate2, + 100 + )); + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(coldkey), + delegate3, + 100 + )); + + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(delegate1), + hotkey1, + 100 + )); + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(delegate2), + hotkey2, + 100 + )); + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(delegate3), + hotkey3, + 100 + )); + + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(delegate1), + delegate1, + 100 + )); + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(delegate2), + delegate2, + 100 + )); + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(delegate3), + delegate3, + 100 + )); + + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(nominator1), + hotkey1, + 100 + )); + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(nominator2), + hotkey2, + 100 + )); + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(nominator3), + hotkey3, + 100 + )); + + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(nominator1), + delegate1, + 100 + )); + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(nominator2), + delegate2, + 100 + )); + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(nominator3), + delegate3, + 100 + )); + + assert_eq!( + SubtensorModule::get_owned_hotkeys(&coldkey), + vec![hotkey1, hotkey2, hotkey3] + ); + assert_eq!( + SubtensorModule::get_all_staked_hotkeys(&coldkey), + vec![hotkey1, hotkey2, hotkey3, delegate1, delegate2, delegate3] + ); + assert_eq!(SubtensorModule::get_total_stake_for_coldkey(&coldkey), 600); + assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey1), 300); + 
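+ // Each of the six hotkeys ends up with 300 staked: 100 from the owner coldkey, 100 from the corresponding delegateN coldkey, and 100 from the corresponding nominatorN; the owner coldkey's 600 total is its six stakes of 100.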
assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey2), 300); + assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey3), 300); + assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&delegate1), 300); + assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&delegate2), 300); + assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&delegate3), 300); + + assert_eq!( + SubtensorModule::get_owned_hotkeys(&delegate1), + vec![delegate1] + ); + assert_eq!( + SubtensorModule::get_owned_hotkeys(&delegate2), + vec![delegate2] + ); + assert_eq!( + SubtensorModule::get_owned_hotkeys(&delegate3), + vec![delegate3] + ); + assert_eq!( + SubtensorModule::get_all_staked_hotkeys(&delegate1), + vec![delegate1, hotkey1] + ); + assert_eq!( + SubtensorModule::get_all_staked_hotkeys(&delegate2), + vec![delegate2, hotkey2] + ); + assert_eq!( + SubtensorModule::get_all_staked_hotkeys(&delegate3), + vec![delegate3, hotkey3] + ); + + assert_eq!(SubtensorModule::get_owned_hotkeys(&nominator1), vec![]); + assert_eq!(SubtensorModule::get_owned_hotkeys(&nominator2), vec![]); + assert_eq!(SubtensorModule::get_owned_hotkeys(&nominator3), vec![]); + + assert_eq!( + SubtensorModule::get_all_staked_hotkeys(&nominator1), + vec![hotkey1, delegate1] + ); + assert_eq!( + SubtensorModule::get_all_staked_hotkeys(&nominator2), + vec![hotkey2, delegate2] + ); + assert_eq!( + SubtensorModule::get_all_staked_hotkeys(&nominator3), + vec![hotkey3, delegate3] + ); + + // Perform the swap + let new_coldkey = U256::from(1100); + assert_eq!(SubtensorModule::get_total_stake_for_coldkey(&coldkey), 600); + let mut weight = Weight::zero(); + assert_ok!(SubtensorModule::perform_swap_coldkey( + &coldkey, + &new_coldkey, + &mut weight + )); + assert_eq!( + SubtensorModule::get_total_stake_for_coldkey(&new_coldkey), + 600 + ); + + // Check everything is swapped. 
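+ // All coldkey-keyed state should now read identically under new_coldkey, while per-hotkey totals and the delegates'/nominators' own views stay exactly as before.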
+ assert_eq!( + SubtensorModule::get_owned_hotkeys(&new_coldkey), + vec![hotkey1, hotkey2, hotkey3] + ); + assert_eq!( + SubtensorModule::get_all_staked_hotkeys(&new_coldkey), + vec![hotkey1, hotkey2, hotkey3, delegate1, delegate2, delegate3] + ); + assert_eq!( + SubtensorModule::get_total_stake_for_coldkey(&new_coldkey), + 600 + ); + assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey1), 300); + assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey2), 300); + assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&hotkey3), 300); + assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&delegate1), 300); + assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&delegate2), 300); + assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&delegate3), 300); + + assert_eq!( + SubtensorModule::get_owned_hotkeys(&delegate1), + vec![delegate1] + ); + assert_eq!( + SubtensorModule::get_owned_hotkeys(&delegate2), + vec![delegate2] + ); + assert_eq!( + SubtensorModule::get_owned_hotkeys(&delegate3), + vec![delegate3] + ); + assert_eq!( + SubtensorModule::get_all_staked_hotkeys(&delegate1), + vec![delegate1, hotkey1] + ); + assert_eq!( + SubtensorModule::get_all_staked_hotkeys(&delegate2), + vec![delegate2, hotkey2] + ); + assert_eq!( + SubtensorModule::get_all_staked_hotkeys(&delegate3), + vec![delegate3, hotkey3] + ); + + assert_eq!(SubtensorModule::get_owned_hotkeys(&nominator1), vec![]); + assert_eq!(SubtensorModule::get_owned_hotkeys(&nominator2), vec![]); + assert_eq!(SubtensorModule::get_owned_hotkeys(&nominator3), vec![]); + + assert_eq!( + SubtensorModule::get_all_staked_hotkeys(&nominator1), + vec![hotkey1, delegate1] + ); + assert_eq!( + SubtensorModule::get_all_staked_hotkeys(&nominator2), + vec![hotkey2, delegate2] + ); + assert_eq!( + SubtensorModule::get_all_staked_hotkeys(&nominator3), + vec![hotkey3, delegate3] + ); + }); +} +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test swap_coldkey -- test_swap_senate_member --exact --nocapture +#[test] +fn test_swap_senate_member() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let non_member_hotkey = U256::from(3); + let mut weight = Weight::zero(); + + // Setup: Add old_hotkey as a Senate member + assert_ok!(SenateMembers::add_member( + RawOrigin::Root.into(), + old_hotkey + )); + + // Test 1: Successful swap + assert_ok!(SubtensorModule::swap_senate_member( + &old_hotkey, + &new_hotkey, + &mut weight + )); + assert!(Senate::is_member(&new_hotkey)); + assert!(!Senate::is_member(&old_hotkey)); + + // Verify weight update + let expected_weight = ::DbWeight::get().reads_writes(2, 2); + assert_eq!(weight, expected_weight); + + // Reset weight for next test + weight = Weight::zero(); + + // Test 2: Swap with non-member (should not change anything) + assert_ok!(SubtensorModule::swap_senate_member( + &non_member_hotkey, + &new_hotkey, + &mut weight + )); + assert!(Senate::is_member(&new_hotkey)); + assert!(!Senate::is_member(&non_member_hotkey)); + + // Verify weight update (should only have read operations) + let expected_weight = ::DbWeight::get().reads(1); + assert_eq!(weight, expected_weight); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test swap_coldkey -- test_coldkey_delegations --exact --nocapture +#[test] +fn test_coldkey_delegations() { + new_test_ext(1).execute_with(|| { + let new_coldkey = U256::from(0); + let owner = U256::from(1); + let coldkey = U256::from(4); + let delegate = U256::from(2); + let netuid = 1u16; + add_network(netuid, 
13, 0); + register_ok_neuron(netuid, delegate, owner, 0); + SubtensorModule::add_balance_to_coldkey_account(&coldkey, 1000); + assert_ok!(SubtensorModule::do_become_delegate( + <::RuntimeOrigin>::signed(owner), + delegate, + u16::MAX / 10 + )); + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(coldkey), + delegate, + 100 + )); + let mut weight = Weight::zero(); + assert_ok!(SubtensorModule::perform_swap_coldkey( + &coldkey, + &new_coldkey, + &mut weight + )); + assert_eq!(SubtensorModule::get_total_stake_for_hotkey(&delegate), 100); + assert_eq!(SubtensorModule::get_total_stake_for_coldkey(&coldkey), 0); + assert_eq!( + SubtensorModule::get_total_stake_for_coldkey(&new_coldkey), + 100 + ); + assert_eq!(Stake::::get(delegate, new_coldkey), 100); + assert_eq!(Stake::::get(delegate, coldkey), 0); + }); +} + +#[test] +fn test_schedule_swap_coldkey_success() { + new_test_ext(1).execute_with(|| { + // Initialize test accounts + let old_coldkey: U256 = U256::from(1); + let new_coldkey: U256 = U256::from(2); + + // Add balance to the old coldkey account + SubtensorModule::add_balance_to_coldkey_account(&old_coldkey, 1000); + + // Schedule the coldkey swap + assert_ok!(SubtensorModule::schedule_swap_coldkey( + <::RuntimeOrigin>::signed(old_coldkey), + new_coldkey + )); + + // Get the current block number + let current_block: u64 = System::block_number(); + + // Calculate the expected execution block (5 days from now) + let expected_execution_block: u64 = current_block + 5 * 24 * 60 * 60 / 12; + + // Check for the SwapScheduled event + System::assert_last_event( + Event::ColdkeySwapScheduled { + old_coldkey, + new_coldkey, + execution_block: expected_execution_block, + } + .into(), + ); + + // TODO: Add additional checks to ensure the swap is correctly scheduled in the system + // For example, verify that the swap is present in the appropriate storage or scheduler + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test swap_coldkey -- test_schedule_swap_coldkey_duplicate --exact --nocapture +#[test] +fn test_schedule_swap_coldkey_duplicate() { + new_test_ext(1).execute_with(|| { + let old_coldkey = U256::from(1); + let new_coldkey = U256::from(2); + + SubtensorModule::add_balance_to_coldkey_account(&old_coldkey, 2000); + + assert_ok!(SubtensorModule::schedule_swap_coldkey( + <::RuntimeOrigin>::signed(old_coldkey), + new_coldkey + )); + + // Attempt to schedule again + assert_noop!( + SubtensorModule::schedule_swap_coldkey( + <::RuntimeOrigin>::signed(old_coldkey), + new_coldkey + ), + Error::::SwapAlreadyScheduled + ); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test swap_coldkey -- test_schedule_swap_coldkey_execution --exact --nocapture +#[test] +fn test_schedule_swap_coldkey_execution() { + new_test_ext(1).execute_with(|| { + let old_coldkey = U256::from(1); + let new_coldkey = U256::from(2); + let hotkey = U256::from(3); + let netuid = 1u16; + let stake_amount = 100; + + add_network(netuid, 13, 0); + register_ok_neuron(netuid, hotkey, old_coldkey, 0); + SubtensorModule::add_balance_to_coldkey_account(&old_coldkey, 1000000000000000); + assert_ok!(SubtensorModule::add_stake( + <::RuntimeOrigin>::signed(old_coldkey), + hotkey, + stake_amount + )); + + // Check initial ownership + assert_eq!( + Owner::::get(hotkey), + old_coldkey, + "Initial ownership check failed" + ); + + // Schedule the swap + assert_ok!(SubtensorModule::schedule_swap_coldkey( + <::RuntimeOrigin>::signed(old_coldkey), + new_coldkey + )); + + // Get the scheduled execution block + let 
current_block = System::block_number(); + let execution_block = current_block + ColdkeySwapScheduleDuration::::get(); + + System::assert_last_event( + Event::ColdkeySwapScheduled { + old_coldkey, + new_coldkey, + execution_block, + } + .into(), + ); + + run_to_block(execution_block); + + // Run on_initialize for the execution block + SubtensorModule::on_initialize(execution_block); + + // Also run Scheduler's on_initialize + as OnInitialize>::on_initialize( + execution_block, + ); + + // Check if the swap has occurred + let new_owner = Owner::::get(hotkey); + assert_eq!( + new_owner, new_coldkey, + "Ownership was not updated as expected" + ); + + assert_eq!( + Stake::::get(hotkey, new_coldkey), + stake_amount, + "Stake was not transferred to new coldkey" + ); + assert_eq!( + Stake::::get(hotkey, old_coldkey), + 0, + "Old coldkey still has stake" + ); + + // Check for the SwapExecuted event + System::assert_has_event( + Event::ColdkeySwapped { + old_coldkey, + new_coldkey, + } + .into(), + ); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test swap_coldkey -- test_direct_swap_coldkey_call_fails --exact --nocapture +#[test] +fn test_direct_swap_coldkey_call_fails() { + new_test_ext(1).execute_with(|| { + let old_coldkey = U256::from(1); + let new_coldkey = U256::from(2); + + assert_noop!( + SubtensorModule::swap_coldkey( + <::RuntimeOrigin>::signed(old_coldkey), + old_coldkey, + new_coldkey + ), + BadOrigin + ); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=info cargo test --test swap_coldkey -- test_schedule_swap_coldkey_with_pending_swap --exact --nocapture +#[test] +fn test_schedule_swap_coldkey_with_pending_swap() { + new_test_ext(1).execute_with(|| { + let old_coldkey = U256::from(1); + let new_coldkey1 = U256::from(2); + let new_coldkey2 = U256::from(3); + + SubtensorModule::add_balance_to_coldkey_account(&old_coldkey, 2000); + + assert_ok!(SubtensorModule::schedule_swap_coldkey( + <::RuntimeOrigin>::signed(old_coldkey), + new_coldkey1 + )); + + // Attempt to schedule another swap before the first one executes + assert_noop!( + SubtensorModule::schedule_swap_coldkey( + <::RuntimeOrigin>::signed(old_coldkey), + new_coldkey2 + ), + Error::::SwapAlreadyScheduled + ); + }); +} + +#[test] +fn test_coldkey_swap_delegate_identity_updated() { + new_test_ext(1).execute_with(|| { + let old_coldkey = U256::from(1); + let new_coldkey = U256::from(2); + + let netuid = 1; + let burn_cost = 10; + let tempo = 1; + + SubtensorModule::set_burn(netuid, burn_cost); + add_network(netuid, tempo, 0); + + SubtensorModule::add_balance_to_coldkey_account(&old_coldkey, 100_000_000_000); + + assert_ok!(SubtensorModule::burned_register( + <::RuntimeOrigin>::signed(old_coldkey), + netuid, + old_coldkey + )); + + let name: Vec = b"The Third Coolest Identity".to_vec(); + let identity: ChainIdentity = ChainIdentity { + name: name.clone(), + url: vec![], + image: vec![], + discord: vec![], + description: vec![], + additional: vec![], + }; + + Identities::::insert(old_coldkey, identity.clone()); + + assert!(Identities::::get(old_coldkey).is_some()); + assert!(Identities::::get(new_coldkey).is_none()); + + assert_ok!(SubtensorModule::do_swap_coldkey(&old_coldkey, &new_coldkey)); + + assert!(Identities::::get(old_coldkey).is_none()); + assert!(Identities::::get(new_coldkey).is_some()); + assert_eq!( + Identities::::get(new_coldkey).expect("Expected an Identity"), + identity + ); + }); +} + +#[test] +fn test_coldkey_swap_no_identity_no_changes() { + new_test_ext(1).execute_with(|| { + let old_coldkey = 
U256::from(1); + let new_coldkey = U256::from(2); + + let netuid = 1; + let burn_cost = 10; + let tempo = 1; + + SubtensorModule::set_burn(netuid, burn_cost); + add_network(netuid, tempo, 0); + + SubtensorModule::add_balance_to_coldkey_account(&old_coldkey, 100_000_000_000); + + assert_ok!(SubtensorModule::burned_register( + <::RuntimeOrigin>::signed(old_coldkey), + netuid, + old_coldkey + )); + + // Ensure the old coldkey does not have an identity before the swap + assert!(Identities::::get(old_coldkey).is_none()); + + // Perform the coldkey swap + assert_ok!(SubtensorModule::do_swap_coldkey(&old_coldkey, &new_coldkey)); + + // Ensure no identities have been changed + assert!(Identities::::get(old_coldkey).is_none()); + assert!(Identities::::get(new_coldkey).is_none()); + }); +} + +#[test] +fn test_coldkey_swap_no_identity_no_changes_newcoldkey_exists() { + new_test_ext(1).execute_with(|| { + let old_coldkey = U256::from(3); + let new_coldkey = U256::from(4); + + let netuid = 1; + let burn_cost = 10; + let tempo = 1; + + SubtensorModule::set_burn(netuid, burn_cost); + add_network(netuid, tempo, 0); + SubtensorModule::add_balance_to_coldkey_account(&old_coldkey, 100_000_000_000); + + assert_ok!(SubtensorModule::burned_register( + <::RuntimeOrigin>::signed(old_coldkey), + netuid, + old_coldkey + )); + + let name: Vec = b"The Coolest Identity".to_vec(); + let identity: ChainIdentity = ChainIdentity { + name: name.clone(), + url: vec![], + image: vec![], + discord: vec![], + description: vec![], + additional: vec![], + }; + + Identities::::insert(new_coldkey, identity.clone()); + // Ensure the new coldkey does have an identity before the swap + assert!(Identities::::get(new_coldkey).is_some()); + assert!(Identities::::get(old_coldkey).is_none()); + + // Perform the coldkey swap + assert_ok!(SubtensorModule::do_swap_coldkey(&old_coldkey, &new_coldkey)); + + // Ensure no identities have been changed + assert!(Identities::::get(old_coldkey).is_none()); + assert!(Identities::::get(new_coldkey).is_some()); + }); +} diff --git a/pallets/subtensor/tests/swap_hotkey.rs b/pallets/subtensor/tests/swap_hotkey.rs new file mode 100644 index 000000000..bff738b86 --- /dev/null +++ b/pallets/subtensor/tests/swap_hotkey.rs @@ -0,0 +1,1117 @@ +#![allow(unused, clippy::indexing_slicing, clippy::panic, clippy::unwrap_used)] + +use codec::Encode; +use frame_support::weights::Weight; +use frame_support::{assert_err, assert_noop, assert_ok}; +use frame_system::{Config, RawOrigin}; +mod mock; +use mock::*; +use pallet_subtensor::*; +use sp_core::H256; +use sp_core::U256; + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_owner --exact --nocapture +#[test] +fn test_swap_owner() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let mut weight = Weight::zero(); + + Owner::::insert(old_hotkey, coldkey); + assert_ok!(SubtensorModule::perform_hotkey_swap( + &old_hotkey, + &new_hotkey, + &coldkey, + &mut weight + )); + + assert!(!Owner::::contains_key(old_hotkey)); + assert_eq!(Owner::::get(new_hotkey), coldkey); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_owned_hotkeys --exact --nocapture +#[test] +fn test_swap_owned_hotkeys() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let mut weight = Weight::zero(); + + OwnedHotkeys::::insert(coldkey, vec![old_hotkey]); + 
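+ // The coldkey's OwnedHotkeys entry should end up listing new_hotkey instead of old_hotkey.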
assert_ok!(SubtensorModule::perform_hotkey_swap( + &old_hotkey, + &new_hotkey, + &coldkey, + &mut weight + )); + + let hotkeys = OwnedHotkeys::::get(coldkey); + assert!(!hotkeys.contains(&old_hotkey)); + assert!(hotkeys.contains(&new_hotkey)); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_total_hotkey_stake --exact --nocapture +#[test] +fn test_swap_total_hotkey_stake() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let mut weight = Weight::zero(); + + TotalHotkeyStake::::insert(old_hotkey, 100); + TotalHotkeyStake::::insert(new_hotkey, 50); + assert_ok!(SubtensorModule::perform_hotkey_swap( + &old_hotkey, + &new_hotkey, + &coldkey, + &mut weight + )); + + assert!(!TotalHotkeyStake::::contains_key(old_hotkey)); + assert_eq!(TotalHotkeyStake::::get(new_hotkey), 150); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_total_hotkey_coldkey_stakes_this_interval --exact --nocapture +#[test] +fn test_swap_total_hotkey_coldkey_stakes_this_interval() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let mut weight = Weight::zero(); + + TotalHotkeyColdkeyStakesThisInterval::::insert(old_hotkey, coldkey, (100, 1000)); + assert_ok!(SubtensorModule::perform_hotkey_swap( + &old_hotkey, + &new_hotkey, + &coldkey, + &mut weight + )); + + assert!(!TotalHotkeyColdkeyStakesThisInterval::::contains_key( + old_hotkey, coldkey + )); + assert_eq!( + TotalHotkeyColdkeyStakesThisInterval::::get(new_hotkey, coldkey), + (100, 1000) + ); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_last_tx_block --exact --nocapture +#[test] +fn test_swap_last_tx_block() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let mut weight = Weight::zero(); + + LastTxBlock::::insert(old_hotkey, 1000); + assert_ok!(SubtensorModule::perform_hotkey_swap( + &old_hotkey, + &new_hotkey, + &coldkey, + &mut weight + )); + + assert!(!LastTxBlock::::contains_key(old_hotkey)); + assert_eq!( + LastTxBlock::::get(new_hotkey), + SubtensorModule::get_current_block_as_u64() + ); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_last_tx_block_delegate_take --exact --nocapture +#[test] +fn test_swap_last_tx_block_delegate_take() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let mut weight = Weight::zero(); + + pallet_subtensor::LastTxBlockDelegateTake::::insert(old_hotkey, 1000); + assert_ok!(SubtensorModule::perform_hotkey_swap( + &old_hotkey, + &new_hotkey, + &coldkey, + &mut weight + )); + + assert!(!LastTxBlockDelegateTake::::contains_key(old_hotkey)); + assert_eq!( + LastTxBlockDelegateTake::::get(new_hotkey), + SubtensorModule::get_current_block_as_u64() + ); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_senate_members --exact --nocapture +#[test] +fn test_swap_senate_members() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let mut weight = Weight::zero(); + + // Assuming there's a way to add a member to the senate + // SenateMembers::add_member(&old_hotkey); + 
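+ // With the membership setup and assertions commented out, this test currently only checks that perform_hotkey_swap itself succeeds; Senate membership swapping is covered by test_swap_senate_member in swap_coldkey.rs.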
assert_ok!(SubtensorModule::perform_hotkey_swap( + &old_hotkey, + &new_hotkey, + &coldkey, + &mut weight + )); + + // Assert that the old_hotkey is no longer a member and new_hotkey is now a member + // assert!(!SenateMembers::is_member(&old_hotkey)); + // assert!(SenateMembers::is_member(&new_hotkey)); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_delegates --exact --nocapture +#[test] +fn test_swap_delegates() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let mut weight = Weight::zero(); + + Delegates::::insert(old_hotkey, 100); + assert_ok!(SubtensorModule::perform_hotkey_swap( + &old_hotkey, + &new_hotkey, + &coldkey, + &mut weight + )); + + assert!(!Delegates::::contains_key(old_hotkey)); + assert_eq!(Delegates::::get(new_hotkey), 100); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_subnet_membership --exact --nocapture +#[test] +fn test_swap_subnet_membership() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let netuid = 0u16; + let mut weight = Weight::zero(); + + add_network(netuid, 0, 1); + IsNetworkMember::::insert(old_hotkey, netuid, true); + assert_ok!(SubtensorModule::perform_hotkey_swap( + &old_hotkey, + &new_hotkey, + &coldkey, + &mut weight + )); + + assert!(!IsNetworkMember::::contains_key(old_hotkey, netuid)); + assert!(IsNetworkMember::::get(new_hotkey, netuid)); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_uids_and_keys --exact --nocapture +#[test] +fn test_swap_uids_and_keys() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let netuid = 0u16; + let uid = 5u16; + let mut weight = Weight::zero(); + + add_network(netuid, 0, 1); + IsNetworkMember::::insert(old_hotkey, netuid, true); + Uids::::insert(netuid, old_hotkey, uid); + Keys::::insert(netuid, uid, old_hotkey); + + assert_ok!(SubtensorModule::perform_hotkey_swap( + &old_hotkey, + &new_hotkey, + &coldkey, + &mut weight + )); + + assert_eq!(Uids::::get(netuid, old_hotkey), None); + assert_eq!(Uids::::get(netuid, new_hotkey), Some(uid)); + assert_eq!(Keys::::get(netuid, uid), new_hotkey); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_prometheus --exact --nocapture +#[test] +fn test_swap_prometheus() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let netuid = 0u16; + let prometheus_info = PrometheusInfo::default(); + let mut weight = Weight::zero(); + + add_network(netuid, 0, 1); + IsNetworkMember::::insert(old_hotkey, netuid, true); + Prometheus::::insert(netuid, old_hotkey, prometheus_info.clone()); + + assert_ok!(SubtensorModule::perform_hotkey_swap( + &old_hotkey, + &new_hotkey, + &coldkey, + &mut weight + )); + + assert!(!Prometheus::::contains_key(netuid, old_hotkey)); + assert_eq!( + Prometheus::::get(netuid, new_hotkey), + Some(prometheus_info) + ); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_axons --exact --nocapture +#[test] +fn test_swap_axons() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let netuid = 0u16; + let axon_info = 
AxonInfo::default(); + let mut weight = Weight::zero(); + + add_network(netuid, 0, 1); + IsNetworkMember::::insert(old_hotkey, netuid, true); + Axons::::insert(netuid, old_hotkey, axon_info.clone()); + + assert_ok!(SubtensorModule::perform_hotkey_swap( + &old_hotkey, + &new_hotkey, + &coldkey, + &mut weight + )); + + assert!(!Axons::::contains_key(netuid, old_hotkey)); + assert_eq!(Axons::::get(netuid, new_hotkey), Some(axon_info)); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_weight_commits --exact --nocapture +#[test] +fn test_swap_weight_commits() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let netuid = 0u16; + let weight_commits = (H256::from_low_u64_be(100), 200); + let mut weight = Weight::zero(); + + add_network(netuid, 0, 1); + IsNetworkMember::::insert(old_hotkey, netuid, true); + WeightCommits::::insert(netuid, old_hotkey, weight_commits); + + assert_ok!(SubtensorModule::perform_hotkey_swap( + &old_hotkey, + &new_hotkey, + &coldkey, + &mut weight + )); + + assert!(!WeightCommits::::contains_key(netuid, old_hotkey)); + assert_eq!( + WeightCommits::::get(netuid, new_hotkey), + Some(weight_commits) + ); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_loaded_emission --exact --nocapture +#[test] +fn test_swap_loaded_emission() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let netuid = 0u16; + let server_emission = 1000u64; + let validator_emission = 1000u64; + let mut weight = Weight::zero(); + + add_network(netuid, 0, 1); + IsNetworkMember::::insert(old_hotkey, netuid, true); + LoadedEmission::::insert( + netuid, + vec![(old_hotkey, server_emission, validator_emission)], + ); + + assert_ok!(SubtensorModule::perform_hotkey_swap( + &old_hotkey, + &new_hotkey, + &coldkey, + &mut weight + )); + + let new_loaded_emission = LoadedEmission::::get(netuid); + assert_eq!( + new_loaded_emission, + Some(vec![(new_hotkey, server_emission, validator_emission)]) + ); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_stake --exact --nocapture +#[test] +fn test_swap_stake() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let stake_amount = 100u64; + let mut weight = Weight::zero(); + + Stake::::insert(old_hotkey, coldkey, stake_amount); + + assert_ok!(SubtensorModule::perform_hotkey_swap( + &old_hotkey, + &new_hotkey, + &coldkey, + &mut weight + )); + + assert!(!Stake::::contains_key(old_hotkey, coldkey)); + assert_eq!(Stake::::get(new_hotkey, coldkey), stake_amount); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_staking_hotkeys --exact --nocapture +#[test] +fn test_swap_staking_hotkeys() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let mut weight = Weight::zero(); + + Stake::::insert(old_hotkey, coldkey, 100); + StakingHotkeys::::insert(coldkey, vec![old_hotkey]); + + assert_ok!(SubtensorModule::perform_hotkey_swap( + &old_hotkey, + &new_hotkey, + &coldkey, + &mut weight + )); + + let staking_hotkeys = StakingHotkeys::::get(coldkey); + assert!(!staking_hotkeys.contains(&old_hotkey)); + assert!(staking_hotkeys.contains(&new_hotkey)); + }); +} + +// 
SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_hotkey_with_multiple_coldkeys --exact --nocapture +#[test] +fn test_swap_hotkey_with_multiple_coldkeys() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey1 = U256::from(3); + let coldkey2 = U256::from(4); + let mut weight = Weight::zero(); + + Stake::::insert(old_hotkey, coldkey1, 100); + Stake::::insert(old_hotkey, coldkey2, 200); + StakingHotkeys::::insert(coldkey1, vec![old_hotkey]); + StakingHotkeys::::insert(coldkey2, vec![old_hotkey]); + + assert_ok!(SubtensorModule::perform_hotkey_swap( + &old_hotkey, + &new_hotkey, + &coldkey1, + &mut weight + )); + + assert_eq!(Stake::::get(new_hotkey, coldkey1), 100); + assert_eq!(Stake::::get(new_hotkey, coldkey2), 200); + assert!(StakingHotkeys::::get(coldkey1).contains(&new_hotkey)); + assert!(StakingHotkeys::::get(coldkey2).contains(&new_hotkey)); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_hotkey_with_existing_stake --exact --nocapture +#[test] +fn test_swap_hotkey_with_existing_stake() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let mut weight = Weight::zero(); + + Stake::::insert(old_hotkey, coldkey, 100); + Stake::::insert(new_hotkey, coldkey, 50); + + assert_ok!(SubtensorModule::perform_hotkey_swap( + &old_hotkey, + &new_hotkey, + &coldkey, + &mut weight + )); + + assert_eq!(Stake::::get(new_hotkey, coldkey), 150); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_hotkey_with_multiple_subnets --exact --nocapture +#[test] +fn test_swap_hotkey_with_multiple_subnets() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let netuid1 = 0; + let netuid2 = 1; + let mut weight = Weight::zero(); + + add_network(netuid1, 0, 1); + add_network(netuid2, 0, 1); + IsNetworkMember::::insert(old_hotkey, netuid1, true); + IsNetworkMember::::insert(old_hotkey, netuid2, true); + + assert_ok!(SubtensorModule::perform_hotkey_swap( + &old_hotkey, + &new_hotkey, + &coldkey, + &mut weight + )); + + assert!(IsNetworkMember::::get(new_hotkey, netuid1)); + assert!(IsNetworkMember::::get(new_hotkey, netuid2)); + assert!(!IsNetworkMember::::get(old_hotkey, netuid1)); + assert!(!IsNetworkMember::::get(old_hotkey, netuid2)); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_staking_hotkeys_multiple_coldkeys --exact --nocapture +#[test] +fn test_swap_staking_hotkeys_multiple_coldkeys() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey1 = U256::from(3); + let coldkey2 = U256::from(4); + let mut weight = Weight::zero(); + + // Set up initial state + Stake::::insert(old_hotkey, coldkey1, 100); + Stake::::insert(old_hotkey, coldkey2, 200); + StakingHotkeys::::insert(coldkey1, vec![old_hotkey]); + StakingHotkeys::::insert(coldkey2, vec![old_hotkey, U256::from(5)]); + + assert_ok!(SubtensorModule::perform_hotkey_swap( + &old_hotkey, + &new_hotkey, + &coldkey1, + &mut weight + )); + + // Check if new_hotkey replaced old_hotkey in StakingHotkeys + assert!(StakingHotkeys::::get(coldkey1).contains(&new_hotkey)); + assert!(!StakingHotkeys::::get(coldkey1).contains(&old_hotkey)); + + // Check if new_hotkey replaced old_hotkey for coldkey2 as well + 
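+ // i.e. the swap rewrites StakingHotkeys for every coldkey staking to old_hotkey, not only for the coldkey that initiated it.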
assert!(StakingHotkeys::::get(coldkey2).contains(&new_hotkey)); + assert!(!StakingHotkeys::::get(coldkey2).contains(&old_hotkey)); + assert!(StakingHotkeys::::get(coldkey2).contains(&U256::from(5))); + // Other hotkeys should remain + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_hotkey_with_no_stake --exact --nocapture +#[test] +fn test_swap_hotkey_with_no_stake() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let mut weight = Weight::zero(); + + // Set up initial state with no stake + Owner::::insert(old_hotkey, coldkey); + + assert_ok!(SubtensorModule::perform_hotkey_swap( + &old_hotkey, + &new_hotkey, + &coldkey, + &mut weight + )); + + // Check if ownership transferred + assert!(!Owner::::contains_key(old_hotkey)); + assert_eq!(Owner::::get(new_hotkey), coldkey); + + // Ensure no unexpected changes in Stake + assert!(!Stake::::contains_key(old_hotkey, coldkey)); + assert!(!Stake::::contains_key(new_hotkey, coldkey)); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_hotkey_with_multiple_coldkeys_and_subnets --exact --nocapture +#[test] +fn test_swap_hotkey_with_multiple_coldkeys_and_subnets() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey1 = U256::from(3); + let coldkey2 = U256::from(4); + let netuid1 = 0; + let netuid2 = 1; + let mut weight = Weight::zero(); + + // Set up initial state + add_network(netuid1, 0, 1); + add_network(netuid2, 0, 1); + Owner::::insert(old_hotkey, coldkey1); + Stake::::insert(old_hotkey, coldkey1, 100); + Stake::::insert(old_hotkey, coldkey2, 200); + IsNetworkMember::::insert(old_hotkey, netuid1, true); + IsNetworkMember::::insert(old_hotkey, netuid2, true); + TotalHotkeyStake::::insert(old_hotkey, 300); + + assert_ok!(SubtensorModule::perform_hotkey_swap( + &old_hotkey, + &new_hotkey, + &coldkey1, + &mut weight + )); + + // Check ownership transfer + assert!(!Owner::::contains_key(old_hotkey)); + assert_eq!(Owner::::get(new_hotkey), coldkey1); + + // Check stake transfer + assert_eq!(Stake::::get(new_hotkey, coldkey1), 100); + assert_eq!(Stake::::get(new_hotkey, coldkey2), 200); + assert!(!Stake::::contains_key(old_hotkey, coldkey1)); + assert!(!Stake::::contains_key(old_hotkey, coldkey2)); + + // Check subnet membership transfer + assert!(IsNetworkMember::::get(new_hotkey, netuid1)); + assert!(IsNetworkMember::::get(new_hotkey, netuid2)); + assert!(!IsNetworkMember::::get(old_hotkey, netuid1)); + assert!(!IsNetworkMember::::get(old_hotkey, netuid2)); + + // Check total stake transfer + assert_eq!(TotalHotkeyStake::::get(new_hotkey), 300); + assert!(!TotalHotkeyStake::::contains_key(old_hotkey)); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_hotkey_tx_rate_limit_exceeded --exact --nocapture +#[test] +fn test_swap_hotkey_tx_rate_limit_exceeded() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let tempo: u16 = 13; + let old_hotkey = U256::from(1); + let new_hotkey_1 = U256::from(2); + let new_hotkey_2 = U256::from(4); + let coldkey = U256::from(3); + let swap_cost = 1_000_000_000u64 * 2; + + let tx_rate_limit = 1; + + // Get the current transaction rate limit + let current_tx_rate_limit = SubtensorModule::get_tx_rate_limit(); + log::info!("current_tx_rate_limit: {:?}", current_tx_rate_limit); + + // Set the transaction rate limit + 
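+ // With the limit set to a single block, the immediate second swap below must be rejected.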
SubtensorModule::set_tx_rate_limit(tx_rate_limit); + // assert the rate limit is set to 1000 blocks + assert_eq!(SubtensorModule::get_tx_rate_limit(), tx_rate_limit); + + // Setup initial state + add_network(netuid, tempo, 0); + register_ok_neuron(netuid, old_hotkey, coldkey, 0); + SubtensorModule::add_balance_to_coldkey_account(&coldkey, swap_cost); + + // Perform the first swap + assert_ok!(SubtensorModule::do_swap_hotkey( + <::RuntimeOrigin>::signed(coldkey), + &old_hotkey, + &new_hotkey_1 + )); + + // Attempt to perform another swap immediately, which should fail due to rate limit + assert_err!( + SubtensorModule::do_swap_hotkey( + <::RuntimeOrigin>::signed(coldkey), + &new_hotkey_1, + &new_hotkey_2 + ), + Error::::HotKeySetTxRateLimitExceeded + ); + + // move in time past the rate limit + step_block(1001); + assert_ok!(SubtensorModule::do_swap_hotkey( + <::RuntimeOrigin>::signed(coldkey), + &new_hotkey_1, + &new_hotkey_2 + )); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_do_swap_hotkey_err_not_owner --exact --nocapture +#[test] +fn test_do_swap_hotkey_err_not_owner() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let tempo: u16 = 13; + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let not_owner_coldkey = U256::from(4); + let swap_cost = 1_000_000_000u64; + + // Setup initial state + add_network(netuid, tempo, 0); + register_ok_neuron(netuid, old_hotkey, coldkey, 0); + SubtensorModule::add_balance_to_coldkey_account(¬_owner_coldkey, swap_cost); + + // Attempt the swap with a non-owner coldkey + assert_err!( + SubtensorModule::do_swap_hotkey( + <::RuntimeOrigin>::signed(not_owner_coldkey), + &old_hotkey, + &new_hotkey + ), + Error::::NonAssociatedColdKey + ); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_owner_success --exact --nocapture +#[test] +fn test_swap_owner_success() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let mut weight = Weight::zero(); + + // Initialize Owner for old_hotkey + Owner::::insert(old_hotkey, coldkey); + + // Perform the swap + SubtensorModule::perform_hotkey_swap(&old_hotkey, &new_hotkey, &coldkey, &mut weight); + + // Verify the swap + assert_eq!(Owner::::get(new_hotkey), coldkey); + assert!(!Owner::::contains_key(old_hotkey)); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_owner_old_hotkey_not_exist --exact --nocapture +#[test] +fn test_swap_owner_old_hotkey_not_exist() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let mut weight = Weight::zero(); + + // Ensure old_hotkey does not exist + assert!(!Owner::::contains_key(old_hotkey)); + + // Perform the swap + SubtensorModule::perform_hotkey_swap(&old_hotkey, &new_hotkey, &coldkey, &mut weight); + + // Verify the swap + assert_eq!(Owner::::get(new_hotkey), coldkey); + assert!(!Owner::::contains_key(old_hotkey)); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_owner_new_hotkey_already_exists --exact --nocapture +#[test] +fn test_swap_owner_new_hotkey_already_exists() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let another_coldkey = U256::from(4); + let mut weight = Weight::zero(); + + // 
Initialize Owner for old_hotkey and new_hotkey + Owner::::insert(old_hotkey, coldkey); + Owner::::insert(new_hotkey, another_coldkey); + + // Perform the swap + SubtensorModule::perform_hotkey_swap(&old_hotkey, &new_hotkey, &coldkey, &mut weight); + + // Verify the swap + assert_eq!(Owner::::get(new_hotkey), coldkey); + assert!(!Owner::::contains_key(old_hotkey)); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_total_hotkey_stake_success --exact --nocapture +#[test] +fn test_swap_total_hotkey_stake_success() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let total_stake = 1000u64; + let mut weight = Weight::zero(); + + // Initialize TotalHotkeyStake for old_hotkey + TotalHotkeyStake::::insert(old_hotkey, total_stake); + + // Perform the swap + SubtensorModule::perform_hotkey_swap(&old_hotkey, &new_hotkey, &coldkey, &mut weight); + + // Verify the swap + assert_eq!(TotalHotkeyStake::::get(new_hotkey), total_stake); + assert!(!TotalHotkeyStake::::contains_key(old_hotkey)); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_delegates_success --exact --nocapture +#[test] +fn test_swap_delegates_success() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let delegate_take = 10u16; + let mut weight = Weight::zero(); + + // Initialize Delegates for old_hotkey + Delegates::::insert(old_hotkey, delegate_take); + + // Perform the swap + SubtensorModule::perform_hotkey_swap(&old_hotkey, &new_hotkey, &coldkey, &mut weight); + + // Verify the swap + assert_eq!(Delegates::::get(new_hotkey), delegate_take); + assert!(!Delegates::::contains_key(old_hotkey)); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_stake_success --exact --nocapture +#[test] +fn test_swap_stake_success() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let stake_amount = 1000u64; + let mut weight = Weight::zero(); + + // Initialize Stake for old_hotkey + Stake::::insert(old_hotkey, coldkey, stake_amount); + + // Perform the swap + SubtensorModule::perform_hotkey_swap(&old_hotkey, &new_hotkey, &coldkey, &mut weight); + + // Verify the swap + assert_eq!(Stake::::get(new_hotkey, coldkey), stake_amount); + assert!(!Stake::::contains_key(old_hotkey, coldkey)); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_stake_old_hotkey_not_exist --exact --nocapture +#[test] +fn test_swap_stake_old_hotkey_not_exist() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let stake_amount = 1000u64; + let mut weight = Weight::zero(); + + // Initialize Stake for old_hotkey + Stake::::insert(old_hotkey, coldkey, stake_amount); + + // Ensure old_hotkey has a stake + assert!(Stake::::contains_key(old_hotkey, coldkey)); + + // Perform the swap + SubtensorModule::perform_hotkey_swap(&old_hotkey, &new_hotkey, &coldkey, &mut weight); + + // Verify that new_hotkey has the stake and old_hotkey does not + assert!(Stake::::contains_key(new_hotkey, coldkey)); + assert!(!Stake::::contains_key(old_hotkey, coldkey)); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- 
test_swap_total_hotkey_coldkey_stakes_this_interval_success --exact --nocapture +#[test] +fn test_swap_total_hotkey_coldkey_stakes_this_interval_success() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let stake = (1000u64, 42u64); // Example tuple value + let mut weight = Weight::zero(); + + // Initialize TotalHotkeyColdkeyStakesThisInterval for old_hotkey + TotalHotkeyColdkeyStakesThisInterval::::insert(old_hotkey, coldkey, stake); + + // Perform the swap + SubtensorModule::perform_hotkey_swap(&old_hotkey, &new_hotkey, &coldkey, &mut weight); + + // Verify the swap + assert_eq!( + TotalHotkeyColdkeyStakesThisInterval::::get(new_hotkey, coldkey), + stake + ); + assert!(!TotalHotkeyColdkeyStakesThisInterval::::contains_key( + old_hotkey, coldkey + )); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_hotkey_error_cases --exact --nocapture +#[test] +fn test_swap_hotkey_error_cases() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let wrong_coldkey = U256::from(4); + + // Set up initial state + Owner::::insert(old_hotkey, coldkey); + TotalNetworks::::put(1); + LastTxBlock::::insert(coldkey, 0); + + // Test not enough balance + let swap_cost = SubtensorModule::get_key_swap_cost(); + assert_noop!( + SubtensorModule::do_swap_hotkey( + RuntimeOrigin::signed(coldkey), + &old_hotkey, + &new_hotkey + ), + Error::::NotEnoughBalanceToPaySwapHotKey + ); + + let initial_balance = SubtensorModule::get_key_swap_cost() + 1000; + SubtensorModule::add_balance_to_coldkey_account(&coldkey, initial_balance); + + // Test new hotkey same as old + assert_noop!( + SubtensorModule::do_swap_hotkey( + RuntimeOrigin::signed(coldkey), + &old_hotkey, + &old_hotkey + ), + Error::::NewHotKeyIsSameWithOld + ); + + // Test new hotkey already registered + IsNetworkMember::::insert(new_hotkey, 0, true); + assert_noop!( + SubtensorModule::do_swap_hotkey( + RuntimeOrigin::signed(coldkey), + &old_hotkey, + &new_hotkey + ), + Error::::HotKeyAlreadyRegisteredInSubNet + ); + IsNetworkMember::::remove(new_hotkey, 0); + + // Test non-associated coldkey + assert_noop!( + SubtensorModule::do_swap_hotkey( + RuntimeOrigin::signed(wrong_coldkey), + &old_hotkey, + &new_hotkey + ), + Error::::NonAssociatedColdKey + ); + + // Run the successful swap + assert_ok!(SubtensorModule::do_swap_hotkey( + RuntimeOrigin::signed(coldkey), + &old_hotkey, + &new_hotkey + )); + + // Check balance after swap + assert_eq!(Balances::free_balance(coldkey), initial_balance - swap_cost); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_child_keys --exact --nocapture +#[test] +fn test_swap_child_keys() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let netuid = 0u16; + let children = vec![(100u64, U256::from(4)), (200u64, U256::from(5))]; + let mut weight = Weight::zero(); + + // Initialize ChildKeys for old_hotkey + add_network(netuid, 1, 0); + ChildKeys::::insert(old_hotkey, netuid, children.clone()); + + // Perform the swap + SubtensorModule::perform_hotkey_swap(&old_hotkey, &new_hotkey, &coldkey, &mut weight); + + // Verify the swap + assert_eq!(ChildKeys::::get(new_hotkey, netuid), children); + assert!(ChildKeys::::get(old_hotkey, netuid).is_empty()); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo 
test --test swap_hotkey -- test_swap_parent_keys --exact --nocapture +#[test] +fn test_swap_parent_keys() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let netuid = 0u16; + let parents = vec![(100u64, U256::from(4)), (200u64, U256::from(5))]; + let mut weight = Weight::zero(); + + // Initialize ParentKeys for old_hotkey + add_network(netuid, 1, 0); + ParentKeys::::insert(old_hotkey, netuid, parents.clone()); + + // Initialize ChildKeys for parent + ChildKeys::::insert(U256::from(4), netuid, vec![(100u64, old_hotkey)]); + ChildKeys::::insert(U256::from(5), netuid, vec![(200u64, old_hotkey)]); + + // Perform the swap + SubtensorModule::perform_hotkey_swap(&old_hotkey, &new_hotkey, &coldkey, &mut weight); + + // Verify ParentKeys swap + assert_eq!(ParentKeys::::get(new_hotkey, netuid), parents); + assert!(ParentKeys::::get(old_hotkey, netuid).is_empty()); + + // Verify ChildKeys update for parents + assert_eq!( + ChildKeys::::get(U256::from(4), netuid), + vec![(100u64, new_hotkey)] + ); + assert_eq!( + ChildKeys::::get(U256::from(5), netuid), + vec![(200u64, new_hotkey)] + ); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_multiple_subnets --exact --nocapture +#[test] +fn test_swap_multiple_subnets() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let netuid1 = 0u16; + let netuid2 = 1u16; + let children1 = vec![(100u64, U256::from(4)), (200u64, U256::from(5))]; + let children2 = vec![(300u64, U256::from(6))]; + let mut weight = Weight::zero(); + + add_network(netuid1, 1, 0); + add_network(netuid2, 1, 0); + + // Initialize ChildKeys for old_hotkey in multiple subnets + ChildKeys::::insert(old_hotkey, netuid1, children1.clone()); + ChildKeys::::insert(old_hotkey, netuid2, children2.clone()); + + // Perform the swap + SubtensorModule::perform_hotkey_swap(&old_hotkey, &new_hotkey, &coldkey, &mut weight); + + // Verify the swap for both subnets + assert_eq!(ChildKeys::::get(new_hotkey, netuid1), children1); + assert_eq!(ChildKeys::::get(new_hotkey, netuid2), children2); + assert!(ChildKeys::::get(old_hotkey, netuid1).is_empty()); + assert!(ChildKeys::::get(old_hotkey, netuid2).is_empty()); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_complex_parent_child_structure --exact --nocapture +#[test] +fn test_swap_complex_parent_child_structure() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let netuid = 0u16; + let parent1 = U256::from(4); + let parent2 = U256::from(5); + let child1 = U256::from(6); + let child2 = U256::from(7); + let mut weight = Weight::zero(); + + add_network(netuid, 1, 0); + + // Set up complex parent-child structure + ParentKeys::::insert( + old_hotkey, + netuid, + vec![(100u64, parent1), (200u64, parent2)], + ); + ChildKeys::::insert(old_hotkey, netuid, vec![(300u64, child1), (400u64, child2)]); + ChildKeys::::insert( + parent1, + netuid, + vec![(100u64, old_hotkey), (500u64, U256::from(8))], + ); + ChildKeys::::insert( + parent2, + netuid, + vec![(200u64, old_hotkey), (600u64, U256::from(9))], + ); + + // Perform the swap + SubtensorModule::perform_hotkey_swap(&old_hotkey, &new_hotkey, &coldkey, &mut weight); + + // Verify ParentKeys swap + assert_eq!( + ParentKeys::::get(new_hotkey, netuid), + vec![(100u64, parent1), 
(200u64, parent2)] + ); + assert!(ParentKeys::::get(old_hotkey, netuid).is_empty()); + + // Verify ChildKeys swap + assert_eq!( + ChildKeys::::get(new_hotkey, netuid), + vec![(300u64, child1), (400u64, child2)] + ); + assert!(ChildKeys::::get(old_hotkey, netuid).is_empty()); + + // Verify parent's ChildKeys update + assert_eq!( + ChildKeys::::get(parent1, netuid), + vec![(100u64, new_hotkey), (500u64, U256::from(8))] + ); + assert_eq!( + ChildKeys::::get(parent2, netuid), + vec![(200u64, new_hotkey), (600u64, U256::from(9))] + ); + }); +} diff --git a/pallets/subtensor/tests/weights.rs b/pallets/subtensor/tests/weights.rs index 020eb1f6b..214e3add0 100644 --- a/pallets/subtensor/tests/weights.rs +++ b/pallets/subtensor/tests/weights.rs @@ -107,7 +107,7 @@ fn test_set_rootweights_validate() { assert_err!( // Should get an invalid transaction error result_no_stake, - TransactionValidityError::Invalid(InvalidTransaction::Call,) + TransactionValidityError::Invalid(InvalidTransaction::Custom(4)) ); // Increase the stake to be equal to the minimum @@ -207,7 +207,7 @@ fn test_commit_weights_validate() { assert_err!( // Should get an invalid transaction error result_no_stake, - TransactionValidityError::Invalid(InvalidTransaction::Call,) + TransactionValidityError::Invalid(InvalidTransaction::Custom(1)) ); // Increase the stake to be equal to the minimum @@ -259,6 +259,66 @@ fn test_reveal_weights_dispatch_info_ok() { }); } +#[test] +fn test_set_weights_validate() { + // Testing the signed extension validate function + // correctly filters the `set_weights` transaction. + + new_test_ext(0).execute_with(|| { + let netuid: u16 = 1; + let coldkey = U256::from(0); + let hotkey: U256 = U256::from(1); + assert_ne!(hotkey, coldkey); + + let who = hotkey; // The hotkey signs this transaction + + let call = RuntimeCall::SubtensorModule(SubtensorCall::set_weights { + netuid, + dests: vec![1, 1], + weights: vec![1, 1], + version_key: 0, + }); + + // Create netuid + add_network(netuid, 0, 0); + // Register the hotkey + SubtensorModule::append_neuron(netuid, &hotkey, 0); + Owner::::insert(hotkey, coldkey); + + let min_stake = 500_000_000_000; + // Set the minimum stake + SubtensorModule::set_weights_min_stake(min_stake); + + // Verify stake is less than minimum + assert!(SubtensorModule::get_total_stake_for_hotkey(&hotkey) < min_stake); + let info: DispatchInfo = + DispatchInfoOf::<::RuntimeCall>::default(); + + let extension = pallet_subtensor::SubtensorSignedExtension::::new(); + // Submit to the signed extension validate function + let result_no_stake = extension.validate(&who, &call.clone(), &info, 10); + // Should fail due to insufficient stake + assert_err!( + result_no_stake, + TransactionValidityError::Invalid(InvalidTransaction::Custom(3)) + ); + + // Increase the stake to be equal to the minimum + SubtensorModule::increase_stake_on_hotkey_account(&hotkey, min_stake); + + // Verify stake is equal to minimum + assert_eq!( + SubtensorModule::get_total_stake_for_hotkey(&hotkey), + min_stake + ); + + // Submit to the signed extension validate function + let result_min_stake = extension.validate(&who, &call.clone(), &info, 10); + // Now the call should pass + assert_ok!(result_min_stake); + }); +} + #[test] fn test_reveal_weights_validate() { // Testing the signed extension validate function @@ -306,7 +366,7 @@ fn test_reveal_weights_validate() { assert_err!( // Should get an invalid transaction error result_no_stake, - TransactionValidityError::Invalid(InvalidTransaction::Call,) + 
TransactionValidityError::Invalid(InvalidTransaction::Custom(2)) ); // Increase the stake to be equal to the minimum diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index fcb02a24c..8a2886eb1 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -21,7 +21,7 @@ path = "src/spec_version.rs" [dependencies] subtensor-macros.workspace = true -subtensor-custom-rpc-runtime-api = { version = "0.0.2", path = "../pallets/subtensor/runtime-api", default-features = false } +subtensor-custom-rpc-runtime-api = { path = "../pallets/subtensor/runtime-api", default-features = false } smallvec = { workspace = true } log = { workspace = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ @@ -41,6 +41,7 @@ pallet-timestamp = { workspace = true } pallet-transaction-payment = { workspace = true } pallet-utility = { workspace = true } frame-executive = { workspace = true } +frame-metadata-hash-extension = { workspace = true } sp-api = { workspace = true } sp-block-builder = { workspace = true } sp-consensus-aura = { workspace = true } @@ -111,6 +112,7 @@ std = [ "codec/std", "scale-info/std", "frame-executive/std", + "frame-metadata-hash-extension/std", "frame-support/std", "frame-system-rpc-runtime-api/std", "frame-system/std", @@ -153,7 +155,7 @@ std = [ "sp-tracing/std", "log/std", "sp-storage/std", - "sp-genesis-builder/std" + "sp-genesis-builder/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", @@ -176,7 +178,7 @@ runtime-benchmarks = [ "pallet-multisig/runtime-benchmarks", "pallet-preimage/runtime-benchmarks", "pallet-scheduler/runtime-benchmarks", - "pallet-sudo/runtime-benchmarks" + "pallet-sudo/runtime-benchmarks", ] try-runtime = [ "frame-try-runtime/try-runtime", @@ -202,5 +204,6 @@ try-runtime = [ "sp-runtime/try-runtime", "pallet-admin-utils/try-runtime", "pallet-commitments/try-runtime", - "pallet-registry/try-runtime" + "pallet-registry/try-runtime", ] +metadata-hash = ["substrate-wasm-builder/metadata-hash"] diff --git a/runtime/build.rs b/runtime/build.rs index 8f021e838..c0fa0405b 100644 --- a/runtime/build.rs +++ b/runtime/build.rs @@ -1,5 +1,5 @@ fn main() { - #[cfg(feature = "std")] + #[cfg(all(feature = "std", not(feature = "metadata-hash")))] { substrate_wasm_builder::WasmBuilder::new() .with_current_project() @@ -7,4 +7,13 @@ fn main() { .import_memory() .build(); } + #[cfg(all(feature = "std", feature = "metadata-hash"))] + { + substrate_wasm_builder::WasmBuilder::new() + .with_current_project() + .export_heap_base() + .import_memory() + .enable_metadata_hash("TAO", 9) + .build(); + } } diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index a5763409d..ca8f83911 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -12,13 +12,15 @@ pub mod check_nonce; mod migrations; use codec::{Decode, Encode, MaxEncodedLen}; +use frame_support::traits::Imbalance; use frame_support::{ dispatch::DispatchResultWithPostInfo, genesis_builder_helper::{build_config, create_default_config}, - pallet_prelude::{DispatchError, Get}, - traits::{fungible::HoldConsideration, Contains, LinearStoragePrice}, + pallet_prelude::Get, + traits::{fungible::HoldConsideration, Contains, LinearStoragePrice, OnUnbalanced}, }; use frame_system::{EnsureNever, EnsureRoot, EnsureRootWithSuccess, RawOrigin}; +use pallet_balances::NegativeImbalance; use pallet_commitments::CanCommit; use pallet_grandpa::{ fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, @@ -68,6 +70,7 @@ pub use sp_runtime::BuildStorage; 
pub use sp_runtime::{Perbill, Permill}; // Subtensor module +pub use pallet_scheduler; pub use pallet_subtensor; // An index to a block. @@ -139,7 +142,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 166, + spec_version: 195, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -388,11 +391,22 @@ parameter_types! { pub FeeMultiplier: Multiplier = Multiplier::one(); } +/// Deduct the transaction fee from the Subtensor Pallet TotalIssuance when dropping the transaction +/// fee. +pub struct TransactionFeeHandler; +impl OnUnbalanced> for TransactionFeeHandler { + fn on_nonzero_unbalanced(credit: NegativeImbalance) { + let ti_before = pallet_subtensor::TotalIssuance::::get(); + pallet_subtensor::TotalIssuance::::put(ti_before.saturating_sub(credit.peek())); + drop(credit); + } +} + impl pallet_transaction_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type OnChargeTransaction = CurrencyAdapter; //type TransactionByteFee = TransactionByteFee; + type OnChargeTransaction = CurrencyAdapter; // Convert dispatch weight to a chargeable fee. type WeightToFee = LinearWeightToFee; @@ -516,7 +530,7 @@ impl pallet_collective::Config for Runtime { } // We call council members Triumvirate -#[allow(unused)] +#[allow(dead_code)] type TriumvirateMembership = pallet_membership::Instance1; impl pallet_membership::Config for Runtime { type RuntimeEvent = RuntimeEvent; @@ -532,7 +546,7 @@ impl pallet_membership::Config for Runtime { } // We call our top K delegates membership Senate -#[allow(unused)] +#[allow(dead_code)] type SenateMembership = pallet_membership::Instance2; impl pallet_membership::Config for Runtime { type RuntimeEvent = RuntimeEvent; @@ -613,7 +627,11 @@ pub enum ProxyType { Governance, // Both above governance Staking, Registration, + Transfer, + SmallTransfer, } +// Transfers below SMALL_TRANSFER_LIMIT are considered small transfers +pub const SMALL_TRANSFER_LIMIT: Balance = 500_000_000; // 0.5 TAO impl Default for ProxyType { fn default() -> Self { Self::Any @@ -632,6 +650,22 @@ impl InstanceFilter for ProxyType { | RuntimeCall::SubtensorModule(pallet_subtensor::Call::burned_register { .. }) | RuntimeCall::SubtensorModule(pallet_subtensor::Call::root_register { .. }) ), + ProxyType::Transfer => matches!( + c, + RuntimeCall::Balances(pallet_balances::Call::transfer_keep_alive { .. }) + | RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { .. }) + | RuntimeCall::Balances(pallet_balances::Call::transfer_all { .. }) + ), + ProxyType::SmallTransfer => match c { + RuntimeCall::Balances(pallet_balances::Call::transfer_keep_alive { + value, .. + }) => *value < SMALL_TRANSFER_LIMIT, + RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { + value, + .. 
+ }) => *value < SMALL_TRANSFER_LIMIT, + _ => false, + }, ProxyType::Owner => matches!(c, RuntimeCall::AdminUtils(..)), ProxyType::NonCritical => !matches!( c, @@ -668,8 +702,12 @@ impl InstanceFilter for ProxyType { (x, y) if x == y => true, (ProxyType::Any, _) => true, (_, ProxyType::Any) => false, - (ProxyType::NonTransfer, _) => true, + (ProxyType::NonTransfer, _) => { + // NonTransfer is NOT a superset of Transfer or SmallTransfer + !matches!(o, ProxyType::Transfer | ProxyType::SmallTransfer) + } (ProxyType::Governance, ProxyType::Triumvirate | ProxyType::Senate) => true, + (ProxyType::Transfer, ProxyType::SmallTransfer) => true, _ => false, } } @@ -835,6 +873,18 @@ impl pallet_commitments::Config for Runtime { type RateLimit = CommitmentRateLimit; } +#[cfg(not(feature = "fast-blocks"))] +pub const INITIAL_SUBNET_TEMPO: u16 = 99; + +#[cfg(feature = "fast-blocks")] +pub const INITIAL_SUBNET_TEMPO: u16 = 10; + +#[cfg(not(feature = "fast-blocks"))] +pub const INITIAL_CHILDKEY_TAKE_RATELIMIT: u64 = 216000; // 30 days at 12 seconds per block + +#[cfg(feature = "fast-blocks")] +pub const INITIAL_CHILDKEY_TAKE_RATELIMIT: u64 = 5; + // Configure the pallet subtensor. parameter_types! { pub const SubtensorInitialRho: u16 = 10; @@ -847,7 +897,7 @@ parameter_types! { pub const SubtensorInitialValidatorPruneLen: u64 = 1; pub const SubtensorInitialScalingLawPower: u16 = 50; // 0.5 pub const SubtensorInitialMaxAllowedValidators: u16 = 128; - pub const SubtensorInitialTempo: u16 = 99; + pub const SubtensorInitialTempo: u16 = INITIAL_SUBNET_TEMPO; pub const SubtensorInitialDifficulty: u64 = 10_000_000; pub const SubtensorInitialAdjustmentInterval: u16 = 100; pub const SubtensorInitialAdjustmentAlpha: u64 = 0; // no weight to previous value. @@ -858,7 +908,10 @@ parameter_types! { pub const SubtensorInitialPruningScore : u16 = u16::MAX; pub const SubtensorInitialBondsMovingAverage: u64 = 900_000; pub const SubtensorInitialDefaultTake: u16 = 11_796; // 18% honest number. - pub const SubtensorInitialMinTake: u16 = 5_898; // 9% + pub const SubtensorInitialMinDelegateTake: u16 = 0; // Allow 0% delegate take + pub const SubtensorInitialDefaultChildKeyTake: u16 = 0; // Allow 0% childkey take + pub const SubtensorInitialMinChildKeyTake: u16 = 0; // 0 % + pub const SubtensorInitialMaxChildKeyTake: u16 = 11_796; // 18 % pub const SubtensorInitialWeightsVersionKey: u64 = 0; pub const SubtensorInitialMinDifficulty: u64 = 10_000_000; pub const SubtensorInitialMaxDifficulty: u64 = u64::MAX / 4; @@ -868,6 +921,7 @@ parameter_types! { pub const SubtensorInitialMaxBurn: u64 = 100_000_000_000; // 100 tao pub const SubtensorInitialTxRateLimit: u64 = 1000; pub const SubtensorInitialTxDelegateTakeRateLimit: u64 = 216000; // 30 days at 12 seconds per block + pub const SubtensorInitialTxChildKeyTakeRateLimit: u64 = INITIAL_CHILDKEY_TAKE_RATELIMIT; pub const SubtensorInitialRAORecycledForRegistration: u64 = 0; // 0 rao pub const SubtensorInitialSenateRequiredStakePercentage: u64 = 1; // 1 percent of total stake pub const SubtensorInitialNetworkImmunity: u64 = 7 * 7200; @@ -882,17 +936,22 @@ parameter_types! { pub const InitialAlphaHigh: u16 = 58982; // Represents 0.9 as per the production default pub const InitialAlphaLow: u16 = 45875; // Represents 0.7 as per the production default pub const InitialLiquidAlphaOn: bool = false; // Default value for LiquidAlphaOn - pub const SubtensorInitialBaseDifficulty: u64 = 10_000_000; // Base difficulty + pub const SubtensorInitialHotkeyEmissionTempo: u64 = 7200; // Drain every day. 
+ pub const SubtensorInitialNetworkMaxStake: u64 = u64::MAX; // Maximum possible value for u64, this make the make stake infinity + pub const InitialColdkeySwapScheduleDuration: BlockNumber = 5 * 24 * 60 * 60 / 12; // 5 days + pub const InitialDissolveNetworkScheduleDuration: BlockNumber = 5 * 24 * 60 * 60 / 12; // 5 days + } impl pallet_subtensor::Config for Runtime { type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; type SudoRuntimeCall = RuntimeCall; type Currency = Balances; type CouncilOrigin = EnsureMajoritySenate; type SenateMembers = ManageSenateMembers; type TriumvirateInterface = TriumvirateVotes; - + type Scheduler = Scheduler; type InitialRho = SubtensorInitialRho; type InitialKappa = SubtensorInitialKappa; type InitialMaxAllowedUids = SubtensorInitialMaxAllowedUids; @@ -913,8 +972,10 @@ impl pallet_subtensor::Config for Runtime { type InitialMaxRegistrationsPerBlock = SubtensorInitialMaxRegistrationsPerBlock; type InitialPruningScore = SubtensorInitialPruningScore; type InitialMaxAllowedValidators = SubtensorInitialMaxAllowedValidators; - type InitialDefaultTake = SubtensorInitialDefaultTake; - type InitialMinTake = SubtensorInitialMinTake; + type InitialDefaultDelegateTake = SubtensorInitialDefaultTake; + type InitialDefaultChildKeyTake = SubtensorInitialDefaultChildKeyTake; + type InitialMinDelegateTake = SubtensorInitialMinDelegateTake; + type InitialMinChildKeyTake = SubtensorInitialMinChildKeyTake; type InitialWeightsVersionKey = SubtensorInitialWeightsVersionKey; type InitialMaxDifficulty = SubtensorInitialMaxDifficulty; type InitialMinDifficulty = SubtensorInitialMinDifficulty; @@ -924,6 +985,8 @@ impl pallet_subtensor::Config for Runtime { type InitialMinBurn = SubtensorInitialMinBurn; type InitialTxRateLimit = SubtensorInitialTxRateLimit; type InitialTxDelegateTakeRateLimit = SubtensorInitialTxDelegateTakeRateLimit; + type InitialTxChildKeyTakeRateLimit = SubtensorInitialTxChildKeyTakeRateLimit; + type InitialMaxChildKeyTake = SubtensorInitialMaxChildKeyTake; type InitialRAORecycledForRegistration = SubtensorInitialRAORecycledForRegistration; type InitialSenateRequiredStakePercentage = SubtensorInitialSenateRequiredStakePercentage; type InitialNetworkImmunityPeriod = SubtensorInitialNetworkImmunity; @@ -938,7 +1001,11 @@ impl pallet_subtensor::Config for Runtime { type AlphaHigh = InitialAlphaHigh; type AlphaLow = InitialAlphaLow; type LiquidAlphaOn = InitialLiquidAlphaOn; - type InitialBaseDifficulty = SubtensorInitialBaseDifficulty; + type InitialHotkeyEmissionTempo = SubtensorInitialHotkeyEmissionTempo; + type InitialNetworkMaxStake = SubtensorInitialNetworkMaxStake; + type Preimages = Preimage; + type InitialColdkeySwapScheduleDuration = InitialColdkeySwapScheduleDuration; + type InitialDissolveNetworkScheduleDuration = InitialDissolveNetworkScheduleDuration; } use sp_runtime::BoundedVec; @@ -950,291 +1017,12 @@ impl pallet_admin_utils::AuraInterface> for AuraPalletIntrf } } -pub struct SubtensorInterface; - -impl - pallet_admin_utils::SubtensorInterface< - AccountId, - as frame_support::traits::Currency>::Balance, - RuntimeOrigin, - > for SubtensorInterface -{ - fn set_max_delegate_take(max_take: u16) { - SubtensorModule::set_max_delegate_take(max_take); - } - - fn set_min_delegate_take(max_take: u16) { - SubtensorModule::set_min_delegate_take(max_take); - } - - fn set_tx_rate_limit(rate_limit: u64) { - SubtensorModule::set_tx_rate_limit(rate_limit); - } - - fn set_tx_delegate_take_rate_limit(rate_limit: u64) { - 
SubtensorModule::set_tx_delegate_take_rate_limit(rate_limit); - } - - fn set_serving_rate_limit(netuid: u16, rate_limit: u64) { - SubtensorModule::set_serving_rate_limit(netuid, rate_limit); - } - - fn set_max_burn(netuid: u16, max_burn: u64) { - SubtensorModule::set_max_burn(netuid, max_burn); - } - - fn set_min_burn(netuid: u16, min_burn: u64) { - SubtensorModule::set_min_burn(netuid, min_burn); - } - - fn set_burn(netuid: u16, burn: u64) { - SubtensorModule::set_burn(netuid, burn); - } - - fn set_max_difficulty(netuid: u16, max_diff: u64) { - SubtensorModule::set_max_difficulty(netuid, max_diff); - } - - fn set_min_difficulty(netuid: u16, min_diff: u64) { - SubtensorModule::set_min_difficulty(netuid, min_diff); - } - - fn set_difficulty(netuid: u16, diff: u64) { - SubtensorModule::set_difficulty(netuid, diff); - } - - fn set_weights_rate_limit(netuid: u16, rate_limit: u64) { - SubtensorModule::set_weights_set_rate_limit(netuid, rate_limit); - } - - fn set_weights_version_key(netuid: u16, version: u64) { - SubtensorModule::set_weights_version_key(netuid, version); - } - - fn set_bonds_moving_average(netuid: u16, moving_average: u64) { - SubtensorModule::set_bonds_moving_average(netuid, moving_average); - } - - fn set_max_allowed_validators(netuid: u16, max_validators: u16) { - SubtensorModule::set_max_allowed_validators(netuid, max_validators); - } - - fn get_root_netuid() -> u16 { - SubtensorModule::get_root_netuid() - } - - fn if_subnet_exist(netuid: u16) -> bool { - SubtensorModule::if_subnet_exist(netuid) - } - - fn create_account_if_non_existent(coldkey: &AccountId, hotkey: &AccountId) { - SubtensorModule::create_account_if_non_existent(coldkey, hotkey) - } - - fn coldkey_owns_hotkey(coldkey: &AccountId, hotkey: &AccountId) -> bool { - SubtensorModule::coldkey_owns_hotkey(coldkey, hotkey) - } - - fn increase_stake_on_coldkey_hotkey_account( - coldkey: &AccountId, - hotkey: &AccountId, - increment: u64, - ) { - SubtensorModule::increase_stake_on_coldkey_hotkey_account(coldkey, hotkey, increment); - } - - fn add_balance_to_coldkey_account(coldkey: &AccountId, amount: Balance) { - SubtensorModule::add_balance_to_coldkey_account(coldkey, amount); - } - - fn get_current_block_as_u64() -> u64 { - SubtensorModule::get_current_block_as_u64() - } - - fn get_subnetwork_n(netuid: u16) -> u16 { - SubtensorModule::get_subnetwork_n(netuid) - } - - fn get_max_allowed_uids(netuid: u16) -> u16 { - SubtensorModule::get_max_allowed_uids(netuid) - } - - fn append_neuron(netuid: u16, new_hotkey: &AccountId, block_number: u64) { - SubtensorModule::append_neuron(netuid, new_hotkey, block_number) - } - - fn get_neuron_to_prune(netuid: u16) -> u16 { - SubtensorModule::get_neuron_to_prune(netuid) - } - - fn replace_neuron(netuid: u16, uid_to_replace: u16, new_hotkey: &AccountId, block_number: u64) { - SubtensorModule::replace_neuron(netuid, uid_to_replace, new_hotkey, block_number); - } - - fn set_total_issuance(total_issuance: u64) { - SubtensorModule::set_total_issuance(total_issuance); - } - - fn set_network_immunity_period(net_immunity_period: u64) { - SubtensorModule::set_network_immunity_period(net_immunity_period); - } - - fn set_network_min_lock(net_min_lock: u64) { - SubtensorModule::set_network_min_lock(net_min_lock); - } - - fn set_subnet_limit(limit: u16) { - SubtensorModule::set_max_subnets(limit); - } - - fn set_lock_reduction_interval(interval: u64) { - SubtensorModule::set_lock_reduction_interval(interval); - } - - fn set_tempo(netuid: u16, tempo: u16) { - SubtensorModule::set_tempo(netuid, 
tempo); - } - - fn set_subnet_owner_cut(subnet_owner_cut: u16) { - SubtensorModule::set_subnet_owner_cut(subnet_owner_cut); - } - - fn set_network_rate_limit(limit: u64) { - SubtensorModule::set_network_rate_limit(limit); - } - - fn set_max_registrations_per_block(netuid: u16, max_registrations_per_block: u16) { - SubtensorModule::set_max_registrations_per_block(netuid, max_registrations_per_block); - } - - fn set_adjustment_alpha(netuid: u16, adjustment_alpha: u64) { - SubtensorModule::set_adjustment_alpha(netuid, adjustment_alpha); - } - - fn set_target_registrations_per_interval(netuid: u16, target_registrations_per_interval: u16) { - SubtensorModule::set_target_registrations_per_interval( - netuid, - target_registrations_per_interval, - ); - } - - fn set_network_pow_registration_allowed(netuid: u16, registration_allowed: bool) { - SubtensorModule::set_network_pow_registration_allowed(netuid, registration_allowed); - } - - fn set_network_registration_allowed(netuid: u16, registration_allowed: bool) { - SubtensorModule::set_network_registration_allowed(netuid, registration_allowed); - } - - fn set_activity_cutoff(netuid: u16, activity_cutoff: u16) { - SubtensorModule::set_activity_cutoff(netuid, activity_cutoff); - } - - fn ensure_subnet_owner_or_root(o: RuntimeOrigin, netuid: u16) -> Result<(), DispatchError> { - SubtensorModule::ensure_subnet_owner_or_root(o, netuid) - } - - fn set_rho(netuid: u16, rho: u16) { - SubtensorModule::set_rho(netuid, rho); - } - - fn set_kappa(netuid: u16, kappa: u16) { - SubtensorModule::set_kappa(netuid, kappa); - } - - fn set_max_allowed_uids(netuid: u16, max_allowed: u16) { - SubtensorModule::set_max_allowed_uids(netuid, max_allowed); - } - - fn set_min_allowed_weights(netuid: u16, min_allowed_weights: u16) { - SubtensorModule::set_min_allowed_weights(netuid, min_allowed_weights); - } - - fn set_immunity_period(netuid: u16, immunity_period: u16) { - SubtensorModule::set_immunity_period(netuid, immunity_period); - } - - fn set_max_weight_limit(netuid: u16, max_weight_limit: u16) { - SubtensorModule::set_max_weight_limit(netuid, max_weight_limit); - } - - fn set_scaling_law_power(netuid: u16, scaling_law_power: u16) { - SubtensorModule::set_scaling_law_power(netuid, scaling_law_power); - } - - fn set_validator_prune_len(netuid: u16, validator_prune_len: u64) { - SubtensorModule::set_validator_prune_len(netuid, validator_prune_len); - } - - fn set_adjustment_interval(netuid: u16, adjustment_interval: u16) { - SubtensorModule::set_adjustment_interval(netuid, adjustment_interval); - } - - fn set_weights_set_rate_limit(netuid: u16, weights_set_rate_limit: u64) { - SubtensorModule::set_weights_set_rate_limit(netuid, weights_set_rate_limit); - } - - fn set_rao_recycled(netuid: u16, rao_recycled: u64) { - SubtensorModule::set_rao_recycled(netuid, rao_recycled); - } - - fn is_hotkey_registered_on_network(netuid: u16, hotkey: &AccountId) -> bool { - SubtensorModule::is_hotkey_registered_on_network(netuid, hotkey) - } - - fn init_new_network(netuid: u16, tempo: u16) { - SubtensorModule::init_new_network(netuid, tempo); - } - - fn set_weights_min_stake(min_stake: u64) { - SubtensorModule::set_weights_min_stake(min_stake); - } - - fn clear_small_nominations() { - SubtensorModule::clear_small_nominations(); - } - - fn set_nominator_min_required_stake(min_stake: u64) { - SubtensorModule::set_nominator_min_required_stake(min_stake); - } - - fn get_nominator_min_required_stake() -> u64 { - SubtensorModule::get_nominator_min_required_stake() - } - - fn 
set_target_stakes_per_interval(target_stakes_per_interval: u64) { - SubtensorModule::set_target_stakes_per_interval(target_stakes_per_interval) - } - - fn set_commit_reveal_weights_interval(netuid: u16, interval: u64) { - SubtensorModule::set_commit_reveal_weights_interval(netuid, interval); - } - - fn set_commit_reveal_weights_enabled(netuid: u16, enabled: bool) { - SubtensorModule::set_commit_reveal_weights_enabled(netuid, enabled); - } - - fn set_liquid_alpha_enabled(netuid: u16, enabled: bool) { - SubtensorModule::set_liquid_alpha_enabled(netuid, enabled); - } - - fn do_set_alpha_values( - origin: RuntimeOrigin, - netuid: u16, - alpha_low: u16, - alpha_high: u16, - ) -> Result<(), DispatchError> { - SubtensorModule::do_set_alpha_values(origin, netuid, alpha_low, alpha_high) - } -} - impl pallet_admin_utils::Config for Runtime { type RuntimeEvent = RuntimeEvent; type AuthorityId = AuraId; type MaxAuthorities = ConstU32<32>; type Aura = AuraPalletIntrf; type Balance = Balance; - type Subtensor = SubtensorInterface; type WeightInfo = pallet_admin_utils::weights::SubstrateWeight; } @@ -1242,27 +1030,27 @@ impl pallet_admin_utils::Config for Runtime { construct_runtime!( pub struct Runtime { - System: frame_system, - RandomnessCollectiveFlip: pallet_insecure_randomness_collective_flip, - Timestamp: pallet_timestamp, - Aura: pallet_aura, - Grandpa: pallet_grandpa, - Balances: pallet_balances, - TransactionPayment: pallet_transaction_payment, - SubtensorModule: pallet_subtensor, - Triumvirate: pallet_collective::::{Pallet, Call, Storage, Origin, Event, Config}, - TriumvirateMembers: pallet_membership::::{Pallet, Call, Storage, Event, Config}, - SenateMembers: pallet_membership::::{Pallet, Call, Storage, Event, Config}, - Utility: pallet_utility, - Sudo: pallet_sudo, - Multisig: pallet_multisig, - Preimage: pallet_preimage, - Scheduler: pallet_scheduler, - Proxy: pallet_proxy, - Registry: pallet_registry, - Commitments: pallet_commitments, - AdminUtils: pallet_admin_utils, - SafeMode: pallet_safe_mode, + System: frame_system = 0, + RandomnessCollectiveFlip: pallet_insecure_randomness_collective_flip = 1, + Timestamp: pallet_timestamp = 2, + Aura: pallet_aura = 3, + Grandpa: pallet_grandpa = 4, + Balances: pallet_balances = 5, + TransactionPayment: pallet_transaction_payment = 6, + SubtensorModule: pallet_subtensor = 7, + Triumvirate: pallet_collective::::{Pallet, Call, Storage, Origin, Event, Config} = 8, + TriumvirateMembers: pallet_membership::::{Pallet, Call, Storage, Event, Config} = 9, + SenateMembers: pallet_membership::::{Pallet, Call, Storage, Event, Config} = 10, + Utility: pallet_utility = 11, + Sudo: pallet_sudo = 12, + Multisig: pallet_multisig = 13, + Preimage: pallet_preimage = 14, + Scheduler: pallet_scheduler = 15, + Proxy: pallet_proxy = 16, + Registry: pallet_registry = 17, + Commitments: pallet_commitments = 18, + AdminUtils: pallet_admin_utils = 19, + SafeMode: pallet_safe_mode = 20, } ); @@ -1284,9 +1072,13 @@ pub type SignedExtra = ( pallet_transaction_payment::ChargeTransactionPayment, pallet_subtensor::SubtensorSignedExtension, pallet_commitments::CommitmentsSignedExtension, + frame_metadata_hash_extension::CheckMetadataHash, ); -type Migrations = pallet_grandpa::migrations::MigrateV4ToV5; +type Migrations = + pallet_subtensor::migrations::migrate_init_total_issuance::initialise_total_issuance::Migration< + Runtime, + >; // Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = @@ -1527,10 +1319,7 @@ impl_runtime_apis! 
{ use frame_system_benchmarking::Pallet as SystemBench; use baseline::Pallet as BaselineBench; - #[allow(non_local_definitions)] impl frame_system_benchmarking::Config for Runtime {} - - #[allow(non_local_definitions)] impl baseline::Config for Runtime {} use frame_support::traits::WhitelistedStorageKeys; @@ -1637,6 +1426,21 @@ impl_runtime_apis! { result.encode() } + fn get_subnet_info_v2(netuid: u16) -> Vec { + let _result = SubtensorModule::get_subnet_info_v2(netuid); + if _result.is_some() { + let result = _result.expect("Could not get SubnetInfo"); + result.encode() + } else { + vec![] + } + } + + fn get_subnets_info_v2() -> Vec { + let result = SubtensorModule::get_subnets_info_v2(); + result.encode() + } + fn get_subnet_hyperparams(netuid: u16) -> Vec { let _result = SubtensorModule::get_subnet_hyperparams(netuid); if _result.is_some() { diff --git a/runtime/tests/pallet_proxy.rs b/runtime/tests/pallet_proxy.rs index 796dfc471..eea250938 100644 --- a/runtime/tests/pallet_proxy.rs +++ b/runtime/tests/pallet_proxy.rs @@ -200,3 +200,30 @@ fn test_proxy_pallet() { } } } + +#[test] +fn test_non_transfer_cannot_transfer() { + new_test_ext().execute_with(|| { + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(AccountId::from(ACCOUNT)), + AccountId::from(DELEGATE).into(), + ProxyType::NonTransfer, + 0 + )); + + let call = call_transfer(); + assert_ok!(Proxy::proxy( + RuntimeOrigin::signed(AccountId::from(DELEGATE)), + AccountId::from(ACCOUNT).into(), + None, + Box::new(call.clone()), + )); + + System::assert_last_event( + pallet_proxy::Event::ProxyExecuted { + result: Err(SystemError::CallFiltered.into()), + } + .into(), + ); + }); +} diff --git a/scripts/benchmark_all.sh b/scripts/benchmark_all.sh new file mode 100755 index 000000000..580e5425e --- /dev/null +++ b/scripts/benchmark_all.sh @@ -0,0 +1,24 @@ +#!/bin/sh +set -ex + +# List of pallets you want to benchmark +pallets=("pallet_subtensor" "pallet_collective" "pallet_commitments" "pallet_registry" "pallet_admin_utils") + +# Chain spec and output directory +chain_spec="finney" # or your specific chain spec + +for pallet in "${pallets[@]}" +do + echo "Benchmarking $pallet..." + cargo run --profile=production --features=runtime-benchmarks,try-runtime --bin node-subtensor -- benchmark pallet \ + --chain $chain_spec \ + --wasm-execution=compiled \ + --pallet $pallet \ + --extrinsic '*' \ + --steps 50 \ + --repeat 5 \ + --output "pallets/$pallet/src/weights.rs" \ + --template ./.maintain/frame-weight-template.hbs # Adjust this path to your template file +done + +echo "All pallets have been benchmarked and weights updated." diff --git a/scripts/build.sh b/scripts/build.sh index 548af664b..3f588a1cc 100755 --- a/scripts/build.sh +++ b/scripts/build.sh @@ -1,2 +1,2 @@ -cargo build --profile production --features runtime-benchmarks +cargo build --profile production --features "runtime-benchmarks metadata-hash" diff --git a/scripts/localnet.sh b/scripts/localnet.sh index 65ca5c0a9..850a314d8 100755 --- a/scripts/localnet.sh +++ b/scripts/localnet.sh @@ -1,5 +1,14 @@ #!/bin/bash +# Check if `--no-purge` passed as a parameter +NO_PURGE=0 +for arg in "$@"; do + if [ "$arg" = "--no-purge" ]; then + NO_PURGE=1 + break + fi +done + # Determine the directory this script resides in. This allows invoking it from any location. SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)" @@ -39,7 +48,7 @@ fi if [[ $BUILD_BINARY == "1" ]]; then echo "*** Building substrate binary..." 
- cargo build --release --features "$FEATURES" --manifest-path "$BASE_DIR/Cargo.toml" + cargo build --workspace --profile=release --features "$FEATURES" --manifest-path "$BASE_DIR/Cargo.toml" echo "*** Binary compiled" fi @@ -47,10 +56,14 @@ echo "*** Building chainspec..." "$BASE_DIR/target/release/node-subtensor" build-spec --disable-default-bootnode --raw --chain $CHAIN >$FULL_PATH echo "*** Chainspec built and output to file" -echo "*** Purging previous state..." -"$BASE_DIR/target/release/node-subtensor" purge-chain -y --base-path /tmp/bob --chain="$FULL_PATH" >/dev/null 2>&1 -"$BASE_DIR/target/release/node-subtensor" purge-chain -y --base-path /tmp/alice --chain="$FULL_PATH" >/dev/null 2>&1 -echo "*** Previous chainstate purged" +if [ $NO_PURGE -eq 1 ]; then + echo "*** Purging previous state skipped..." +else + echo "*** Purging previous state..." + "$BASE_DIR/target/release/node-subtensor" purge-chain -y --base-path /tmp/bob --chain="$FULL_PATH" >/dev/null 2>&1 + "$BASE_DIR/target/release/node-subtensor" purge-chain -y --base-path /tmp/alice --chain="$FULL_PATH" >/dev/null 2>&1 + echo "*** Previous chainstate purged" +fi echo "*** Starting localnet nodes..." alice_start=( diff --git a/scripts/publish.sh b/scripts/publish.sh new file mode 100644 index 000000000..3eb0fc6a5 --- /dev/null +++ b/scripts/publish.sh @@ -0,0 +1,28 @@ +#!/bin/bash +set -ex +cd support/macros +cargo publish +cd ../.. +cd pallets/commitments +cargo publish +cd .. +cd collective +cargo publish +cd .. +cd registry +cargo publish +cd .. +cd subtensor +cargo publish +cd runtime-api +cargo publish +cd ../.. +cd admin-utils +cargo publish +cd ../.. +cd runtime +cargo publish +cd .. +cd node +cargo publish +echo "published successfully." diff --git a/scripts/test_specific.sh b/scripts/test_specific.sh index 018872d33..85f3ebe30 100755 --- a/scripts/test_specific.sh +++ b/scripts/test_specific.sh @@ -1,6 +1,4 @@ pallet="${3:-pallet-subtensor}" features="${4:-pow-faucet}" -# RUST_LOG="pallet_subtensor=info" cargo test --release --features=$features -p $pallet --test $1 -- $2 --nocapture --exact - -RUST_LOG=INFO cargo test --release --features=$features -p $pallet --test $1 -- $2 --nocapture --exact \ No newline at end of file +SKIP_WASM_BUILD=1 RUST_LOG=DEBUG cargo test --release --features=$features -p $pallet --test $1 -- $2 --nocapture --exact \ No newline at end of file diff --git a/src/lib.rs b/src/lib.rs new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/src/lib.rs @@ -0,0 +1 @@ + diff --git a/support/linting/Cargo.toml b/support/linting/Cargo.toml new file mode 100644 index 000000000..1e37d8163 --- /dev/null +++ b/support/linting/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "subtensor-linting" +version = "0.1.0" +edition = "2021" + +[dependencies] +syn.workspace = true +quote.workspace = true +proc-macro2.workspace = true + +[lints] +workspace = true diff --git a/support/linting/src/lib.rs b/support/linting/src/lib.rs new file mode 100644 index 000000000..d02a70a2b --- /dev/null +++ b/support/linting/src/lib.rs @@ -0,0 +1,6 @@ +pub mod lint; +pub use lint::*; + +mod require_freeze_struct; + +pub use require_freeze_struct::RequireFreezeStruct; diff --git a/support/linting/src/lint.rs b/support/linting/src/lint.rs new file mode 100644 index 000000000..3c099d40c --- /dev/null +++ b/support/linting/src/lint.rs @@ -0,0 +1,13 @@ +use syn::File; + +pub type Result = core::result::Result<(), Vec>; + +/// A trait that defines custom lints that can be run within our workspace. 
+/// +/// Each lint is run in parallel on all Rust source files in the workspace. Within a lint you +/// can issue an error the same way you would in a proc macro, and otherwise return `Ok(())` if +/// there are no errors. +pub trait Lint: Send + Sync { + /// Lints the given Rust source file, returning a compile error if any issues are found. + fn lint(source: &File) -> Result; +} diff --git a/support/linting/src/require_freeze_struct.rs b/support/linting/src/require_freeze_struct.rs new file mode 100644 index 000000000..8f02e2697 --- /dev/null +++ b/support/linting/src/require_freeze_struct.rs @@ -0,0 +1,184 @@ +use super::*; +use syn::{ + parse_quote, punctuated::Punctuated, visit::Visit, Attribute, File, ItemStruct, Meta, MetaList, + Path, Token, +}; + +pub struct RequireFreezeStruct; + +impl Lint for RequireFreezeStruct { + fn lint(source: &File) -> Result { + let mut visitor = EncodeDecodeVisitor::default(); + + visitor.visit_file(source); + + if !visitor.errors.is_empty() { + return Err(visitor.errors); + } + + Ok(()) + } +} + +#[derive(Default)] +struct EncodeDecodeVisitor { + errors: Vec, +} + +impl<'ast> Visit<'ast> for EncodeDecodeVisitor { + fn visit_item_struct(&mut self, node: &'ast ItemStruct) { + let has_encode_decode = node.attrs.iter().any(is_derive_encode_or_decode); + let has_freeze_struct = node.attrs.iter().any(is_freeze_struct); + + if has_encode_decode && !has_freeze_struct { + self.errors.push(syn::Error::new( + node.ident.span(), + "Struct with Encode/Decode derive must also have #[freeze_struct(..)] attribute.", + )); + } + + syn::visit::visit_item_struct(self, node); + } +} + +fn is_freeze_struct(attr: &Attribute) -> bool { + if let Meta::List(meta_list) = &attr.meta { + let Some(seg) = meta_list.path.segments.last() else { + return false; + }; + if seg.ident == "freeze_struct" && !meta_list.tokens.is_empty() { + return true; + } + } + false +} + +fn is_derive_encode_or_decode(attr: &Attribute) -> bool { + if let Meta::List(MetaList { path, tokens, .. 
}) = &attr.meta { + if path.is_ident("derive") { + let nested: Punctuated = parse_quote!(#tokens); + return nested.iter().any(|nested| { + nested.segments.iter().any(|seg| seg.ident == "Encode") + || nested.segments.iter().any(|seg| seg.ident == "Decode") + }); + } + } + false +} + +#[cfg(test)] +mod tests { + use super::*; + + fn lint_struct(input: &str) -> Result { + let item_struct: ItemStruct = syn::parse_str(input).expect("should only use on a struct"); + let mut visitor = EncodeDecodeVisitor::default(); + visitor.visit_item_struct(&item_struct); + if !visitor.errors.is_empty() { + return Err(visitor.errors); + } + Ok(()) + } + + #[test] + fn test_no_attributes() { + let input = r#" + pub struct Test { + field: u32, + } + "#; + assert!(lint_struct(input).is_ok()); + } + + #[test] + fn test_freeze_struct_only() { + let input = r#" + #[freeze_struct("12345")] + pub struct Test { + field: u32, + } + "#; + assert!(lint_struct(input).is_ok()); + } + + #[test] + fn test_encode_only() { + let input = r#" + #[derive(Encode)] + pub struct Test { + field: u32, + } + "#; + assert!(lint_struct(input).is_err()); + } + + #[test] + fn test_decode_only() { + let input = r#" + #[derive(Decode)] + pub struct Test { + field: u32, + } + "#; + assert!(lint_struct(input).is_err()); + } + + #[test] + fn test_encode_and_freeze_struct() { + let input = r#" + #[freeze_struct("12345")] + #[derive(Encode)] + pub struct Test { + field: u32, + } + "#; + assert!(lint_struct(input).is_ok()); + } + + #[test] + fn test_decode_and_freeze_struct() { + let input = r#" + #[freeze_struct("12345")] + #[derive(Decode)] + pub struct Test { + field: u32, + } + "#; + assert!(lint_struct(input).is_ok()); + } + + #[test] + fn test_encode_decode_without_freeze_struct() { + let input = r#" + #[derive(Encode, Decode)] + pub struct Test { + field: u32, + } + "#; + assert!(lint_struct(input).is_err()); + } + + #[test] + fn test_encode_decode_with_freeze_struct() { + let input = r#" + #[freeze_struct("12345")] + #[derive(Encode, Decode)] + pub struct Test { + field: u32, + } + "#; + assert!(lint_struct(input).is_ok()); + } + + #[test] + fn test_temporary_freeze_struct() { + let input = r#" + #[freeze_struct] + #[derive(Encode, Decode)] + pub struct Test { + field: u32, + } + "#; + assert!(lint_struct(input).is_err()); + } +} diff --git a/support/macros/Cargo.toml b/support/macros/Cargo.toml index 10a15ba0d..b5a5febad 100644 --- a/support/macros/Cargo.toml +++ b/support/macros/Cargo.toml @@ -12,9 +12,9 @@ homepage = "https://bittensor.com/" proc-macro = true [dependencies] -syn = { version = "2", features = ["full", "visit-mut", "extra-traits"] } -proc-macro2 = "1" -quote = "1" +syn.workspace = true +proc-macro2.workspace = true +quote.workspace = true ahash = "0.8" [lints] diff --git a/support/tools/Cargo.toml b/support/tools/Cargo.toml new file mode 100644 index 000000000..fa3e1fd50 --- /dev/null +++ b/support/tools/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "subtensor-tools" +version = "0.1.0" +edition = "2021" +license = "MIT" + +description = "support tools for Subtensor" +repository = "https://github.com/opentensor/subtensor" +homepage = "https://bittensor.com" + +[[bin]] +name = "bump-version" +path = "src/bump_version.rs" + +[dependencies] +anyhow = "1.0" +clap = { version = "4.5", features = ["derive"] } +semver = "1.0" +toml_edit = "0.22" diff --git a/support/tools/src/bump_version.rs b/support/tools/src/bump_version.rs new file mode 100644 index 000000000..a16293c30 --- /dev/null +++ b/support/tools/src/bump_version.rs @@ 
-0,0 +1,49 @@ +use clap::Parser; +use semver::Version; +use std::{ + fs, + io::{Read, Seek, Write}, + str::FromStr, +}; +use toml_edit::{DocumentMut, Item, Value}; + +const TOML_PATHS: [&str; 9] = [ + "support/macros", + "pallets/commitments", + "pallets/collective", + "pallets/registry", + "pallets/subtensor", + "pallets/subtensor/runtime-api", + "pallets/admin-utils", + "runtime", + "node", +]; + +#[derive(Parser)] +struct CliArgs { + #[arg(required = true)] + version: Version, +} + +fn main() -> anyhow::Result<()> { + let args = CliArgs::parse(); + let version = args.version; + + for path in TOML_PATHS { + let cargo_toml_path = format!("{path}/Cargo.toml"); + let mut toml_file = fs::File::options() + .read(true) + .write(true) + .open(&cargo_toml_path)?; + let mut toml_str = String::new(); + toml_file.read_to_string(&mut toml_str)?; + let mut modified_toml_doc = DocumentMut::from_str(&toml_str)?; + + modified_toml_doc["package"]["version"] = Item::Value(Value::from(version.to_string())); + toml_file.set_len(0)?; + toml_file.rewind()?; + toml_file.write_all(modified_toml_doc.to_string().as_bytes())?; + } + + Ok(()) +}
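
Editor's note: the `Lint` trait introduced in support/linting/src/lint.rs above is the extension point that the new "check custom lints" CI job exercises. The sketch below is not part of the diff; it is a hypothetical example (the `ForbidUnimplemented` lint and its visitor are invented for illustration) showing how an additional lint could implement that trait, assuming the same `Lint`/`Result` definitions and `use super::*;` pattern used by `RequireFreezeStruct`.

use super::*; // brings `Lint` and `Result` from lint.rs, as in require_freeze_struct.rs
use syn::{spanned::Spanned, visit::Visit, File};

/// Hypothetical example lint: reject any `unimplemented!()` left in workspace sources.
pub struct ForbidUnimplemented;

impl Lint for ForbidUnimplemented {
    fn lint(source: &File) -> Result {
        let mut visitor = UnimplementedVisitor { errors: Vec::new() };
        visitor.visit_file(source);
        if visitor.errors.is_empty() {
            Ok(())
        } else {
            Err(visitor.errors)
        }
    }
}

struct UnimplementedVisitor {
    errors: Vec<syn::Error>,
}

impl<'ast> Visit<'ast> for UnimplementedVisitor {
    fn visit_macro(&mut self, node: &'ast syn::Macro) {
        // Flag bare `unimplemented!()` invocations; everything else passes through.
        if node.path.is_ident("unimplemented") {
            self.errors.push(syn::Error::new(
                node.span(),
                "unimplemented!() must not be committed to the workspace",
            ));
        }
        syn::visit::visit_macro(self, node);
    }
}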