diff --git a/.dockerignore b/.dockerignore index 2886a059a9..e12ea8709b 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,6 +1,26 @@ +# IDE/Editor configs .devcontainer -.github .vscode +.idea +*.swp +*.swo + +# Build artifacts target/ +*.pyc +*.pyo +*.pyd +__pycache__/ + +# Git-related +.git +.gitignore + +# CI/CD +.github .dockerignore +.gitattributes + +# Dockerfiles Dockerfile +Dockerfile-localnet \ No newline at end of file diff --git a/.github/workflows/check-bittensor-e2e-tests.yml.yml b/.github/workflows/check-bittensor-e2e-tests.yml.yml index 902ea2cd39..721eb96994 100644 --- a/.github/workflows/check-bittensor-e2e-tests.yml.yml +++ b/.github/workflows/check-bittensor-e2e-tests.yml.yml @@ -110,6 +110,7 @@ jobs: build-image-with-current-branch: needs: check-label + if: needs.check-label.outputs.skip-bittensor-e2e-tests == 'false' runs-on: [self-hosted, type-ccx33] steps: - name: Checkout code @@ -211,12 +212,6 @@ jobs: - name: Retag Docker Image run: docker tag localnet ghcr.io/opentensor/subtensor-localnet:devnet-ready -# - name: Run tests -# working-directory: ${{ github.workspace }}/btcli -# run: | -# source ${{ github.workspace }}/venv/bin/activate -# uv run pytest ${{ matrix.test-file }} -s - - name: Run with retry working-directory: ${{ github.workspace }}/btcli run: | @@ -311,12 +306,6 @@ jobs: - name: Retag Docker Image run: docker tag localnet ghcr.io/opentensor/subtensor-localnet:devnet-ready -# - name: Run tests -# working-directory: ${{ github.workspace }}/bittensor -# run: | -# source ${{ github.workspace }}/venv/bin/activate -# uv run pytest ${{ matrix.test-file }} -s - - name: Run with retry working-directory: ${{ github.workspace }}/bittensor run: | diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index da9732f186..c79be55103 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -6,7 +6,7 @@ on: workflow_dispatch: inputs: branch-or-tag: - description: "Branch or tag to use for the Docker image tag and ref to checkout (optional)" + description: "The branch or tag to use as the Docker image tag (optional)." 
required: false default: "" push: branches: - main - testnet - devnet - + concurrency: group: docker-localnet-${{ github.ref }} cancel-in-progress: true @@ -27,29 +27,130 @@ permissions: security-events: write jobs: - publish: - runs-on: [self-hosted, type-ccx53, type-ccx43, type-ccx33] - + setup: + runs-on: ubuntu-latest + outputs: + tag: ${{ steps.vars.outputs.tag }} + ref: ${{ steps.vars.outputs.ref }} + latest_tag: ${{ steps.vars.outputs.latest_tag }} steps: - name: Determine Docker tag and ref - id: tag + id: vars + run: | + if [[ "${{ github.event_name }}" == "pull_request" ]]; then + echo "ref=${{ github.head_ref }}" >> $GITHUB_OUTPUT + echo "tag=${{ github.base_ref }}" >> $GITHUB_OUTPUT + else + tag="${{ github.event.inputs.branch-or-tag || github.ref_name }}" + echo "ref=${{ github.ref_name }}" >> $GITHUB_OUTPUT + echo "tag=$tag" >> $GITHUB_OUTPUT + fi + + if [[ "$tag" != "devnet-ready" ]]; then + echo "latest_tag=true" >> $GITHUB_OUTPUT + else + echo "latest_tag=false" >> $GITHUB_OUTPUT + fi + + # build artifacts for fast-runtime and non-fast-runtime + artifacts: + name: Node • ${{ matrix.runtime }} • ${{ matrix.platform.arch }} + needs: setup + strategy: + matrix: + platform: + # triple names used in `scripts/install_prebuilt_binaries.sh` + - runner: [self-hosted, type-ccx33] + triple: x86_64-unknown-linux-gnu + arch: amd64 + - runner: [ubuntu-24.04-arm] + triple: aarch64-unknown-linux-gnu + arch: arm64 + + runtime: ["fast-runtime", "non-fast-runtime"] + + runs-on: ${{ matrix.platform.runner }} + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ needs.setup.outputs.ref }} + + - name: Install Rust + dependencies + run: | + chmod +x ./scripts/install_build_env.sh + ./scripts/install_build_env.sh + + - name: Add Rust target triple + run: | + source "$HOME/.cargo/env" + rustup target add ${{ matrix.platform.triple }} + + - name: Patch limits for local run run: | - branch_or_tag="${{ github.event.inputs.branch-or-tag || github.ref_name }}" - echo "Determined branch or tag: $branch_or_tag" - echo "tag=$branch_or_tag" >> $GITHUB_ENV - echo "ref=$branch_or_tag" >> $GITHUB_ENV - - # Check if this is a tagged release (not devnet-ready/devnet/testnet) - if [[ "$branch_or_tag" != "devnet-ready" ]]; then - echo "latest_tag=true" >> $GITHUB_ENV + chmod +x ./scripts/localnet_patch.sh + ./scripts/localnet_patch.sh + + - name: Build binaries + run: | + export PATH="$HOME/.cargo/bin:$PATH" + export CARGO_BUILD_TARGET="${{ matrix.platform.triple }}" + + if [ "${{ matrix.runtime }}" = "fast-runtime" ]; then + ./scripts/localnet.sh --build-only else - echo "latest_tag=false" >> $GITHUB_ENV + ./scripts/localnet.sh False --build-only fi + # use `ci_target` name because .dockerignore excludes `target` + - name: Prepare artifacts for upload + run: | + RUNTIME="${{ matrix.runtime }}" + TRIPLE="${{ matrix.platform.triple }}" + + mkdir -p build/ci_target/${RUNTIME}/${TRIPLE}/release/ + cp -v target/${RUNTIME}/${TRIPLE}/release/node-subtensor \ + build/ci_target/${RUNTIME}/${TRIPLE}/release/ + + mkdir -p build/ci_target/${RUNTIME}/${TRIPLE}/release/wbuild/node-subtensor-runtime/ + cp -v target/${RUNTIME}/${TRIPLE}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm \ + build/ci_target/${RUNTIME}/${TRIPLE}/release/wbuild/node-subtensor-runtime/ + + - name: Upload artifact + uses: actions/upload-artifact@v4 + with: + name: binaries-${{ matrix.platform.triple }}-${{ matrix.runtime }} + path: build/ + if-no-files-found: error + + # 
Collect all artifacts and publish them to docker repo + docker: + needs: [setup, artifacts] + runs-on: [self-hosted, type-ccx33] + defaults: + run: + working-directory: ${{ github.workspace }} + + steps: - name: Checkout code uses: actions/checkout@v4 with: - ref: ${{ env.ref }} + ref: ${{ needs.setup.outputs.ref }} + + - name: Download all binary artifacts + uses: actions/download-artifact@v5 + with: + pattern: binaries-* + path: build/ + merge-multiple: true + + - name: Show current Git branch + run: | + echo "===============================" + echo "Current Git branch:" + git rev-parse --abbrev-ref HEAD + echo "===============================" - name: Set up QEMU uses: docker/setup-qemu-action@v3 @@ -64,18 +165,16 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Patch non-fast-block node - run: | - chmod +x ./scripts/localnet_patch.sh - ./scripts/localnet_patch.sh - - name: Build and push Docker image uses: docker/build-push-action@v6 with: context: . file: Dockerfile-localnet + build-args: | + BUILT_IN_CI="Boom shakalaka" + push: true platforms: linux/amd64,linux/arm64 tags: | - ghcr.io/${{ github.repository }}-localnet:${{ env.tag }} - ${{ env.latest_tag == 'true' && format('ghcr.io/{0}-localnet:latest', github.repository) || '' }} + ghcr.io/${{ github.repository }}-localnet:${{ needs.setup.outputs.tag }} + ${{ needs.setup.outputs.latest_tag == 'true' && format('ghcr.io/{0}-localnet:latest', github.repository) || '' }} diff --git a/Cargo.lock b/Cargo.lock index b2048237da..18ee1cd90f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -972,11 +972,11 @@ dependencies = [ [[package]] name = "async-lock" -version = "3.4.0" +version = "3.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" +checksum = "5fd03604047cee9b6ce9de9f70c6cd540a0520c813cbd49bae61f33ab80ed1dc" dependencies = [ - "event-listener 5.4.0", + "event-listener 5.4.1", "event-listener-strategy", "pin-project-lite", ] @@ -1005,7 +1005,7 @@ dependencies = [ "async-task", "blocking", "cfg-if", - "event-listener 5.4.0", + "event-listener 5.4.1", "futures-lite", "rustix 1.0.8", ] @@ -1544,9 +1544,9 @@ checksum = "fd6c0e7b807d60291f42f33f58480c0bfafe28ed08286446f45e463728cf9c1c" [[package]] name = "cc" -version = "1.2.30" +version = "1.2.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "deec109607ca693028562ed836a5f1c4b8bd77755c4e132fc5ce11b0b6211ae7" +checksum = "c3a42d84bb6b69d3a8b3eaacf0d88f179e1929695e1ad012b6cf64d9caaa5fd2" dependencies = [ "jobserver", "libc", @@ -3050,9 +3050,9 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "5.4.0" +version = "5.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" +checksum = "e13b66accf52311f30a0db42147dadea9850cb48cd070028831ae5f5d4b856ab" dependencies = [ "concurrent-queue", "parking", @@ -3065,7 +3065,7 @@ version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93" dependencies = [ - "event-listener 5.4.0", + "event-listener 5.4.1", "pin-project-lite", ] @@ -3499,6 +3499,12 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" +[[package]] +name = 
"fixedbitset" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" + [[package]] name = "flume" version = "0.11.1" @@ -4106,9 +4112,9 @@ checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" -version = "2.6.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5edaec856126859abb19ed65f39e90fea3a9574b9707f13539acf4abf7eb532" +checksum = "f78e10609fe0e0b3f4157ffab1876319b5b0db102a2c60dc4626306dc46b44ad" dependencies = [ "fastrand", "futures-core", @@ -6913,7 +6919,7 @@ dependencies = [ "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-preimage", - "pallet-proxy 38.0.0", + "pallet-proxy 40.1.0", "pallet-registry", "pallet-safe-mode", "pallet-scheduler", @@ -6929,7 +6935,7 @@ dependencies = [ "pallet-timestamp", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", - "pallet-utility 38.0.0", + "pallet-utility 40.0.0", "parity-scale-codec", "polkadot-runtime-common", "precompile-utils", @@ -7291,7 +7297,7 @@ dependencies = [ "expander", "indexmap 2.10.0", "itertools 0.11.0", - "petgraph", + "petgraph 0.6.5", "proc-macro-crate 3.3.0", "proc-macro2", "quote", @@ -7946,18 +7952,13 @@ dependencies = [ [[package]] name = "pallet-proxy" -version = "38.0.0" +version = "40.1.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", "pallet-balances", - "pallet-utility 38.0.0", + "pallet-utility 40.0.0", "parity-scale-codec", + "polkadot-sdk-frame", "scale-info", - "sp-core", - "sp-io", - "sp-runtime", "subtensor-macros", ] @@ -8012,8 +8013,8 @@ dependencies = [ "frame-support", "frame-system", "pallet-balances", - "pallet-proxy 40.1.0", - "pallet-utility 40.0.0", + "pallet-proxy 40.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2503-6)", + "pallet-utility 40.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2503-6)", "parity-scale-codec", "scale-info", "sp-arithmetic", @@ -8133,7 +8134,7 @@ dependencies = [ "pallet-preimage", "pallet-scheduler", "pallet-subtensor-swap", - "pallet-utility 38.0.0", + "pallet-utility 40.0.0", "parity-scale-codec", "parity-util-mem", "polkadot-runtime-common", @@ -8313,7 +8314,7 @@ dependencies = [ [[package]] name = "pallet-utility" -version = "38.0.0" +version = "40.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -8617,7 +8618,17 @@ version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ - "fixedbitset", + "fixedbitset 0.4.2", + "indexmap 2.10.0", +] + +[[package]] +name = "petgraph" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" +dependencies = [ + "fixedbitset 0.5.7", "indexmap 2.10.0", ] @@ -9146,9 +9157,9 @@ checksum = "23eff02c070c70f31878a3d915e88a914ecf3e153741e2fb572dde28cce20fde" [[package]] name = "polling" -version = "3.9.0" +version = "3.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ee9b2fa7a4517d2c91ff5bc6c297a427a96749d15f98fcdbb22c05571a4d4b7" +checksum = "b5bd19146350fe804f7cb2669c851c03d69da628803dab0d98018142aaa5d829" dependencies = [ "cfg-if", "concurrent-queue", @@ -9521,7 +9532,7 @@ dependencies = [ "log", "multimap", "once_cell", - 
"petgraph", + "petgraph 0.7.1", "prettyplease", "prost 0.13.5", "prost-types", @@ -10089,9 +10100,9 @@ dependencies = [ [[package]] name = "ruint" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11256b5fe8c68f56ac6f39ef0720e592f33d2367a4782740d9c9142e889c7fb4" +checksum = "9ecb38f82477f20c5c3d62ef52d7c4e536e38ea9b73fb570a20c5cae0e14bcf6" dependencies = [ "alloy-rlp", "ark-ff 0.3.0", @@ -11958,9 +11969,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.141" +version = "1.0.142" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b9eff21ebe718216c6ec64e1d9ac57087aad11efc64e32002bce4a0d4c03d3" +checksum = "030fedb782600dcbd6f02d479bf0d817ac3bb40d644745b769d6a96bc3afc5a7" dependencies = [ "itoa", "memchr", @@ -12080,9 +12091,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.5" +version = "1.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9203b8055f63a2a00e2f593bb0510367fe707d7ff1e5c872de2f537b339e5410" +checksum = "b2a4719bff48cee6b39d12c020eeb490953ad2443b7055bd0b21fca26bd8c28b" dependencies = [ "libc", ] @@ -12201,7 +12212,7 @@ dependencies = [ "derive_more 0.99.20", "ed25519-zebra", "either", - "event-listener 5.4.0", + "event-listener 5.4.1", "fnv", "futures-lite", "futures-util", @@ -12250,7 +12261,7 @@ dependencies = [ "bs58", "derive_more 0.99.20", "either", - "event-listener 5.4.0", + "event-listener 5.4.1", "fnv", "futures-channel", "futures-lite", @@ -12570,7 +12581,7 @@ dependencies = [ [[package]] name = "sp-crypto-ec-utils" version = "0.10.0" -source = "git+https://github.com/paritytech/polkadot-sdk#177b03958c766fe053f28424ee6f6748644bb794" +source = "git+https://github.com/paritytech/polkadot-sdk#59fb2e7482d471a7ec4e8d3b30499497efa7b34c" dependencies = [ "ark-bls12-377", "ark-bls12-377-ext", @@ -12666,7 +12677,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#177b03958c766fe053f28424ee6f6748644bb794" +source = "git+https://github.com/paritytech/polkadot-sdk#59fb2e7482d471a7ec4e8d3b30499497efa7b34c" dependencies = [ "proc-macro2", "quote", @@ -12676,7 +12687,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.25.0" -source = "git+https://github.com/paritytech/polkadot-sdk#177b03958c766fe053f28424ee6f6748644bb794" +source = "git+https://github.com/paritytech/polkadot-sdk#59fb2e7482d471a7ec4e8d3b30499497efa7b34c" dependencies = [ "environmental", "parity-scale-codec", @@ -12886,7 +12897,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "24.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#177b03958c766fe053f28424ee6f6748644bb794" +source = "git+https://github.com/paritytech/polkadot-sdk#59fb2e7482d471a7ec4e8d3b30499497efa7b34c" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -12923,7 +12934,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "17.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#177b03958c766fe053f28424ee6f6748644bb794" +source = "git+https://github.com/paritytech/polkadot-sdk#59fb2e7482d471a7ec4e8d3b30499497efa7b34c" dependencies = [ "Inflector", "expander", @@ -13025,12 +13036,12 @@ source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable [[package]] name = "sp-std" version = "14.0.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk#177b03958c766fe053f28424ee6f6748644bb794" +source = "git+https://github.com/paritytech/polkadot-sdk#59fb2e7482d471a7ec4e8d3b30499497efa7b34c" [[package]] name = "sp-storage" version = "19.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#177b03958c766fe053f28424ee6f6748644bb794" +source = "git+https://github.com/paritytech/polkadot-sdk#59fb2e7482d471a7ec4e8d3b30499497efa7b34c" dependencies = [ "impl-serde 0.5.0", "parity-scale-codec", @@ -13066,7 +13077,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "16.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#177b03958c766fe053f28424ee6f6748644bb794" +source = "git+https://github.com/paritytech/polkadot-sdk#59fb2e7482d471a7ec4e8d3b30499497efa7b34c" dependencies = [ "parity-scale-codec", "regex", @@ -13163,7 +13174,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "20.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#177b03958c766fe053f28424ee6f6748644bb794" +source = "git+https://github.com/paritytech/polkadot-sdk#59fb2e7482d471a7ec4e8d3b30499497efa7b34c" dependencies = [ "anyhow", "impl-trait-for-tuples", @@ -13253,7 +13264,7 @@ dependencies = [ "crc", "crossbeam-queue", "either", - "event-listener 5.4.0", + "event-listener 5.4.1", "futures-core", "futures-intrusive", "futures-io", @@ -13702,7 +13713,7 @@ dependencies = [ "pallet-evm-precompile-modexp", "pallet-evm-precompile-sha3fips", "pallet-evm-precompile-simple", - "pallet-proxy 38.0.0", + "pallet-proxy 40.1.0", "pallet-subtensor", "pallet-subtensor-swap", "precompile-utils", @@ -14270,9 +14281,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.47.0" +version = "1.47.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43864ed400b6043a4757a25c7a64a8efde741aed79a056a2fb348a406701bb35" +checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038" dependencies = [ "backtrace", "bytes", @@ -14339,9 +14350,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.15" +version = "0.7.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df" +checksum = "14307c986784f72ef81c89db7d9e28d6ac26d16213b109ea501696195e6e3ce5" dependencies = [ "bytes", "futures-core", @@ -16090,9 +16101,9 @@ dependencies = [ [[package]] name = "zerovec" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428" +checksum = "bdbb9122ea75b11bf96e7492afb723e8a7fbe12c67417aa95e7e3d18144d37cd" dependencies = [ "yoke", "zerofrom", diff --git a/Cargo.toml b/Cargo.toml index 3415b8624d..2fb9ea9644 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -113,6 +113,7 @@ expander = "2" ahash = { version = "0.8", default-features = false } regex = { version = "1.11.1", default-features = false } +frame = { package = "polkadot-sdk-frame", git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2503-6", default-features = false } frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2503-6", default-features = false } frame-benchmarking-cli = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2503-6", default-features = false } frame-executive = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2503-6", default-features 
= false } diff --git a/Dockerfile-localnet b/Dockerfile-localnet index 70232b0553..b4be1f9291 100644 --- a/Dockerfile-localnet +++ b/Dockerfile-localnet @@ -1,6 +1,7 @@ ARG BASE_IMAGE=ubuntu:latest FROM $BASE_IMAGE AS builder + SHELL ["/bin/bash", "-c"] # Set noninteractive mode for apt-get @@ -12,28 +13,34 @@ LABEL ai.opentensor.image.authors="operations@opentensor.ai" \ ai.opentensor.image.description="Opentensor Subtensor Blockchain" \ ai.opentensor.image.documentation="https://docs.bittensor.com" -# Set up Rust environment -ENV RUST_BACKTRACE=1 - -RUN sed -i 's|http://archive.ubuntu.com/ubuntu|http://mirrors.edge.kernel.org/ubuntu|g' /etc/apt/sources.list -RUN apt-get update -RUN apt-get install -y curl build-essential protobuf-compiler clang git pkg-config libssl-dev llvm libudev-dev -# Copy entire repository +# Copy the repo first (before the RUN steps) to enable layer cache reuse COPY . /build WORKDIR /build -# Install Rust -RUN set -o pipefail && curl https://sh.rustup.rs -sSf | sh -s -- -y +# Set up env vars +ARG BUILT_IN_CI +ARG TARGETARCH + +ENV BUILT_IN_CI=${BUILT_IN_CI} +ENV RUST_BACKTRACE=1 ENV PATH="/root/.cargo/bin:${PATH}" -RUN rustup toolchain install -RUN rustup target add wasm32v1-none + +## Update certificates +RUN apt-get update && apt-get install -y ca-certificates + +# Install requirements +RUN chmod +x ./scripts/install_build_env.sh +RUN ./scripts/install_build_env.sh ## Build fast-runtime node RUN ./scripts/localnet.sh --build-only # Build non-fast-runtime RUN ./scripts/localnet.sh False --build-only +# Install prebuilt binaries if they were produced in CI +RUN chmod +x ./scripts/install_prebuilt_binaries.sh +RUN ./scripts/install_prebuilt_binaries.sh + # Verify the binaries was produced RUN test -e /build/target/fast-runtime/release/node-subtensor RUN test -e /build/target/non-fast-runtime/release/node-subtensor @@ -56,7 +63,7 @@ RUN chmod +x /scripts/localnet.sh COPY --from=builder /build/target/fast-runtime/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm target/fast-runtime/release/node_subtensor_runtime.compact.compressed.wasm COPY --from=builder /build/target/non-fast-runtime/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm target/non-fast-runtime/release/node_subtensor_runtime.compact.compressed.wasm -## Ubdate certificates +# Update certificates for next layer RUN apt-get update && apt-get install -y ca-certificates # Do not build (just run) diff --git a/common/src/lib.rs b/common/src/lib.rs index 82f0916894..a5d09ad974 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -9,7 +9,7 @@ use runtime_common::prod_or_fast; use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; use sp_runtime::{ - MultiSignature, + MultiSignature, Vec, traits::{IdentifyAccount, Verify}, }; use subtensor_macros::freeze_struct; @@ -175,6 +175,9 @@ pub trait SubnetInfo<AccountId> { fn mechanism(netuid: NetUid) -> u16; fn is_owner(account_id: &AccountId, netuid: NetUid) -> bool; fn is_subtoken_enabled(netuid: NetUid) -> bool; + fn get_validator_trust(netuid: NetUid) -> Vec<u16>; + fn get_validator_permit(netuid: NetUid) -> Vec<bool>; + fn hotkey_of_uid(netuid: NetUid, uid: u16) -> Option<AccountId>; } pub trait BalanceOps<AccountId> { @@ -224,6 +227,162 @@ pub mod time { pub const DAYS: BlockNumber = HOURS * 24; } +#[freeze_struct("7e5202d7f18b39d4")] +#[repr(transparent)] +#[derive( + Deserialize, + Serialize, + Clone, + Copy, + Decode, + DecodeWithMemTracking, + Default, + Encode, + Eq, + Hash, + MaxEncodedLen, + Ord, + 
PartialEq, + PartialOrd, + RuntimeDebug, +)] +#[serde(transparent)] +pub struct MechId(u8); + +impl MechId { + pub const MAIN: MechId = Self(0); +} + +impl From<u8> for MechId { + fn from(value: u8) -> Self { + Self(value) + } +} + +impl From<MechId> for u16 { + fn from(val: MechId) -> Self { + u16::from(val.0) + } +} + +impl From<MechId> for u64 { + fn from(val: MechId) -> Self { + u64::from(val.0) + } +} + +impl From<MechId> for u8 { + fn from(val: MechId) -> Self { + u8::from(val.0) + } +} + +impl Display for MechId { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + Display::fmt(&self.0, f) + } +} + +impl CompactAs for MechId { + type As = u8; + + fn encode_as(&self) -> &Self::As { + &self.0 + } + + fn decode_from(v: Self::As) -> Result<Self, codec::Error> { + Ok(Self(v)) + } +} + +impl From<Compact<MechId>> for MechId { + fn from(c: Compact<MechId>) -> Self { + c.0 + } +} + +impl TypeInfo for MechId { + type Identity = <u8 as TypeInfo>::Identity; + fn type_info() -> scale_info::Type { + <u8 as TypeInfo>::type_info() + } +} + +#[freeze_struct("2d995c5478e16d4d")] +#[repr(transparent)] +#[derive( + Deserialize, + Serialize, + Clone, + Copy, + Decode, + DecodeWithMemTracking, + Default, + Encode, + Eq, + Hash, + MaxEncodedLen, + Ord, + PartialEq, + PartialOrd, + RuntimeDebug, +)] +#[serde(transparent)] +pub struct NetUidStorageIndex(u16); + +impl NetUidStorageIndex { + pub const ROOT: NetUidStorageIndex = Self(0); +} + +impl Display for NetUidStorageIndex { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + Display::fmt(&self.0, f) + } +} + +impl CompactAs for NetUidStorageIndex { + type As = u16; + + fn encode_as(&self) -> &Self::As { + &self.0 + } + + fn decode_from(v: Self::As) -> Result<Self, codec::Error> { + Ok(Self(v)) + } +} + +impl From<Compact<NetUidStorageIndex>> for NetUidStorageIndex { + fn from(c: Compact<NetUidStorageIndex>) -> Self { + c.0 + } +} + +impl From<NetUid> for NetUidStorageIndex { + fn from(val: NetUid) -> Self { + val.0.into() + } +} + +impl From<NetUidStorageIndex> for u16 { + fn from(val: NetUidStorageIndex) -> Self { + val.0 + } +} + +impl From<u16> for NetUidStorageIndex { + fn from(value: u16) -> Self { + Self(value) + } +} + +impl TypeInfo for NetUidStorageIndex { + type Identity = <u16 as TypeInfo>::Identity; + fn type_info() -> scale_info::Type { + <u16 as TypeInfo>::type_info() + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/evm-tests/README.md b/evm-tests/README.md index 83dc8f326f..ed3782e0f7 100644 --- a/evm-tests/README.md +++ b/evm-tests/README.md @@ -13,6 +13,12 @@ between runtime and precompile contracts. ## polkadot api +You need `polkadot-api` globally installed: + +```bash +$ npm i -g polkadot-api +``` + To get the metadata, you need start the localnet via run `./scripts/localnet.sh`. then run following command to get metadata, a folder name .papi will be created, which include the metadata and type definitions. 
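As a companion to the README instructions above, here is a minimal sketch of consuming the generated `.papi` descriptors directly, mirroring the typed `api.query.SubtensorModule.*` reads used throughout the tests in this change. The `ws://localhost:9944` endpoint and the already-generated `devnet` descriptors are assumptions; the e2e tests themselves go through the repo's `getDevnetApi` helper instead.

```typescript
// Minimal sketch (assumptions: localnet running on the default ws://localhost:9944,
// and the `devnet` descriptors already generated into .papi).
import { createClient } from "polkadot-api";
import { getWsProvider } from "polkadot-api/ws-provider/node";
import { devnet } from "@polkadot-api/descriptors";

async function main() {
    // Connect to the localnet node and bind the typed descriptors.
    const client = createClient(getWsProvider("ws://localhost:9944"));
    const api = client.getTypedApi(devnet);

    // Typed storage read, the same shape the e2e tests rely on.
    const totalNetworks = await api.query.SubtensorModule.TotalNetworks.getValue();
    console.log("TotalNetworks:", totalNetworks);

    client.destroy();
}

main().catch(console.error);
```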
diff --git a/evm-tests/src/contracts/stakeWrap.sol b/evm-tests/src/contracts/stakeWrap.sol index 78f3e9f999..c7d11d42a0 100644 --- a/evm-tests/src/contracts/stakeWrap.sol +++ b/evm-tests/src/contracts/stakeWrap.sol @@ -15,13 +15,32 @@ interface Staking { ) external; function addStake(bytes32 hotkey, uint256 amount, uint256 netuid) external; + + function removeStake( + bytes32 hotkey, + uint256 amount, + uint256 netuid + ) external; } contract StakeWrap { - constructor() {} + address public owner; + constructor() { + owner = msg.sender; + } + + modifier onlyOwner() { + require(msg.sender == owner, "Only owner can call this function"); + _; + } + receive() external payable {} - function stake(bytes32 hotkey, uint256 netuid, uint256 amount) external { + function stake( + bytes32 hotkey, + uint256 netuid, + uint256 amount + ) external onlyOwner { // can't call precompile like this way, the call never go to runtime precompile //Staking(ISTAKING_ADDRESS).addStake(hotkey, amount, netuid); @@ -41,7 +60,7 @@ contract StakeWrap { uint256 limitPrice, uint256 amount, bool allowPartial - ) external { + ) external onlyOwner { // can't call precompile like this way, the call never go to runtime precompile // Staking(ISTAKING_ADDRESS).addStakeLimit( // hotkey, @@ -62,4 +81,19 @@ contract StakeWrap { (bool success, ) = ISTAKING_ADDRESS.call{gas: gasleft()}(data); require(success, "addStakeLimit call failed"); } + + function removeStake( + bytes32 hotkey, + uint256 netuid, + uint256 amount + ) external onlyOwner { + bytes memory data = abi.encodeWithSelector( + Staking.removeStake.selector, + hotkey, + amount, + netuid + ); + (bool success, ) = ISTAKING_ADDRESS.call{gas: gasleft()}(data); + require(success, "removeStake call failed"); + } } diff --git a/evm-tests/src/contracts/stakeWrap.ts b/evm-tests/src/contracts/stakeWrap.ts index 16434addc8..07853470a1 100644 --- a/evm-tests/src/contracts/stakeWrap.ts +++ b/evm-tests/src/contracts/stakeWrap.ts @@ -4,6 +4,42 @@ export const abi = [ { "stateMutability": "nonpayable", "type": "constructor" }, + { + "inputs": [], + "name": "owner", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "hotkey", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "netuid", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "removeStake", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, { "inputs": [ { @@ -59,8 +95,12 @@ "outputs": [], "stateMutability": "nonpayable", "type": "function" + }, + { + "stateMutability": "payable", + "type": "receive" } ]; // compiled with 0.8.20 -export const bytecode = 
"608060405234801561000f575f80fd5b5061069e8061001d5f395ff3fe60806040526004361061002c575f3560e01c80632daedd521461003757806390b9d5341461005f57610033565b3661003357005b5f80fd5b348015610042575f80fd5b5061005d60048036038101906100589190610357565b610087565b005b34801561006a575f80fd5b50610085600480360381019061008091906103dc565b6101b7565b005b5f631fc9b14160e01b8483856040516024016100a593929190610471565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505090505f61080573ffffffffffffffffffffffffffffffffffffffff165a8360405161012d9190610512565b5f604051808303815f8787f1925050503d805f8114610167576040519150601f19603f3d011682016040523d82523d5f602084013e61016c565b606091505b50509050806101b0576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016101a790610582565b60405180910390fd5b5050505050565b5f635beb6b7460e01b86848685896040516024016101d99594939291906105af565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505090505f61080573ffffffffffffffffffffffffffffffffffffffff165a836040516102619190610512565b5f604051808303815f8787f1925050503d805f811461029b576040519150601f19603f3d011682016040523d82523d5f602084013e6102a0565b606091505b50509050806102e4576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016102db9061064a565b60405180910390fd5b50505050505050565b5f80fd5b5f819050919050565b610303816102f1565b811461030d575f80fd5b50565b5f8135905061031e816102fa565b92915050565b5f819050919050565b61033681610324565b8114610340575f80fd5b50565b5f813590506103518161032d565b92915050565b5f805f6060848603121561036e5761036d6102ed565b5b5f61037b86828701610310565b935050602061038c86828701610343565b925050604061039d86828701610343565b9150509250925092565b5f8115159050919050565b6103bb816103a7565b81146103c5575f80fd5b50565b5f813590506103d6816103b2565b92915050565b5f805f805f60a086880312156103f5576103f46102ed565b5b5f61040288828901610310565b955050602061041388828901610343565b945050604061042488828901610343565b935050606061043588828901610343565b9250506080610446888289016103c8565b9150509295509295909350565b61045c816102f1565b82525050565b61046b81610324565b82525050565b5f6060820190506104845f830186610453565b6104916020830185610462565b61049e6040830184610462565b949350505050565b5f81519050919050565b5f81905092915050565b5f5b838110156104d75780820151818401526020810190506104bc565b5f8484015250505050565b5f6104ec826104a6565b6104f681856104b0565b93506105068185602086016104ba565b80840191505092915050565b5f61051d82846104e2565b915081905092915050565b5f82825260208201905092915050565b7f6164645374616b652063616c6c206661696c65640000000000000000000000005f82015250565b5f61056c601483610528565b915061057782610538565b602082019050919050565b5f6020820190508181035f83015261059981610560565b9050919050565b6105a9816103a7565b82525050565b5f60a0820190506105c25f830188610453565b6105cf6020830187610462565b6105dc6040830186610462565b6105e960608301856105a0565b6105f66080830184610462565b9695505050505050565b7f6164645374616b654c696d69742063616c6c206661696c6564000000000000005f82015250565b5f610634601983610528565b915061063f82610600565b602082019050919050565b5f6020820190508181035f83015261066181610628565b905091905056fea264697066735822122083351bec20bd75de90a1b6e405922bedadf9ff260c02f34ef9dbb5ee1bda11cd64736f6c63430008140033" +export const bytecode = 
"6080604052348015600e575f5ffd5b50335f5f6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550610ad08061005b5f395ff3fe608060405260043610610042575f3560e01c80632daedd521461004d5780637d691e30146100755780638da5cb5b1461009d57806390b9d534146100c757610049565b3661004957005b5f5ffd5b348015610058575f5ffd5b50610073600480360381019061006e91906106bd565b6100ef565b005b348015610080575f5ffd5b5061009b600480360381019061009691906106bd565b6102ad565b005b3480156100a8575f5ffd5b506100b161046b565b6040516100be919061074c565b60405180910390f35b3480156100d2575f5ffd5b506100ed60048036038101906100e8919061079a565b61048f565b005b5f5f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161461017d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161017490610891565b60405180910390fd5b5f631fc9b14160e01b84838560405160240161019b939291906108cd565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505090505f61080573ffffffffffffffffffffffffffffffffffffffff165a836040516102239190610954565b5f604051808303815f8787f1925050503d805f811461025d576040519150601f19603f3d011682016040523d82523d5f602084013e610262565b606091505b50509050806102a6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161029d906109b4565b60405180910390fd5b5050505050565b5f5f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161461033b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161033290610891565b60405180910390fd5b5f637d691e3060e01b848385604051602401610359939291906108cd565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505090505f61080573ffffffffffffffffffffffffffffffffffffffff165a836040516103e19190610954565b5f604051808303815f8787f1925050503d805f811461041b576040519150601f19603f3d011682016040523d82523d5f602084013e610420565b606091505b5050905080610464576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161045b906109b4565b60405180910390fd5b5050505050565b5f5f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b5f5f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161461051d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161051490610891565b60405180910390fd5b5f635beb6b7460e01b868486858960405160240161053f9594939291906109e1565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505090505f61080573ffffffffffffffffffffffffffffffffffffffff165a836040516105c79190610954565b5f604051808303815f8787f1925050503d805f8114610601576040519150601f19603f3d011682016040523d82523d5f602084013e610606565b606091505b505090508061064a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161064190610a7c565b60405180910390fd5b50505050505050565b5f5ffd5b5f819050919050565b61066981610657565b8114610673575f5ffd5b50565b5f8135905061068481610660565b92915050565b5f819050919050565b6106
9c8161068a565b81146106a6575f5ffd5b50565b5f813590506106b781610693565b92915050565b5f5f5f606084860312156106d4576106d3610653565b5b5f6106e186828701610676565b93505060206106f2868287016106a9565b9250506040610703868287016106a9565b9150509250925092565b5f73ffffffffffffffffffffffffffffffffffffffff82169050919050565b5f6107368261070d565b9050919050565b6107468161072c565b82525050565b5f60208201905061075f5f83018461073d565b92915050565b5f8115159050919050565b61077981610765565b8114610783575f5ffd5b50565b5f8135905061079481610770565b92915050565b5f5f5f5f5f60a086880312156107b3576107b2610653565b5b5f6107c088828901610676565b95505060206107d1888289016106a9565b94505060406107e2888289016106a9565b93505060606107f3888289016106a9565b925050608061080488828901610786565b9150509295509295909350565b5f82825260208201905092915050565b7f4f6e6c79206f776e65722063616e2063616c6c20746869732066756e6374696f5f8201527f6e00000000000000000000000000000000000000000000000000000000000000602082015250565b5f61087b602183610811565b915061088682610821565b604082019050919050565b5f6020820190508181035f8301526108a88161086f565b9050919050565b6108b881610657565b82525050565b6108c78161068a565b82525050565b5f6060820190506108e05f8301866108af565b6108ed60208301856108be565b6108fa60408301846108be565b949350505050565b5f81519050919050565b5f81905092915050565b8281835e5f83830152505050565b5f61092e82610902565b610938818561090c565b9350610948818560208601610916565b80840191505092915050565b5f61095f8284610924565b915081905092915050565b7f6164645374616b652063616c6c206661696c65640000000000000000000000005f82015250565b5f61099e601483610811565b91506109a98261096a565b602082019050919050565b5f6020820190508181035f8301526109cb81610992565b9050919050565b6109db81610765565b82525050565b5f60a0820190506109f45f8301886108af565b610a0160208301876108be565b610a0e60408301866108be565b610a1b60608301856109d2565b610a2860808301846108be565b9695505050505050565b7f6164645374616b654c696d69742063616c6c206661696c6564000000000000005f82015250565b5f610a66601983610811565b9150610a7182610a32565b602082019050919050565b5f6020820190508181035f830152610a9381610a5a565b905091905056fea2646970667358221220f8ad692d7919fb10f08e5311c64a0aa705a4e665689967633c2ddade5398076664736f6c634300081e0033" diff --git a/evm-tests/src/subtensor.ts b/evm-tests/src/subtensor.ts index 9b351628d0..ab1508e4b4 100644 --- a/evm-tests/src/subtensor.ts +++ b/evm-tests/src/subtensor.ts @@ -7,7 +7,7 @@ import { convertH160ToSS58, convertPublicKeyToSs58, ethAddressToH160 } from './a import { tao } from './balance-math' import internal from "stream"; -// create a new subnet and return netuid +// create a new subnet and return netuid export async function addNewSubnetwork(api: TypedApi, hotkey: KeyPair, coldkey: KeyPair) { const alice = getAliceSigner() const totalNetworks = await api.query.SubtensorModule.TotalNetworks.getValue() @@ -377,4 +377,28 @@ export async function setTargetRegistrationsPerInterval( call: internal_tx.decodedCall, }); await waitForTransactionWithRetry(api, tx, alice); -} \ No newline at end of file +} + +// Disable admin freeze window and owner hyperparam rate limiting for tests +export async function disableAdminFreezeWindowAndOwnerHyperparamRateLimit(api: TypedApi) { + const alice = getAliceSigner() + + const currentAdminFreezeWindow = await api.query.SubtensorModule.AdminFreezeWindow.getValue() + if (currentAdminFreezeWindow !== 0) { + // Set AdminFreezeWindow to 0 + const setFreezeWindow = api.tx.AdminUtils.sudo_set_admin_freeze_window({ window: 0 }) + const sudoFreezeTx = api.tx.Sudo.sudo({ call: setFreezeWindow.decodedCall }) + await 
waitForTransactionWithRetry(api, sudoFreezeTx, alice) + } + + const currentOwnerHyperparamRateLimit = await api.query.SubtensorModule.OwnerHyperparamRateLimit.getValue() + if (currentOwnerHyperparamRateLimit !== 0) { + // Set OwnerHyperparamRateLimit to 0 + const setOwnerRateLimit = api.tx.AdminUtils.sudo_set_owner_hparam_rate_limit({ epochs: 0 }) + const sudoOwnerRateTx = api.tx.Sudo.sudo({ call: setOwnerRateLimit.decodedCall }) + await waitForTransactionWithRetry(api, sudoOwnerRateTx, alice) + } + + assert.equal(0, await api.query.SubtensorModule.AdminFreezeWindow.getValue()) + assert.equal(BigInt(0), await api.query.SubtensorModule.OwnerHyperparamRateLimit.getValue()) +} diff --git a/evm-tests/test/alpha.precompile.test.ts b/evm-tests/test/alpha.precompile.test.ts index 1ca3c755af..9c1a5daa8e 100644 --- a/evm-tests/test/alpha.precompile.test.ts +++ b/evm-tests/test/alpha.precompile.test.ts @@ -1,15 +1,14 @@ import * as assert from "assert"; -import { getAliceSigner, getDevnetApi, waitForTransactionCompletion, convertPublicKeyToMultiAddress, getRandomSubstrateKeypair, getSignerFromKeypair } from "../src/substrate" +import { getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate" import { getPublicClient } from "../src/utils"; -import { ETH_LOCAL_URL, SUB_LOCAL_URL } from "../src/config"; +import { ETH_LOCAL_URL } from "../src/config"; import { devnet } from "@polkadot-api/descriptors" import { PublicClient } from "viem"; -import { PolkadotSigner, TypedApi } from "polkadot-api"; +import { TypedApi } from "polkadot-api"; import { toViemAddress, convertPublicKeyToSs58 } from "../src/address-utils" import { IAlphaABI, IALPHA_ADDRESS } from "../src/contracts/alpha" -import { u64 } from "@polkadot-api/substrate-bindings"; - +import { forceSetBalanceToSs58Address, addNewSubnetwork, startCall } from "../src/subtensor"; describe("Test Alpha Precompile", () => { // init substrate part const hotkey = getRandomSubstrateKeypair(); @@ -18,9 +17,6 @@ describe("Test Alpha Precompile", () => { let api: TypedApi; - // sudo account alice as signer - let alice: PolkadotSigner; - // init other variable let subnetId = 0; @@ -28,50 +24,13 @@ describe("Test Alpha Precompile", () => { // init variables got from await and async publicClient = await getPublicClient(ETH_LOCAL_URL) api = await getDevnetApi() - alice = await getAliceSigner(); - - // Fund the hotkey account - { - const multiAddress = convertPublicKeyToMultiAddress(hotkey.publicKey) - const internalCall = api.tx.Balances.force_set_balance({ who: multiAddress, new_free: BigInt(1e12) }) - const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) - - await waitForTransactionCompletion(api, tx, alice) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); - } - - // Fund the coldkey account - { - const multiAddress = convertPublicKeyToMultiAddress(coldkey.publicKey) - const internalCall = api.tx.Balances.force_set_balance({ who: multiAddress, new_free: BigInt(1e12) }) - const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) - - await waitForTransactionCompletion(api, tx, alice) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); - } - - // Register a new subnet - const signer = getSignerFromKeypair(coldkey) - const registerNetworkTx = api.tx.SubtensorModule.register_network({ hotkey: convertPublicKeyToSs58(hotkey.publicKey) }) - await waitForTransactionCompletion(api, registerNetworkTx, signer) - .then(() => { }) - .catch((error) => { console.log(`transaction error 
${error}`) }); - - // Get the newly created subnet ID - let totalNetworks = await api.query.SubtensorModule.TotalNetworks.getValue() - assert.ok(totalNetworks > 1) - subnetId = totalNetworks - 1 - - // Register a neuron on the subnet if needed - let uid_count = await api.query.SubtensorModule.SubnetworkN.getValue(subnetId) - if (uid_count === 0) { - const tx = api.tx.SubtensorModule.burned_register({ hotkey: convertPublicKeyToSs58(hotkey.publicKey), netuid: subnetId }) - await waitForTransactionCompletion(api, tx, signer) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); - } + + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(hotkey.publicKey)) + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(coldkey.publicKey)) + + let netuid = await addNewSubnetwork(api, hotkey, coldkey) + await startCall(api, netuid, coldkey) + }) describe("Alpha Price Functions", () => { diff --git a/evm-tests/test/evm-uid.precompile.lookup.test.ts b/evm-tests/test/evm-uid.precompile.lookup.test.ts index f6e22ce032..6e702d612e 100644 --- a/evm-tests/test/evm-uid.precompile.lookup.test.ts +++ b/evm-tests/test/evm-uid.precompile.lookup.test.ts @@ -55,11 +55,12 @@ describe("Test the UID Lookup precompile", () => { const signature = await evmWallet.signMessage(concatenatedArray); const associateEvmKeyTx = api.tx.SubtensorModule.associate_evm_key({ netuid: netuid, + hotkey: convertPublicKeyToSs58(hotkey.publicKey), evm_key: convertToFixedSizeBinary(evmWallet.address, 20), block_number: BigInt(blockNumber), signature: convertToFixedSizeBinary(signature, 65) }); - const signer = getSignerFromKeypair(hotkey); + const signer = getSignerFromKeypair(coldkey); await waitForTransactionCompletion(api, associateEvmKeyTx, signer) .then(() => { }) .catch((error) => { console.log(`transaction error ${error}`) }); diff --git a/evm-tests/test/neuron.precompile.reveal-weights.test.ts b/evm-tests/test/neuron.precompile.reveal-weights.test.ts index 4ac63468db..99d608585d 100644 --- a/evm-tests/test/neuron.precompile.reveal-weights.test.ts +++ b/evm-tests/test/neuron.precompile.reveal-weights.test.ts @@ -14,6 +14,7 @@ import { forceSetBalanceToEthAddress, forceSetBalanceToSs58Address, addNewSubnetwork, setWeightsSetRateLimit, burnedRegister, setTempo, setCommitRevealWeightsInterval, startCall, + disableAdminFreezeWindowAndOwnerHyperparamRateLimit, } from "../src/subtensor" // hardcode some values for reveal hash @@ -70,6 +71,7 @@ describe("Test neuron precompile reveal weights", () => { await startCall(api, netuid, coldkey) console.log("test the case on subnet ", netuid) + await disableAdminFreezeWindowAndOwnerHyperparamRateLimit(api) await setWeightsSetRateLimit(api, netuid, BigInt(0)) @@ -164,4 +166,4 @@ describe("Test neuron precompile reveal weights", () => { assert.ok(weight[1] !== undefined) } }) -}); \ No newline at end of file +}); diff --git a/evm-tests/test/neuron.precompile.set-weights.test.ts b/evm-tests/test/neuron.precompile.set-weights.test.ts index 1c9f62e773..8ff9258664 100644 --- a/evm-tests/test/neuron.precompile.set-weights.test.ts +++ b/evm-tests/test/neuron.precompile.set-weights.test.ts @@ -10,7 +10,8 @@ import { generateRandomEthersWallet } from "../src/utils" import { forceSetBalanceToSs58Address, forceSetBalanceToEthAddress, addNewSubnetwork, burnedRegister, setCommitRevealWeightsEnabled, setWeightsSetRateLimit, - startCall + startCall, + disableAdminFreezeWindowAndOwnerHyperparamRateLimit } from "../src/subtensor" describe("Test neuron precompile 
contract, set weights function", () => { @@ -38,6 +39,7 @@ describe("Test neuron precompile contract, set weights function", () => { await burnedRegister(api, netuid, convertH160ToSS58(wallet.address), coldkey) const uid = await api.query.SubtensorModule.Uids.getValue(netuid, convertH160ToSS58(wallet.address)) assert.notEqual(uid, undefined) + await disableAdminFreezeWindowAndOwnerHyperparamRateLimit(api) // disable reveal and enable direct set weights await setCommitRevealWeightsEnabled(api, netuid, false) await setWeightsSetRateLimit(api, netuid, BigInt(0)) @@ -68,4 +70,4 @@ describe("Test neuron precompile contract, set weights function", () => { }); } }) -}); \ No newline at end of file +}); diff --git a/evm-tests/test/runtime.call.precompile.test.ts b/evm-tests/test/runtime.call.precompile.test.ts index 1b96c474a5..dd91ce7f79 100644 --- a/evm-tests/test/runtime.call.precompile.test.ts +++ b/evm-tests/test/runtime.call.precompile.test.ts @@ -1,26 +1,40 @@ import * as assert from "assert"; -import { getAliceSigner, getDevnetApi } from "../src/substrate" +import { getAliceSigner, getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate" import { generateRandomEthersWallet, getPublicClient } from "../src/utils"; import { IDISPATCH_ADDRESS, ISTORAGE_QUERY_ADDRESS, ETH_LOCAL_URL } from "../src/config"; import { devnet, MultiAddress } from "@polkadot-api/descriptors" -import { hexToNumber, PublicClient } from "viem"; -import { PolkadotSigner, TypedApi } from "polkadot-api"; +import { PublicClient } from "viem"; +import { PolkadotSigner, TypedApi, getTypedCodecs } from "polkadot-api"; import { convertPublicKeyToSs58 } from "../src/address-utils" -import { forceSetBalanceToEthAddress, setMaxChildkeyTake } from "../src/subtensor"; -import { xxhashAsU8a } from '@polkadot/util-crypto'; -import { u8aToHex } from '@polkadot/util'; +import { forceSetBalanceToEthAddress, setMaxChildkeyTake, burnedRegister, forceSetBalanceToSs58Address, addStake, setTxRateLimit, addNewSubnetwork, startCall, setTempo } from "../src/subtensor"; describe("Test the dispatch precompile", () => { let publicClient: PublicClient; const wallet1 = generateRandomEthersWallet(); let api: TypedApi let alice: PolkadotSigner; + const hotkey = getRandomSubstrateKeypair(); + const coldkey = getRandomSubstrateKeypair(); + let netuid: number; before(async () => { publicClient = await getPublicClient(ETH_LOCAL_URL) api = await getDevnetApi() alice = await getAliceSigner() await forceSetBalanceToEthAddress(api, wallet1.address) + + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(hotkey.publicKey)) + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(coldkey.publicKey)) + + + netuid = await addNewSubnetwork(api, hotkey, coldkey) + // set tempo big enough to avoid stake value updated with fast block feature + await setTempo(api, netuid, 10000) + await startCall(api, netuid, coldkey) + await setTxRateLimit(api, BigInt(0)) + + await burnedRegister(api, netuid, convertPublicKeyToSs58(hotkey.publicKey), coldkey) + await addStake(api, netuid, convertPublicKeyToSs58(hotkey.publicKey), BigInt(1_000_000_000), coldkey) }) it("Dispatch transfer call via precompile contract works correctly", async () => { @@ -49,13 +63,8 @@ describe("Test the dispatch precompile", () => { }) - it("Storage query call via precompile contract works correctly", async () => { - const palletPrefixBytes = xxhashAsU8a("SubtensorModule", 128); - const storageItemPrefixBytes = xxhashAsU8a("MaxChildkeyTake", 128); - const fullStorageKeyBytes = 
new Uint8Array([...palletPrefixBytes, ...storageItemPrefixBytes]); - // 0x658faa385070e074c85bf6b568cf0555dba018859cab7e989f77669457b394be - // key for max child key take - const fullStorageKeyHex = u8aToHex(fullStorageKeyBytes); + it("Value type storage query call via precompile contract works correctly", async () => { + const key = await api.query.SubtensorModule.MaxChildkeyTake.getKey(); let maxChildkeyTake = 257; await setMaxChildkeyTake(api, maxChildkeyTake) api.query.SubtensorModule.MaxChildkeyTake.getValue(); const rawCallResponse = await publicClient.call({ to: ISTORAGE_QUERY_ADDRESS, - data: fullStorageKeyHex, + data: key.toString() as `0x${string}`, + }) + const rawResultData = rawCallResponse.data ?? ""; + + const codec = await getTypedCodecs(devnet); + const maxChildkeyTakeCodec = codec.query.SubtensorModule.MaxChildkeyTake.value; + const maxChildkeyTakeFromContract = maxChildkeyTakeCodec.dec(rawResultData); + assert.equal(maxChildkeyTakeFromContract, maxChildkeyTake, "value should be 257") + }) + + it("Map type storage query call via precompile contract works correctly", async () => { + + const key = await api.query.SubtensorModule.Tempo.getKey(netuid); + + const tempoOnChain = await api.query.SubtensorModule.Tempo.getValue(netuid); + const rawCallResponse = await publicClient.call({ + to: ISTORAGE_QUERY_ADDRESS, + data: key.toString() as `0x${string}`, }) - const rawResultData = rawCallResponse.data; - if (rawResultData === undefined) { - throw new Error("rawResultData is undefined"); - } - let value = hexToNumber(rawResultData); - assert.equal(value, maxChildkeyTake, "value should be 257") + const rawResultData = rawCallResponse.data ?? ""; + + const codec = await getTypedCodecs(devnet); + const tempoValueCodec = codec.query.SubtensorModule.Tempo.value; + const decodedValue = tempoValueCodec.dec(rawResultData); + assert.equal(tempoOnChain, decodedValue, "value should be the same as on chain") + }) + + it("Double map type storage query call via precompile contract works correctly", async () => { + const key = await api.query.SubtensorModule.TotalHotkeyAlpha.getKey(convertPublicKeyToSs58(hotkey.publicKey), netuid); + const totalHotkeyAlphaOnChain = await api.query.SubtensorModule.TotalHotkeyAlpha.getValue(convertPublicKeyToSs58(hotkey.publicKey), netuid); + + const rawCallResponse = await publicClient.call({ + to: ISTORAGE_QUERY_ADDRESS, + data: key.toString() as `0x${string}`, + }) + const rawResultData = rawCallResponse.data ?? ""; + const codec = await getTypedCodecs(devnet); + const totalHotkeyAlphaValueCodec = codec.query.SubtensorModule.TotalHotkeyAlpha.value; + const decodedValue = totalHotkeyAlphaValueCodec.dec(rawResultData); + assert.equal(totalHotkeyAlphaOnChain, decodedValue, "value should be the same as on chain") + }) + + // Polkadot api can't decode the boolean type for now. + // it("Double map type storage query call via precompile contract works correctly", async () => { + // const key = await api.query.SubtensorModule.IsNetworkMember.getKey(convertPublicKeyToSs58(alice.publicKey), netuid); + + // const isNetworkMemberOnChain = await api.query.SubtensorModule.IsNetworkMember.getValue(convertPublicKeyToSs58(alice.publicKey), netuid); + // const rawCallResponse = await publicClient.call({ + // to: ISTORAGE_QUERY_ADDRESS, + // data: key.toString() as `0x${string}`, + // }) + + // const rawResultData = rawCallResponse.data ?? 
""; + // const codec = await getTypedCodecs(devnet); + // const isNetworkMemberValueCodec = codec.query.SubtensorModule.IsNetworkMember.value; + // const decodedValue = isNetworkMemberValueCodec.dec(rawResultData); + // assert.equal(isNetworkMemberOnChain, decodedValue, "value should be the same as on chain") + // }) + }); diff --git a/evm-tests/test/staking.precompile.reward.test.ts b/evm-tests/test/staking.precompile.reward.test.ts index 79ad977515..251fb41ea5 100644 --- a/evm-tests/test/staking.precompile.reward.test.ts +++ b/evm-tests/test/staking.precompile.reward.test.ts @@ -8,7 +8,8 @@ import { forceSetBalanceToSs58Address, addNewSubnetwork, burnedRegister, setTxRateLimit, setTempo, setWeightsSetRateLimit, setSubnetOwnerCut, setMaxAllowedUids, setMinDelegateTake, setActivityCutoff, addStake, setWeight, rootRegister, - startCall + startCall, + disableAdminFreezeWindowAndOwnerHyperparamRateLimit } from "../src/subtensor" describe("Test neuron precompile reward", () => { @@ -39,6 +40,7 @@ describe("Test neuron precompile reward", () => { await startCall(api, netuid, coldkey) console.log("test the case on subnet ", netuid) + await disableAdminFreezeWindowAndOwnerHyperparamRateLimit(api) await setTxRateLimit(api, BigInt(0)) await setTempo(api, root_netuid, root_tempo) diff --git a/evm-tests/test/staking.precompile.stake-get.test.ts b/evm-tests/test/staking.precompile.stake-get.test.ts index d9cc79aeab..4730e310d9 100644 --- a/evm-tests/test/staking.precompile.stake-get.test.ts +++ b/evm-tests/test/staking.precompile.stake-get.test.ts @@ -45,7 +45,7 @@ describe("Test staking precompile get methods", () => { await contract.getStake(hotkey.publicKey, coldkey.publicKey, netuid) ); - // validator returned as bigint now. + // validator returned as bigint now. 
const validators = await contract.getAlphaStakedValidators(hotkey.publicKey, netuid) diff --git a/evm-tests/test/staking.precompile.wrap.test.ts b/evm-tests/test/staking.precompile.wrap.test.ts index 01b605beb5..e4d666adf1 100644 --- a/evm-tests/test/staking.precompile.wrap.test.ts +++ b/evm-tests/test/staking.precompile.wrap.test.ts @@ -18,7 +18,6 @@ import { } from "../src/subtensor"; import { ethers } from "ethers"; import { generateRandomEthersWallet } from "../src/utils"; -import { log } from "console"; import { abi, bytecode } from "../src/contracts/stakeWrap"; @@ -47,14 +46,13 @@ describe("Test staking precompile add from deployed contract", () => { console.log("will test in subnet: ", netuid); }); - it("Staker add stake", async () => { + it("Staker add and remove stake", async () => { let netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1; const contractFactory = new ethers.ContractFactory(abi, bytecode, wallet1) const contract = await contractFactory.deploy() await contract.waitForDeployment() - // stake will remove the balance from contract, need transfer token to deployed contract const ethTransfer = { to: contract.target.toString(), @@ -64,9 +62,6 @@ describe("Test staking precompile add from deployed contract", () => { const txResponse = await wallet1.sendTransaction(ethTransfer) await txResponse.wait(); - const balance = await api.query.System.Account.getValue(convertH160ToSS58(contract.target.toString())) - console.log(" == balance is ", balance.data.free) - const deployedContract = new ethers.Contract( contract.target.toString(), abi, @@ -76,10 +71,17 @@ describe("Test staking precompile add from deployed contract", () => { const tx = await deployedContract.stake( hotkey.publicKey, netuid, - tao(2000), + tao(2), ); await tx.wait(); + const tx2 = await deployedContract.removeStake( + hotkey.publicKey, + netuid, + tao(1), + ); + await tx2.wait(); + }); it("Staker add stake limit", async () => { diff --git a/evm-tests/test/subnet.precompile.hyperparameter.test.ts b/evm-tests/test/subnet.precompile.hyperparameter.test.ts index b8a6f19075..e3b5708e50 100644 --- a/evm-tests/test/subnet.precompile.hyperparameter.test.ts +++ b/evm-tests/test/subnet.precompile.hyperparameter.test.ts @@ -7,7 +7,7 @@ import { convertPublicKeyToSs58 } from "../src/address-utils" import { generateRandomEthersWallet } from "../src/utils"; import { ISubnetABI, ISUBNET_ADDRESS } from "../src/contracts/subnet" import { ethers } from "ethers" -import { forceSetBalanceToEthAddress, forceSetBalanceToSs58Address } from "../src/subtensor" +import { disableAdminFreezeWindowAndOwnerHyperparamRateLimit, forceSetBalanceToEthAddress, forceSetBalanceToSs58Address } from "../src/subtensor" describe("Test the Subnet precompile contract", () => { // init eth part @@ -25,6 +25,8 @@ describe("Test the Subnet precompile contract", () => { await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(hotkey1.publicKey)) await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(hotkey2.publicKey)) await forceSetBalanceToEthAddress(api, wallet.address) + + await disableAdminFreezeWindowAndOwnerHyperparamRateLimit(api) }) it("Can register network without identity info", async () => { diff --git a/pallets/admin-utils/src/benchmarking.rs b/pallets/admin-utils/src/benchmarking.rs index c824a879a5..67fba62b10 100644 --- a/pallets/admin-utils/src/benchmarking.rs +++ b/pallets/admin-utils/src/benchmarking.rs @@ -11,6 +11,8 @@ use frame_benchmarking::v1::account; use frame_benchmarking::v2::*; use 
frame_support::BoundedVec; use frame_system::RawOrigin; +use pallet_subtensor::SubnetworkN; +use subtensor_runtime_common::NetUid; use super::*; @@ -20,6 +22,9 @@ mod benchmarks { #[benchmark] fn swap_authorities(a: Linear<0, 32>) { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); + let mut value: BoundedVec< ::AuthorityId, ::MaxAuthorities, @@ -39,6 +44,8 @@ mod benchmarks { #[benchmark] fn schedule_grandpa_change(a: Linear<0, 32>) { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); let next_authorities = (1..=a) .map(|idx| account("Authority", idx, 0u32)) .collect::>(); @@ -50,18 +57,24 @@ mod benchmarks { #[benchmark] fn sudo_set_default_take() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); #[extrinsic_call] _(RawOrigin::Root, 100u16/*default_take*/)/*sudo_set_default_take*/; } #[benchmark] fn sudo_set_serving_rate_limit() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); #[extrinsic_call] _(RawOrigin::Root, 1u16.into()/*netuid*/, 100u64/*serving_rate_limit*/)/*sudo_set_serving_rate_limit*/; } #[benchmark] fn sudo_set_max_difficulty() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -73,6 +86,8 @@ mod benchmarks { #[benchmark] fn sudo_set_min_difficulty() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -84,6 +99,8 @@ mod benchmarks { #[benchmark] fn sudo_set_weights_set_rate_limit() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -95,6 +112,8 @@ mod benchmarks { #[benchmark] fn sudo_set_weights_version_key() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -106,6 +125,8 @@ mod benchmarks { #[benchmark] fn sudo_set_bonds_moving_average() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -117,6 +138,8 @@ mod benchmarks { #[benchmark] fn sudo_set_bonds_penalty() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -128,6 +151,8 @@ mod benchmarks { #[benchmark] fn sudo_set_max_allowed_validators() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -139,6 +164,8 @@ mod benchmarks { #[benchmark] fn sudo_set_difficulty() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -150,6 +177,8 @@ mod benchmarks { #[benchmark] fn sudo_set_adjustment_interval() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -161,6 +190,8 @@ mod benchmarks { #[benchmark] fn 
sudo_set_target_registrations_per_interval() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -172,6 +203,8 @@ mod benchmarks { #[benchmark] fn sudo_set_activity_cutoff() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -183,6 +216,8 @@ mod benchmarks { #[benchmark] fn sudo_set_rho() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -194,6 +229,8 @@ mod benchmarks { #[benchmark] fn sudo_set_kappa() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*sudo_tempo*/ @@ -203,8 +240,23 @@ mod benchmarks { _(RawOrigin::Root, 1u16.into()/*netuid*/, 3u16/*kappa*/)/*set_kappa*/; } + #[benchmark] + fn sudo_set_min_allowed_uids() { + let netuid = NetUid::from(1); + pallet_subtensor::Pallet::::set_admin_freeze_window(0); + pallet_subtensor::Pallet::::init_new_network(netuid, 1u16 /*tempo*/); + + // Artificially set that some neurons are already registered + SubnetworkN::::set(netuid, 32); + + #[extrinsic_call] + _(RawOrigin::Root, netuid, 16u16/*min_allowed_uids*/)/*sudo_set_min_allowed_uids*/; + } + #[benchmark] fn sudo_set_max_allowed_uids() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -216,6 +268,8 @@ mod benchmarks { #[benchmark] fn sudo_set_min_allowed_weights() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -227,6 +281,8 @@ mod benchmarks { #[benchmark] fn sudo_set_immunity_period() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -238,6 +294,8 @@ mod benchmarks { #[benchmark] fn sudo_set_max_weight_limit() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -249,6 +307,8 @@ mod benchmarks { #[benchmark] fn sudo_set_max_registrations_per_block() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -260,6 +320,8 @@ mod benchmarks { #[benchmark] fn sudo_set_max_burn() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -271,6 +333,8 @@ mod benchmarks { #[benchmark] fn sudo_set_min_burn() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -282,6 +346,8 @@ mod benchmarks { #[benchmark] fn sudo_set_network_registration_allowed() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), 
/*netuid*/ 1u16, /*tempo*/ @@ -304,6 +370,8 @@ mod benchmarks { */ #[benchmark] fn sudo_set_tempo() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -315,6 +383,8 @@ mod benchmarks { #[benchmark] fn sudo_set_commit_reveal_weights_interval() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*sudo_tempo*/ @@ -326,6 +396,8 @@ mod benchmarks { #[benchmark] fn sudo_set_commit_reveal_weights_enabled() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*sudo_tempo*/ @@ -337,6 +409,8 @@ mod benchmarks { #[benchmark] fn sudo_set_commit_reveal_version() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*sudo_tempo*/ @@ -346,8 +420,24 @@ mod benchmarks { _(RawOrigin::Root, 5u16/*version*/)/*sudo_set_commit_reveal_version()*/; } + #[benchmark] + fn sudo_set_admin_freeze_window() { + #[extrinsic_call] + _(RawOrigin::Root, 5u16/*window*/)/*sudo_set_admin_freeze_window*/; + } + + #[benchmark] + fn sudo_set_owner_hparam_rate_limit() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); + #[extrinsic_call] + _(RawOrigin::Root, 2u16/*epochs*/)/*sudo_set_owner_hparam_rate_limit*/; + } + #[benchmark] fn sudo_set_owner_immune_neuron_limit() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*sudo_tempo*/ @@ -357,5 +447,17 @@ mod benchmarks { _(RawOrigin::Root, 1u16.into()/*netuid*/, 5u16/*immune_neurons*/)/*sudo_set_owner_immune_neuron_limit()*/; } + #[benchmark] + fn sudo_trim_to_max_allowed_uids() { + pallet_subtensor::Pallet::::set_admin_freeze_window(0); + pallet_subtensor::Pallet::::init_new_network( + 1u16.into(), /*netuid*/ + 1u16, /*sudo_tempo*/ + ); + + #[extrinsic_call] + _(RawOrigin::Root, 1u16.into()/*netuid*/, 256u16/*max_n*/)/*sudo_trim_to_max_allowed_uids()*/; + } + //impl_benchmark_test_suite!(AdminUtils, crate::mock::new_test_ext(), crate::mock::Test); } diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 2a785d7c88..61e4b8a49b 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -9,7 +9,7 @@ pub use pallet::*; // - we could use a type parameter for `AuthorityId`, but there is // no sense for this as GRANDPA's `AuthorityId` is not a parameter -- it's always the same use sp_consensus_grandpa::AuthorityList; -use sp_runtime::{DispatchResult, RuntimeAppPublic, traits::Member}; +use sp_runtime::{DispatchResult, RuntimeAppPublic, Vec, traits::Member}; mod benchmarking; @@ -22,16 +22,13 @@ pub mod pallet { use super::*; use frame_support::pallet_prelude::*; use frame_support::traits::tokens::Balance; - use frame_support::{ - dispatch::{DispatchResult, RawOrigin}, - pallet_prelude::StorageMap, - }; + use frame_support::{dispatch::DispatchResult, pallet_prelude::StorageMap}; use frame_system::pallet_prelude::*; use pallet_evm_chain_id::{self, ChainId}; - use pallet_subtensor::utils::rate_limiting::TransactionType; + use pallet_subtensor::utils::rate_limiting::{Hyperparameter, TransactionType}; use 
sp_runtime::BoundedVec; use substrate_fixed::types::I96F32; - use subtensor_runtime_common::{NetUid, TaoCurrency}; + use subtensor_runtime_common::{MechId, NetUid, TaoCurrency}; /// The main data structure of the module. #[pallet::pallet] @@ -109,6 +106,10 @@ pub mod pallet { NegativeSigmoidSteepness, /// Value not in allowed bounds. ValueNotInBounds, + /// The minimum allowed UIDs must be less than the current number of UIDs in the subnet. + MinAllowedUidsGreaterThanCurrentUids, + /// The minimum allowed UIDs must be less than the maximum allowed UIDs. + MinAllowedUidsGreaterThanMaxAllowedUids, } /// Enum for specifying the type of precompile operation. #[derive( @@ -208,18 +209,26 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the serving rate limit. #[pallet::call_index(3)] - #[pallet::weight(Weight::from_parts(6_522_000, 0) - .saturating_add(::DbWeight::get().reads(0_u64)) + #[pallet::weight(Weight::from_parts(22_980_000, 0) + .saturating_add(::DbWeight::get().reads(2_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_serving_rate_limit( origin: OriginFor, netuid: NetUid, serving_rate_limit: u64, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; - + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[Hyperparameter::ServingRateLimit.into()], + )?; pallet_subtensor::Pallet::::set_serving_rate_limit(netuid, serving_rate_limit); log::debug!("ServingRateLimitSet( serving_rate_limit: {serving_rate_limit:?} ) "); + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[Hyperparameter::ServingRateLimit.into()], + ); Ok(()) } @@ -227,15 +236,15 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the minimum difficulty. #[pallet::call_index(4)] - #[pallet::weight(Weight::from_parts(15_230_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(26_390_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_min_difficulty( origin: OriginFor, netuid: NetUid, min_difficulty: u64, ) -> DispatchResult { - ensure_root(origin)?; + pallet_subtensor::Pallet::::ensure_root_with_rate_limit(origin, netuid)?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -252,15 +261,19 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the maximum difficulty. 
#[pallet::call_index(5)] - #[pallet::weight(Weight::from_parts(15_700_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(26_990_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_max_difficulty( origin: OriginFor, netuid: NetUid, max_difficulty: u64, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[Hyperparameter::MaxDifficulty.into()], + )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -270,6 +283,11 @@ pub mod pallet { log::debug!( "MaxDifficultySet( netuid: {netuid:?} max_difficulty: {max_difficulty:?} ) " ); + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[Hyperparameter::MaxDifficulty.into()], + ); Ok(()) } @@ -277,42 +295,30 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the weights version key. #[pallet::call_index(6)] - #[pallet::weight(Weight::from_parts(17_460_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(26_220_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_weights_version_key( origin: OriginFor, netuid: NetUid, weights_version_key: u64, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin.clone(), netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin.clone(), + netuid, + &[TransactionType::SetWeightsVersionKey], + )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); - if let Ok(RawOrigin::Signed(who)) = origin.into() { - // SN Owner - // Ensure the origin passes the rate limit. - ensure!( - pallet_subtensor::Pallet::::passes_rate_limit_on_subnet( - &TransactionType::SetWeightsVersionKey, - &who, - netuid, - ), - pallet_subtensor::Error::::TxRateLimitExceeded - ); - - // Set last transaction block - let current_block = pallet_subtensor::Pallet::::get_current_block_as_u64(); - pallet_subtensor::Pallet::::set_last_transaction_block_on_subnet( - &who, - netuid, - &TransactionType::SetWeightsVersionKey, - current_block, - ); - } + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::SetWeightsVersionKey], + ); pallet_subtensor::Pallet::::set_weights_version_key(netuid, weights_version_key); log::debug!( @@ -388,13 +394,22 @@ pub mod pallet { netuid: NetUid, adjustment_alpha: u64, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[Hyperparameter::AdjustmentAlpha.into()], + )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); pallet_subtensor::Pallet::::set_adjustment_alpha(netuid, adjustment_alpha); + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[Hyperparameter::AdjustmentAlpha.into()], + ); log::debug!("AdjustmentAlphaSet( adjustment_alpha: {adjustment_alpha:?} ) "); Ok(()) } @@ -403,21 +418,30 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the adjustment beta. 
#[pallet::call_index(12)] - #[pallet::weight(Weight::from_parts(15_170_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(26_890_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_max_weight_limit( origin: OriginFor, netuid: NetUid, max_weight_limit: u16, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[Hyperparameter::MaxWeightLimit.into()], + )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); pallet_subtensor::Pallet::::set_max_weight_limit(netuid, max_weight_limit); + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[Hyperparameter::MaxWeightLimit.into()], + ); log::debug!( "MaxWeightLimitSet( netuid: {netuid:?} max_weight_limit: {max_weight_limit:?} ) " ); @@ -428,21 +452,30 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the immunity period. #[pallet::call_index(13)] - #[pallet::weight(Weight::from_parts(15_510_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(26_620_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_immunity_period( origin: OriginFor, netuid: NetUid, immunity_period: u16, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[Hyperparameter::ImmunityPeriod.into()], + )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); pallet_subtensor::Pallet::::set_immunity_period(netuid, immunity_period); + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[Hyperparameter::ImmunityPeriod.into()], + ); log::debug!( "ImmunityPeriodSet( netuid: {netuid:?} immunity_period: {immunity_period:?} ) " ); @@ -453,15 +486,19 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the minimum allowed weights. #[pallet::call_index(14)] - #[pallet::weight(Weight::from_parts(15_220_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(26_630_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_min_allowed_weights( origin: OriginFor, netuid: NetUid, min_allowed_weights: u16, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[Hyperparameter::MinAllowedWeights.into()], + )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -471,6 +508,11 @@ pub mod pallet { log::debug!( "MinAllowedWeightSet( netuid: {netuid:?} min_allowed_weights: {min_allowed_weights:?} ) " ); + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[Hyperparameter::MinAllowedWeights.into()], + ); Ok(()) } @@ -506,11 +548,15 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the kappa. 
#[pallet::call_index(16)] - #[pallet::weight(Weight::from_parts(16_740_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(26_210_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_kappa(origin: OriginFor, netuid: NetUid, kappa: u16) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[Hyperparameter::Kappa.into()], + )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -518,6 +564,11 @@ pub mod pallet { ); pallet_subtensor::Pallet::::set_kappa(netuid, kappa); log::debug!("KappaSet( netuid: {netuid:?} kappa: {kappa:?} ) "); + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[Hyperparameter::Kappa.into()], + ); Ok(()) } @@ -525,11 +576,15 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the rho. #[pallet::call_index(17)] - #[pallet::weight(Weight::from_parts(12_570_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(23_360_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_rho(origin: OriginFor, netuid: NetUid, rho: u16) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[Hyperparameter::Rho.into()], + )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -537,6 +592,11 @@ pub mod pallet { ); pallet_subtensor::Pallet::::set_rho(netuid, rho); log::debug!("RhoSet( netuid: {netuid:?} rho: {rho:?} ) "); + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[Hyperparameter::Rho.into()], + ); Ok(()) } @@ -544,15 +604,19 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the activity cutoff. 
#[pallet::call_index(18)] - #[pallet::weight(Weight::from_parts(17_510_000, 0) - .saturating_add(::DbWeight::get().reads(2_u64)) + #[pallet::weight(Weight::from_parts(28_720_000, 0) + .saturating_add(::DbWeight::get().reads(4_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_activity_cutoff( origin: OriginFor, netuid: NetUid, activity_cutoff: u16, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[Hyperparameter::ActivityCutoff.into()], + )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -568,6 +632,11 @@ pub mod pallet { log::debug!( "ActivityCutoffSet( netuid: {netuid:?} activity_cutoff: {activity_cutoff:?} ) " ); + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[Hyperparameter::ActivityCutoff.into()], + ); Ok(()) } @@ -611,7 +680,11 @@ pub mod pallet { netuid: NetUid, registration_allowed: bool, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[Hyperparameter::PowRegistrationAllowed.into()], + )?; pallet_subtensor::Pallet::::set_network_pow_registration_allowed( netuid, @@ -620,6 +693,11 @@ pub mod pallet { log::debug!( "NetworkPowRegistrationAllowed( registration_allowed: {registration_allowed:?} ) " ); + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[Hyperparameter::PowRegistrationAllowed.into()], + ); Ok(()) } @@ -627,15 +705,15 @@ pub mod pallet { /// It is only callable by the root account. /// The extrinsic will call the Subtensor pallet to set the target registrations per interval. #[pallet::call_index(21)] - #[pallet::weight(Weight::from_parts(15_320_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(25_860_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_target_registrations_per_interval( origin: OriginFor, netuid: NetUid, target_registrations_per_interval: u16, ) -> DispatchResult { - ensure_root(origin)?; + pallet_subtensor::Pallet::::ensure_root_with_rate_limit(origin, netuid)?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -655,15 +733,19 @@ pub mod pallet { /// It is only callable by root and subnet owner. /// The extrinsic will call the Subtensor pallet to set the minimum burn. 
#[pallet::call_index(22)] - #[pallet::weight(Weight::from_parts(18_870_000, 0) - .saturating_add(::DbWeight::get().reads(2_u64)) + #[pallet::weight(Weight::from_parts(29_970_000, 0) + .saturating_add(::DbWeight::get().reads(4_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_min_burn( origin: OriginFor, netuid: NetUid, min_burn: TaoCurrency, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin.clone(), netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[Hyperparameter::MinBurn.into()], + )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist @@ -679,6 +761,11 @@ pub mod pallet { ); pallet_subtensor::Pallet::::set_min_burn(netuid, min_burn); log::debug!("MinBurnSet( netuid: {netuid:?} min_burn: {min_burn:?} ) "); + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[Hyperparameter::MinBurn.into()], + ); Ok(()) } @@ -686,15 +773,19 @@ pub mod pallet { /// It is only callable by root and subnet owner. /// The extrinsic will call the Subtensor pallet to set the maximum burn. #[pallet::call_index(23)] - #[pallet::weight(Weight::from_parts(19_420_000, 0) - .saturating_add(::DbWeight::get().reads(2_u64)) + #[pallet::weight(Weight::from_parts(30_510_000, 0) + .saturating_add(::DbWeight::get().reads(4_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_max_burn( origin: OriginFor, netuid: NetUid, max_burn: TaoCurrency, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin.clone(), netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[Hyperparameter::MaxBurn.into()], + )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist @@ -710,6 +801,11 @@ pub mod pallet { ); pallet_subtensor::Pallet::::set_max_burn(netuid, max_burn); log::debug!("MaxBurnSet( netuid: {netuid:?} max_burn: {max_burn:?} ) "); + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[Hyperparameter::MaxBurn.into()], + ); Ok(()) } @@ -717,15 +813,15 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the difficulty. #[pallet::call_index(24)] - #[pallet::weight(Weight::from_parts(15_650_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(26_230_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_difficulty( origin: OriginFor, netuid: NetUid, difficulty: u64, ) -> DispatchResult { - ensure_root(origin)?; + pallet_subtensor::Pallet::::ensure_root_with_rate_limit(origin, netuid)?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist @@ -739,15 +835,15 @@ pub mod pallet { /// It is only callable by the root account. /// The extrinsic will call the Subtensor pallet to set the maximum allowed validators. 
#[pallet::call_index(25)] - #[pallet::weight(Weight::from_parts(19_300_000, 0) - .saturating_add(::DbWeight::get().reads(2_u64)) + #[pallet::weight(Weight::from_parts(30_930_000, 0) + .saturating_add(::DbWeight::get().reads(4_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_max_allowed_validators( origin: OriginFor, netuid: NetUid, max_allowed_validators: u16, ) -> DispatchResult { - ensure_root(origin)?; + pallet_subtensor::Pallet::::ensure_root_with_rate_limit(origin, netuid)?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist @@ -772,17 +868,20 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the bonds moving average. #[pallet::call_index(26)] - #[pallet::weight(Weight::from_parts(15_140_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(26_270_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_bonds_moving_average( origin: OriginFor, netuid: NetUid, bonds_moving_average: u64, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin.clone(), netuid)?; - - if pallet_subtensor::Pallet::::ensure_subnet_owner(origin, netuid).is_ok() { + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[Hyperparameter::BondsMovingAverage.into()], + )?; + if maybe_owner.is_some() { ensure!( bonds_moving_average <= 975000, Error::::BondsMovingAverageMaxReached @@ -797,6 +896,11 @@ pub mod pallet { log::debug!( "BondsMovingAverageSet( netuid: {netuid:?} bonds_moving_average: {bonds_moving_average:?} ) " ); + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[Hyperparameter::BondsMovingAverage.into()], + ); Ok(()) } @@ -804,15 +908,19 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the bonds penalty. #[pallet::call_index(60)] - #[pallet::weight(Weight::from_parts(16_220_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(26_890_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_bonds_penalty( origin: OriginFor, netuid: NetUid, bonds_penalty: u16, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[Hyperparameter::BondsPenalty.into()], + )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -820,6 +928,11 @@ pub mod pallet { ); pallet_subtensor::Pallet::::set_bonds_penalty(netuid, bonds_penalty); log::debug!("BondsPenalty( netuid: {netuid:?} bonds_penalty: {bonds_penalty:?} ) "); + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[Hyperparameter::BondsPenalty.into()], + ); Ok(()) } @@ -827,15 +940,15 @@ pub mod pallet { /// It is only callable by the root account. /// The extrinsic will call the Subtensor pallet to set the maximum registrations per block. 
#[pallet::call_index(27)] - #[pallet::weight(Weight::from_parts(15_080_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(26_970_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_max_registrations_per_block( origin: OriginFor, netuid: NetUid, max_registrations_per_block: u16, ) -> DispatchResult { - ensure_root(origin)?; + pallet_subtensor::Pallet::::ensure_root_with_rate_limit(origin, netuid)?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -895,11 +1008,11 @@ pub mod pallet { /// It is only callable by the root account. /// The extrinsic will call the Subtensor pallet to set the tempo. #[pallet::call_index(30)] - #[pallet::weight(Weight::from_parts(15_180_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(25_790_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_tempo(origin: OriginFor, netuid: NetUid, tempo: u16) -> DispatchResult { - ensure_root(origin)?; + pallet_subtensor::Pallet::::ensure_root_with_rate_limit(origin, netuid)?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist @@ -971,6 +1084,23 @@ pub mod pallet { Ok(()) } + /// The extrinsic sets the subnet limit for the network. + /// It is only callable by the root account. + /// The extrinsic will call the Subtensor pallet to set the subnet limit. + #[pallet::call_index(37)] + #[pallet::weight(( + Weight::from_parts(14_000_000, 0) + .saturating_add(::DbWeight::get().writes(1)), + DispatchClass::Operational, + Pays::No + ))] + pub fn sudo_set_subnet_limit(origin: OriginFor, max_subnets: u16) -> DispatchResult { + ensure_root(origin)?; + pallet_subtensor::Pallet::::set_max_subnets(max_subnets); + log::debug!("MaxSubnets ( max_subnets: {max_subnets:?} ) "); + Ok(()) + } + /// The extrinsic sets the lock reduction interval for the network. /// It is only callable by the root account. /// The extrinsic will call the Subtensor pallet to set the lock reduction interval. @@ -1079,15 +1209,19 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the value. 
#[pallet::call_index(49)] - #[pallet::weight(Weight::from_parts(15_150_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(26_730_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_commit_reveal_weights_enabled( origin: OriginFor, netuid: NetUid, enabled: bool, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[Hyperparameter::CommitRevealEnabled.into()], + )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -1096,6 +1230,11 @@ pub mod pallet { pallet_subtensor::Pallet::::set_commit_reveal_weights_enabled(netuid, enabled); log::debug!("ToggleSetWeightsCommitReveal( netuid: {netuid:?} ) "); + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[Hyperparameter::CommitRevealEnabled.into()], + ); Ok(()) } @@ -1115,9 +1254,18 @@ pub mod pallet { netuid: NetUid, enabled: bool, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[Hyperparameter::LiquidAlphaEnabled.into()], + )?; pallet_subtensor::Pallet::::set_liquid_alpha_enabled(netuid, enabled); log::debug!("LiquidAlphaEnableToggled( netuid: {netuid:?}, Enabled: {enabled:?} ) "); + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[Hyperparameter::LiquidAlphaEnabled.into()], + ); Ok(()) } @@ -1130,10 +1278,22 @@ pub mod pallet { alpha_low: u16, alpha_high: u16, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin.clone(), netuid)?; - pallet_subtensor::Pallet::::do_set_alpha_values( + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin.clone(), + netuid, + &[Hyperparameter::AlphaValues.into()], + )?; + let res = pallet_subtensor::Pallet::::do_set_alpha_values( origin, netuid, alpha_low, alpha_high, - ) + ); + if res.is_ok() { + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[Hyperparameter::AlphaValues.into()], + ); + } + res } /// Sets the duration of the coldkey swap schedule. @@ -1217,15 +1377,19 @@ pub mod pallet { /// # Weight /// Weight is handled by the `#[pallet::weight]` attribute. 
#[pallet::call_index(57)] - #[pallet::weight(Weight::from_parts(19_320_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(26_950_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_commit_reveal_weights_interval( origin: OriginFor, netuid: NetUid, interval: u64, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[Hyperparameter::WeightCommitInterval.into()], + )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -1235,6 +1399,11 @@ pub mod pallet { log::debug!("SetWeightCommitInterval( netuid: {netuid:?}, interval: {interval:?} ) "); pallet_subtensor::Pallet::::set_reveal_period(netuid, interval)?; + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[Hyperparameter::WeightCommitInterval.into()], + ); Ok(()) } @@ -1308,8 +1477,52 @@ pub mod pallet { netuid: NetUid, toggle: bool, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; - pallet_subtensor::Pallet::::toggle_transfer(netuid, toggle) + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[Hyperparameter::TransferEnabled.into()], + )?; + let res = pallet_subtensor::Pallet::::toggle_transfer(netuid, toggle); + if res.is_ok() { + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[Hyperparameter::TransferEnabled.into()], + ); + } + res + } + + /// Set the behaviour of the "burn" UID(s) for a given subnet. + /// If set to `Burn`, the miner emission sent to the burn UID(s) will be burned. + /// If set to `Recycle`, the miner emission sent to the burn UID(s) will be recycled. + /// + /// # Parameters + /// - `origin`: The origin of the call, which must be the root account or subnet owner. + /// - `netuid`: The unique identifier for the subnet. + /// - `recycle_or_burn`: The desired behaviour of the "burn" UID(s) for the subnet. + /// + #[pallet::call_index(80)] + #[pallet::weight((1_000_000, DispatchClass::Normal, Pays::Yes))] // TODO: add proper weights + pub fn sudo_set_recycle_or_burn( + origin: OriginFor, + netuid: NetUid, + recycle_or_burn: pallet_subtensor::RecycleOrBurnEnum, + ) -> DispatchResult { + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[Hyperparameter::RecycleOrBurn.into()], + )?; + + pallet_subtensor::Pallet::::set_recycle_or_burn(netuid, recycle_or_burn); + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[Hyperparameter::RecycleOrBurn.into()], + ); + + Ok(()) } /// Toggles the enablement of an EVM precompile. 
@@ -1438,7 +1651,11 @@ pub mod pallet { netuid: NetUid, steepness: i16, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin.clone(), netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin.clone(), + netuid, + &[Hyperparameter::AlphaSigmoidSteepness.into()], + )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -1454,6 +1671,11 @@ pub mod pallet { pallet_subtensor::Pallet::::set_alpha_sigmoid_steepness(netuid, steepness); log::debug!("AlphaSigmoidSteepnessSet( netuid: {netuid:?}, steepness: {steepness:?} )"); + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[Hyperparameter::AlphaSigmoidSteepness.into()], + ); Ok(()) } @@ -1473,11 +1695,20 @@ pub mod pallet { netuid: NetUid, enabled: bool, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[Hyperparameter::Yuma3Enabled.into()], + )?; pallet_subtensor::Pallet::::set_yuma3_enabled(netuid, enabled); Self::deposit_event(Event::Yuma3EnableToggled { netuid, enabled }); log::debug!("Yuma3EnableToggled( netuid: {netuid:?}, Enabled: {enabled:?} ) "); + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[Hyperparameter::Yuma3Enabled.into()], + ); Ok(()) } @@ -1497,11 +1728,20 @@ pub mod pallet { netuid: NetUid, enabled: bool, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[Hyperparameter::BondsResetEnabled.into()], + )?; pallet_subtensor::Pallet::::set_bonds_reset(netuid, enabled); Self::deposit_event(Event::BondsResetToggled { netuid, enabled }); log::debug!("BondsResetToggled( netuid: {netuid:?} bonds_reset: {enabled:?} ) "); + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[Hyperparameter::BondsResetEnabled.into()], + ); Ok(()) } @@ -1564,7 +1804,7 @@ pub mod pallet { netuid: NetUid, subtoken_enabled: bool, ) -> DispatchResult { - ensure_root(origin)?; + pallet_subtensor::Pallet::::ensure_root_with_rate_limit(origin, netuid)?; pallet_subtensor::SubtokenEnabled::::set(netuid, subtoken_enabled); log::debug!( @@ -1593,16 +1833,25 @@ pub mod pallet { /// Sets the number of immune owner neurons #[pallet::call_index(72)] - #[pallet::weight(Weight::from_parts(4_639_000, 0) - .saturating_add(::DbWeight::get().reads(0_u64)) + #[pallet::weight(Weight::from_parts(18_020_000, 0) + .saturating_add(::DbWeight::get().reads(2_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_owner_immune_neuron_limit( origin: OriginFor, netuid: NetUid, immune_neurons: u16, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[Hyperparameter::ImmuneNeuronLimit.into()], + )?; pallet_subtensor::Pallet::::set_owner_immune_neuron_limit(netuid, immune_neurons)?; + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[Hyperparameter::ImmuneNeuronLimit.into()], + ); Ok(()) } @@ -1611,14 +1860,165 @@ pub mod pallet { /// The extrinsic will call the Subtensor pallet to set the childkey burn. 
#[pallet::call_index(73)] #[pallet::weight(Weight::from_parts(15_650_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) - .saturating_add(::DbWeight::get().writes(1_u64)))] + .saturating_add(::DbWeight::get().reads(1_u64)) + .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_ck_burn(origin: OriginFor, burn: u64) -> DispatchResult { ensure_root(origin)?; pallet_subtensor::Pallet::::set_ck_burn(burn); log::debug!("CKBurnSet( burn: {burn:?} ) "); Ok(()) } + + /// Sets the admin freeze window length (in blocks) at the end of a tempo. + /// Only callable by root. + #[pallet::call_index(74)] + #[pallet::weight(( + Weight::from_parts(5_771_000, 0) + .saturating_add(::DbWeight::get().reads(0_u64)) + .saturating_add(::DbWeight::get().writes(1_u64)), + DispatchClass::Operational + ))] + pub fn sudo_set_admin_freeze_window(origin: OriginFor, window: u16) -> DispatchResult { + ensure_root(origin)?; + pallet_subtensor::Pallet::::set_admin_freeze_window(window); + log::debug!("AdminFreezeWindowSet( window: {window:?} ) "); + Ok(()) + } + + /// Sets the owner hyperparameter rate limit in epochs (global multiplier). + /// Only callable by root. + #[pallet::call_index(75)] + #[pallet::weight(( + Weight::from_parts(5_701_000, 0) + .saturating_add(::DbWeight::get().reads(0_u64)) + .saturating_add(::DbWeight::get().writes(1_u64)), + DispatchClass::Operational + ))] + pub fn sudo_set_owner_hparam_rate_limit( + origin: OriginFor, + epochs: u16, + ) -> DispatchResult { + ensure_root(origin)?; + pallet_subtensor::Pallet::::set_owner_hyperparam_rate_limit(epochs); + log::debug!("OwnerHyperparamRateLimitSet( epochs: {epochs:?} ) "); + Ok(()) + } + + /// Sets the desired number of mechanisms in a subnet + #[pallet::call_index(76)] + #[pallet::weight(Weight::from_parts(15_000_000, 0) + .saturating_add(::DbWeight::get().reads(1_u64)) + .saturating_add(::DbWeight::get().writes(1_u64)))] + pub fn sudo_set_mechanism_count( + origin: OriginFor, + netuid: NetUid, + mechanism_count: MechId, + ) -> DispatchResult { + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[TransactionType::MechanismCountUpdate], + )?; + + pallet_subtensor::Pallet::::do_set_mechanism_count(netuid, mechanism_count)?; + + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::MechanismCountUpdate], + ); + Ok(()) + } + + /// Sets the emission split between mechanisms in a subnet + #[pallet::call_index(77)] + #[pallet::weight(Weight::from_parts(15_000_000, 0) + .saturating_add(::DbWeight::get().reads(1_u64)) + .saturating_add(::DbWeight::get().writes(1_u64)))] + pub fn sudo_set_mechanism_emission_split( + origin: OriginFor, + netuid: NetUid, + maybe_split: Option>, + ) -> DispatchResult { + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[TransactionType::MechanismEmission], + )?; + + pallet_subtensor::Pallet::::do_set_emission_split(netuid, maybe_split)?; + + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::MechanismEmission], + ); + Ok(()) + } + + /// Trims the maximum number of UIDs for a subnet. + /// + /// The trimming is done by sorting the UIDs by emission descending and then trimming + /// the lowest emitters while preserving temporally and owner immune UIDs. The UIDs are + /// then compressed to the left and storage is migrated to the new compressed UIDs. 
+ #[pallet::call_index(78)] + #[pallet::weight(Weight::from_parts(32_880_000, 0) + .saturating_add(::DbWeight::get().reads(6_u64)) + .saturating_add(::DbWeight::get().writes(1_u64)))] + pub fn sudo_trim_to_max_allowed_uids( + origin: OriginFor, + netuid: NetUid, + max_n: u16, + ) -> DispatchResult { + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin.clone(), + netuid, + &[TransactionType::MaxUidsTrimming], + )?; + + pallet_subtensor::Pallet::::trim_to_max_allowed_uids(netuid, max_n)?; + + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::MaxUidsTrimming], + ); + Ok(()) + } + + /// The extrinsic sets the minimum allowed UIDs for a subnet. + /// It is only callable by the root account. + #[pallet::call_index(79)] + #[pallet::weight(Weight::from_parts(31_550_000, 0) + .saturating_add(::DbWeight::get().reads(5_u64)) + .saturating_add(::DbWeight::get().writes(1_u64)))] + pub fn sudo_set_min_allowed_uids( + origin: OriginFor, + netuid: NetUid, + min_allowed_uids: u16, + ) -> DispatchResult { + pallet_subtensor::Pallet::::ensure_root_with_rate_limit(origin, netuid)?; + + ensure!( + pallet_subtensor::Pallet::::if_subnet_exist(netuid), + Error::::SubnetDoesNotExist + ); + ensure!( + min_allowed_uids < pallet_subtensor::Pallet::::get_max_allowed_uids(netuid), + Error::::MinAllowedUidsGreaterThanMaxAllowedUids + ); + ensure!( + min_allowed_uids < pallet_subtensor::Pallet::::get_subnetwork_n(netuid), + Error::::MinAllowedUidsGreaterThanCurrentUids + ); + + pallet_subtensor::Pallet::::set_min_allowed_uids(netuid, min_allowed_uids); + + log::debug!( + "MinAllowedUidsSet( netuid: {netuid:?} min_allowed_uids: {min_allowed_uids:?} ) " + ); + Ok(()) + } } } diff --git a/pallets/admin-utils/src/tests/mock.rs b/pallets/admin-utils/src/tests/mock.rs index 7bba7929dd..2a8c559c23 100644 --- a/pallets/admin-utils/src/tests/mock.rs +++ b/pallets/admin-utils/src/tests/mock.rs @@ -13,7 +13,7 @@ use sp_consensus_grandpa::AuthorityList as GrandpaAuthorityList; use sp_core::U256; use sp_core::{ConstU64, H256}; use sp_runtime::{ - BuildStorage, KeyTypeId, Perbill, + BuildStorage, KeyTypeId, Perbill, Percent, testing::TestXt, traits::{BlakeTwo256, ConstU32, IdentityLookup}, }; @@ -91,7 +91,8 @@ parameter_types! { pub const InitialTempo: u16 = 0; pub const SelfOwnership: u64 = 2; pub const InitialImmunityPeriod: u16 = 2; - pub const InitialMaxAllowedUids: u16 = 2; + pub const InitialMinAllowedUids: u16 = 2; + pub const InitialMaxAllowedUids: u16 = 4; pub const InitialBondsMovingAverage: u64 = 900_000; pub const InitialBondsPenalty: u16 = u16::MAX; pub const InitialBondsResetOn: bool = false; @@ -128,8 +129,7 @@ parameter_types! { pub const InitialMaxDifficulty: u64 = u64::MAX; pub const InitialRAORecycledForRegistration: u64 = 0; pub const InitialSenateRequiredStakePercentage: u64 = 2; // 2 percent of total stake - pub const InitialNetworkImmunityPeriod: u64 = 7200 * 7; - pub const InitialNetworkMinAllowedUids: u16 = 128; + pub const InitialNetworkImmunityPeriod: u64 = 1_296_000; pub const InitialNetworkMinLockCost: u64 = 100_000_000_000; pub const InitialSubnetOwnerCut: u16 = 0; // 0%. 100% of rewards go to validators + miners. pub const InitialNetworkLockReductionInterval: u64 = 2; // 2 blocks. @@ -151,6 +151,7 @@ parameter_types! 
{ pub const InitialKeySwapOnSubnetCost: u64 = 10_000_000; pub const HotkeySwapOnSubnetInterval: u64 = 7 * 24 * 60 * 60 / 12; // 7 days pub const LeaseDividendsDistributionInterval: u32 = 100; // 100 blocks + pub const MaxImmuneUidsPercentage: Percent = Percent::from_percent(80); } impl pallet_subtensor::Config for Test { @@ -174,6 +175,7 @@ impl pallet_subtensor::Config for Test { type InitialRho = InitialRho; type InitialAlphaSigmoidSteepness = InitialAlphaSigmoidSteepness; type InitialKappa = InitialKappa; + type InitialMinAllowedUids = InitialMinAllowedUids; type InitialMaxAllowedUids = InitialMaxAllowedUids; type InitialValidatorPruneLen = InitialValidatorPruneLen; type InitialScalingLawPower = InitialScalingLawPower; @@ -205,7 +207,6 @@ impl pallet_subtensor::Config for Test { type InitialRAORecycledForRegistration = InitialRAORecycledForRegistration; type InitialSenateRequiredStakePercentage = InitialSenateRequiredStakePercentage; type InitialNetworkImmunityPeriod = InitialNetworkImmunityPeriod; - type InitialNetworkMinAllowedUids = InitialNetworkMinAllowedUids; type InitialNetworkMinLockCost = InitialNetworkMinLockCost; type InitialSubnetOwnerCut = InitialSubnetOwnerCut; type InitialNetworkLockReductionInterval = InitialNetworkLockReductionInterval; @@ -228,6 +229,8 @@ impl pallet_subtensor::Config for Test { type ProxyInterface = (); type LeaseDividendsDistributionInterval = LeaseDividendsDistributionInterval; type GetCommitments = (); + type MaxImmuneUidsPercentage = MaxImmuneUidsPercentage; + type CommitmentsInterface = CommitmentsI; } parameter_types! { @@ -354,6 +357,11 @@ impl PrivilegeCmp for OriginPrivilegeCmp { } } +pub struct CommitmentsI; +impl pallet_subtensor::CommitmentsInterface for CommitmentsI { + fn purge_netuid(_netuid: NetUid) {} +} + pub struct GrandpaInterfaceImpl; impl crate::GrandpaInterface for GrandpaInterfaceImpl { fn schedule_change( @@ -481,7 +489,10 @@ pub fn new_test_ext() -> sp_io::TestExternalities { .build_storage() .unwrap(); let mut ext = sp_io::TestExternalities::new(t); - ext.execute_with(|| System::set_block_number(1)); + ext.execute_with(|| { + System::set_block_number(1); + SubtensorModule::set_admin_freeze_window(1); + }); ext } diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index ef36bb7856..b6cafb71b7 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -5,13 +5,16 @@ use frame_support::{ traits::Hooks, }; use frame_system::Config; -use pallet_subtensor::{Error as SubtensorError, SubnetOwner, Tempo, WeightsVersionKeyRateLimit}; +use pallet_subtensor::{ + Error as SubtensorError, MaxRegistrationsPerBlock, Rank, SubnetOwner, + TargetRegistrationsPerInterval, Tempo, WeightsVersionKeyRateLimit, *, +}; // use pallet_subtensor::{migrations, Event}; -use pallet_subtensor::Event; +use pallet_subtensor::{Event, utils::rate_limiting::TransactionType}; use sp_consensus_grandpa::AuthorityId as GrandpaId; use sp_core::{Get, Pair, U256, ed25519}; use substrate_fixed::types::I96F32; -use subtensor_runtime_common::{Currency, NetUid, TaoCurrency}; +use subtensor_runtime_common::{Currency, MechId, NetUid, TaoCurrency}; use crate::Error; use crate::pallet::PrecompileEnable; @@ -175,7 +178,7 @@ fn test_sudo_set_weights_version_key_rate_limit() { SubnetOwner::::insert(netuid, sn_owner); let rate_limit = WeightsVersionKeyRateLimit::::get(); - let tempo: u16 = Tempo::::get(netuid); + let tempo = Tempo::::get(netuid); let rate_limit_period = rate_limit * (tempo as u64); @@ 
-188,11 +191,10 @@ fn test_sudo_set_weights_version_key_rate_limit() { // Try to set again with // Assert rate limit not passed - assert!(!SubtensorModule::passes_rate_limit_on_subnet( - &pallet_subtensor::utils::rate_limiting::TransactionType::SetWeightsVersionKey, - &sn_owner, - netuid - )); + assert!( + !TransactionType::SetWeightsVersionKey + .passes_rate_limit_on_subnet::(&sn_owner, netuid) + ); // Try transaction assert_noop!( @@ -205,12 +207,11 @@ fn test_sudo_set_weights_version_key_rate_limit() { ); // Wait for rate limit to pass - run_to_block(rate_limit_period + 2); - assert!(SubtensorModule::passes_rate_limit_on_subnet( - &pallet_subtensor::utils::rate_limiting::TransactionType::SetWeightsVersionKey, - &sn_owner, - netuid - )); + run_to_block(rate_limit_period + 1); + assert!( + TransactionType::SetWeightsVersionKey + .passes_rate_limit_on_subnet::(&sn_owner, netuid) + ); // Try transaction assert_ok!(AdminUtils::sudo_set_weights_version_key( @@ -827,7 +828,7 @@ fn test_sudo_set_bonds_moving_average() { let netuid = NetUid::from(1); let to_be_set: u64 = 10; add_network(netuid, 10); - let init_value: u64 = SubtensorModule::get_bonds_moving_average(netuid); + let init_value: u64 = SubtensorModule::get_bonds_moving_average(netuid.into()); assert_eq!( AdminUtils::sudo_set_bonds_moving_average( <::RuntimeOrigin>::signed(U256::from(1)), @@ -845,7 +846,7 @@ fn test_sudo_set_bonds_moving_average() { Err(Error::::SubnetDoesNotExist.into()) ); assert_eq!( - SubtensorModule::get_bonds_moving_average(netuid), + SubtensorModule::get_bonds_moving_average(netuid.into()), init_value ); assert_ok!(AdminUtils::sudo_set_bonds_moving_average( @@ -853,7 +854,10 @@ fn test_sudo_set_bonds_moving_average() { netuid, to_be_set )); - assert_eq!(SubtensorModule::get_bonds_moving_average(netuid), to_be_set); + assert_eq!( + SubtensorModule::get_bonds_moving_average(netuid.into()), + to_be_set + ); }); } @@ -1952,6 +1956,80 @@ fn test_sudo_set_commit_reveal_version() { }); } +#[test] +fn test_sudo_set_admin_freeze_window_and_rate() { + new_test_ext().execute_with(|| { + // Non-root fails + assert_eq!( + AdminUtils::sudo_set_admin_freeze_window( + <::RuntimeOrigin>::signed(U256::from(1)), + 7 + ), + Err(DispatchError::BadOrigin) + ); + // Root succeeds + assert_ok!(AdminUtils::sudo_set_admin_freeze_window( + <::RuntimeOrigin>::root(), + 7 + )); + assert_eq!(pallet_subtensor::AdminFreezeWindow::::get(), 7); + + // Owner hyperparam tempos setter + assert_eq!( + AdminUtils::sudo_set_owner_hparam_rate_limit( + <::RuntimeOrigin>::signed(U256::from(1)), + 5 + ), + Err(DispatchError::BadOrigin) + ); + assert_ok!(AdminUtils::sudo_set_owner_hparam_rate_limit( + <::RuntimeOrigin>::root(), + 5 + )); + assert_eq!(pallet_subtensor::OwnerHyperparamRateLimit::::get(), 5); + }); +} + +#[test] +fn test_freeze_window_blocks_root_and_owner() { + new_test_ext().execute_with(|| { + let netuid = NetUid::from(1); + let tempo = 10; + // Create subnet with tempo 10 + add_network(netuid, tempo); + // Set freeze window to 3 blocks + assert_ok!(AdminUtils::sudo_set_admin_freeze_window( + <::RuntimeOrigin>::root(), + 3 + )); + // Advance to a block where remaining < 3 + run_to_block((tempo - 2).into()); + + // Root should be blocked during freeze window + assert_noop!( + AdminUtils::sudo_set_min_burn( + <::RuntimeOrigin>::root(), + netuid, + 123.into() + ), + SubtensorError::::AdminActionProhibitedDuringWeightsWindow + ); + + // Owner should be blocked during freeze window as well + // Set owner + let owner: U256 = U256::from(9); + 
SubnetOwner::::insert(netuid, owner); + assert_noop!( + AdminUtils::sudo_set_kappa( + <::RuntimeOrigin>::signed(owner), + netuid, + 77 + ), + SubtensorError::::AdminActionProhibitedDuringWeightsWindow + ); + }); +} + #[test] fn test_sudo_set_min_burn() { new_test_ext().execute_with(|| { @@ -2011,6 +2089,179 @@ fn test_sudo_set_min_burn() { }); } +#[test] +fn test_owner_hyperparam_update_rate_limit_enforced() { + new_test_ext().execute_with(|| { + let netuid = NetUid::from(1); + add_network(netuid, 10); + // Set owner + let owner: U256 = U256::from(5); + SubnetOwner::::insert(netuid, owner); + + // Set tempo to 1 so owner hyperparam RL = 2 tempos = 2 blocks + SubtensorModule::set_tempo(netuid, 1); + // Disable admin freeze window to avoid blocking on small tempo + assert_ok!(AdminUtils::sudo_set_admin_freeze_window( + <::RuntimeOrigin>::root(), + 0 + )); + + // First update succeeds + assert_ok!(AdminUtils::sudo_set_kappa( + <::RuntimeOrigin>::signed(owner), + netuid, + 11 + )); + // Immediate second update fails due to TxRateLimitExceeded + assert_noop!( + AdminUtils::sudo_set_kappa( + <::RuntimeOrigin>::signed(owner), + netuid, + 12 + ), + SubtensorError::::TxRateLimitExceeded + ); + + // Advance less than limit still fails + run_to_block(SubtensorModule::get_current_block_as_u64() + 1); + assert_noop!( + AdminUtils::sudo_set_kappa( + <::RuntimeOrigin>::signed(owner), + netuid, + 13 + ), + SubtensorError::::TxRateLimitExceeded + ); + + // Advance one more block to pass the limit; should succeed + run_to_block(SubtensorModule::get_current_block_as_u64() + 1); + assert_ok!(AdminUtils::sudo_set_kappa( + <::RuntimeOrigin>::signed(owner), + netuid, + 14 + )); + }); +} + +// Verifies that owner hyperparameter rate limit is enforced based on tempo (2 tempos). +#[test] +fn test_hyperparam_rate_limit_enforced_by_tempo() { + new_test_ext().execute_with(|| { + // Setup subnet and owner + let netuid = NetUid::from(42); + add_network(netuid, 10); + let owner: U256 = U256::from(77); + SubnetOwner::::insert(netuid, owner); + + // Set tempo to 1 so RL = 2 blocks + SubtensorModule::set_tempo(netuid, 1); + // Disable admin freeze window to avoid blocking on small tempo + assert_ok!(AdminUtils::sudo_set_admin_freeze_window( + <::RuntimeOrigin>::root(), + 0 + )); + + // First owner update should succeed + assert_ok!(AdminUtils::sudo_set_kappa( + <::RuntimeOrigin>::signed(owner), + netuid, + 1 + )); + + // Immediate second update should fail due to tempo-based RL + assert_noop!( + AdminUtils::sudo_set_kappa(<::RuntimeOrigin>::signed(owner), netuid, 2), + SubtensorError::::TxRateLimitExceeded + ); + + // Advance 2 blocks (2 tempos with tempo=1) then succeed + run_to_block(SubtensorModule::get_current_block_as_u64() + 2); + assert_ok!(AdminUtils::sudo_set_kappa( + <::RuntimeOrigin>::signed(owner), + netuid, + 3 + )); + }); +} + +// Verifies owner hyperparameters are rate-limited independently per parameter. +// Setting one hyperparameter should not block setting a different hyperparameter +// during the same rate-limit window, but it should still block itself. 
+#[test] +fn test_owner_hyperparam_rate_limit_independent_per_param() { + new_test_ext().execute_with(|| { + let netuid = NetUid::from(7); + add_network(netuid, 10); + + // Set subnet owner + let owner: U256 = U256::from(123); + SubnetOwner::::insert(netuid, owner); + + // Use small tempo to make RL short and deterministic (2 blocks when tempo=1) + SubtensorModule::set_tempo(netuid, 1); + // Disable admin freeze window so it doesn't interfere with small tempo + assert_ok!(AdminUtils::sudo_set_admin_freeze_window( + <::RuntimeOrigin>::root(), + 0 + )); + + // First update to kappa should succeed + assert_ok!(AdminUtils::sudo_set_kappa( + <::RuntimeOrigin>::signed(owner), + netuid, + 10 + )); + + // Immediate second update to the SAME param (kappa) should be blocked by RL + assert_noop!( + AdminUtils::sudo_set_kappa( + <::RuntimeOrigin>::signed(owner), + netuid, + 11 + ), + SubtensorError::::TxRateLimitExceeded + ); + + // Updating a DIFFERENT param (rho) should pass immediately — independent RL key + assert_ok!(AdminUtils::sudo_set_rho( + <::RuntimeOrigin>::signed(owner), + netuid, + 5 + )); + + // kappa should still be blocked until its own RL window passes + assert_noop!( + AdminUtils::sudo_set_kappa( + <::RuntimeOrigin>::signed(owner), + netuid, + 12 + ), + SubtensorError::::TxRateLimitExceeded + ); + + // rho should also be blocked for itself immediately after being set + assert_noop!( + AdminUtils::sudo_set_rho(<::RuntimeOrigin>::signed(owner), netuid, 6), + SubtensorError::::TxRateLimitExceeded + ); + + // Advance enough blocks to pass the RL window (2 blocks when tempo=1 and default epochs=2) + run_to_block(SubtensorModule::get_current_block_as_u64() + 2); + + // Now both hyperparameters can be updated again + assert_ok!(AdminUtils::sudo_set_kappa( + <::RuntimeOrigin>::signed(owner), + netuid, + 13 + )); + assert_ok!(AdminUtils::sudo_set_rho( + <::RuntimeOrigin>::signed(owner), + netuid, + 7 + )); + }); +} + #[test] fn test_sudo_set_max_burn() { new_test_ext().execute_with(|| { @@ -2069,3 +2320,504 @@ fn test_sudo_set_max_burn() { ); }); } + +#[test] +fn test_sudo_set_mechanism_count() { + new_test_ext().execute_with(|| { + let netuid = NetUid::from(1); + let ss_count_ok = MaxMechanismCount::::get(); + let ss_count_bad = MechId::from(u8::from(ss_count_ok) + 1); + + let sn_owner = U256::from(1324); + add_network(netuid, 10); + // Set the Subnet Owner + SubnetOwner::::insert(netuid, sn_owner); + + assert_eq!( + AdminUtils::sudo_set_mechanism_count( + <::RuntimeOrigin>::signed(U256::from(1)), + netuid, + ss_count_ok + ), + Err(DispatchError::BadOrigin) + ); + assert_noop!( + AdminUtils::sudo_set_mechanism_count(RuntimeOrigin::root(), netuid, ss_count_bad), + pallet_subtensor::Error::::InvalidValue + ); + + assert_ok!(AdminUtils::sudo_set_mechanism_count( + <::RuntimeOrigin>::root(), + netuid, + ss_count_ok + )); + + assert_ok!(AdminUtils::sudo_set_mechanism_count( + <::RuntimeOrigin>::signed(sn_owner), + netuid, + ss_count_ok + )); + }); +} + +// cargo test --package pallet-admin-utils --lib -- tests::test_sudo_set_mechanism_count_and_emissions --exact --show-output +#[test] +fn test_sudo_set_mechanism_count_and_emissions() { + new_test_ext().execute_with(|| { + let netuid = NetUid::from(1); + let ss_count_ok = MechId::from(2); + + let sn_owner = U256::from(1324); + add_network(netuid, 10); + // Set the Subnet Owner + SubnetOwner::::insert(netuid, sn_owner); + + assert_ok!(AdminUtils::sudo_set_mechanism_count( + <::RuntimeOrigin>::signed(sn_owner), + netuid, + ss_count_ok + )); + + // 
Cannot set emission split with wrong number of entries + // With two mechanisms the size of the split vector should be 2, not 3 + assert_noop!( + AdminUtils::sudo_set_mechanism_emission_split( + <::RuntimeOrigin>::signed(sn_owner), + netuid, + Some(vec![0xFFFF / 5 * 2, 0xFFFF / 5 * 2, 0xFFFF / 5]) + ), + pallet_subtensor::Error::::InvalidValue + ); + + // Cannot set emission split with wrong total of entries + // Split vector entries should sum up to exactly 0xFFFF + assert_noop!( + AdminUtils::sudo_set_mechanism_emission_split( + <::RuntimeOrigin>::signed(sn_owner), + netuid, + Some(vec![0xFFFF / 5 * 4, 0xFFFF / 5 - 1]) + ), + pallet_subtensor::Error::::InvalidValue + ); + + // Can set good split ok + // We also verify here that it can happen in the same block as setting mechanism counts + // or soon, without rate limiting + assert_ok!(AdminUtils::sudo_set_mechanism_emission_split( + <::RuntimeOrigin>::signed(sn_owner), + netuid, + Some(vec![0xFFFF / 5, 0xFFFF / 5 * 4]) + )); + + // Cannot set it again due to rate limits + assert_noop!( + AdminUtils::sudo_set_mechanism_emission_split( + <::RuntimeOrigin>::signed(sn_owner), + netuid, + Some(vec![0xFFFF / 5 * 4, 0xFFFF / 5]) + ), + pallet_subtensor::Error::::TxRateLimitExceeded + ); + }); +} + +#[test] +fn test_trim_to_max_allowed_uids() { + new_test_ext().execute_with(|| { + let netuid = NetUid::from(1); + let sn_owner = U256::from(1); + let sn_owner_hotkey1 = U256::from(2); + let sn_owner_hotkey2 = U256::from(3); + add_network(netuid, 10); + SubnetOwner::::insert(netuid, sn_owner); + SubnetOwnerHotkey::::insert(netuid, sn_owner_hotkey1); + MaxRegistrationsPerBlock::::insert(netuid, 256); + TargetRegistrationsPerInterval::::insert(netuid, 256); + ImmuneOwnerUidsLimit::::insert(netuid, 2); + // We set a low value here to make testing easier + MinAllowedUids::::set(netuid, 4); + // We define 4 mechanisms + let mechanism_count = MechId::from(4); + MechanismCountCurrent::::insert(netuid, mechanism_count); + + // Add some neurons + let max_n = 16; + for i in 1..=max_n { + let n = i * 1000; + register_ok_neuron(netuid, U256::from(n), U256::from(n + i), 0); + } + + // Run some block to ensure stake weights are set and that we are past the immunity period + // for all neurons + run_to_block((ImmunityPeriod::::get(netuid) + 1).into()); + + // Set some randomized values that we can keep track of + let values = vec![ + 17u16, 42u16, 8u16, 56u16, 23u16, 91u16, 34u16, // owner owned + 77u16, // temporally immune + 12u16, 65u16, 3u16, 88u16, // owner owned + 29u16, 51u16, 74u16, // temporally immune + 39u16, + ]; + let bool_values = vec![ + false, false, false, true, false, true, true, // owner owned + true, // temporally immune + false, true, false, true, // owner owned + false, true, true, // temporally immune + false, + ]; + let alpha_values = values.iter().map(|&v| (v as u64).into()).collect(); + let u64_values: Vec = values.iter().map(|&v| v as u64).collect(); + + Emission::::set(netuid, alpha_values); + Rank::::insert(netuid, values.clone()); + Trust::::insert(netuid, values.clone()); + Consensus::::insert(netuid, values.clone()); + Dividends::::insert(netuid, values.clone()); + PruningScores::::insert(netuid, values.clone()); + ValidatorTrust::::insert(netuid, values.clone()); + StakeWeight::::insert(netuid, values.clone()); + ValidatorPermit::::insert(netuid, bool_values.clone()); + Active::::insert(netuid, bool_values); + + for mecid in 0..mechanism_count.into() { + let netuid_index = + SubtensorModule::get_mechanism_storage_index(netuid, 
MechId::from(mecid)); + Incentive::::insert(netuid_index, values.clone()); + LastUpdate::::insert(netuid_index, u64_values.clone()); + } + + // We set some owner immune uids + let now = frame_system::Pallet::::block_number(); + BlockAtRegistration::::set(netuid, 6, now); + BlockAtRegistration::::set(netuid, 11, now); + + // And some temporally immune uids + Keys::::insert(netuid, 7, sn_owner_hotkey1); + Uids::::insert(netuid, sn_owner_hotkey1, 7); + Keys::::insert(netuid, 14, sn_owner_hotkey2); + Uids::::insert(netuid, sn_owner_hotkey2, 14); + + // Populate Weights and Bonds storage items to test trimming + // Create weights and bonds that span across the range that will be trimmed + for uid in 0..max_n { + let mut weights = Vec::new(); + let mut bonds = Vec::new(); + + // Add connections to all other uids, including those that will be trimmed + for target_uid in 0..max_n { + if target_uid != uid { + // Use some non-zero values to make the test more meaningful + let weight_value = (uid + target_uid) % 1000; + let bond_value = (uid * target_uid) % 1000; + weights.push((target_uid, weight_value)); + bonds.push((target_uid, bond_value)); + } + } + + for mecid in 0..mechanism_count.into() { + let netuid_index = + SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(mecid)); + Weights::::insert(netuid_index, uid, weights.clone()); + Bonds::::insert(netuid_index, uid, bonds.clone()); + } + } + + // Normal case + let new_max_n = 8; + assert_ok!(AdminUtils::sudo_trim_to_max_allowed_uids( + <::RuntimeOrigin>::root(), + netuid, + new_max_n + )); + + // Ensure the max allowed uids has been set correctly + assert_eq!(MaxAllowedUids::::get(netuid), new_max_n); + + // Ensure the emission has been trimmed correctly, keeping the highest emitters + // and immune and compressed to the left + assert_eq!( + Emission::::get(netuid), + vec![ + 56.into(), + 91.into(), + 34.into(), + 77.into(), + 65.into(), + 88.into(), + 51.into(), + 74.into() + ] + ); + // Ensure rest of storage has been trimmed correctly + let expected_values = vec![56, 91, 34, 77, 65, 88, 51, 74]; + let expected_bools = vec![true, true, true, true, true, true, true, true]; + let expected_u64_values = vec![56, 91, 34, 77, 65, 88, 51, 74]; + assert_eq!(Rank::::get(netuid), expected_values); + assert_eq!(Trust::::get(netuid), expected_values); + assert_eq!(Active::::get(netuid), expected_bools); + assert_eq!(Consensus::::get(netuid), expected_values); + assert_eq!(Dividends::::get(netuid), expected_values); + assert_eq!(PruningScores::::get(netuid), expected_values); + assert_eq!(ValidatorTrust::::get(netuid), expected_values); + assert_eq!(ValidatorPermit::::get(netuid), expected_bools); + assert_eq!(StakeWeight::::get(netuid), expected_values); + + for mecid in 0..mechanism_count.into() { + let netuid_index = + SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(mecid)); + assert_eq!(Incentive::::get(netuid_index), expected_values); + assert_eq!(LastUpdate::::get(netuid_index), expected_u64_values); + } + + // Ensure trimmed uids related storage has been cleared + for uid in new_max_n..max_n { + assert!(!Keys::::contains_key(netuid, uid)); + assert!(!BlockAtRegistration::::contains_key(netuid, uid)); + for mecid in 0..mechanism_count.into() { + let netuid_index = + SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(mecid)); + assert!(!Weights::::contains_key(netuid_index, uid)); + assert!(!Bonds::::contains_key(netuid_index, uid)); + } + } + + // Ensure trimmed uids hotkey related storage has been 
cleared + let trimmed_hotkeys = vec![ + U256::from(1000), + U256::from(2000), + U256::from(3000), + U256::from(5000), + U256::from(9000), + U256::from(11000), + U256::from(13000), + U256::from(16000), + ]; + for hotkey in trimmed_hotkeys { + assert!(!Uids::::contains_key(netuid, hotkey)); + assert!(!IsNetworkMember::::contains_key(hotkey, netuid)); + assert!(!LastHotkeyEmissionOnNetuid::::contains_key( + hotkey, netuid + )); + assert!(!AlphaDividendsPerSubnet::::contains_key( + netuid, hotkey + )); + assert!(!TaoDividendsPerSubnet::::contains_key(netuid, hotkey)); + assert!(!Axons::::contains_key(netuid, hotkey)); + assert!(!NeuronCertificates::::contains_key(netuid, hotkey)); + assert!(!Prometheus::::contains_key(netuid, hotkey)); + } + + // Ensure trimmed uids weights and bonds connections have been trimmed correctly + for uid in 0..new_max_n { + for mecid in 0..mechanism_count.into() { + let netuid_index = + SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(mecid)); + assert!( + Weights::::get(netuid_index, uid) + .iter() + .all(|(target_uid, _)| *target_uid < new_max_n), + "Found a weight with target_uid >= new_max_n" + ); + assert!( + Bonds::::get(netuid_index, uid) + .iter() + .all(|(target_uid, _)| *target_uid < new_max_n), + "Found a bond with target_uid >= new_max_n" + ); + } + } + + // Actual number of neurons on the network updated after trimming + assert_eq!(SubnetworkN::::get(netuid), new_max_n); + + // Non existent subnet + assert_err!( + AdminUtils::sudo_trim_to_max_allowed_uids( + <::RuntimeOrigin>::root(), + NetUid::from(42), + new_max_n + ), + pallet_subtensor::Error::::SubnetNotExists + ); + + // New max n less than lower bound + assert_err!( + AdminUtils::sudo_trim_to_max_allowed_uids( + <::RuntimeOrigin>::root(), + netuid, + 2 + ), + pallet_subtensor::Error::::InvalidValue + ); + + // New max n greater than upper bound + assert_err!( + AdminUtils::sudo_trim_to_max_allowed_uids( + <::RuntimeOrigin>::root(), + netuid, + SubtensorModule::get_max_allowed_uids(netuid) + 1 + ), + pallet_subtensor::Error::::InvalidValue + ); + }); +} + +#[test] +fn test_trim_to_max_allowed_uids_too_many_immune() { + new_test_ext().execute_with(|| { + let netuid = NetUid::from(1); + let sn_owner = U256::from(1); + add_network(netuid, 10); + SubnetOwner::::insert(netuid, sn_owner); + MaxRegistrationsPerBlock::::insert(netuid, 256); + TargetRegistrationsPerInterval::::insert(netuid, 256); + ImmuneOwnerUidsLimit::::insert(netuid, 2); + MinAllowedUids::::set(netuid, 4); + + // Add 5 neurons + let max_n = 5; + for i in 1..=max_n { + let n = i * 1000; + register_ok_neuron(netuid, U256::from(n), U256::from(n + i), 0); + } + + // Run some blocks to ensure stake weights are set + run_to_block((ImmunityPeriod::::get(netuid) + 1).into()); + + // Set owner immune uids (2 UIDs) by adding them to OwnedHotkeys + let owner_hotkey1 = U256::from(1000); + let owner_hotkey2 = U256::from(2000); + OwnedHotkeys::::insert(sn_owner, vec![owner_hotkey1, owner_hotkey2]); + Keys::::insert(netuid, 0, owner_hotkey1); + Uids::::insert(netuid, owner_hotkey1, 0); + Keys::::insert(netuid, 1, owner_hotkey2); + Uids::::insert(netuid, owner_hotkey2, 1); + + // Set temporally immune uids (2 UIDs) to make total immune count 4 out of 5 (80%) + // Set their registration block to current block to make them temporally immune + let current_block = frame_system::Pallet::::block_number(); + for uid in 2..4 { + let hotkey = U256::from(uid * 1000 + 1000); + Keys::::insert(netuid, uid, hotkey); + Uids::::insert(netuid, 
hotkey, uid);
+            BlockAtRegistration::::insert(netuid, uid, current_block);
+        }
+
+        // Try to trim to 4 UIDs - this should fail because 4/4 = 100% immune (>= 80%)
+        assert_err!(
+            AdminUtils::sudo_trim_to_max_allowed_uids(
+                <::RuntimeOrigin>::root(),
+                netuid,
+                4
+            ),
+            pallet_subtensor::Error::::InvalidValue
+        );
+
+        // Try to trim to 3 UIDs - this should also fail: 4 immune UIDs cannot fit a
+        // target of 3, so the immune share would again exceed the 80% cap
+        assert_err!(
+            AdminUtils::sudo_trim_to_max_allowed_uids(
+                <::RuntimeOrigin>::root(),
+                netuid,
+                3
+            ),
+            pallet_subtensor::Error::::InvalidValue
+        );
+
+        // Now test a scenario where trimming should succeed
+        // Remove one immune UID to make it 3 immune out of 4 total
+        let uid_to_remove = 3;
+        let hotkey_to_remove = U256::from(uid_to_remove * 1000 + 1000);
+        #[allow(unknown_lints)]
+        Keys::::remove(netuid, uid_to_remove);
+        Uids::::remove(netuid, hotkey_to_remove);
+        BlockAtRegistration::::remove(netuid, uid_to_remove);
+
+        // With 3 immune out of 4 total UIDs, trimming to 3 would still leave the
+        // subnet 100% immune (3/3), above the 80% cap, so it would still fail.
+        // Reduce the immune count further instead.
+
+        // Remove another immune UID to make it 2 immune out of 3 total
+        let uid_to_remove2 = 2;
+        let hotkey_to_remove2 = U256::from(uid_to_remove2 * 1000 + 1000);
+        #[allow(unknown_lints)]
+        Keys::::remove(netuid, uid_to_remove2);
+        Uids::::remove(netuid, hotkey_to_remove2);
+        BlockAtRegistration::::remove(netuid, uid_to_remove2);
+
+        // Now we have 2 immune out of 2 total UIDs
+        // Try to trim to 1 UID - this should fail: 2 immune UIDs cannot fit a target
+        // of 1, and the immune-share check rejects it
+        assert_err!(
+            AdminUtils::sudo_trim_to_max_allowed_uids(
+                <::RuntimeOrigin>::root(),
+                netuid,
+                1
+            ),
+            pallet_subtensor::Error::::InvalidValue
+        );
+    });
+}
+
+#[test]
+fn test_sudo_set_min_allowed_uids() {
+    new_test_ext().execute_with(|| {
+        let netuid = NetUid::from(1);
+        let to_be_set: u16 = 8;
+        add_network(netuid, 10);
+        MaxRegistrationsPerBlock::::insert(netuid, 256);
+        TargetRegistrationsPerInterval::::insert(netuid, 256);
+
+        // Register some neurons
+        for i in 0..=16 {
+            register_ok_neuron(netuid, U256::from(i * 1000), U256::from(i * 1000 + i), 0);
+        }
+
+        // Normal case
+        assert_ok!(AdminUtils::sudo_set_min_allowed_uids(
+            <::RuntimeOrigin>::root(),
+            netuid,
+            to_be_set
+        ));
+        assert_eq!(SubtensorModule::get_min_allowed_uids(netuid), to_be_set);
+
+        // Non root
+        assert_err!(
+            AdminUtils::sudo_set_min_allowed_uids(
+                <::RuntimeOrigin>::signed(U256::from(0)),
+                netuid,
+                to_be_set
+            ),
+            DispatchError::BadOrigin
+        );
+
+        // Non existent subnet
+        assert_err!(
+            AdminUtils::sudo_set_min_allowed_uids(
+                <::RuntimeOrigin>::root(),
+                NetUid::from(42),
+                to_be_set
+            ),
+            Error::::SubnetDoesNotExist
+        );
+
+        // Min allowed uids greater than max allowed uids
+        assert_err!(
+            AdminUtils::sudo_set_min_allowed_uids(
+                <::RuntimeOrigin>::root(),
+                netuid,
+                SubtensorModule::get_max_allowed_uids(netuid) + 1
+            ),
+            Error::::MinAllowedUidsGreaterThanMaxAllowedUids
+        );
+
+        // Min allowed uids greater than current uids
+        assert_err!(
+            AdminUtils::sudo_set_min_allowed_uids(
+                <::RuntimeOrigin>::root(),
+                netuid,
+                SubtensorModule::get_subnetwork_n(netuid) + 1
+            ),
+            Error::::MinAllowedUidsGreaterThanCurrentUids
+        );
+    });
+}
diff --git a/pallets/commitments/src/lib.rs b/pallets/commitments/src/lib.rs
index 6d0d826ab4..5fa37bf5e1 100644
--- a/pallets/commitments/src/lib.rs
+++ b/pallets/commitments/src/lib.rs
@@ -566,6 +566,18 @@ impl Pallet {
.collect(); commitments } + + pub fn purge_netuid(netuid: NetUid) { + let _ = CommitmentOf::::clear_prefix(netuid, u32::MAX, None); + let _ = LastCommitment::::clear_prefix(netuid, u32::MAX, None); + let _ = LastBondsReset::::clear_prefix(netuid, u32::MAX, None); + let _ = RevealedCommitments::::clear_prefix(netuid, u32::MAX, None); + let _ = UsedSpaceOf::::clear_prefix(netuid, u32::MAX, None); + + TimelockedIndex::::mutate(|index| { + index.retain(|(n, _)| *n != netuid); + }); + } } pub trait GetCommitments { diff --git a/pallets/commitments/src/tests.rs b/pallets/commitments/src/tests.rs index 6866ebdeec..5f19070ea2 100644 --- a/pallets/commitments/src/tests.rs +++ b/pallets/commitments/src/tests.rs @@ -4,8 +4,9 @@ use subtensor_runtime_common::NetUid; #[cfg(test)] use crate::{ - BalanceOf, CommitmentInfo, CommitmentOf, Config, Data, Error, Event, MaxSpace, Pallet, - Registration, RevealedCommitments, TimelockedIndex, UsedSpaceOf, + BalanceOf, CommitmentInfo, CommitmentOf, Config, Data, Error, Event, LastBondsReset, + LastCommitment, MaxSpace, Pallet, Registration, RevealedCommitments, TimelockedIndex, + UsageTracker, UsedSpaceOf, mock::{ Balances, DRAND_QUICKNET_SIG_2000_HEX, DRAND_QUICKNET_SIG_HEX, RuntimeEvent, RuntimeOrigin, Test, TestMaxFields, insert_drand_pulse, new_test_ext, produce_ciphertext, @@ -2185,3 +2186,119 @@ fn mixed_timelocked_and_raw_fields_works() { ); }); } + +#[test] +fn purge_netuid_clears_only_that_netuid() { + new_test_ext().execute_with(|| { + // Setup + System::::set_block_number(1); + + let net_a = NetUid::from(42); + let net_b = NetUid::from(43); + let who_a1: u64 = 1001; + let who_a2: u64 = 1002; + let who_b: u64 = 2001; + + // Minimal commitment payload + let empty_fields: BoundedVec::MaxFields> = BoundedVec::default(); + let info_empty: CommitmentInfo<::MaxFields> = CommitmentInfo { + fields: empty_fields, + }; + let bn = System::::block_number(); + + // Seed NET A with two accounts across all tracked storages + let reg_a1 = Registration { + deposit: Default::default(), + block: bn, + info: info_empty.clone(), + }; + let reg_a2 = Registration { + deposit: Default::default(), + block: bn, + info: info_empty.clone(), + }; + CommitmentOf::::insert(net_a, who_a1, reg_a1); + CommitmentOf::::insert(net_a, who_a2, reg_a2); + LastCommitment::::insert(net_a, who_a1, bn); + LastCommitment::::insert(net_a, who_a2, bn); + LastBondsReset::::insert(net_a, who_a1, bn); + RevealedCommitments::::insert(net_a, who_a1, vec![(b"a".to_vec(), 7u64)]); + UsedSpaceOf::::insert( + net_a, + who_a1, + UsageTracker { + last_epoch: 1, + used_space: 123, + }, + ); + + // Seed NET B with one account that must remain intact + let reg_b = Registration { + deposit: Default::default(), + block: bn, + info: info_empty, + }; + CommitmentOf::::insert(net_b, who_b, reg_b); + LastCommitment::::insert(net_b, who_b, bn); + LastBondsReset::::insert(net_b, who_b, bn); + RevealedCommitments::::insert(net_b, who_b, vec![(b"b".to_vec(), 8u64)]); + UsedSpaceOf::::insert( + net_b, + who_b, + UsageTracker { + last_epoch: 9, + used_space: 999, + }, + ); + + // Timelocked index contains both nets + TimelockedIndex::::mutate(|idx| { + idx.insert((net_a, who_a1)); + idx.insert((net_a, who_a2)); + idx.insert((net_b, who_b)); + }); + + // Sanity pre-checks + assert!(CommitmentOf::::get(net_a, who_a1).is_some()); + assert!(CommitmentOf::::get(net_b, who_b).is_some()); + assert!(TimelockedIndex::::get().contains(&(net_a, who_a1))); + + // Act + Pallet::::purge_netuid(net_a); + + // NET A: everything cleared + 
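+        // Note on the `clear_prefix(netuid, u32::MAX, None)` calls in `purge_netuid`
+        // exercised here: that form removes everything under the prefix in a single
+        // pass. A sketch of the cursor-based variant a very large map would
+        // presumably need (the 512 limit and loop shape are illustrative
+        // assumptions, not what the pallet does):
+        //
+        //   let mut cursor: Option<Vec<u8>> = None;
+        //   loop {
+        //       let res = CommitmentOf::::clear_prefix(netuid, 512, cursor.as_deref());
+        //       cursor = res.maybe_cursor;
+        //       if cursor.is_none() { break; }
+        //   }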
assert_eq!(CommitmentOf::::iter_prefix(net_a).count(), 0); + assert!(CommitmentOf::::get(net_a, who_a1).is_none()); + assert!(CommitmentOf::::get(net_a, who_a2).is_none()); + + assert_eq!(LastCommitment::::iter_prefix(net_a).count(), 0); + assert!(LastCommitment::::get(net_a, who_a1).is_none()); + assert!(LastCommitment::::get(net_a, who_a2).is_none()); + + assert_eq!(LastBondsReset::::iter_prefix(net_a).count(), 0); + assert!(LastBondsReset::::get(net_a, who_a1).is_none()); + + assert_eq!(RevealedCommitments::::iter_prefix(net_a).count(), 0); + assert!(RevealedCommitments::::get(net_a, who_a1).is_none()); + + assert_eq!(UsedSpaceOf::::iter_prefix(net_a).count(), 0); + assert!(UsedSpaceOf::::get(net_a, who_a1).is_none()); + + let idx_after = TimelockedIndex::::get(); + assert!(!idx_after.contains(&(net_a, who_a1))); + assert!(!idx_after.contains(&(net_a, who_a2))); + + // NET B: untouched + assert!(CommitmentOf::::get(net_b, who_b).is_some()); + assert!(LastCommitment::::get(net_b, who_b).is_some()); + assert!(LastBondsReset::::get(net_b, who_b).is_some()); + assert!(RevealedCommitments::::get(net_b, who_b).is_some()); + assert!(UsedSpaceOf::::get(net_b, who_b).is_some()); + assert!(idx_after.contains(&(net_b, who_b))); + + // Idempotency + Pallet::::purge_netuid(net_a); + assert_eq!(CommitmentOf::::iter_prefix(net_a).count(), 0); + assert!(!TimelockedIndex::::get().contains(&(net_a, who_a1))); + }); +} diff --git a/pallets/proxy/Cargo.toml b/pallets/proxy/Cargo.toml index 4f5dddfed1..ffca8ad8a7 100644 --- a/pallets/proxy/Cargo.toml +++ b/pallets/proxy/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-proxy" -version = "38.0.0" +version = "40.1.0" authors = ["Bittensor Nucleus Team"] edition.workspace = true license = "Apache-2.0" @@ -15,43 +15,29 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { workspace = true, features = ["derive", "max-encoded-len"] } +codec = { workspace = true, features = ["max-encoded-len"] } +frame = { workspace = true, features = ["runtime"] } scale-info = { workspace = true, features = ["derive"] } -frame-benchmarking = { workspace = true, optional = true } -frame-support.workspace = true -frame-system.workspace = true -sp-io.workspace = true -sp-runtime.workspace = true subtensor-macros.workspace = true [dev-dependencies] -pallet-balances = { workspace = true, default-features = true } -pallet-utility = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } +pallet-balances = { default-features = true, workspace = true } +pallet-utility = { default-features = true, workspace = true } [features] default = ["std"] std = [ "codec/std", - "frame-benchmarking?/std", - "frame-support/std", - "frame-system/std", + "frame/std", "scale-info/std", - "sp-io/std", - "sp-runtime/std", ] runtime-benchmarks = [ - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", + "frame/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-utility/runtime-benchmarks", ] try-runtime = [ - "frame-support/try-runtime", - "frame-system/try-runtime", - "sp-runtime/try-runtime", + "frame/try-runtime", "pallet-balances/try-runtime", "pallet-utility/try-runtime", ] diff --git a/pallets/proxy/src/benchmarking.rs b/pallets/proxy/src/benchmarking.rs index 0e0d89f03e..ff881912d1 100644 --- a/pallets/proxy/src/benchmarking.rs +++ b/pallets/proxy/src/benchmarking.rs @@ -1,13 +1,13 @@ // This file is part of 
Substrate. -// + // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 -// + // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0/ +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,13 +18,14 @@ // Benchmarks for Proxy Pallet #![cfg(feature = "runtime-benchmarks")] +#![allow(clippy::arithmetic_side_effects)] use super::*; use crate::Pallet as Proxy; use alloc::{boxed::Box, vec}; -use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller}; -use frame_system::{RawOrigin, pallet_prelude::BlockNumberFor}; -use sp_runtime::traits::{Bounded, CheckedDiv}; +use frame::benchmarking::prelude::{ + BenchmarkError, RawOrigin, account, benchmarks, impl_test_function, whitelisted_caller, +}; const SEED: u32 = 0; @@ -32,15 +33,13 @@ fn assert_last_event(generic_event: ::RuntimeEvent) { frame_system::Pallet::::assert_last_event(generic_event.into()); } -fn half_max_balance() -> BalanceOf { - BalanceOf::::max_value() - .checked_div(&BalanceOf::::from(2_u32)) - .unwrap_or_else(BalanceOf::::max_value) +fn assert_has_event(generic_event: ::RuntimeEvent) { + frame_system::Pallet::::assert_has_event(generic_event.into()); } fn add_proxies(n: u32, maybe_who: Option) -> Result<(), &'static str> { let caller = maybe_who.unwrap_or_else(whitelisted_caller); - T::Currency::make_free_balance_be(&caller, half_max_balance::()); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); for i in 0..n { let real = T::Lookup::unlookup(account("target", i, SEED)); @@ -61,12 +60,12 @@ fn add_announcements( ) -> Result<(), &'static str> { let caller = maybe_who.unwrap_or_else(|| account("caller", 0, SEED)); let caller_lookup = T::Lookup::unlookup(caller.clone()); - T::Currency::make_free_balance_be(&caller, half_max_balance::()); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); let real = if let Some(real) = maybe_real { real } else { let real = account("real", 0, SEED); - T::Currency::make_free_balance_be(&real, half_max_balance::()); + T::Currency::make_free_balance_be(&real, BalanceOf::::max_value() / 2u32.into()); Proxy::::add_proxy( RawOrigin::Signed(real.clone()).into(), caller_lookup, @@ -86,157 +85,256 @@ fn add_announcements( Ok(()) } -benchmarks! { - proxy { - let p in 1 .. (T::MaxProxies::get().saturating_sub(1)) => add_proxies::(p, None)?; - let caller: T::AccountId = account("target", p.saturating_sub(1), SEED); - T::Currency::make_free_balance_be(&caller, half_max_balance::()); +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn proxy(p: Linear<1, { T::MaxProxies::get() - 1 }>) -> Result<(), BenchmarkError> { + add_proxies::(p, None)?; + // In this case the caller is the "target" proxy + let caller: T::AccountId = account("target", p - 1, SEED); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); + // ... and "real" is the traditional caller. This is not a typo. 
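+    // The rewritten benchmarks in this module all follow the frame v2 shape; a
+    // minimal sketch of the pattern (illustrative only, `example` is not one of
+    // this pallet's extrinsics):
+    //
+    //   #[benchmark]
+    //   fn example(x: Linear<1, 100>) -> Result<(), BenchmarkError> {
+    //       /* setup that scales with the component `x` */
+    //       #[extrinsic_call]
+    //       _(RawOrigin::Root); // `_` resolves to the extrinsic named `example`
+    //       /* verification asserts */
+    //       Ok(())
+    //   }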
let real: T::AccountId = whitelisted_caller(); let real_lookup = T::Lookup::unlookup(real); - let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); - }: _(RawOrigin::Signed(caller), real_lookup, Some(T::ProxyType::default()), Box::new(call)) - verify { - assert_last_event::(Event::ProxyExecuted { result: Ok(()) }.into()) + let call: ::RuntimeCall = + frame_system::Call::::remark { remark: vec![] }.into(); + + #[extrinsic_call] + _( + RawOrigin::Signed(caller), + real_lookup, + Some(T::ProxyType::default()), + Box::new(call), + ); + + assert_last_event::(Event::ProxyExecuted { result: Ok(()) }.into()); + + Ok(()) } - proxy_announced { - let a in 0 .. T::MaxPending::get().saturating_sub(1); - let p in 1 .. (T::MaxProxies::get().saturating_sub(1)) => add_proxies::(p, None)?; + #[benchmark] + fn proxy_announced( + a: Linear<0, { T::MaxPending::get() - 1 }>, + p: Linear<1, { T::MaxProxies::get() - 1 }>, + ) -> Result<(), BenchmarkError> { + add_proxies::(p, None)?; + // In this case the caller is the "target" proxy let caller: T::AccountId = account("pure", 0, SEED); - let delegate: T::AccountId = account("target", p.saturating_sub(1), SEED); + let delegate: T::AccountId = account("target", p - 1, SEED); let delegate_lookup = T::Lookup::unlookup(delegate.clone()); - T::Currency::make_free_balance_be(&delegate, half_max_balance::()); + T::Currency::make_free_balance_be(&delegate, BalanceOf::::max_value() / 2u32.into()); + // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); let real_lookup = T::Lookup::unlookup(real); - let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); + let call: ::RuntimeCall = + frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::announce( RawOrigin::Signed(delegate.clone()).into(), real_lookup.clone(), T::CallHasher::hash_of(&call), )?; add_announcements::(a, Some(delegate.clone()), None)?; - }: _(RawOrigin::Signed(caller), delegate_lookup, real_lookup, Some(T::ProxyType::default()), Box::new(call)) - verify { - assert_last_event::(Event::ProxyExecuted { result: Ok(()) }.into()) + + #[extrinsic_call] + _( + RawOrigin::Signed(caller), + delegate_lookup, + real_lookup, + Some(T::ProxyType::default()), + Box::new(call), + ); + + assert_last_event::(Event::ProxyExecuted { result: Ok(()) }.into()); + + Ok(()) } - remove_announcement { - let a in 0 .. T::MaxPending::get().saturating_sub(1); - let p in 1 .. (T::MaxProxies::get().saturating_sub(1)) => add_proxies::(p, None)?; - let caller: T::AccountId = account("target", p.saturating_sub(1), SEED); - T::Currency::make_free_balance_be(&caller, half_max_balance::()); + #[benchmark] + fn remove_announcement( + a: Linear<0, { T::MaxPending::get() - 1 }>, + p: Linear<1, { T::MaxProxies::get() - 1 }>, + ) -> Result<(), BenchmarkError> { + add_proxies::(p, None)?; + // In this case the caller is the "target" proxy + let caller: T::AccountId = account("target", p - 1, SEED); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); + // ... and "real" is the traditional caller. This is not a typo. 
let real: T::AccountId = whitelisted_caller(); let real_lookup = T::Lookup::unlookup(real); - let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); + let call: ::RuntimeCall = + frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::announce( RawOrigin::Signed(caller.clone()).into(), real_lookup.clone(), T::CallHasher::hash_of(&call), )?; add_announcements::(a, Some(caller.clone()), None)?; - }: _(RawOrigin::Signed(caller.clone()), real_lookup, T::CallHasher::hash_of(&call)) - verify { + + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + real_lookup, + T::CallHasher::hash_of(&call), + ); + let (announcements, _) = Announcements::::get(&caller); assert_eq!(announcements.len() as u32, a); + + Ok(()) } - reject_announcement { - let a in 0 .. T::MaxPending::get().saturating_sub(1); - let p in 1 .. (T::MaxProxies::get().saturating_sub(1)) => add_proxies::(p, None)?; - let caller: T::AccountId = account("target", p.saturating_sub(1), SEED); + #[benchmark] + fn reject_announcement( + a: Linear<0, { T::MaxPending::get() - 1 }>, + p: Linear<1, { T::MaxProxies::get() - 1 }>, + ) -> Result<(), BenchmarkError> { + add_proxies::(p, None)?; + // In this case the caller is the "target" proxy + let caller: T::AccountId = account("target", p - 1, SEED); let caller_lookup = T::Lookup::unlookup(caller.clone()); - T::Currency::make_free_balance_be(&caller, half_max_balance::()); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); + // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); let real_lookup = T::Lookup::unlookup(real.clone()); - let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); + let call: ::RuntimeCall = + frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::announce( RawOrigin::Signed(caller.clone()).into(), real_lookup, T::CallHasher::hash_of(&call), )?; add_announcements::(a, Some(caller.clone()), None)?; - }: _(RawOrigin::Signed(real), caller_lookup, T::CallHasher::hash_of(&call)) - verify { + + #[extrinsic_call] + _( + RawOrigin::Signed(real), + caller_lookup, + T::CallHasher::hash_of(&call), + ); + let (announcements, _) = Announcements::::get(&caller); assert_eq!(announcements.len() as u32, a); + + Ok(()) } - announce { - let a in 0 .. T::MaxPending::get().saturating_sub(1); - let p in 1 .. (T::MaxProxies::get().saturating_sub(1)) => add_proxies::(p, None)?; - let caller: T::AccountId = account("target", p.saturating_sub(1), SEED); - T::Currency::make_free_balance_be(&caller, half_max_balance::()); + #[benchmark] + fn announce( + a: Linear<0, { T::MaxPending::get() - 1 }>, + p: Linear<1, { T::MaxProxies::get() - 1 }>, + ) -> Result<(), BenchmarkError> { + add_proxies::(p, None)?; + // In this case the caller is the "target" proxy + let caller: T::AccountId = account("target", p - 1, SEED); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); + // ... and "real" is the traditional caller. This is not a typo. 
let real: T::AccountId = whitelisted_caller(); let real_lookup = T::Lookup::unlookup(real.clone()); add_announcements::(a, Some(caller.clone()), None)?; - let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); + let call: ::RuntimeCall = + frame_system::Call::::remark { remark: vec![] }.into(); let call_hash = T::CallHasher::hash_of(&call); - }: _(RawOrigin::Signed(caller.clone()), real_lookup, call_hash) - verify { - assert_last_event::(Event::Announced { real, proxy: caller, call_hash }.into()); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), real_lookup, call_hash); + + assert_last_event::( + Event::Announced { + real, + proxy: caller, + call_hash, + } + .into(), + ); + + Ok(()) } - add_proxy { - let p in 1 .. (T::MaxProxies::get().saturating_sub(1)) => add_proxies::(p, None)?; + #[benchmark] + fn add_proxy(p: Linear<1, { T::MaxProxies::get() - 1 }>) -> Result<(), BenchmarkError> { + add_proxies::(p, None)?; let caller: T::AccountId = whitelisted_caller(); let real = T::Lookup::unlookup(account("target", T::MaxProxies::get(), SEED)); - }: _( - RawOrigin::Signed(caller.clone()), - real, - T::ProxyType::default(), - BlockNumberFor::::zero() - ) - verify { + + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + real, + T::ProxyType::default(), + BlockNumberFor::::zero(), + ); + let (proxies, _) = Proxies::::get(caller); - assert_eq!(proxies.len() as u32, p.saturating_add(1)); + assert_eq!(proxies.len() as u32, p + 1); + + Ok(()) } - remove_proxy { - let p in 1 .. (T::MaxProxies::get().saturating_sub(1)) => add_proxies::(p, None)?; + #[benchmark] + fn remove_proxy(p: Linear<1, { T::MaxProxies::get() - 1 }>) -> Result<(), BenchmarkError> { + add_proxies::(p, None)?; let caller: T::AccountId = whitelisted_caller(); let delegate = T::Lookup::unlookup(account("target", 0, SEED)); - }: _( - RawOrigin::Signed(caller.clone()), - delegate, - T::ProxyType::default(), - BlockNumberFor::::zero() - ) - verify { + + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + delegate, + T::ProxyType::default(), + BlockNumberFor::::zero(), + ); + let (proxies, _) = Proxies::::get(caller); - assert_eq!(proxies.len() as u32, p.saturating_sub(1)); + assert_eq!(proxies.len() as u32, p - 1); + + Ok(()) } - remove_proxies { - let p in 1 .. (T::MaxProxies::get().saturating_sub(1)) => add_proxies::(p, None)?; + #[benchmark] + fn remove_proxies(p: Linear<1, { T::MaxProxies::get() - 1 }>) -> Result<(), BenchmarkError> { + add_proxies::(p, None)?; let caller: T::AccountId = whitelisted_caller(); - }: _(RawOrigin::Signed(caller.clone())) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone())); + let (proxies, _) = Proxies::::get(caller); assert_eq!(proxies.len() as u32, 0); + + Ok(()) } - create_pure { - let p in 1 .. 
(T::MaxProxies::get().saturating_sub(1)) => add_proxies::(p, None)?; + #[benchmark] + fn create_pure(p: Linear<1, { T::MaxProxies::get() - 1 }>) -> Result<(), BenchmarkError> { + add_proxies::(p, None)?; let caller: T::AccountId = whitelisted_caller(); - }: _( - RawOrigin::Signed(caller.clone()), - T::ProxyType::default(), - BlockNumberFor::::zero(), - 0 - ) - verify { + + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + T::ProxyType::default(), + BlockNumberFor::::zero(), + 0, + ); + let pure_account = Pallet::::pure_account(&caller, &T::ProxyType::default(), 0, None); - assert_last_event::(Event::PureCreated { - pure: pure_account, - who: caller, - proxy_type: T::ProxyType::default(), - disambiguation_index: 0, - }.into()); - } + assert_last_event::( + Event::PureCreated { + pure: pure_account, + who: caller, + proxy_type: T::ProxyType::default(), + disambiguation_index: 0, + } + .into(), + ); - kill_pure { - let p in 0 .. (T::MaxProxies::get().saturating_sub(2)); + Ok(()) + } + #[benchmark] + fn kill_pure(p: Linear<0, { T::MaxProxies::get() - 2 }>) -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); @@ -244,17 +342,149 @@ benchmarks! { RawOrigin::Signed(whitelisted_caller()).into(), T::ProxyType::default(), BlockNumberFor::::zero(), - 0 + 0, )?; - let height = system::Pallet::::block_number(); - let ext_index = system::Pallet::::extrinsic_index().unwrap_or(0); + let height = T::BlockNumberProvider::current_block_number(); + let ext_index = frame_system::Pallet::::extrinsic_index().unwrap_or(0); let pure_account = Pallet::::pure_account(&caller, &T::ProxyType::default(), 0, None); add_proxies::(p, Some(pure_account.clone()))?; - ensure!(Proxies::::contains_key(&pure_account), "pure proxy not created"); - }: _(RawOrigin::Signed(pure_account.clone()), caller_lookup, T::ProxyType::default(), 0, height, ext_index) - verify { + ensure!( + Proxies::::contains_key(&pure_account), + "pure proxy not created" + ); + + #[extrinsic_call] + _( + RawOrigin::Signed(pure_account.clone()), + caller_lookup, + T::ProxyType::default(), + 0, + height, + ext_index, + ); + assert!(!Proxies::::contains_key(&pure_account)); + + Ok(()) + } + + #[benchmark] + fn poke_deposit() -> Result<(), BenchmarkError> { + // Create accounts using the same pattern as other benchmarks + let account_1: T::AccountId = account("account", 1, SEED); + let account_2: T::AccountId = account("account", 2, SEED); + let account_3: T::AccountId = account("account", 3, SEED); + + // Fund accounts + T::Currency::make_free_balance_be(&account_1, BalanceOf::::max_value() / 100u8.into()); + T::Currency::make_free_balance_be(&account_2, BalanceOf::::max_value() / 100u8.into()); + T::Currency::make_free_balance_be(&account_3, BalanceOf::::max_value() / 100u8.into()); + + // Add proxy relationships + Proxy::::add_proxy( + RawOrigin::Signed(account_1.clone()).into(), + T::Lookup::unlookup(account_2.clone()), + T::ProxyType::default(), + BlockNumberFor::::zero(), + )?; + Proxy::::add_proxy( + RawOrigin::Signed(account_2.clone()).into(), + T::Lookup::unlookup(account_3.clone()), + T::ProxyType::default(), + BlockNumberFor::::zero(), + )?; + let (proxies, initial_proxy_deposit) = Proxies::::get(&account_2); + assert!(!initial_proxy_deposit.is_zero()); + assert_eq!( + initial_proxy_deposit, + T::Currency::reserved_balance(&account_2) + ); + + // Create announcement + Proxy::::announce( 
+ RawOrigin::Signed(account_2.clone()).into(), + T::Lookup::unlookup(account_1.clone()), + T::CallHasher::hash_of(&("add_announcement", 1)), + )?; + let (announcements, initial_announcement_deposit) = Announcements::::get(&account_2); + assert!(!initial_announcement_deposit.is_zero()); + assert_eq!( + initial_announcement_deposit.saturating_add(initial_proxy_deposit), + T::Currency::reserved_balance(&account_2) + ); + + // Artificially inflate deposits and reserve the extra amount + let extra_proxy_deposit = initial_proxy_deposit; // Double the deposit + let extra_announcement_deposit = initial_announcement_deposit; // Double the deposit + let total = extra_proxy_deposit.saturating_add(extra_announcement_deposit); + + T::Currency::reserve(&account_2, total)?; + + let initial_reserved = T::Currency::reserved_balance(&account_2); + assert_eq!(initial_reserved, total.saturating_add(total)); // Double + + // Update storage with increased deposits + Proxies::::insert( + &account_2, + ( + proxies, + initial_proxy_deposit.saturating_add(extra_proxy_deposit), + ), + ); + Announcements::::insert( + &account_2, + ( + announcements, + initial_announcement_deposit.saturating_add(extra_announcement_deposit), + ), + ); + + // Verify artificial state + let (_, inflated_proxy_deposit) = Proxies::::get(&account_2); + let (_, inflated_announcement_deposit) = Announcements::::get(&account_2); + assert_eq!( + inflated_proxy_deposit, + initial_proxy_deposit.saturating_add(extra_proxy_deposit) + ); + assert_eq!( + inflated_announcement_deposit, + initial_announcement_deposit.saturating_add(extra_announcement_deposit) + ); + + #[extrinsic_call] + _(RawOrigin::Signed(account_2.clone())); + + // Verify results + let (_, final_proxy_deposit) = Proxies::::get(&account_2); + let (_, final_announcement_deposit) = Announcements::::get(&account_2); + assert_eq!(final_proxy_deposit, initial_proxy_deposit); + assert_eq!(final_announcement_deposit, initial_announcement_deposit); + + let final_reserved = T::Currency::reserved_balance(&account_2); + assert_eq!(final_reserved, initial_reserved.saturating_sub(total)); + + // Verify events + assert_has_event::( + Event::DepositPoked { + who: account_2.clone(), + kind: DepositKind::Proxies, + old_deposit: inflated_proxy_deposit, + new_deposit: final_proxy_deposit, + } + .into(), + ); + assert_last_event::( + Event::DepositPoked { + who: account_2, + kind: DepositKind::Announcements, + old_deposit: inflated_announcement_deposit, + new_deposit: final_announcement_deposit, + } + .into(), + ); + + Ok(()) } impl_benchmark_test_suite!(Proxy, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/pallets/proxy/src/lib.rs b/pallets/proxy/src/lib.rs index 9a7aab857a..a4325bd099 100644 --- a/pallets/proxy/src/lib.rs +++ b/pallets/proxy/src/lib.rs @@ -1,13 +1,13 @@ // This file is part of Substrate. -// + // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 -// + // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0/ +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -34,24 +34,12 @@ mod tests; pub mod weights; extern crate alloc; - use alloc::{boxed::Box, vec}; -use codec::{Decode, Encode, MaxEncodedLen}; -use frame_support::pallet_prelude::{Pays, Weight}; -use frame_support::{ - BoundedVec, - dispatch::GetDispatchInfo, - ensure, - traits::{Currency, Get, InstanceFilter, IsSubType, IsType, OriginTrait, ReservableCurrency}, +use frame::{ + prelude::*, + traits::{Currency, InstanceFilter, ReservableCurrency}, }; -use frame_system::{self as system, ensure_signed, pallet_prelude::BlockNumberFor}; pub use pallet::*; -use scale_info::{TypeInfo, prelude::cmp::Ordering}; -use sp_io::hashing::blake2_256; -use sp_runtime::{ - DispatchError, DispatchResult, RuntimeDebug, - traits::{Dispatchable, Hash, Saturating, StaticLookup, TrailingZeroInput, Zero}, -}; use subtensor_macros::freeze_struct; pub use weights::WeightInfo; @@ -60,6 +48,9 @@ type CallHashOf = <::CallHasher as Hash>::Output; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +pub type BlockNumberFor = + <::BlockNumberProvider as BlockNumberProvider>::BlockNumber; + type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; /// The parameters under which a particular account has a proxy relationship with some other @@ -100,11 +91,29 @@ pub struct Announcement { height: BlockNumber, } -#[frame_support::pallet] +/// The type of deposit +#[derive( + Encode, + Decode, + Clone, + Copy, + Eq, + PartialEq, + RuntimeDebug, + MaxEncodedLen, + TypeInfo, + DecodeWithMemTracking, +)] +pub enum DepositKind { + /// Proxy registration deposit + Proxies, + /// Announcement deposit + Announcements, +} + +#[frame::pallet] pub mod pallet { - use super::{DispatchResult, *}; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; + use super::*; #[pallet::pallet] pub struct Pallet(_); @@ -134,7 +143,7 @@ pub mod pallet { + Member + Ord + PartialOrd - + InstanceFilter<::RuntimeCall> + + frame::traits::InstanceFilter<::RuntimeCall> + Default + MaxEncodedLen; @@ -180,6 +189,30 @@ pub mod pallet { /// into a pre-existing storage value. #[pallet::constant] type AnnouncementDepositFactor: Get>; + + /// Query the current block number. + /// + /// Must return monotonically increasing values when called from consecutive blocks. + /// Can be configured to return either: + /// - the local block number of the runtime via `frame_system::Pallet` + /// - a remote block number, eg from the relay chain through `RelaychainDataProvider` + /// - an arbitrary value through a custom implementation of the trait + /// + /// There is currently no migration provided to "hot-swap" block number providers and it may + /// result in undefined behavior when doing so. Parachains are therefore best off setting + /// this to their local block number provider if they have the pallet already deployed. 
+ /// + /// Suggested values: + /// - Solo- and Relay-chains: `frame_system::Pallet` + /// - Parachains that may produce blocks sparingly or only when needed (on-demand): + /// - already have the pallet deployed: `frame_system::Pallet` + /// - are freshly deploying this pallet: `RelaychainDataProvider` + /// - Parachains with a reliably block production rate (PLO or bulk-coretime): + /// - already have the pallet deployed: `frame_system::Pallet` + /// - are freshly deploying this pallet: no strong recommendation. Both local and remote + /// providers can be used. Relay provider can be a bit better in cases where the + /// parachain is lagging its block production to avoid clock skew. + type BlockNumberProvider: BlockNumberProvider; } #[pallet::call] @@ -196,13 +229,11 @@ pub mod pallet { #[pallet::call_index(0)] #[pallet::weight({ let di = call.get_dispatch_info(); - let inner_call_weight = match di.pays_fee { - Pays::Yes => di.call_weight, - Pays::No => Weight::zero(), - }; - let base_weight = T::WeightInfo::proxy(T::MaxProxies::get()) - .saturating_add(T::DbWeight::get().reads_writes(1, 1)); - (base_weight.saturating_add(inner_call_weight), di.class) + (T::WeightInfo::proxy(T::MaxProxies::get()) + // AccountData for inner call origin accountdata. + .saturating_add(T::DbWeight::get().reads_writes(1, 1)) + .saturating_add(di.call_weight), + di.class, di.pays_fee) })] pub fn proxy( origin: OriginFor, @@ -283,12 +314,12 @@ pub mod pallet { /// /// - `proxy_type`: The type of the proxy that the sender will be registered as over the /// new account. This will almost always be the most permissive `ProxyType` possible to - /// allow for maximum flexibility. + /// allow for maximum flexibility. /// - `index`: A disambiguation index, in case this is called multiple times in the same - /// transaction (e.g. with `utility::batch`). Unless you're using `batch` you probably just - /// want to use `0`. + /// transaction (e.g. with `utility::batch`). Unless you're using `batch` you probably just + /// want to use `0`. /// - `delay`: The announcement period required of the initial proxy. Will generally be - /// zero. + /// zero. /// /// Fails with `Duplicate` if this has already been called in this transaction, from the /// same sender, with the same parameters. @@ -409,7 +440,7 @@ pub mod pallet { let announcement = Announcement { real: real.clone(), call_hash, - height: system::Pallet::::block_number(), + height: T::BlockNumberProvider::current_block_number(), }; Announcements::::try_mutate(&who, |(pending, deposit)| { @@ -526,7 +557,7 @@ pub mod pallet { let def = Self::find_proxy(&real, &delegate, force_proxy_type)?; let call_hash = T::CallHasher::hash_of(&call); - let now = system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); Self::edit_announcements(&delegate, |ann| { ann.real != real || ann.call_hash != call_hash @@ -538,6 +569,109 @@ pub mod pallet { Ok(()) } + + /// Poke / Adjust deposits made for proxies and announcements based on current values. + /// This can be used by accounts to possibly lower their locked amount. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// The transaction fee is waived if the deposit amount has changed. + /// + /// Emits `DepositPoked` if successful. 
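+        /// A sketch of the recomputation performed for each deposit kind (the
+        /// config constants are real; the formula layout is just illustrative):
+        ///
+        ///   expected = DepositBase + DepositFactor * items.len()
+        ///
+        /// using `ProxyDepositBase`/`ProxyDepositFactor` for the proxy list and
+        /// `AnnouncementDepositBase`/`AnnouncementDepositFactor` for announcements;
+        /// the difference against the stored deposit is then reserved or
+        /// unreserved via `rejig_deposit`.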
+ #[pallet::call_index(10)] + #[pallet::weight(T::WeightInfo::poke_deposit())] + pub fn poke_deposit(origin: OriginFor) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + let mut deposit_updated = false; + + // Check and update proxy deposits + Proxies::::try_mutate_exists(&who, |maybe_proxies| -> DispatchResult { + let (proxies, old_deposit) = maybe_proxies.take().unwrap_or_default(); + let maybe_new_deposit = Self::rejig_deposit( + &who, + old_deposit, + T::ProxyDepositBase::get(), + T::ProxyDepositFactor::get(), + proxies.len(), + )?; + + match maybe_new_deposit { + Some(new_deposit) if new_deposit != old_deposit => { + *maybe_proxies = Some((proxies, new_deposit)); + deposit_updated = true; + Self::deposit_event(Event::DepositPoked { + who: who.clone(), + kind: DepositKind::Proxies, + old_deposit, + new_deposit, + }); + } + Some(_) => { + *maybe_proxies = Some((proxies, old_deposit)); + } + None => { + *maybe_proxies = None; + if !old_deposit.is_zero() { + deposit_updated = true; + Self::deposit_event(Event::DepositPoked { + who: who.clone(), + kind: DepositKind::Proxies, + old_deposit, + new_deposit: BalanceOf::::zero(), + }); + } + } + } + Ok(()) + })?; + + // Check and update announcement deposits + Announcements::::try_mutate_exists(&who, |maybe_announcements| -> DispatchResult { + let (announcements, old_deposit) = maybe_announcements.take().unwrap_or_default(); + let maybe_new_deposit = Self::rejig_deposit( + &who, + old_deposit, + T::AnnouncementDepositBase::get(), + T::AnnouncementDepositFactor::get(), + announcements.len(), + )?; + + match maybe_new_deposit { + Some(new_deposit) if new_deposit != old_deposit => { + *maybe_announcements = Some((announcements, new_deposit)); + deposit_updated = true; + Self::deposit_event(Event::DepositPoked { + who: who.clone(), + kind: DepositKind::Announcements, + old_deposit, + new_deposit, + }); + } + Some(_) => { + *maybe_announcements = Some((announcements, old_deposit)); + } + None => { + *maybe_announcements = None; + if !old_deposit.is_zero() { + deposit_updated = true; + Self::deposit_event(Event::DepositPoked { + who: who.clone(), + kind: DepositKind::Announcements, + old_deposit, + new_deposit: BalanceOf::::zero(), + }); + } + } + } + Ok(()) + })?; + + Ok(if deposit_updated { + Pays::No.into() + } else { + Pays::Yes.into() + }) + } } #[pallet::event] @@ -584,6 +718,13 @@ pub mod pallet { // The index originally passed to `create_pure` when this pure proxy was created. disambiguation_index: u16, }, + /// A deposit stored for proxies or announcements was poked / updated. + DepositPoked { + who: T::AccountId, + kind: DepositKind, + old_deposit: BalanceOf, + new_deposit: BalanceOf, + }, } #[pallet::error] @@ -635,6 +776,22 @@ pub mod pallet { ), ValueQuery, >; + + #[pallet::view_functions_experimental] + impl Pallet { + /// Check if a `RuntimeCall` is allowed for a given `ProxyType`. + pub fn check_permissions( + call: ::RuntimeCall, + proxy_type: T::ProxyType, + ) -> bool { + proxy_type.filter(&call) + } + + /// Check if one `ProxyType` is a subset of another `ProxyType`. 
+ pub fn is_superset(to_check: T::ProxyType, against: T::ProxyType) -> bool { + to_check.is_superset(&against) + } + } } impl Pallet { @@ -677,8 +834,8 @@ impl Pallet { ) -> T::AccountId { let (height, ext_index) = maybe_when.unwrap_or_else(|| { ( - system::Pallet::::block_number(), - system::Pallet::::extrinsic_index().unwrap_or_default(), + T::BlockNumberProvider::current_block_number(), + frame_system::Pallet::::extrinsic_index().unwrap_or_default(), ) }); let entropy = ( @@ -723,14 +880,10 @@ impl Pallet { .try_insert(i, proxy_def) .map_err(|_| Error::::TooMany)?; let new_deposit = Self::deposit(proxies.len() as u32); - match new_deposit.cmp(deposit) { - Ordering::Greater => { - T::Currency::reserve(delegator, new_deposit.saturating_sub(*deposit))?; - } - Ordering::Less => { - T::Currency::unreserve(delegator, deposit.saturating_sub(new_deposit)); - } - Ordering::Equal => (), + if new_deposit > *deposit { + T::Currency::reserve(delegator, new_deposit.saturating_sub(*deposit))?; + } else if new_deposit < *deposit { + T::Currency::unreserve(delegator, (*deposit).saturating_sub(new_deposit)); } *deposit = new_deposit; Self::deposit_event(Event::::ProxyAdded { @@ -770,14 +923,10 @@ impl Pallet { .ok_or(Error::::NotFound)?; proxies.remove(i); let new_deposit = Self::deposit(proxies.len() as u32); - match new_deposit.cmp(&old_deposit) { - Ordering::Greater => { - T::Currency::reserve(delegator, new_deposit.saturating_sub(old_deposit))?; - } - Ordering::Less => { - T::Currency::unreserve(delegator, old_deposit.saturating_sub(new_deposit)); - } - Ordering::Equal => (), + if new_deposit > old_deposit { + T::Currency::reserve(delegator, new_deposit.saturating_sub(old_deposit))?; + } else if new_deposit < old_deposit { + T::Currency::unreserve(delegator, old_deposit.saturating_sub(new_deposit)); } if !proxies.is_empty() { *x = Some((proxies, new_deposit)) @@ -813,14 +962,17 @@ impl Pallet { } else { base.saturating_add(factor.saturating_mul((len as u32).into())) }; - match new_deposit.cmp(&old_deposit) { - Ordering::Greater => { - T::Currency::reserve(who, new_deposit.saturating_sub(old_deposit))?; - } - Ordering::Less => { - T::Currency::unreserve(who, old_deposit.saturating_sub(new_deposit)); + if new_deposit > old_deposit { + T::Currency::reserve(who, new_deposit.saturating_sub(old_deposit))?; + } else if new_deposit < old_deposit { + let excess = old_deposit.saturating_sub(new_deposit); + let remaining_unreserved = T::Currency::unreserve(who, excess); + if !remaining_unreserved.is_zero() { + defensive!( + "Failed to unreserve full amount. (Requested, Actual)", + (excess, excess.saturating_sub(remaining_unreserved)) + ); } - Ordering::Equal => (), } Ok(if len == 0 { None } else { Some(new_deposit) }) } @@ -829,12 +981,12 @@ impl Pallet { F: FnMut(&Announcement, BlockNumberFor>) -> bool, >( delegate: &T::AccountId, - mut f: F, + f: F, ) -> DispatchResult { Announcements::::try_mutate_exists(delegate, |x| { let (mut pending, old_deposit) = x.take().ok_or(Error::::NotFound)?; let orig_pending_len = pending.len(); - pending.retain(&mut f); + pending.retain(f); ensure!(orig_pending_len > pending.len(), Error::::NotFound); *x = Self::rejig_deposit( delegate, @@ -868,6 +1020,7 @@ impl Pallet { real: T::AccountId, call: ::RuntimeCall, ) { + use frame::traits::{InstanceFilter as _, OriginTrait as _}; // This is a freshly authenticated new account, the origin restrictions doesn't apply. 
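        // Sketch of what the closure below enforces (an assumption spelled out for
        // the reader, not new behavior): the inner call must satisfy the delegate's
        // `ProxyType` filter on top of whatever filter the origin already carries,
        // roughly `allowed(call) = proxy_type.filter(call) && existing_filter(call)`.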
let mut origin: T::RuntimeOrigin = frame_system::RawOrigin::Signed(real).into(); origin.add_filter(move |c: &::RuntimeCall| { diff --git a/pallets/proxy/src/tests.rs b/pallets/proxy/src/tests.rs index e350386164..4e5e4722c3 100644 --- a/pallets/proxy/src/tests.rs +++ b/pallets/proxy/src/tests.rs @@ -18,24 +18,21 @@ // Tests for Proxy Pallet #![cfg(test)] +#![allow( + clippy::arithmetic_side_effects, + clippy::unwrap_used, + clippy::indexing_slicing +)] use super::*; - use crate as proxy; use alloc::{vec, vec::Vec}; -use codec::{Decode, DecodeWithMemTracking, Encode}; -use frame_support::{ - assert_noop, assert_ok, derive_impl, - traits::{ConstU32, ConstU64, Contains}, -}; -use sp_core::H256; -use sp_runtime::{BuildStorage, DispatchError, RuntimeDebug, traits::BlakeTwo256}; +use frame::testing_prelude::*; type Block = frame_system::mocking::MockBlock; -frame_support::construct_runtime!( - pub enum Test - { +construct_runtime!( + pub enum Test { System: frame_system = 1, Balances: pallet_balances = 2, Proxy: proxy = 3, @@ -87,7 +84,7 @@ impl Default for ProxyType { Self::Any } } -impl InstanceFilter for ProxyType { +impl frame::traits::InstanceFilter for ProxyType { fn filter(&self, c: &RuntimeCall) -> bool { match self { ProxyType::Any => true, @@ -115,45 +112,54 @@ impl Contains for BaseFilter { } } } + +parameter_types! { + pub static ProxyDepositBase: u64 = 1; + pub static ProxyDepositFactor: u64 = 1; + pub static AnnouncementDepositBase: u64 = 1; + pub static AnnouncementDepositFactor: u64 = 1; +} + impl Config for Test { type RuntimeEvent = RuntimeEvent; type RuntimeCall = RuntimeCall; type Currency = Balances; type ProxyType = ProxyType; - type ProxyDepositBase = ConstU64<1>; - type ProxyDepositFactor = ConstU64<1>; + type ProxyDepositBase = ProxyDepositBase; + type ProxyDepositFactor = ProxyDepositFactor; type MaxProxies = ConstU32<4>; type WeightInfo = (); type CallHasher = BlakeTwo256; type MaxPending = ConstU32<2>; - type AnnouncementDepositBase = ConstU64<1>; - type AnnouncementDepositFactor = ConstU64<1>; + type AnnouncementDepositBase = AnnouncementDepositBase; + type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } use super::{Call as ProxyCall, Event as ProxyEvent}; use frame_system::Call as SystemCall; -use pallet_balances::{Call as BalancesCall, Event as BalancesEvent}; +use pallet_balances::{Call as BalancesCall, Error as BalancesError, Event as BalancesEvent}; use pallet_utility::{Call as UtilityCall, Event as UtilityEvent}; type SystemError = frame_system::Error; -pub fn new_test_ext() -> sp_io::TestExternalities { +pub fn new_test_ext() -> TestState { let mut t = frame_system::GenesisConfig::::default() .build_storage() - .expect("Expected to not panic"); + .unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 10), (2, 10), (3, 10), (4, 10), (5, 3)], - dev_accounts: None, + ..Default::default() } .assimilate_storage(&mut t) - .expect("Expected to not panic"); - let mut ext = sp_io::TestExternalities::new(t); + .unwrap(); + let mut ext = TestState::new(t); ext.execute_with(|| System::set_block_number(1)); ext } fn last_events(n: usize) -> Vec { - system::Pallet::::events() + frame_system::Pallet::::events() .into_iter() .rev() .take(n) @@ -380,7 +386,7 @@ fn delayed_requires_pre_announcement() { ); let call_hash = BlakeTwo256::hash_of(&call); assert_ok!(Proxy::announce(RuntimeOrigin::signed(2), 1, call_hash)); - system::Pallet::::set_block_number(2); + frame_system::Pallet::::set_block_number(2); 
assert_ok!(Proxy::proxy_announced( RuntimeOrigin::signed(0), 2, @@ -417,7 +423,7 @@ fn proxy_announced_removes_announcement_and_returns_deposit() { e ); - system::Pallet::::set_block_number(2); + frame_system::Pallet::::set_block_number(2); assert_ok!(Proxy::proxy_announced( RuntimeOrigin::signed(0), 3, @@ -928,7 +934,6 @@ fn pure_works() { anon, 5 )); - assert_eq!(Balances::free_balance(6), 0); assert_ok!(Proxy::proxy(RuntimeOrigin::signed(1), anon, None, call)); System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); assert_eq!(Balances::free_balance(6), 1); @@ -946,7 +951,7 @@ fn pure_works() { None, call.clone() )); - let de: DispatchError = DispatchError::from(Error::::NoPermission).stripped(); + let de = DispatchError::from(Error::::NoPermission).stripped(); System::assert_last_event(ProxyEvent::ProxyExecuted { result: Err(de) }.into()); assert_noop!( Proxy::kill_pure(RuntimeOrigin::signed(1), 1, ProxyType::Any, 0, 1, 0), @@ -964,24 +969,286 @@ fn pure_works() { Proxy::proxy(RuntimeOrigin::signed(1), anon, None, call.clone()), Error::::NotProxy ); + }); +} - // Actually kill the pure proxy. - assert_ok!(Proxy::kill_pure( - RuntimeOrigin::signed(anon), - 1, +#[test] +fn poke_deposit_works_for_proxy_deposits() { + new_test_ext().execute_with(|| { + // Add a proxy and check initial deposit + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(1), + 2, ProxyType::Any, - 0, - 1, 0 )); + assert_eq!(Balances::reserved_balance(1), 2); // Base(1) + Factor(1) * 1 + + // Change the proxy deposit base to trigger deposit update + ProxyDepositBase::set(2); + let result = Proxy::poke_deposit(RuntimeOrigin::signed(1)); + assert_ok!(result.as_ref()); + assert_eq!(result.unwrap().pays_fee, Pays::No); + assert_eq!(Balances::reserved_balance(1), 3); // New Base(2) + Factor(1) * 1 System::assert_last_event( - ProxyEvent::PureKilled { - pure: anon, - spawner: 1, - proxy_type: ProxyType::Any, - disambiguation_index: 0, + ProxyEvent::DepositPoked { + who: 1, + kind: DepositKind::Proxies, + old_deposit: 2, + new_deposit: 3, + } + .into(), + ); + assert!(System::events().iter().any(|record| matches!( + record.event, + RuntimeEvent::Proxy(Event::DepositPoked { .. }) + ))); + }); +} + +#[test] +fn poke_deposit_works_for_announcement_deposits() { + new_test_ext().execute_with(|| { + // Setup proxy and make announcement + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(1), + 3, + ProxyType::Any, + 1 + )); + assert_eq!(Balances::reserved_balance(1), 2); // Base(1) + Factor(1) * 1 + assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 1, [1; 32].into())); + let announcements = Announcements::::get(3); + assert_eq!( + announcements.0, + vec![Announcement { + real: 1, + call_hash: [1; 32].into(), + height: 1 + }] + ); + assert_eq!(Balances::reserved_balance(3), announcements.1); + let initial_deposit = Balances::reserved_balance(3); + + // Change announcement deposit base to trigger update + AnnouncementDepositBase::set(2); + let result = Proxy::poke_deposit(RuntimeOrigin::signed(3)); + assert_ok!(result.as_ref()); + assert_eq!(result.unwrap().pays_fee, Pays::No); + let new_deposit = initial_deposit.saturating_add(1); // Base increased by 1 + assert_eq!(Balances::reserved_balance(3), new_deposit); + System::assert_last_event( + ProxyEvent::DepositPoked { + who: 3, + kind: DepositKind::Announcements, + old_deposit: initial_deposit, + new_deposit, } .into(), ); + assert!(System::events().iter().any(|record| matches!( + record.event, + RuntimeEvent::Proxy(Event::DepositPoked { .. 
}) + ))); + }); +} + +#[test] +fn poke_deposit_charges_fee_when_deposit_unchanged() { + new_test_ext().execute_with(|| { + // Add a proxy and check initial deposit + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(1), + 3, + ProxyType::Any, + 0 + )); + assert_eq!(Balances::reserved_balance(1), 2); // Base(1) + Factor(1) * 1 + + // Poke the deposit without changing deposit required and check fee + let result = Proxy::poke_deposit(RuntimeOrigin::signed(1)); + assert_ok!(result.as_ref()); + assert_eq!(result.unwrap().pays_fee, Pays::Yes); // Pays fee + assert_eq!(Balances::reserved_balance(1), 2); // No change + + // No event emitted + assert!(!System::events().iter().any(|record| matches!( + record.event, + RuntimeEvent::Proxy(Event::DepositPoked { .. }) + ))); + + // Add an announcement and check initial deposit + assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 1, [1; 32].into())); + let announcements = Announcements::::get(3); + assert_eq!( + announcements.0, + vec![Announcement { + real: 1, + call_hash: [1; 32].into(), + height: 1 + }] + ); + assert_eq!(Balances::reserved_balance(3), announcements.1); + let initial_deposit = Balances::reserved_balance(3); + + // Poke the deposit without changing deposit required and check fee + let result = Proxy::poke_deposit(RuntimeOrigin::signed(3)); + assert_ok!(result.as_ref()); + assert_eq!(result.unwrap().pays_fee, Pays::Yes); // Pays fee + assert_eq!(Balances::reserved_balance(3), initial_deposit); // No change + + // No event emitted + assert!(!System::events().iter().any(|record| matches!( + record.event, + RuntimeEvent::Proxy(Event::DepositPoked { .. }) + ))); + }); +} + +#[test] +fn poke_deposit_handles_insufficient_balance() { + new_test_ext().execute_with(|| { + // Setup with account that has minimal balance + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(5), + 3, + ProxyType::Any, + 0 + )); + let initial_deposit = Balances::reserved_balance(5); + + // Change deposit base to require more than available balance + ProxyDepositBase::set(10); + + // Poking should fail due to insufficient balance + assert_noop!( + Proxy::poke_deposit(RuntimeOrigin::signed(5)), + BalancesError::::InsufficientBalance, + ); + + // Original deposit should remain unchanged + assert_eq!(Balances::reserved_balance(5), initial_deposit); + }); +} + +#[test] +fn poke_deposit_updates_both_proxy_and_announcement_deposits() { + new_test_ext().execute_with(|| { + // Setup both proxy and announcement for the same account + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(1), + 2, + ProxyType::Any, + 0 + )); + assert_eq!(Balances::reserved_balance(1), 2); // Base(1) + Factor(1) * 1 + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(2), + 3, + ProxyType::Any, + 1 + )); + assert_eq!(Balances::reserved_balance(2), 2); // Base(1) + Factor(1) * 1 + assert_ok!(Proxy::announce(RuntimeOrigin::signed(2), 1, [1; 32].into())); + let announcements = Announcements::::get(2); + assert_eq!( + announcements.0, + vec![Announcement { + real: 1, + call_hash: [1; 32].into(), + height: 1 + }] + ); + assert_eq!(announcements.1, 2); // Base(1) + Factor(1) * 1 + + // Record initial deposits + let initial_proxy_deposit = Proxies::::get(2).1; + let initial_announcement_deposit = Announcements::::get(2).1; + + // Total reserved = deposit for proxy + deposit for announcement + assert_eq!( + Balances::reserved_balance(2), + initial_proxy_deposit.saturating_add(initial_announcement_deposit) + ); + + // Change both deposit requirements + ProxyDepositBase::set(2); + 
AnnouncementDepositBase::set(2); + + // Poke deposits - should update both deposits and emit two events + let result = Proxy::poke_deposit(RuntimeOrigin::signed(2)); + assert_ok!(result.as_ref()); + assert_eq!(result.unwrap().pays_fee, Pays::No); + + // Check both deposits were updated + let (_, new_proxy_deposit) = Proxies::::get(2); + let (_, new_announcement_deposit) = Announcements::::get(2); + assert_eq!(new_proxy_deposit, 3); // Base(2) + Factor(1) * 1 + assert_eq!(new_announcement_deposit, 3); // Base(2) + Factor(1) * 1 + assert_eq!( + Balances::reserved_balance(2), + new_proxy_deposit.saturating_add(new_announcement_deposit) + ); + + // Verify both events were emitted in the correct order + let events = System::events(); + let relevant_events: Vec<_> = events + .iter() + .filter(|record| { + matches!( + record.event, + RuntimeEvent::Proxy(ProxyEvent::DepositPoked { .. }) + ) + }) + .collect(); + + assert_eq!(relevant_events.len(), 2); + + // First event should be for Proxies + assert_eq!( + relevant_events[0].event, + ProxyEvent::DepositPoked { + who: 2, + kind: DepositKind::Proxies, + old_deposit: initial_proxy_deposit, + new_deposit: new_proxy_deposit, + } + .into() + ); + + // Second event should be for Announcements + assert_eq!( + relevant_events[1].event, + ProxyEvent::DepositPoked { + who: 2, + kind: DepositKind::Announcements, + old_deposit: initial_announcement_deposit, + new_deposit: new_announcement_deposit, + } + .into() + ); + + // Poking again should charge fee as nothing changes + let result = Proxy::poke_deposit(RuntimeOrigin::signed(2)); + assert_ok!(result.as_ref()); + assert_eq!(result.unwrap().pays_fee, Pays::Yes); + + // Verify deposits remained the same + assert_eq!(Proxies::::get(2).1, new_proxy_deposit); + assert_eq!(Announcements::::get(2).1, new_announcement_deposit); + assert_eq!( + Balances::reserved_balance(2), + new_proxy_deposit.saturating_add(new_announcement_deposit) + ); + }); +} + +#[test] +fn poke_deposit_fails_for_unsigned_origin() { + new_test_ext().execute_with(|| { + assert_noop!( + Proxy::poke_deposit(RuntimeOrigin::none()), + DispatchError::BadOrigin, + ); }); } diff --git a/pallets/proxy/src/weights.rs b/pallets/proxy/src/weights.rs index 3093298e3e..bb51872b2e 100644 --- a/pallets/proxy/src/weights.rs +++ b/pallets/proxy/src/weights.rs @@ -18,36 +18,38 @@ //! Autogenerated weights for `pallet_proxy` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-03-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` +//! HOSTNAME: `99fc4dfa9c86`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// frame-omni-bencher +// v1 // benchmark // pallet -// --chain=dev +// --extrinsic=* +// --runtime=target/production/wbuild/kitchensink-runtime/kitchensink_runtime.wasm +// --pallet=pallet_proxy +// --header=/__w/polkadot-sdk/polkadot-sdk/substrate/HEADER-APACHE2 +// --output=/__w/polkadot-sdk/polkadot-sdk/substrate/frame/proxy/src/weights.rs +// --wasm-execution=compiled // --steps=50 // --repeat=20 -// --pallet=pallet_proxy +// --heap-pages=4096 +// --template=substrate/.maintain/frame-umbrella-weight-template.hbs // --no-storage-info -// --no-median-slopes // --no-min-squares -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --output=./substrate/frame/proxy/src/weights.rs -// --header=./substrate/HEADER-APACHE2 -// --template=./substrate/.maintain/frame-weight-template.hbs +// --no-median-slopes +// --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic,pallet_nomination_pools,pallet_remark,pallet_transaction_storage,pallet_election_provider_multi_block,pallet_election_provider_multi_block::signed,pallet_election_provider_multi_block::unsigned,pallet_election_provider_multi_block::verifier #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] #![allow(missing_docs)] +#![allow(dead_code)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use core::marker::PhantomData; +use frame::weights_prelude::*; /// Weight functions needed for `pallet_proxy`. pub trait WeightInfo { @@ -61,6 +63,7 @@ pub trait WeightInfo { fn remove_proxies(p: u32, ) -> Weight; fn create_pure(p: u32, ) -> Weight; fn kill_pure(p: u32, ) -> Weight; + fn poke_deposit() -> Weight; } /// Weights for `pallet_proxy` using the Substrate node and recommended hardware. @@ -75,12 +78,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. fn proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `306 + p * (37 ±0)` + // Measured: `339 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 18_280_000 picoseconds. - Weight::from_parts(19_655_145, 4706) - // Standard Error: 2_345 - .saturating_add(Weight::from_parts(36_306, 0).saturating_mul(p.into())) + // Minimum execution time: 23_353_000 picoseconds. + Weight::from_parts(25_084_085, 4706) + // Standard Error: 2_569 + .saturating_add(Weight::from_parts(33_574, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) } /// Storage: `Proxy::Proxies` (r:1 w:0) @@ -97,14 +100,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. fn proxy_announced(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `633 + a * (68 ±0) + p * (37 ±0)` + // Measured: `666 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 41_789_000 picoseconds. - Weight::from_parts(41_812_078, 5698) - // Standard Error: 3_694 - .saturating_add(Weight::from_parts(163_029, 0).saturating_mul(a.into())) - // Standard Error: 3_817 - .saturating_add(Weight::from_parts(79_539, 0).saturating_mul(p.into())) + // Minimum execution time: 47_196_000 picoseconds. 
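// Reviewer note (editorial, not produced by the bencher): each function in
// this file returns `Weight::from_parts(ref_time, proof_size)` plus one
// linear term per benchmark component, i.e. roughly
//
//     w(a, p) = base + slope_a * a + slope_p * p
//
// where the quoted "Standard Error" is the uncertainty of each fitted slope,
// and `T::DbWeight` adds the cost of the storage reads/writes listed above.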
+ Weight::from_parts(48_686_812, 5698) + // Standard Error: 3_711 + .saturating_add(Weight::from_parts(171_107, 0).saturating_mul(a.into())) + // Standard Error: 3_834 + .saturating_add(Weight::from_parts(34_523, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -116,14 +119,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. fn remove_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `403 + a * (68 ±0)` + // Measured: `436 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 22_475_000 picoseconds. - Weight::from_parts(22_666_821, 5698) - // Standard Error: 1_797 - .saturating_add(Weight::from_parts(170_629, 0).saturating_mul(a.into())) - // Standard Error: 1_857 - .saturating_add(Weight::from_parts(18_799, 0).saturating_mul(p.into())) + // Minimum execution time: 29_341_000 picoseconds. + Weight::from_parts(30_320_504, 5698) + // Standard Error: 1_821 + .saturating_add(Weight::from_parts(158_572, 0).saturating_mul(a.into())) + // Standard Error: 1_881 + .saturating_add(Weight::from_parts(8_433, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -135,14 +138,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. fn reject_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `403 + a * (68 ±0)` + // Measured: `436 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 22_326_000 picoseconds. - Weight::from_parts(22_654_227, 5698) - // Standard Error: 1_859 - .saturating_add(Weight::from_parts(168_822, 0).saturating_mul(a.into())) - // Standard Error: 1_921 - .saturating_add(Weight::from_parts(21_839, 0).saturating_mul(p.into())) + // Minimum execution time: 28_422_000 picoseconds. + Weight::from_parts(29_754_384, 5698) + // Standard Error: 1_840 + .saturating_add(Weight::from_parts(176_827, 0).saturating_mul(a.into())) + // Standard Error: 1_901 + .saturating_add(Weight::from_parts(9_607, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -156,14 +159,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. fn announce(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `420 + a * (68 ±0) + p * (37 ±0)` + // Measured: `453 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 31_551_000 picoseconds. - Weight::from_parts(32_205_445, 5698) - // Standard Error: 4_089 - .saturating_add(Weight::from_parts(167_596, 0).saturating_mul(a.into())) - // Standard Error: 4_225 - .saturating_add(Weight::from_parts(67_833, 0).saturating_mul(p.into())) + // Minimum execution time: 36_885_000 picoseconds. + Weight::from_parts(38_080_636, 5698) + // Standard Error: 2_642 + .saturating_add(Weight::from_parts(157_335, 0).saturating_mul(a.into())) + // Standard Error: 2_730 + .saturating_add(Weight::from_parts(28_872, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -172,12 +175,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. 
fn add_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `161 + p * (37 ±0)` + // Measured: `194 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 21_495_000 picoseconds. - Weight::from_parts(22_358_457, 4706) - // Standard Error: 1_606 - .saturating_add(Weight::from_parts(64_322, 0).saturating_mul(p.into())) + // Minimum execution time: 27_016_000 picoseconds. + Weight::from_parts(28_296_216, 4706) + // Standard Error: 1_643 + .saturating_add(Weight::from_parts(50_271, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -186,12 +189,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. fn remove_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `161 + p * (37 ±0)` + // Measured: `194 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 21_495_000 picoseconds. - Weight::from_parts(22_579_308, 4706) - // Standard Error: 2_571 - .saturating_add(Weight::from_parts(62_404, 0).saturating_mul(p.into())) + // Minimum execution time: 26_955_000 picoseconds. + Weight::from_parts(28_379_566, 4706) + // Standard Error: 1_547 + .saturating_add(Weight::from_parts(45_784, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -200,12 +203,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. fn remove_proxies(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `161 + p * (37 ±0)` + // Measured: `194 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 20_541_000 picoseconds. - Weight::from_parts(21_456_750, 4706) - // Standard Error: 1_697 - .saturating_add(Weight::from_parts(45_387, 0).saturating_mul(p.into())) + // Minimum execution time: 24_656_000 picoseconds. + Weight::from_parts(25_821_878, 4706) + // Standard Error: 2_300 + .saturating_add(Weight::from_parts(33_972, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -214,12 +217,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. fn create_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `173` + // Measured: `206` // Estimated: `4706` - // Minimum execution time: 22_809_000 picoseconds. - Weight::from_parts(23_878_644, 4706) - // Standard Error: 1_600 - .saturating_add(Weight::from_parts(10_149, 0).saturating_mul(p.into())) + // Minimum execution time: 28_416_000 picoseconds. + Weight::from_parts(29_662_728, 4706) + // Standard Error: 1_851 + .saturating_add(Weight::from_parts(29_928, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -228,15 +231,30 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[0, 30]`. fn kill_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `198 + p * (37 ±0)` + // Measured: `231 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 20_993_000 picoseconds. - Weight::from_parts(22_067_418, 4706) - // Standard Error: 1_673 - .saturating_add(Weight::from_parts(52_703, 0).saturating_mul(p.into())) + // Minimum execution time: 25_505_000 picoseconds. 
+ Weight::from_parts(26_780_627, 4706) + // Standard Error: 1_581 + .saturating_add(Weight::from_parts(33_085, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + fn poke_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `519` + // Estimated: `5698` + // Minimum execution time: 46_733_000 picoseconds. + Weight::from_parts(47_972_000, 5698) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } } // For backwards compatibility and tests. @@ -250,12 +268,12 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `306 + p * (37 ±0)` + // Measured: `339 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 18_280_000 picoseconds. - Weight::from_parts(19_655_145, 4706) - // Standard Error: 2_345 - .saturating_add(Weight::from_parts(36_306, 0).saturating_mul(p.into())) + // Minimum execution time: 23_353_000 picoseconds. + Weight::from_parts(25_084_085, 4706) + // Standard Error: 2_569 + .saturating_add(Weight::from_parts(33_574, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) } /// Storage: `Proxy::Proxies` (r:1 w:0) @@ -272,14 +290,14 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn proxy_announced(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `633 + a * (68 ±0) + p * (37 ±0)` + // Measured: `666 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 41_789_000 picoseconds. - Weight::from_parts(41_812_078, 5698) - // Standard Error: 3_694 - .saturating_add(Weight::from_parts(163_029, 0).saturating_mul(a.into())) - // Standard Error: 3_817 - .saturating_add(Weight::from_parts(79_539, 0).saturating_mul(p.into())) + // Minimum execution time: 47_196_000 picoseconds. + Weight::from_parts(48_686_812, 5698) + // Standard Error: 3_711 + .saturating_add(Weight::from_parts(171_107, 0).saturating_mul(a.into())) + // Standard Error: 3_834 + .saturating_add(Weight::from_parts(34_523, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -291,14 +309,14 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn remove_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `403 + a * (68 ±0)` + // Measured: `436 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 22_475_000 picoseconds. - Weight::from_parts(22_666_821, 5698) - // Standard Error: 1_797 - .saturating_add(Weight::from_parts(170_629, 0).saturating_mul(a.into())) - // Standard Error: 1_857 - .saturating_add(Weight::from_parts(18_799, 0).saturating_mul(p.into())) + // Minimum execution time: 29_341_000 picoseconds. 
+ Weight::from_parts(30_320_504, 5698) + // Standard Error: 1_821 + .saturating_add(Weight::from_parts(158_572, 0).saturating_mul(a.into())) + // Standard Error: 1_881 + .saturating_add(Weight::from_parts(8_433, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -310,14 +328,14 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn reject_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `403 + a * (68 ±0)` + // Measured: `436 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 22_326_000 picoseconds. - Weight::from_parts(22_654_227, 5698) - // Standard Error: 1_859 - .saturating_add(Weight::from_parts(168_822, 0).saturating_mul(a.into())) - // Standard Error: 1_921 - .saturating_add(Weight::from_parts(21_839, 0).saturating_mul(p.into())) + // Minimum execution time: 28_422_000 picoseconds. + Weight::from_parts(29_754_384, 5698) + // Standard Error: 1_840 + .saturating_add(Weight::from_parts(176_827, 0).saturating_mul(a.into())) + // Standard Error: 1_901 + .saturating_add(Weight::from_parts(9_607, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -331,14 +349,14 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn announce(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `420 + a * (68 ±0) + p * (37 ±0)` + // Measured: `453 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 31_551_000 picoseconds. - Weight::from_parts(32_205_445, 5698) - // Standard Error: 4_089 - .saturating_add(Weight::from_parts(167_596, 0).saturating_mul(a.into())) - // Standard Error: 4_225 - .saturating_add(Weight::from_parts(67_833, 0).saturating_mul(p.into())) + // Minimum execution time: 36_885_000 picoseconds. + Weight::from_parts(38_080_636, 5698) + // Standard Error: 2_642 + .saturating_add(Weight::from_parts(157_335, 0).saturating_mul(a.into())) + // Standard Error: 2_730 + .saturating_add(Weight::from_parts(28_872, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -347,12 +365,12 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn add_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `161 + p * (37 ±0)` + // Measured: `194 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 21_495_000 picoseconds. - Weight::from_parts(22_358_457, 4706) - // Standard Error: 1_606 - .saturating_add(Weight::from_parts(64_322, 0).saturating_mul(p.into())) + // Minimum execution time: 27_016_000 picoseconds. + Weight::from_parts(28_296_216, 4706) + // Standard Error: 1_643 + .saturating_add(Weight::from_parts(50_271, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -361,12 +379,12 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn remove_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `161 + p * (37 ±0)` + // Measured: `194 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 21_495_000 picoseconds. - Weight::from_parts(22_579_308, 4706) - // Standard Error: 2_571 - .saturating_add(Weight::from_parts(62_404, 0).saturating_mul(p.into())) + // Minimum execution time: 26_955_000 picoseconds. 
+ Weight::from_parts(28_379_566, 4706) + // Standard Error: 1_547 + .saturating_add(Weight::from_parts(45_784, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -375,12 +393,12 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn remove_proxies(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `161 + p * (37 ±0)` + // Measured: `194 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 20_541_000 picoseconds. - Weight::from_parts(21_456_750, 4706) - // Standard Error: 1_697 - .saturating_add(Weight::from_parts(45_387, 0).saturating_mul(p.into())) + // Minimum execution time: 24_656_000 picoseconds. + Weight::from_parts(25_821_878, 4706) + // Standard Error: 2_300 + .saturating_add(Weight::from_parts(33_972, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -389,12 +407,12 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn create_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `173` + // Measured: `206` // Estimated: `4706` - // Minimum execution time: 22_809_000 picoseconds. - Weight::from_parts(23_878_644, 4706) - // Standard Error: 1_600 - .saturating_add(Weight::from_parts(10_149, 0).saturating_mul(p.into())) + // Minimum execution time: 28_416_000 picoseconds. + Weight::from_parts(29_662_728, 4706) + // Standard Error: 1_851 + .saturating_add(Weight::from_parts(29_928, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -403,13 +421,28 @@ impl WeightInfo for () { /// The range of component `p` is `[0, 30]`. fn kill_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `198 + p * (37 ±0)` + // Measured: `231 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 20_993_000 picoseconds. - Weight::from_parts(22_067_418, 4706) - // Standard Error: 1_673 - .saturating_add(Weight::from_parts(52_703, 0).saturating_mul(p.into())) + // Minimum execution time: 25_505_000 picoseconds. + Weight::from_parts(26_780_627, 4706) + // Standard Error: 1_581 + .saturating_add(Weight::from_parts(33_085, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + fn poke_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `519` + // Estimated: `5698` + // Minimum execution time: 46_733_000 picoseconds. 
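// Reviewer note: unlike the extrinsics above, `poke_deposit` has no benchmark
// component. It always touches the same three storage items (Proxy::Proxies,
// System::Account, Proxy::Announcements), so its weight is a constant plus a
// fixed 3 reads / 3 writes.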
+        Weight::from_parts(47_972_000, 5698)
+            .saturating_add(RocksDbWeight::get().reads(3_u64))
+            .saturating_add(RocksDbWeight::get().writes(3_u64))
+    }
 }
diff --git a/pallets/subtensor/rpc/src/lib.rs b/pallets/subtensor/rpc/src/lib.rs
index e3d5d8f1c1..2df65242d1 100644
--- a/pallets/subtensor/rpc/src/lib.rs
+++ b/pallets/subtensor/rpc/src/lib.rs
@@ -9,7 +9,7 @@ use jsonrpsee::{
 use sp_blockchain::HeaderBackend;
 use sp_runtime::{AccountId32, traits::Block as BlockT};
 use std::sync::Arc;
-use subtensor_runtime_common::{NetUid, TaoCurrency};
+use subtensor_runtime_common::{MechId, NetUid, TaoCurrency};

 use sp_api::ProvideRuntimeApi;

@@ -72,6 +72,15 @@ pub trait SubtensorCustomApi {
     fn get_all_metagraphs(&self, at: Option) -> RpcResult>;
     #[method(name = "subnetInfo_getMetagraph")]
     fn get_metagraph(&self, netuid: NetUid, at: Option) -> RpcResult>;
+    #[method(name = "subnetInfo_getAllMechagraphs")]
+    fn get_all_mechagraphs(&self, at: Option) -> RpcResult>;
+    #[method(name = "subnetInfo_getMechagraph")]
+    fn get_mechagraph(
+        &self,
+        netuid: NetUid,
+        mecid: MechId,
+        at: Option,
+    ) -> RpcResult>;
     #[method(name = "subnetInfo_getSubnetState")]
     fn get_subnet_state(&self, netuid: NetUid, at: Option) -> RpcResult>;
     #[method(name = "subnetInfo_getLockCost")]
@@ -83,6 +92,16 @@ pub trait SubtensorCustomApi {
         metagraph_index: Vec,
         at: Option,
     ) -> RpcResult>;
+    #[method(name = "subnetInfo_getSelectiveMechagraph")]
+    fn get_selective_mechagraph(
+        &self,
+        netuid: NetUid,
+        mecid: MechId,
+        metagraph_index: Vec,
+        at: Option,
+    ) -> RpcResult>;
+    #[method(name = "subnetInfo_getSubnetToPrune")]
+    fn get_subnet_to_prune(&self, at: Option) -> RpcResult>;
 }

 pub struct SubtensorCustom {
@@ -319,6 +338,16 @@ where
         }
     }

+    fn get_all_mechagraphs(&self, at: Option<::Hash>) -> RpcResult> {
+        let api = self.client.runtime_api();
+        let at = at.unwrap_or_else(|| self.client.info().best_hash);
+
+        match api.get_all_mechagraphs(at) {
+            Ok(result) => Ok(result.encode()),
+            Err(e) => Err(Error::RuntimeError(format!("Unable to get mechagraphs: {e:?}")).into()),
+        }
+    }
+
     fn get_dynamic_info(
         &self,
         netuid: NetUid,
@@ -352,6 +381,23 @@ where
         }
     }

+    fn get_mechagraph(
+        &self,
+        netuid: NetUid,
+        mecid: MechId,
+        at: Option<::Hash>,
+    ) -> RpcResult> {
+        let api = self.client.runtime_api();
+        let at = at.unwrap_or_else(|| self.client.info().best_hash);
+        match api.get_mechagraph(at, netuid, mecid) {
+            Ok(result) => Ok(result.encode()),
+            Err(e) => Err(Error::RuntimeError(format!(
+                "Unable to get mechagraph: {e:?}"
+            ))
+            .into()),
+        }
+    }
+
     fn get_subnet_state(
         &self,
         netuid: NetUid,
@@ -427,4 +473,37 @@ where
             }
         }
     }
+
+    fn get_selective_mechagraph(
+        &self,
+        netuid: NetUid,
+        mecid: MechId,
+        metagraph_index: Vec,
+        at: Option<::Hash>,
+    ) -> RpcResult> {
+        let api = self.client.runtime_api();
+        let at = at.unwrap_or_else(|| self.client.info().best_hash);
+
+        match api.get_selective_mechagraph(at, netuid, mecid, metagraph_index) {
+            Ok(result) => Ok(result.encode()),
+            Err(e) => {
+                Err(Error::RuntimeError(format!("Unable to get selective mechagraph: {e:?}")).into())
+            }
+        }
+    }
+
+    fn get_subnet_to_prune(
+        &self,
+        at: Option<::Hash>,
+    ) -> RpcResult> {
+        let api = self.client.runtime_api();
+        let at = at.unwrap_or_else(|| self.client.info().best_hash);
+
+        match api.get_subnet_to_prune(at) {
+            Ok(result) => Ok(result),
+            Err(e) => {
+                Err(Error::RuntimeError(format!("Unable to get subnet to prune: {e:?}")).into())
+            }
+        }
+    }
 }
diff --git a/pallets/subtensor/runtime-api/src/lib.rs
b/pallets/subtensor/runtime-api/src/lib.rs index 42d12eb686..a2df8a3518 100644 --- a/pallets/subtensor/runtime-api/src/lib.rs +++ b/pallets/subtensor/runtime-api/src/lib.rs @@ -12,7 +12,7 @@ use pallet_subtensor::rpc_info::{ subnet_info::{SubnetHyperparams, SubnetHyperparamsV2, SubnetInfo, SubnetInfov2}, }; use sp_runtime::AccountId32; -use subtensor_runtime_common::{AlphaCurrency, NetUid, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, MechId, NetUid, TaoCurrency}; // Here we declare the runtime API. It is implemented it the `impl` block in // src/neuron_info.rs, src/subnet_info.rs, and src/delegate_info.rs @@ -40,9 +40,13 @@ sp_api::decl_runtime_apis! { fn get_all_dynamic_info() -> Vec>>; fn get_all_metagraphs() -> Vec>>; fn get_metagraph(netuid: NetUid) -> Option>; + fn get_all_mechagraphs() -> Vec>>; + fn get_mechagraph(netuid: NetUid, mecid: MechId) -> Option>; fn get_dynamic_info(netuid: NetUid) -> Option>; fn get_subnet_state(netuid: NetUid) -> Option>; fn get_selective_metagraph(netuid: NetUid, metagraph_indexes: Vec) -> Option>; + fn get_selective_mechagraph(netuid: NetUid, subid: MechId, metagraph_indexes: Vec) -> Option>; + fn get_subnet_to_prune() -> Option; } pub trait StakeInfoRuntimeApi { diff --git a/pallets/subtensor/src/coinbase/reveal_commits.rs b/pallets/subtensor/src/coinbase/reveal_commits.rs index e7bc6dc008..889c41d96a 100644 --- a/pallets/subtensor/src/coinbase/reveal_commits.rs +++ b/pallets/subtensor/src/coinbase/reveal_commits.rs @@ -3,7 +3,7 @@ use ark_serialize::CanonicalDeserialize; use codec::Decode; use frame_support::{dispatch, traits::OriginTrait}; use scale_info::prelude::collections::VecDeque; -use subtensor_runtime_common::NetUid; +use subtensor_runtime_common::{MechId, NetUid}; use tle::{ curves::drand::TinyBLS381, stream_ciphers::AESGCMStreamCipherProvider, @@ -44,152 +44,159 @@ impl Pallet { // Weights revealed must have been committed during epoch `cur_epoch - reveal_period`. let reveal_epoch = cur_epoch.saturating_sub(reveal_period); - // Clean expired commits - for (epoch, _) in TimelockedWeightCommits::::iter_prefix(netuid) { - if epoch < reveal_epoch { - TimelockedWeightCommits::::remove(netuid, epoch); - } - } + // All mechanisms share the same epoch, so the reveal_period/reveal_epoch are also the same + // Reveal for all mechanisms + for mecid in 0..MechanismCountCurrent::::get(netuid).into() { + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid.into()); - // No commits to reveal until at least epoch reveal_period. - if cur_epoch < reveal_period { - log::trace!("Failed to reveal commit for subnet {netuid} Too early"); - return Ok(()); - } - - let mut entries = TimelockedWeightCommits::::take(netuid, reveal_epoch); - let mut unrevealed = VecDeque::new(); - - // Keep popping items off the front of the queue until we successfully reveal a commit. - while let Some((who, commit_block, serialized_compresssed_commit, round_number)) = - entries.pop_front() - { - // Try to get the round number from pallet_drand. - let pulse = match pallet_drand::Pulses::::get(round_number) { - Some(p) => p, - None => { - // Round number used was not found on the chain. Skip this commit. - log::trace!( - "Failed to reveal commit for subnet {netuid} submitted by {who:?} on block {commit_block} due to missing round number {round_number}; will retry every block in reveal epoch." 
- ); - unrevealed.push_back(( - who, - commit_block, - serialized_compresssed_commit, - round_number, - )); - continue; + // Clean expired commits + for (epoch, _) in TimelockedWeightCommits::::iter_prefix(netuid_index) { + if epoch < reveal_epoch { + TimelockedWeightCommits::::remove(netuid_index, epoch); } - }; + } - let reader = &mut &serialized_compresssed_commit[..]; - let commit = match TLECiphertext::::deserialize_compressed(reader) { - Ok(c) => c, - Err(e) => { - log::trace!( - "Failed to reveal commit for subnet {netuid} submitted by {who:?} due to error deserializing the commit: {e:?}" - ); - continue; - } - }; - - let signature_bytes = pulse - .signature - .strip_prefix(b"0x") - .unwrap_or(&pulse.signature); - - let sig_reader = &mut &signature_bytes[..]; - let sig = match ::SignatureGroup::deserialize_compressed( - sig_reader, - ) { - Ok(s) => s, - Err(e) => { - log::trace!( - "Failed to reveal commit for subnet {netuid} submitted by {who:?} due to error deserializing signature from drand pallet: {e:?}" - ); - continue; - } - }; + // No commits to reveal until at least epoch reveal_period. + if cur_epoch < reveal_period { + log::trace!("Failed to reveal commit for mechanism {netuid_index} Too early"); + return Ok(()); + } - let decrypted_bytes: Vec = match tld::( - commit, sig, - ) { - Ok(d) => d, - Err(e) => { - log::trace!( - "Failed to reveal commit for subnet {netuid} submitted by {who:?} due to error decrypting the commit: {e:?}" - ); - continue; - } - }; - - // ------------------------------------------------------------------ - // Try to decode payload with the new and legacy formats. - // ------------------------------------------------------------------ - let (uids, values, version_key) = { - let mut reader_new = &decrypted_bytes[..]; - if let Ok(payload) = WeightsTlockPayload::decode(&mut reader_new) { - // Verify hotkey matches committer - let mut hk_reader = &payload.hotkey[..]; - match T::AccountId::decode(&mut hk_reader) { - Ok(decoded_hotkey) if decoded_hotkey == who => { - (payload.uids, payload.values, payload.version_key) - } - Ok(_) => { - log::trace!( - "Failed to reveal commit for subnet {netuid} submitted by {who:?} due to hotkey mismatch in payload" - ); - continue; - } - Err(e) => { - let mut reader_legacy = &decrypted_bytes[..]; - match LegacyWeightsTlockPayload::decode(&mut reader_legacy) { - Ok(legacy) => (legacy.uids, legacy.values, legacy.version_key), - Err(_) => { - log::trace!( - "Failed to reveal commit for subnet {netuid} submitted by {who:?} due to error deserializing hotkey: {e:?}" - ); - continue; + let mut entries = TimelockedWeightCommits::::take(netuid_index, reveal_epoch); + let mut unrevealed = VecDeque::new(); + + // Keep popping items off the front of the queue until we successfully reveal a commit. + while let Some((who, commit_block, serialized_compresssed_commit, round_number)) = + entries.pop_front() + { + // Try to get the round number from pallet_drand. + let pulse = match pallet_drand::Pulses::::get(round_number) { + Some(p) => p, + None => { + // Round number used was not found on the chain. Skip this commit. + log::trace!( + "Failed to reveal commit for mechanism {netuid_index} submitted by {who:?} on block {commit_block} due to missing round number {round_number}; will retry every block in reveal epoch." 
+ ); + unrevealed.push_back(( + who, + commit_block, + serialized_compresssed_commit, + round_number, + )); + continue; + } + }; + + let reader = &mut &serialized_compresssed_commit[..]; + let commit = match TLECiphertext::::deserialize_compressed(reader) { + Ok(c) => c, + Err(e) => { + log::trace!( + "Failed to reveal commit for mechanism {netuid_index} submitted by {who:?} due to error deserializing the commit: {e:?}" + ); + continue; + } + }; + + let signature_bytes = pulse + .signature + .strip_prefix(b"0x") + .unwrap_or(&pulse.signature); + + let sig_reader = &mut &signature_bytes[..]; + let sig = match ::SignatureGroup::deserialize_compressed( + sig_reader, + ) { + Ok(s) => s, + Err(e) => { + log::trace!( + "Failed to reveal commit for mechanism {netuid_index} submitted by {who:?} due to error deserializing signature from drand pallet: {e:?}" + ); + continue; + } + }; + + let decrypted_bytes: Vec = match tld::( + commit, sig, + ) { + Ok(d) => d, + Err(e) => { + log::trace!( + "Failed to reveal commit for mechanism {netuid_index} submitted by {who:?} due to error decrypting the commit: {e:?}" + ); + continue; + } + }; + + // ------------------------------------------------------------------ + // Try to decode payload with the new and legacy formats. + // ------------------------------------------------------------------ + let (uids, values, version_key) = { + let mut reader_new = &decrypted_bytes[..]; + if let Ok(payload) = WeightsTlockPayload::decode(&mut reader_new) { + // Verify hotkey matches committer + let mut hk_reader = &payload.hotkey[..]; + match T::AccountId::decode(&mut hk_reader) { + Ok(decoded_hotkey) if decoded_hotkey == who => { + (payload.uids, payload.values, payload.version_key) + } + Ok(_) => { + log::trace!( + "Failed to reveal commit for mechanism {netuid_index} submitted by {who:?} due to hotkey mismatch in payload" + ); + continue; + } + Err(e) => { + let mut reader_legacy = &decrypted_bytes[..]; + match LegacyWeightsTlockPayload::decode(&mut reader_legacy) { + Ok(legacy) => (legacy.uids, legacy.values, legacy.version_key), + Err(_) => { + log::trace!( + "Failed to reveal commit for mechanism {netuid_index} submitted by {who:?} due to error deserializing hotkey: {e:?}" + ); + continue; + } } } } - } - } else { - // Fallback to legacy payload - let mut reader_legacy = &decrypted_bytes[..]; - match LegacyWeightsTlockPayload::decode(&mut reader_legacy) { - Ok(legacy) => (legacy.uids, legacy.values, legacy.version_key), - Err(e) => { - log::trace!( - "Failed to reveal commit for subnet {netuid} submitted by {who:?} due to error deserializing both payload formats: {e:?}" - ); - continue; + } else { + // Fallback to legacy payload + let mut reader_legacy = &decrypted_bytes[..]; + match LegacyWeightsTlockPayload::decode(&mut reader_legacy) { + Ok(legacy) => (legacy.uids, legacy.values, legacy.version_key), + Err(e) => { + log::trace!( + "Failed to reveal commit for mechanism {netuid_index} submitted by {who:?} due to error deserializing both payload formats: {e:?}" + ); + continue; + } } } + }; + + // ------------------------------------------------------------------ + // Apply weights + // ------------------------------------------------------------------ + if let Err(e) = Self::do_set_mechanism_weights( + T::RuntimeOrigin::signed(who.clone()), + netuid, + MechId::from(mecid), + uids, + values, + version_key, + ) { + log::trace!( + "Failed to `do_set_mechanism_weights` for mechanism {netuid_index} submitted by {who:?}: {e:?}" + ); + continue; } - }; - - // 
------------------------------------------------------------------
-            // Apply weights
-            // ------------------------------------------------------------------
-            if let Err(e) = Self::do_set_weights(
-                T::RuntimeOrigin::signed(who.clone()),
-                netuid,
-                uids,
-                values,
-                version_key,
-            ) {
-                log::trace!(
-                    "Failed to `do_set_weights` for subnet {netuid} submitted by {who:?}: {e:?}"
-                );
-                continue;
-            }
-            Self::deposit_event(Event::TimelockedWeightsRevealed(netuid, who));
-        }
+                Self::deposit_event(Event::TimelockedWeightsRevealed(netuid_index, who));
+            }

-        if !unrevealed.is_empty() {
-            TimelockedWeightCommits::::insert(netuid, reveal_epoch, unrevealed);
+            if !unrevealed.is_empty() {
+                TimelockedWeightCommits::::insert(netuid_index, reveal_epoch, unrevealed);
+            }
         }

         Ok(())
diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs
index 8bb10e0b16..6b09c9ed46 100644
--- a/pallets/subtensor/src/coinbase/root.rs
+++ b/pallets/subtensor/src/coinbase/root.rs
@@ -16,13 +16,13 @@
 // DEALINGS IN THE SOFTWARE.

 use super::*;
-use frame_support::dispatch::Pays;
-use frame_support::storage::IterableStorageDoubleMap;
-use frame_support::weights::Weight;
+use crate::CommitmentsInterface;
+use frame_support::{dispatch::Pays, weights::Weight};
 use safe_math::*;
 use sp_core::Get;
-use substrate_fixed::types::I64F64;
-use subtensor_runtime_common::{AlphaCurrency, Currency, NetUid, TaoCurrency};
+use substrate_fixed::types::{I64F64, U96F32};
+use subtensor_runtime_common::{AlphaCurrency, Currency, NetUid, NetUidStorageIndex, TaoCurrency};
+use subtensor_swap_interface::SwapHandler;

 impl Pallet {
     /// Fetches the total count of root network validators
@@ -362,54 +362,36 @@ impl Pallet {
     /// * 'NetworkRemoved': Emitted when a network is successfully removed.
     ///
     /// # Raises:
-    /// * 'SubNetworkDoesNotExist': If the specified network does not exist.
-    /// * 'NotSubnetOwner': If the caller does not own the specified subnet.
+    /// * 'SubnetNotExists': If the specified network does not exist or is the root network.
     ///
-    pub fn user_remove_network(coldkey: T::AccountId, netuid: NetUid) -> dispatch::DispatchResult {
-        // --- 1. Ensure this subnet exists.
+    pub fn do_dissolve_network(netuid: NetUid) -> dispatch::DispatchResult {
+        // 1. --- Ensure the network exists and is not the root network.
         ensure!(
-            Self::if_subnet_exist(netuid),
-            Error::::SubNetworkDoesNotExist
+            Self::if_subnet_exist(netuid) && netuid != NetUid::ROOT,
+            Error::::SubnetNotExists
         );

-        // --- 2. Ensure the caller owns this subnet.
-        ensure!(
-            SubnetOwner::::get(netuid) == coldkey,
-            Error::::NotSubnetOwner
-        );
-
-        // --- 4. Remove the subnet identity if it exists.
-        if SubnetIdentitiesV3::::take(netuid).is_some() {
-            Self::deposit_event(Event::SubnetIdentityRemoved(netuid));
-        }
+        // 2. --- Perform the cleanup before removing the network.
+        T::SwapInterface::dissolve_all_liquidity_providers(netuid)?;
+        Self::destroy_alpha_in_out_stakes(netuid)?;
+        T::SwapInterface::clear_protocol_liquidity(netuid)?;
+        T::CommitmentsInterface::purge_netuid(netuid);

-        // --- 5. Explicitly erase the network and all its parameters.
+        // 3. --- Remove the network.
         Self::remove_network(netuid);

-        // --- 6. Emit the NetworkRemoved event.
-        log::debug!("NetworkRemoved( netuid:{netuid:?} )");
+        // 4. --- Emit the NetworkRemoved event.
+        log::info!("NetworkRemoved( netuid:{netuid:?} )");
         Self::deposit_event(Event::NetworkRemoved(netuid));

-        // --- 7. Return success.
         Ok(())
     }

-    /// Removes a network (identified by netuid) and all associated parameters.
- /// - /// This function is responsible for cleaning up all the data associated with a network. - /// It ensures that all the storage values related to the network are removed, any - /// reserved balance is returned to the network owner, and the subnet identity is removed if it exists. - /// - /// # Args: - /// * 'netuid': ('u16'): The unique identifier of the network to be removed. - /// - /// # Note: - /// This function does not emit any events, nor does it raise any errors. It silently - /// returns if any internal checks fail. pub fn remove_network(netuid: NetUid) { - // --- 1. Return balance to subnet owner. + // --- 1. Get the owner and remove from SubnetOwner. let owner_coldkey: T::AccountId = SubnetOwner::::get(netuid); - let reserved_amount = Self::get_subnet_locked_balance(netuid); + SubnetOwner::::remove(netuid); // --- 2. Remove network count. SubnetworkN::::remove(netuid); @@ -427,27 +409,18 @@ impl Pallet { let _ = Uids::::clear_prefix(netuid, u32::MAX, None); let keys = Keys::::iter_prefix(netuid).collect::>(); let _ = Keys::::clear_prefix(netuid, u32::MAX, None); - let _ = Bonds::::clear_prefix(netuid, u32::MAX, None); - - // --- 7. Removes the weights for this subnet (do not remove). - let _ = Weights::::clear_prefix(netuid, u32::MAX, None); // --- 8. Iterate over stored weights and fill the matrix. - for (uid_i, weights_i) in - as IterableStorageDoubleMap>>::iter_prefix( - NetUid::ROOT, - ) - { + for (uid_i, weights_i) in Weights::::iter_prefix(NetUidStorageIndex::ROOT) { // Create a new vector to hold modified weights. let mut modified_weights = weights_i.clone(); - // Iterate over each weight entry to potentially update it. for (subnet_id, weight) in modified_weights.iter_mut() { + // If the root network had a weight pointing to this netuid, set it to 0 if subnet_id == &u16::from(netuid) { - // If the condition matches, modify the weight - *weight = 0; // Set weight to 0 for the matching subnet_id. + *weight = 0; } } - Weights::::insert(NetUid::ROOT, uid_i, modified_weights); + Weights::::insert(NetUidStorageIndex::ROOT, uid_i, modified_weights); } // --- 9. Remove various network-related parameters. @@ -455,11 +428,10 @@ impl Pallet { Trust::::remove(netuid); Active::::remove(netuid); Emission::::remove(netuid); - Incentive::::remove(netuid); + Consensus::::remove(netuid); Dividends::::remove(netuid); PruningScores::::remove(netuid); - LastUpdate::::remove(netuid); ValidatorPermit::::remove(netuid); ValidatorTrust::::remove(netuid); @@ -480,16 +452,200 @@ impl Pallet { POWRegistrationsThisInterval::::remove(netuid); BurnRegistrationsThisInterval::::remove(netuid); - // --- 11. Add the balance back to the owner. - Self::add_balance_to_coldkey_account(&owner_coldkey, reserved_amount.into()); - Self::set_subnet_locked_balance(netuid, TaoCurrency::ZERO); - SubnetOwner::::remove(netuid); + // --- 11. AMM / price / accounting. + // SubnetTAO, SubnetAlpha{In,InProvided,Out} are already cleared during dissolve/destroy. + SubnetAlphaInEmission::::remove(netuid); + SubnetAlphaOutEmission::::remove(netuid); + SubnetTaoInEmission::::remove(netuid); + SubnetVolume::::remove(netuid); + SubnetMovingPrice::::remove(netuid); + SubnetTaoProvided::::remove(netuid); + + // --- 13. Token / mechanism / registration toggles. + TokenSymbol::::remove(netuid); + SubnetMechanism::::remove(netuid); + SubnetOwnerHotkey::::remove(netuid); + NetworkRegistrationAllowed::::remove(netuid); + NetworkPowRegistrationAllowed::::remove(netuid); + + // --- 14. Locks & toggles. 
+ TransferToggle::::remove(netuid); + SubnetLocked::::remove(netuid); + LargestLocked::::remove(netuid); + + // --- 15. Mechanism step / emissions bookkeeping. + FirstEmissionBlockNumber::::remove(netuid); + PendingEmission::::remove(netuid); + PendingRootDivs::::remove(netuid); + PendingAlphaSwapped::::remove(netuid); + PendingOwnerCut::::remove(netuid); + BlocksSinceLastStep::::remove(netuid); + LastMechansimStepBlock::::remove(netuid); + LastAdjustmentBlock::::remove(netuid); + + // --- 16. Serving / rho / curves, and other per-net controls. + ServingRateLimit::::remove(netuid); + Rho::::remove(netuid); + AlphaSigmoidSteepness::::remove(netuid); + + MaxAllowedValidators::::remove(netuid); + AdjustmentInterval::::remove(netuid); + BondsMovingAverage::::remove(netuid); + BondsPenalty::::remove(netuid); + BondsResetOn::::remove(netuid); + WeightsSetRateLimit::::remove(netuid); + ValidatorPruneLen::::remove(netuid); + ScalingLawPower::::remove(netuid); + TargetRegistrationsPerInterval::::remove(netuid); + AdjustmentAlpha::::remove(netuid); + CommitRevealWeightsEnabled::::remove(netuid); + + Burn::::remove(netuid); + MinBurn::::remove(netuid); + MaxBurn::::remove(netuid); + MinDifficulty::::remove(netuid); + MaxDifficulty::::remove(netuid); + RegistrationsThisBlock::::remove(netuid); + EMAPriceHalvingBlocks::::remove(netuid); + RAORecycledForRegistration::::remove(netuid); + MaxRegistrationsPerBlock::::remove(netuid); + WeightsVersionKey::::remove(netuid); + + // --- 17. Subtoken / feature flags. + LiquidAlphaOn::::remove(netuid); + Yuma3On::::remove(netuid); + AlphaValues::::remove(netuid); + SubtokenEnabled::::remove(netuid); + ImmuneOwnerUidsLimit::::remove(netuid); + + // --- 18. Consensus aux vectors. + StakeWeight::::remove(netuid); + LoadedEmission::::remove(netuid); + + // --- 19. DMAPs where netuid is the FIRST key: clear by prefix. + let _ = BlockAtRegistration::::clear_prefix(netuid, u32::MAX, None); + let _ = Axons::::clear_prefix(netuid, u32::MAX, None); + let _ = NeuronCertificates::::clear_prefix(netuid, u32::MAX, None); + let _ = Prometheus::::clear_prefix(netuid, u32::MAX, None); + let _ = AlphaDividendsPerSubnet::::clear_prefix(netuid, u32::MAX, None); + let _ = TaoDividendsPerSubnet::::clear_prefix(netuid, u32::MAX, None); + let _ = PendingChildKeys::::clear_prefix(netuid, u32::MAX, None); + let _ = AssociatedEvmAddress::::clear_prefix(netuid, u32::MAX, None); + + // Commit-reveal / weights commits (all per-net prefixes): + let mechanisms: u8 = MechanismCountCurrent::::get(netuid).into(); + for subid in 0..mechanisms { + let netuid_index = Self::get_mechanism_storage_index(netuid, subid.into()); + LastUpdate::::remove(netuid_index); + Incentive::::remove(netuid_index); + let _ = WeightCommits::::clear_prefix(netuid_index, u32::MAX, None); + let _ = TimelockedWeightCommits::::clear_prefix(netuid_index, u32::MAX, None); + let _ = CRV3WeightCommits::::clear_prefix(netuid_index, u32::MAX, None); + let _ = CRV3WeightCommitsV2::::clear_prefix(netuid_index, u32::MAX, None); + let _ = Bonds::::clear_prefix(netuid_index, u32::MAX, None); + let _ = Weights::::clear_prefix(netuid_index, u32::MAX, None); + } + RevealPeriodEpochs::::remove(netuid); + MechanismCountCurrent::::remove(netuid); + MechanismEmissionSplit::::remove(netuid); - // --- 12. Remove subnet identity if it exists. + // Last hotkey swap (DMAP where netuid is FIRST key → easy) + let _ = LastHotkeySwapOnNetuid::::clear_prefix(netuid, u32::MAX, None); + + // --- 20. Identity maps across versions (netuid-scoped). 
+ SubnetIdentities::::remove(netuid); + SubnetIdentitiesV2::::remove(netuid); if SubnetIdentitiesV3::::contains_key(netuid) { SubnetIdentitiesV3::::remove(netuid); Self::deposit_event(Event::SubnetIdentityRemoved(netuid)); } + + // --- 21. DMAP / NMAP where netuid is NOT the first key → iterate & remove. + + // ChildkeyTake: (hot, netuid) → u16 + { + let to_rm: sp_std::vec::Vec = ChildkeyTake::::iter() + .filter_map(|(hot, n, _)| if n == netuid { Some(hot) } else { None }) + .collect(); + for hot in to_rm { + ChildkeyTake::::remove(&hot, netuid); + } + } + // ChildKeys: (parent, netuid) → Vec<...> + { + let to_rm: sp_std::vec::Vec = ChildKeys::::iter() + .filter_map(|(parent, n, _)| if n == netuid { Some(parent) } else { None }) + .collect(); + for parent in to_rm { + ChildKeys::::remove(&parent, netuid); + } + } + // ParentKeys: (child, netuid) → Vec<...> + { + let to_rm: sp_std::vec::Vec = ParentKeys::::iter() + .filter_map(|(child, n, _)| if n == netuid { Some(child) } else { None }) + .collect(); + for child in to_rm { + ParentKeys::::remove(&child, netuid); + } + } + // LastHotkeyEmissionOnNetuid: (hot, netuid) → α + { + let to_rm: sp_std::vec::Vec = LastHotkeyEmissionOnNetuid::::iter() + .filter_map(|(hot, n, _)| if n == netuid { Some(hot) } else { None }) + .collect(); + for hot in to_rm { + LastHotkeyEmissionOnNetuid::::remove(&hot, netuid); + } + } + // TotalHotkeyAlphaLastEpoch: (hot, netuid) → ... + // (TotalHotkeyAlpha and TotalHotkeyShares were already removed during dissolve.) + { + let to_rm_alpha_last: sp_std::vec::Vec = + TotalHotkeyAlphaLastEpoch::::iter() + .filter_map(|(hot, n, _)| if n == netuid { Some(hot) } else { None }) + .collect(); + for hot in to_rm_alpha_last { + TotalHotkeyAlphaLastEpoch::::remove(&hot, netuid); + } + } + // TransactionKeyLastBlock NMAP: (hot, netuid, name) → u64 + { + let to_rm: sp_std::vec::Vec<(T::AccountId, u16)> = TransactionKeyLastBlock::::iter() + .filter_map( + |((hot, n, name), _)| if n == netuid { Some((hot, name)) } else { None }, + ) + .collect(); + for (hot, name) in to_rm { + TransactionKeyLastBlock::::remove((hot, netuid, name)); + } + } + // StakingOperationRateLimiter NMAP: (hot, cold, netuid) → bool + { + let to_rm: sp_std::vec::Vec<(T::AccountId, T::AccountId)> = + StakingOperationRateLimiter::::iter() + .filter_map( + |((hot, cold, n), _)| { + if n == netuid { Some((hot, cold)) } else { None } + }, + ) + .collect(); + for (hot, cold) in to_rm { + StakingOperationRateLimiter::::remove((hot, cold, netuid)); + } + } + + // --- 22. Subnet leasing: remove mapping and any lease-scoped state linked to this netuid. + if let Some(lease_id) = SubnetUidToLeaseId::::take(netuid) { + SubnetLeases::::remove(lease_id); + let _ = SubnetLeaseShares::::clear_prefix(lease_id, u32::MAX, None); + AccumulatedLeaseDividends::::remove(lease_id); + } + + // --- Final removal logging. 
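// Reviewer sketch (not part of the patch): the step-21 blocks above all share
// one collect-then-remove shape, because deleting entries from a storage map
// while iterating it is unsound. Generically, with `SomeDoubleMap` standing in
// for the concrete maps handled above:
//
//     let to_rm: sp_std::vec::Vec<_> = SomeDoubleMap::<T>::iter()
//         .filter_map(|(key, n, _)| (n == netuid).then_some(key))
//         .collect();
//     for key in to_rm {
//         SomeDoubleMap::<T>::remove(&key, netuid);
//     }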
+ log::debug!( + "remove_network: netuid={netuid}, owner={owner_coldkey:?} removed successfully" + ); } #[allow(clippy::arithmetic_side_effects)] @@ -594,4 +750,38 @@ impl Pallet { pub fn remove_rate_limited_last_block(rate_limit_key: &RateLimitKey) { LastRateLimitedBlock::::remove(rate_limit_key); } + + pub fn get_network_to_prune() -> Option { + let current_block: u64 = Self::get_current_block_as_u64(); + + let mut candidate_netuid: Option = None; + let mut candidate_price: U96F32 = U96F32::saturating_from_num(u128::MAX); + let mut candidate_timestamp: u64 = u64::MAX; + + for (netuid, added) in NetworksAdded::::iter() { + if !added || netuid == NetUid::ROOT { + continue; + } + + let registered_at = NetworkRegisteredAt::::get(netuid); + + // Skip immune networks. + if current_block < registered_at.saturating_add(Self::get_network_immunity_period()) { + continue; + } + + let price: U96F32 = Self::get_moving_alpha_price(netuid); + + // If tie on price, earliest registration wins. + if price < candidate_price + || (price == candidate_price && registered_at < candidate_timestamp) + { + candidate_netuid = Some(netuid); + candidate_price = price; + candidate_timestamp = registered_at; + } + } + + candidate_netuid + } } diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index 7651a4162f..3f2f715df6 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -85,7 +85,7 @@ impl Pallet { ); if price_i < tao_in_ratio { tao_in_i = price_i.saturating_mul(U96F32::saturating_from_num(block_emission)); - alpha_in_i = alpha_emission_i; + alpha_in_i = block_emission; let difference_tao: U96F32 = default_tao_in_i.saturating_sub(tao_in_i); // Difference becomes buy. let buy_swap_result = Self::swap_tao_for_alpha( @@ -428,7 +428,7 @@ impl Pallet { (prop_alpha_dividends, tao_dividends) } - fn get_immune_owner_hotkeys(netuid: NetUid, coldkey: &T::AccountId) -> Vec { + fn get_owner_hotkeys(netuid: NetUid, coldkey: &T::AccountId) -> Vec { // Gather (block, uid, hotkey) only for hotkeys that have a UID and a registration block. let mut triples: Vec<(u64, u16, T::AccountId)> = OwnedHotkeys::::get(coldkey) .into_iter() @@ -445,28 +445,19 @@ impl Pallet { // Recent registration is priority so that we can let older keys expire (get non-immune) triples.sort_by(|(b1, u1, _), (b2, u2, _)| b2.cmp(b1).then(u1.cmp(u2))); - // Keep first ImmuneOwnerUidsLimit - let limit = ImmuneOwnerUidsLimit::::get(netuid).into(); - if triples.len() > limit { - triples.truncate(limit); - } - // Project to just hotkeys - let mut immune_hotkeys: Vec = + let mut owner_hotkeys: Vec = triples.into_iter().map(|(_, _, hk)| hk).collect(); // Insert subnet owner hotkey in the beginning of the list if valid and not // already present if let Ok(owner_hk) = SubnetOwnerHotkey::::try_get(netuid) { - if Uids::::get(netuid, &owner_hk).is_some() && !immune_hotkeys.contains(&owner_hk) { - immune_hotkeys.insert(0, owner_hk); - if immune_hotkeys.len() > limit { - immune_hotkeys.truncate(limit); - } + if Uids::::get(netuid, &owner_hk).is_some() && !owner_hotkeys.contains(&owner_hk) { + owner_hotkeys.insert(0, owner_hk); } } - immune_hotkeys + owner_hotkeys } pub fn distribute_dividends_and_incentives( @@ -498,7 +489,7 @@ impl Pallet { // Distribute mining incentives. 
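+        // Restating the selection rule in `get_network_to_prune` above on plain
+        // tuples (illustrative only; immune and root networks are filtered out
+        // before this comparison): lowest moving price wins, ties go to the
+        // earliest registration.
+        //
+        //     fn pick_prune_candidate(nets: &[(u16, u128, u64)]) -> Option<u16> {
+        //         // (netuid, price, registered_at)
+        //         nets.iter()
+        //             .min_by(|a, b| a.1.cmp(&b.1).then(a.2.cmp(&b.2)))
+        //             .map(|&(netuid, _, _)| netuid)
+        //     }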
let subnet_owner_coldkey = SubnetOwner::::get(netuid); - let owner_hotkeys = Self::get_immune_owner_hotkeys(netuid, &subnet_owner_coldkey); + let owner_hotkeys = Self::get_owner_hotkeys(netuid, &subnet_owner_coldkey); log::debug!("incentives: owner hotkeys: {owner_hotkeys:?}"); for (hotkey, incentive) in incentives { log::debug!("incentives: hotkey: {incentive:?}"); @@ -508,6 +499,17 @@ impl Pallet { log::debug!( "incentives: hotkey: {hotkey:?} is SN owner hotkey or associated hotkey, skipping {incentive:?}" ); + // Check if we should recycle or burn the incentive + match RecycleOrBurn::::try_get(netuid) { + Ok(RecycleOrBurnEnum::Recycle) => { + log::debug!("recycling {incentive:?}"); + Self::recycle_subnet_alpha(netuid, incentive); + } + Ok(RecycleOrBurnEnum::Burn) | Err(_) => { + log::debug!("burning {incentive:?}"); + Self::burn_subnet_alpha(netuid, incentive); + } + } continue; } @@ -657,7 +659,7 @@ impl Pallet { // Run the epoch. let hotkey_emission: Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> = - Self::epoch(netuid, pending_alpha.saturating_add(pending_swapped)); + Self::epoch_with_mechanisms(netuid, pending_alpha.saturating_add(pending_swapped)); log::debug!("hotkey_emission: {hotkey_emission:?}"); // Compute the pending validator alpha. diff --git a/pallets/subtensor/src/epoch/math.rs b/pallets/subtensor/src/epoch/math.rs index 11930bf26e..2cbbfae77d 100644 --- a/pallets/subtensor/src/epoch/math.rs +++ b/pallets/subtensor/src/epoch/math.rs @@ -1,96 +1,79 @@ // we get a compiler warning for this , even though the trait is used in the // quantile function. use crate::alloc::borrow::ToOwned; -#[allow(unused)] -use num_traits::float::Float; use safe_math::*; -use sp_runtime::traits::{CheckedAdd, Saturating}; -use sp_std::cmp::Ordering; +use sp_runtime::traits::CheckedAdd; use sp_std::vec; use substrate_fixed::transcendental::{exp, ln}; use substrate_fixed::types::{I32F32, I64F64}; -// TODO: figure out what cfg gate this needs to not be a warning in rustc -#[allow(unused)] use sp_std::vec::Vec; -#[allow(dead_code)] +pub fn get_safe(slice: &[T], idx: usize) -> T { + slice.get(idx).copied().unwrap_or_default() +} + pub fn fixed(val: f32) -> I32F32 { I32F32::saturating_from_num(val) } -#[allow(dead_code)] pub fn fixed_to_u16(x: I32F32) -> u16 { x.saturating_to_num::() } -#[allow(dead_code)] pub fn fixed_to_u64(x: I32F32) -> u64 { x.saturating_to_num::() } -#[allow(dead_code)] pub fn fixed64_to_u64(x: I64F64) -> u64 { x.saturating_to_num::() } -#[allow(dead_code)] pub fn fixed64_to_fixed32(x: I64F64) -> I32F32 { I32F32::saturating_from_num(x) } -#[allow(dead_code)] pub fn fixed32_to_fixed64(x: I32F32) -> I64F64 { I64F64::saturating_from_num(x) } -#[allow(dead_code)] pub fn u16_to_fixed(x: u16) -> I32F32 { I32F32::saturating_from_num(x) } -#[allow(dead_code)] pub fn u16_proportion_to_fixed(x: u16) -> I32F32 { I32F32::saturating_from_num(x).safe_div(I32F32::saturating_from_num(u16::MAX)) } -#[allow(dead_code)] pub fn fixed_to_fixed_u16_proportion(x: I32F32) -> I32F32 { x.safe_div(I32F32::saturating_from_num(u16::MAX)) } -#[allow(dead_code)] pub fn fixed_proportion_to_u16(x: I32F32) -> u16 { fixed_to_u16(x.saturating_mul(I32F32::saturating_from_num(u16::MAX))) } -#[allow(dead_code)] pub fn vec_fixed32_to_u64(vec: Vec) -> Vec { vec.into_iter().map(fixed_to_u64).collect() } -#[allow(dead_code)] pub fn vec_fixed64_to_fixed32(vec: Vec) -> Vec { vec.into_iter().map(fixed64_to_fixed32).collect() } -#[allow(dead_code)] pub fn vec_fixed32_to_fixed64(vec: Vec) -> Vec { 
vec.into_iter().map(fixed32_to_fixed64).collect() } -#[allow(dead_code)] pub fn vec_fixed64_to_u64(vec: Vec) -> Vec { vec.into_iter().map(fixed64_to_u64).collect() } -#[allow(dead_code)] pub fn vec_fixed_proportions_to_u16(vec: Vec) -> Vec { vec.into_iter().map(fixed_proportion_to_u16).collect() } -#[allow(dead_code)] // Max-upscale vector and convert to u16 so max_value = u16::MAX. Assumes non-negative normalized input. pub fn vec_max_upscale_to_u16(vec: &[I32F32]) -> Vec { let u16_max: I32F32 = I32F32::saturating_from_num(u16::MAX); @@ -136,7 +119,6 @@ pub fn vec_max_upscale_to_u16(vec: &[I32F32]) -> Vec { } } -#[allow(dead_code)] // Max-upscale u16 vector and convert to u16 so max_value = u16::MAX. Assumes u16 vector input. pub fn vec_u16_max_upscale_to_u16(vec: &[u16]) -> Vec { let vec_fixed: Vec = vec @@ -146,7 +128,6 @@ pub fn vec_u16_max_upscale_to_u16(vec: &[u16]) -> Vec { vec_max_upscale_to_u16(&vec_fixed) } -#[allow(dead_code)] // Checks if u16 vector, when normalized, has a max value not greater than a u16 ratio max_limit. pub fn check_vec_max_limited(vec: &[u16], max_limit: u16) -> bool { let max_limit_fixed: I32F32 = @@ -160,12 +141,10 @@ pub fn check_vec_max_limited(vec: &[u16], max_limit: u16) -> bool { max_value.is_none_or(|v| *v <= max_limit_fixed) } -#[allow(dead_code)] pub fn sum(x: &[I32F32]) -> I32F32 { x.iter().sum() } -#[allow(dead_code)] // Sums a Vector of type that has CheckedAdd trait. // Returns None if overflow occurs during sum using T::checked_add. // Returns Some(T::default()) if input vector is empty. @@ -184,14 +163,12 @@ where } // Return true when vector sum is zero. -#[allow(dead_code)] pub fn is_zero(vector: &[I32F32]) -> bool { let vector_sum: I32F32 = sum(vector); vector_sum == I32F32::saturating_from_num(0) } // Exp safe function with I32F32 output of I32F32 input. -#[allow(dead_code)] pub fn exp_safe(input: I32F32) -> I32F32 { let min_input: I32F32 = I32F32::saturating_from_num(-20); // <= 1/exp(-20) = 485 165 195,4097903 let max_input: I32F32 = I32F32::saturating_from_num(20); // <= exp(20) = 485 165 195,4097903 @@ -218,7 +195,6 @@ pub fn exp_safe(input: I32F32) -> I32F32 { } // Sigmoid safe function with I32F32 output of I32F32 input with offset kappa and (recommended) scaling 0 < rho <= 40. -#[allow(dead_code)] pub fn sigmoid_safe(input: I32F32, rho: I32F32, kappa: I32F32) -> I32F32 { let one: I32F32 = I32F32::saturating_from_num(1); let offset: I32F32 = input.saturating_sub(kappa); // (input - kappa) @@ -231,7 +207,6 @@ pub fn sigmoid_safe(input: I32F32, rho: I32F32, kappa: I32F32) -> I32F32 { } // Returns a bool vector where an item is true if the vector item is in topk values. -#[allow(dead_code, clippy::indexing_slicing)] pub fn is_topk(vector: &[I32F32], k: usize) -> Vec { let n: usize = vector.len(); let mut result: Vec = vec![true; n]; @@ -239,15 +214,16 @@ pub fn is_topk(vector: &[I32F32], k: usize) -> Vec { return result; } let mut idxs: Vec = (0..n).collect(); - idxs.sort_by_key(|&idx| &vector[idx]); // ascending stable sort + idxs.sort_by_key(|&idx| get_safe(vector, idx)); // ascending stable sort for &idx in idxs.iter().take(n.saturating_sub(k)) { - result[idx] = false; + if let Some(cell) = result.get_mut(idx) { + *cell = false; + } } result } // Returns a bool vector where an item is true if the vector item is in topk values and is non-zero. 
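+// Worked example for `is_topk` above (illustrative): for
+// vector = [0.1, 0.9, 0.4, 0.7] and k = 2, the ascending stable sort of
+// indices is [0, 2, 3, 1]; the first n - k = 2 of them (0 and 2) are cleared,
+// leaving [false, true, false, true], i.e. only the two largest entries survive.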
-#[allow(dead_code, clippy::indexing_slicing)] pub fn is_topk_nonzero(vector: &[I32F32], k: usize) -> Vec { let n: usize = vector.len(); let mut result: Vec = vector.iter().map(|&elem| elem != I32F32::from(0)).collect(); @@ -255,15 +231,16 @@ pub fn is_topk_nonzero(vector: &[I32F32], k: usize) -> Vec { return result; } let mut idxs: Vec = (0..n).collect(); - idxs.sort_by_key(|&idx| &vector[idx]); // ascending stable sort + idxs.sort_by_key(|&idx| get_safe(vector, idx)); // ascending stable sort for &idx in idxs.iter().take(n.saturating_sub(k)) { - result[idx] = false; + if let Some(cell) = result.get_mut(idx) { + *cell = false; + } } result } // Returns a normalized (sum to 1 except 0) copy of the input vector. -#[allow(dead_code)] pub fn normalize(x: &[I32F32]) -> Vec { let x_sum: I32F32 = sum(x); if x_sum != I32F32::saturating_from_num(0.0_f32) { @@ -274,7 +251,6 @@ pub fn normalize(x: &[I32F32]) -> Vec { } // Normalizes (sum to 1 except 0) the input vector directly in-place. -#[allow(dead_code)] pub fn inplace_normalize(x: &mut [I32F32]) { let x_sum: I32F32 = x.iter().sum(); if x_sum == I32F32::saturating_from_num(0.0_f32) { @@ -285,7 +261,6 @@ pub fn inplace_normalize(x: &mut [I32F32]) { } // Normalizes (sum to 1 except 0) the input vector directly in-place, using the sum arg. -#[allow(dead_code)] pub fn inplace_normalize_using_sum(x: &mut [I32F32], x_sum: I32F32) { if x_sum == I32F32::saturating_from_num(0.0_f32) { return; @@ -295,7 +270,6 @@ pub fn inplace_normalize_using_sum(x: &mut [I32F32], x_sum: I32F32) { } // Normalizes (sum to 1 except 0) the I64F64 input vector directly in-place. -#[allow(dead_code)] pub fn inplace_normalize_64(x: &mut [I64F64]) { let x_sum: I64F64 = x.iter().sum(); if x_sum == I64F64::saturating_from_num(0) { @@ -306,7 +280,6 @@ pub fn inplace_normalize_64(x: &mut [I64F64]) { } /// Normalizes (sum to 1 except 0) each row (dim=0) of a I64F64 matrix in-place. -#[allow(dead_code)] pub fn inplace_row_normalize_64(x: &mut [Vec]) { for row in x { let row_sum: I64F64 = row.iter().sum(); @@ -318,23 +291,26 @@ pub fn inplace_row_normalize_64(x: &mut [Vec]) { } /// Returns x / y for input vectors x and y, if y == 0 return 0. -#[allow(dead_code)] pub fn vecdiv(x: &[I32F32], y: &[I32F32]) -> Vec { - assert_eq!(x.len(), y.len()); - x.iter() - .zip(y) - .map(|(x_i, y_i)| { - if *y_i != 0 { - x_i.safe_div(*y_i) - } else { - I32F32::saturating_from_num(0) - } - }) - .collect() + if x.len() != y.len() { + log::error!( + "math error: vecdiv input lengths are not equal: {:?} != {:?}", + x.len(), + y.len() + ); + } + + let zero = I32F32::saturating_from_num(0); + + let mut out = Vec::with_capacity(x.len()); + for (i, x_i) in x.iter().enumerate() { + let y_i = y.get(i).copied().unwrap_or(zero); + out.push(x_i.safe_div(y_i)); + } + out } // Normalizes (sum to 1 except 0) each row (dim=0) of a matrix in-place. -#[allow(dead_code)] pub fn inplace_row_normalize(x: &mut [Vec]) { for row in x { let row_sum: I32F32 = row.iter().sum(); @@ -346,7 +322,6 @@ pub fn inplace_row_normalize(x: &mut [Vec]) { } // Normalizes (sum to 1 except 0) each row (dim=0) of a sparse matrix in-place. -#[allow(dead_code)] pub fn inplace_row_normalize_sparse(sparse_matrix: &mut [Vec<(u16, I32F32)>]) { for sparse_row in sparse_matrix.iter_mut() { let row_sum: I32F32 = sparse_row.iter().map(|(_j, value)| *value).sum(); @@ -359,7 +334,6 @@ pub fn inplace_row_normalize_sparse(sparse_matrix: &mut [Vec<(u16, I32F32)>]) { } // Sum across each row (dim=0) of a matrix. 
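+// Worked example for the rewritten `vecdiv` above (illustrative): with
+// x = [1, 2, 3] and y = [2, 0], the result is [0.5, 0, 0]. The shorter y is
+// padded with zeros past its end, and `safe_div` maps division by zero to 0
+// (the safe_math convention this file relies on), so no index can panic.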
-#[allow(dead_code)] pub fn row_sum(x: &[Vec]) -> Vec { if let Some(first_row) = x.first() { if first_row.is_empty() { @@ -370,7 +344,6 @@ pub fn row_sum(x: &[Vec]) -> Vec { } // Sum across each row (dim=0) of a sparse matrix. -#[allow(dead_code)] pub fn row_sum_sparse(sparse_matrix: &[Vec<(u16, I32F32)>]) -> Vec { sparse_matrix .iter() @@ -378,213 +351,234 @@ pub fn row_sum_sparse(sparse_matrix: &[Vec<(u16, I32F32)>]) -> Vec { .collect() } -// Sum across each column (dim=1) of a matrix. -#[allow(dead_code)] -pub fn col_sum(x: &[Vec]) -> Vec { - let Some(first_row) = x.first() else { - return vec![]; - }; - let cols = first_row.len(); - if cols == 0 { - return vec![]; - } - x.iter().fold( - vec![I32F32::saturating_from_num(0); cols], - |acc, next_row| { - acc.into_iter() - .zip(next_row) - .map(|(acc_elem, next_elem)| acc_elem.saturating_add(*next_elem)) - .collect() - }, - ) -} - -// Sum across each column (dim=1) of a sparse matrix. -#[allow(dead_code, clippy::indexing_slicing)] -pub fn col_sum_sparse(sparse_matrix: &[Vec<(u16, I32F32)>], columns: u16) -> Vec { - let mut result: Vec = vec![I32F32::saturating_from_num(0); columns as usize]; - for sparse_row in sparse_matrix { - for (j, value) in sparse_row { - result[*j as usize] = result[*j as usize].saturating_add(*value); - } - } - result -} - // Normalizes (sum to 1 except 0) each column (dim=1) of a sparse matrix in-place. -#[allow(dead_code, clippy::indexing_slicing)] pub fn inplace_col_normalize_sparse(sparse_matrix: &mut [Vec<(u16, I32F32)>], columns: u16) { - let mut col_sum: Vec = vec![I32F32::saturating_from_num(0.0); columns as usize]; // assume square matrix, rows=cols + let zero = I32F32::saturating_from_num(0.0); + let mut col_sum: Vec = vec![zero; columns as usize]; + + // Pass 1: accumulate column sums. for sparse_row in sparse_matrix.iter() { - for (j, value) in sparse_row.iter() { - col_sum[*j as usize] = col_sum[*j as usize].saturating_add(*value); + for &(j, value) in sparse_row.iter() { + if let Some(sum) = col_sum.get_mut(j as usize) { + *sum = sum.saturating_add(value); + } } } - for sparse_row in sparse_matrix { - for (j, value) in sparse_row { - if col_sum[*j as usize] == I32F32::saturating_from_num(0.0_f32) { - continue; + + // Pass 2: normalize by column sums where non-zero. + for sparse_row in sparse_matrix.iter_mut() { + for (j, value) in sparse_row.iter_mut() { + let denom = col_sum.get(*j as usize).copied().unwrap_or(zero); + if denom != zero { + *value = value.safe_div(denom); } - *value = value.safe_div(col_sum[*j as usize]); } } } // Normalizes (sum to 1 except 0) each column (dim=1) of a matrix in-place. -#[allow(dead_code)] +// If a row is shorter/longer than the accumulator, pad with zeroes accordingly. pub fn inplace_col_normalize(x: &mut [Vec]) { - let Some(first_row) = x.first() else { - return; - }; - if first_row.is_empty() { + let zero = I32F32::saturating_from_num(0.0); + + // Build column sums; treat missing entries as zero, but don't modify rows. 
+ let mut col_sums: Vec = Vec::new(); + for row in x.iter() { + if col_sums.len() < row.len() { + col_sums.resize(row.len(), zero); + } + let mut sums_it = col_sums.iter_mut(); + for v in row.iter() { + if let Some(sum) = sums_it.next() { + *sum = sum.saturating_add(*v); + } else { + break; + } + } + } + + if col_sums.is_empty() { return; } - let cols = first_row.len(); - let col_sums = x - .iter_mut() - .fold(vec![I32F32::saturating_from_num(0.0); cols], |acc, row| { - row.iter_mut() - .zip(acc) - .map(|(&mut m_val, acc_val)| acc_val.saturating_add(m_val)) - .collect() - }); - x.iter_mut().for_each(|row| { - row.iter_mut() - .zip(&col_sums) - .filter(|(_, col_sum)| **col_sum != I32F32::saturating_from_num(0_f32)) - .for_each(|(m_val, col_sum)| { - *m_val = m_val.safe_div(*col_sum); - }); - }); + + // Normalize only existing elements in each row. + for row in x.iter_mut() { + let mut sums_it = col_sums.iter(); + for m in row.iter_mut() { + if let Some(sum) = sums_it.next() { + if *sum != zero { + *m = m.safe_div(*sum); + } + } else { + break; + } + } + } } // Max-upscale each column (dim=1) of a sparse matrix in-place. -#[allow(dead_code, clippy::indexing_slicing)] pub fn inplace_col_max_upscale_sparse(sparse_matrix: &mut [Vec<(u16, I32F32)>], columns: u16) { - let mut col_max: Vec = vec![I32F32::saturating_from_num(0.0); columns as usize]; // assume square matrix, rows=cols + let zero = I32F32::saturating_from_num(0.0); + let mut col_max: Vec = vec![zero; columns as usize]; + + // Pass 1: compute per-column max for sparse_row in sparse_matrix.iter() { for (j, value) in sparse_row.iter() { - if col_max[*j as usize] < *value { - col_max[*j as usize] = *value; + if let Some(m) = col_max.get_mut(*j as usize) { + if *m < *value { + *m = *value; + } } } } - for sparse_row in sparse_matrix { - for (j, value) in sparse_row { - if col_max[*j as usize] == I32F32::saturating_from_num(0.0_f32) { - continue; + + // Pass 2: divide each nonzero entry by its column max + for sparse_row in sparse_matrix.iter_mut() { + for (j, value) in sparse_row.iter_mut() { + let m = col_max.get(*j as usize).copied().unwrap_or(zero); + if m != zero { + *value = value.safe_div(m); } - *value = value.safe_div(col_max[*j as usize]); } } } // Max-upscale each column (dim=1) of a matrix in-place. -#[allow(dead_code)] pub fn inplace_col_max_upscale(x: &mut [Vec]) { - let Some(first_row) = x.first() else { - return; - }; - if first_row.is_empty() { + let zero = I32F32::saturating_from_num(0.0); + + // Find the widest row to size the column-max buffer; don't modify rows. + let max_cols = x.iter().map(|r| r.len()).max().unwrap_or(0); + if max_cols == 0 { return; } - let cols = first_row.len(); - let col_maxes = x.iter_mut().fold( - vec![I32F32::saturating_from_num(0_f32); cols], - |acc, row| { - row.iter_mut() - .zip(acc) - .map(|(m_val, acc_val)| acc_val.max(*m_val)) - .collect() - }, - ); - x.iter_mut().for_each(|row| { - row.iter_mut() - .zip(&col_maxes) - .filter(|(_, col_max)| **col_max != I32F32::saturating_from_num(0)) - .for_each(|(m_val, col_max)| { - *m_val = m_val.safe_div(*col_max); - }); - }); + + // Pass 1: compute per-column maxima across existing entries only. + let mut col_maxes = vec![zero; max_cols]; + for row in x.iter() { + let mut max_it = col_maxes.iter_mut(); + for v in row.iter() { + if let Some(m) = max_it.next() { + if *m < *v { + *m = *v; + } + } else { + break; + } + } + } + + // Pass 2: divide each existing entry by its column max (if non-zero). 
+ for row in x.iter_mut() { + let mut max_it = col_maxes.iter(); + for val in row.iter_mut() { + if let Some(&m) = max_it.next() { + if m != zero { + *val = val.safe_div(m); + } + } else { + break; + } + } + } } // Apply mask to vector, mask=true will mask out, i.e. set to 0. -#[allow(dead_code)] pub fn inplace_mask_vector(mask: &[bool], vector: &mut [I32F32]) { + if mask.len() != vector.len() { + log::error!( + "math error: inplace_mask_vector input lengths are not equal: {:?} != {:?}", + mask.len(), + vector.len() + ); + } + if mask.is_empty() { return; } - assert_eq!(mask.len(), vector.len()); let zero: I32F32 = I32F32::saturating_from_num(0.0); - mask.iter() - .zip(vector) - .filter(|(m, _)| **m) - .for_each(|(_, v_elem)| { - *v_elem = zero; - }); + for (i, v) in vector.iter_mut().enumerate() { + if *mask.get(i).unwrap_or(&true) { + *v = zero; + } + } } // Apply mask to matrix, mask=true will mask out, i.e. set to 0. -#[allow(dead_code)] -pub fn inplace_mask_matrix(mask: &[Vec], matrix: &mut Vec>) { +pub fn inplace_mask_matrix(mask: &[Vec], matrix: &mut [Vec]) { + if mask.len() != matrix.len() { + log::error!( + "math error: inplace_mask_matrix input sizes are not equal: {:?} != {:?}", + mask.len(), + matrix.len() + ); + } let Some(first_row) = mask.first() else { return; }; if first_row.is_empty() { return; } - assert_eq!(mask.len(), matrix.len()); let zero: I32F32 = I32F32::saturating_from_num(0.0); - mask.iter().zip(matrix).for_each(|(mask_row, matrix_row)| { - mask_row - .iter() - .zip(matrix_row) - .filter(|(mask_elem, _)| **mask_elem) - .for_each(|(_, matrix_elem)| { - *matrix_elem = zero; - }); - }); + for (r, row) in matrix.iter_mut().enumerate() { + let mask_row_opt = mask.get(r); + for (c, val) in row.iter_mut().enumerate() { + let should_zero = mask_row_opt + .and_then(|mr| mr.get(c)) + .copied() + .unwrap_or(true); + if should_zero { + *val = zero; + } + } + } } // Apply row mask to matrix, mask=true will mask out, i.e. set to 0. -#[allow(dead_code)] pub fn inplace_mask_rows(mask: &[bool], matrix: &mut [Vec]) { + if mask.len() != matrix.len() { + log::error!( + "math error: inplace_mask_rows input sizes are not equal: {:?} != {:?}", + mask.len(), + matrix.len() + ); + } let Some(first_row) = matrix.first() else { return; }; let cols = first_row.len(); - assert_eq!(mask.len(), matrix.len()); let zero: I32F32 = I32F32::saturating_from_num(0); - matrix - .iter_mut() - .zip(mask) - .for_each(|(row_elem, mask_row)| { - if *mask_row { - *row_elem = vec![zero; cols]; - } - }); + for (r, row) in matrix.iter_mut().enumerate() { + if mask.get(r).copied().unwrap_or(true) { + *row = vec![zero; cols]; + } + } } // Apply column mask to matrix, mask=true will mask out, i.e. set to 0. // Assumes each column has the same length. -#[allow(dead_code)] pub fn inplace_mask_cols(mask: &[bool], matrix: &mut [Vec]) { - let Some(first_row) = matrix.first() else { + if mask.len() != matrix.len() { + log::error!( + "math error: inplace_mask_cols input sizes are not equal: {:?} != {:?}", + mask.len(), + matrix.len() + ); + } + if matrix.is_empty() { return; }; - assert_eq!(mask.len(), first_row.len()); let zero: I32F32 = I32F32::saturating_from_num(0); - matrix.iter_mut().for_each(|row_elem| { - row_elem.iter_mut().zip(mask).for_each(|(elem, mask_col)| { - if *mask_col { + for row in matrix.iter_mut() { + for (c, elem) in row.iter_mut().enumerate() { + if mask.get(c).copied().unwrap_or(true) { *elem = zero; } - }); - }); + } + } } // Mask out the diagonal of the input matrix in-place. 
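+// Worked example for `inplace_mask_vector` above (illustrative): with
+// mask = [true, false] and vector = [7, 8, 9], the length mismatch is logged
+// and the missing mask entry fails closed via `unwrap_or(&true)`, so the
+// result is [0, 8, 0]: masked positions and unmapped positions both zero out.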
-#[allow(dead_code)]
pub fn inplace_mask_diag(matrix: &mut [Vec<I32F32>]) {
    let Some(first_row) = matrix.first() else {
        return;
@@ -592,7 +586,18 @@
    if first_row.is_empty() {
        return;
    }
-    assert_eq!(matrix.len(), first_row.len());
+    // Weights that we use this function for are always a square matrix.
+    // If something not square is passed to this function, it's safe to return
+    // with no action. Log error if this happens.
+    if matrix.len() != first_row.len() {
+        log::error!(
+            "math error: inplace_mask_diag: matrix.len {:?} != first_row.len {:?}",
+            matrix.len(),
+            first_row.len()
+        );
+        return;
+    }
+
    let zero: I32F32 = I32F32::saturating_from_num(0.0);
    matrix.iter_mut().enumerate().for_each(|(idx, row)| {
        let Some(elem) = row.get_mut(idx) else {
@@ -604,27 +609,29 @@
}

// Remove cells from sparse matrix where the mask function of a scalar and a vector is true.
-#[allow(dead_code, clippy::indexing_slicing)]
pub fn scalar_vec_mask_sparse_matrix(
    sparse_matrix: &[Vec<(u16, I32F32)>],
    scalar: u64,
    vector: &[u64],
    mask_fn: &dyn Fn(u64, u64) -> bool,
) -> Vec<Vec<(u16, I32F32)>> {
-    let n: usize = sparse_matrix.len();
-    let mut result: Vec<Vec<(u16, I32F32)>> = vec![vec![]; n];
-    for (i, sparse_row) in sparse_matrix.iter().enumerate() {
-        for (j, value) in sparse_row {
-            if !mask_fn(scalar, vector[*j as usize]) {
-                result[i].push((*j, *value));
+    let mut result: Vec<Vec<(u16, I32F32)>> = Vec::with_capacity(sparse_matrix.len());
+
+    for row in sparse_matrix.iter() {
+        let mut out_row: Vec<(u16, I32F32)> = Vec::with_capacity(row.len());
+        for &(j, value) in row.iter() {
+            let vj = vector.get(j as usize).copied().unwrap_or(0);
+            if !mask_fn(scalar, vj) {
+                out_row.push((j, value));
            }
        }
+        result.push(out_row);
    }
+
    result
}

// Mask out the diagonal of the input matrix in-place, except for the diagonal entry at except_index.
-#[allow(dead_code)]
pub fn inplace_mask_diag_except_index(matrix: &mut [Vec<I32F32>], except_index: u16) {
    let Some(first_row) = matrix.first() else {
        return;
@@ -632,8 +639,14 @@
    if first_row.is_empty() {
        return;
    }
-    assert_eq!(matrix.len(), first_row.len());
-
+    if matrix.len() != first_row.len() {
+        log::error!(
+            "math error: inplace_mask_diag_except_index: matrix is not square: {:?} != {:?}",
+            matrix.len(),
+            first_row.len()
+        );
+        return;
+    }
    let diag_at_index = matrix
        .get(except_index as usize)
        .and_then(|row| row.get(except_index as usize))
@@ -651,26 +664,22 @@
}

// Return a new sparse matrix that replaces masked rows with an empty vector placeholder.
-#[allow(dead_code)]
pub fn mask_rows_sparse(
    mask: &[bool],
    sparse_matrix: &[Vec<(u16, I32F32)>],
) -> Vec<Vec<(u16, I32F32)>> {
-    assert_eq!(sparse_matrix.len(), mask.len());
-    mask.iter()
-        .zip(sparse_matrix)
-        .map(|(mask_elem, sparse_row)| {
-            if *mask_elem {
-                vec![]
-            } else {
-                sparse_row.clone()
-            }
-        })
-        .collect()
+    let mut out = Vec::with_capacity(sparse_matrix.len());
+    for (i, sparse_row) in sparse_matrix.iter().enumerate() {
+        if mask.get(i).copied().unwrap_or(true) {
+            out.push(Vec::new());
+        } else {
+            out.push(sparse_row.clone());
+        }
+    }
+    out
}

// Return a new sparse matrix with a masked out diagonal of input sparse matrix.
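+// Usage sketch for `scalar_vec_mask_sparse_matrix` above (illustrative; the
+// variable names are hypothetical): dropping weight cells whose target
+// registered after the current block,
+//
+//     let masked = scalar_vec_mask_sparse_matrix(
+//         &weights,               // sparse rows of (column, value)
+//         current_block,          // the scalar
+//         &block_at_registration, // per-column vector
+//         &|cur, reg| reg > cur,  // mask_fn: true means drop the cell
+//     );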
-#[allow(dead_code)] pub fn mask_diag_sparse(sparse_matrix: &[Vec<(u16, I32F32)>]) -> Vec> { sparse_matrix .iter() @@ -687,7 +696,6 @@ pub fn mask_diag_sparse(sparse_matrix: &[Vec<(u16, I32F32)>]) -> Vec], except_index: u16, @@ -709,27 +717,29 @@ pub fn mask_diag_sparse_except_index( } // Remove cells from sparse matrix where the mask function of two vectors is true. -#[allow(dead_code, clippy::indexing_slicing)] pub fn vec_mask_sparse_matrix( sparse_matrix: &[Vec<(u16, I32F32)>], first_vector: &[u64], second_vector: &[u64], mask_fn: &dyn Fn(u64, u64) -> bool, ) -> Vec> { - let n: usize = sparse_matrix.len(); - let mut result: Vec> = vec![vec![]; n]; - for (i, sparse_row) in sparse_matrix.iter().enumerate() { - for (j, value) in sparse_row { - if !mask_fn(first_vector[i], second_vector[*j as usize]) { - result[i].push((*j, *value)); + let mut result: Vec> = Vec::with_capacity(sparse_matrix.len()); + let mut fv_it = first_vector.iter(); + for row in sparse_matrix.iter() { + let fv = fv_it.next().copied().unwrap_or(0); + let mut out_row: Vec<(u16, I32F32)> = Vec::with_capacity(row.len()); + for &(j, val) in row.iter() { + let sv = second_vector.get(j as usize).copied().unwrap_or(0); + if !mask_fn(fv, sv) { + out_row.push((j, val)); } } + result.push(out_row); } result } // Row-wise matrix-vector hadamard product. -#[allow(dead_code)] pub fn row_hadamard(matrix: &[Vec], vector: &[I32F32]) -> Vec> { let Some(first_row) = matrix.first() else { return vec![vec![]]; @@ -737,37 +747,43 @@ pub fn row_hadamard(matrix: &[Vec], vector: &[I32F32]) -> Vec], vector: &[I32F32], ) -> Vec> { - sparse_matrix - .iter() - .zip(vector) - .map(|(sparse_row, vec_val)| { - sparse_row - .iter() - .map(|(j, value)| (*j, value.saturating_mul(*vec_val))) - .collect() - }) - .collect() + let mut out = Vec::with_capacity(sparse_matrix.len()); + let mut vec_it = vector.iter(); + + for sparse_row in sparse_matrix.iter() { + let Some(&scale) = vec_it.next() else { break }; + let mut new_row = Vec::with_capacity(sparse_row.len()); + for &(j, val) in sparse_row.iter() { + new_row.push((j, val.saturating_mul(scale))); + } + out.push(new_row); + } + + out } // Row-wise matrix-vector product, column-wise sum: result_j = SUM(i) vector_i * matrix_ij. -#[allow(dead_code)] pub fn matmul(matrix: &[Vec], vector: &[I32F32]) -> Vec { let Some(first_row) = matrix.first() else { return vec![]; @@ -776,52 +792,37 @@ pub fn matmul(matrix: &[Vec], vector: &[I32F32]) -> Vec { if cols == 0 { return vec![]; } - assert!(matrix.len() == vector.len()); - matrix.iter().zip(vector).fold( - vec![I32F32::saturating_from_num(0_f32); cols], - |acc, (row, vec_val)| { - row.iter() - .zip(acc) - .map(|(m_val, acc_val)| { - // Compute ranks: r_j = SUM(i) w_ij * s_i - // Compute trust scores: t_j = SUM(i) w_ij * s_i - // result_j = SUM(i) vector_i * matrix_ij - acc_val.saturating_add(vec_val.saturating_mul(*m_val)) - }) - .collect() - }, - ) -} + if matrix.len() != vector.len() { + log::error!( + "math error: matmul input sizes are not equal: {:?} != {:?}", + matrix.len(), + vector.len() + ); + } -// Row-wise matrix-vector product, column-wise sum: result_j = SUM(i) vector_i * matrix_ij. 
-#[allow(dead_code)] -pub fn matmul_64(matrix: &[Vec], vector: &[I64F64]) -> Vec { - let Some(first_row) = matrix.first() else { - return vec![]; - }; - let cols = first_row.len(); - if cols == 0 { - return vec![]; + let zero = I32F32::saturating_from_num(0.0); + let mut acc = vec![zero; cols]; + + let mut vec_it = vector.iter(); + for row in matrix.iter() { + // Use 0 if the vector ran out (rows beyond vector length contribute nothing). + let scale = vec_it.next().copied().unwrap_or(zero); + + let mut acc_it = acc.iter_mut(); + for m_val in row.iter() { + if let Some(a) = acc_it.next() { + *a = a.saturating_add(scale.saturating_mul(*m_val)); + } else { + // Ignore elements beyond the accumulator width (first row’s length). + break; + } + } } - assert!(matrix.len() == vector.len()); - matrix.iter().zip(vector).fold( - vec![I64F64::saturating_from_num(0.0); cols], - |acc, (row, vec_val)| { - row.iter() - .zip(acc) - .map(|(m_val, acc_val)| { - // Compute ranks: r_j = SUM(i) w_ij * s_i - // Compute trust scores: t_j = SUM(i) w_ij * s_i - // result_j = SUM(i) vector_i * matrix_ij - acc_val.saturating_add(vec_val.saturating_mul(*m_val)) - }) - .collect() - }, - ) + + acc } // Column-wise matrix-vector product, row-wise sum: result_i = SUM(j) vector_j * matrix_ij. -#[allow(dead_code)] pub fn matmul_transpose(matrix: &[Vec], vector: &[I32F32]) -> Vec { let Some(first_row) = matrix.first() else { return vec![]; @@ -829,143 +830,119 @@ pub fn matmul_transpose(matrix: &[Vec], vector: &[I32F32]) -> Vec], vector: &[I32F32], columns: u16, ) -> Vec { - let mut result: Vec = vec![I32F32::saturating_from_num(0.0); columns as usize]; - for (i, sparse_row) in sparse_matrix.iter().enumerate() { - for (j, value) in sparse_row.iter() { - // Compute ranks: r_j = SUM(i) w_ij * s_i - // Compute trust scores: t_j = SUM(i) w_ij * s_i - // result_j = SUM(i) vector_i * matrix_ij - result[*j as usize] = - result[*j as usize].saturating_add(vector[i].saturating_mul(*value)); + let zero = I32F32::saturating_from_num(0.0); + let mut result = vec![zero; columns as usize]; + + let mut vec_it = vector.iter(); + for row in sparse_matrix.iter() { + let scale = vec_it.next().copied().unwrap_or(zero); + for &(j, val) in row.iter() { + if let Some(r) = result.get_mut(j as usize) { + *r = r.saturating_add(scale.saturating_mul(val)); + } } } + result } // Column-wise sparse_matrix-vector product, row-wise sum: result_i = SUM(j) vector_j * matrix_ij. -#[allow(dead_code, clippy::indexing_slicing)] pub fn matmul_transpose_sparse( sparse_matrix: &[Vec<(u16, I32F32)>], vector: &[I32F32], ) -> Vec { - let mut result: Vec = vec![I32F32::saturating_from_num(0.0); sparse_matrix.len()]; - for (i, sparse_row) in sparse_matrix.iter().enumerate() { - for (j, value) in sparse_row.iter() { - // Compute dividends: d_j = SUM(i) b_ji * inc_i - // result_j = SUM(i) vector_i * matrix_ji - // result_i = SUM(j) vector_j * matrix_ij - result[i] = result[i].saturating_add(vector[*j as usize].saturating_mul(*value)); + let zero = I32F32::saturating_from_num(0.0); + let mut result = vec![zero; sparse_matrix.len()]; + + let mut out_it = result.iter_mut(); + for row in sparse_matrix.iter() { + let Some(out_cell) = out_it.next() else { break }; + let mut acc = zero; + for &(j, val) in row.iter() { + let v = vector.get(j as usize).copied().unwrap_or(zero); + acc = acc.saturating_add(v.saturating_mul(val)); } + *out_cell = acc; } + result } // Set inplace matrix values above column threshold to threshold value. 
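+// Worked example for `matmul` above (illustrative): matrix = [[1, 2], [3, 4]]
+// with vector = [10, 100] gives result_j = SUM(i) vector_i * matrix_ij:
+//     result_0 = 10*1 + 100*3 = 310
+//     result_1 = 10*2 + 100*4 = 420
+// The sparse variants accumulate the same sums, just over (column, value)
+// pairs instead of dense rows.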
-#[allow(dead_code)] pub fn inplace_col_clip(x: &mut [Vec], col_threshold: &[I32F32]) { - x.iter_mut().for_each(|row| { - row.iter_mut() - .zip(col_threshold) - .for_each(|(value, threshold)| { - *value = *threshold.min(value); - }); - }); + for row in x.iter_mut() { + let mut thr_it = col_threshold.iter(); + for value in row.iter_mut() { + if let Some(th) = thr_it.next() { + // Clip: value = min(value, threshold) + *value = *th.min(&*value); + } else { + // No more thresholds; stop for this row. + break; + } + } + } } // Return sparse matrix with values above column threshold set to threshold value. -#[allow(dead_code, clippy::indexing_slicing)] pub fn col_clip_sparse( sparse_matrix: &[Vec<(u16, I32F32)>], col_threshold: &[I32F32], ) -> Vec> { - let mut result: Vec> = vec![vec![]; sparse_matrix.len()]; - for (i, sparse_row) in sparse_matrix.iter().enumerate() { - for (j, value) in sparse_row.iter() { - if col_threshold[*j as usize] < *value { - if 0 < col_threshold[*j as usize] { - result[i].push((*j, col_threshold[*j as usize])); + let zero = I32F32::saturating_from_num(0.0); + let mut result = Vec::with_capacity(sparse_matrix.len()); + + for row in sparse_matrix.iter() { + let mut out_row: Vec<(u16, I32F32)> = Vec::with_capacity(row.len()); + for &(j, val) in row.iter() { + let th = col_threshold.get(j as usize).copied().unwrap_or(zero); + if th < val { + if th > zero { + // clip down to threshold, but drop if threshold <= 0 + out_row.push((j, th)); } } else { - result[i].push((*j, *value)); + // keep original + out_row.push((j, val)); } } + result.push(out_row); } - result -} - -// Set matrix values below threshold to lower, and equal-above to upper. -#[allow(dead_code)] -pub fn clip( - x: &[Vec], - threshold: I32F32, - upper: I32F32, - lower: I32F32, -) -> Vec> { - x.iter() - .map(|row| { - row.iter() - .map(|elem| if *elem >= threshold { upper } else { lower }) - .collect() - }) - .collect() -} - -// Set inplace matrix values below threshold to lower, and equal-above to upper. -#[allow(dead_code)] -pub fn inplace_clip(x: &mut [Vec], threshold: I32F32, upper: I32F32, lower: I32F32) { - x.iter_mut().for_each(|row| { - row.iter_mut().for_each(|elem| { - *elem = if *elem >= threshold { upper } else { lower }; - }); - }); -} -// Set sparse matrix values below threshold to lower, and equal-above to upper. -// Does not add missing elements (0 value assumed) when lower!=0. -#[allow(dead_code)] -pub fn clip_sparse( - sparse_matrix: &[Vec<(u16, I32F32)>], - threshold: I32F32, - upper: I32F32, - lower: I32F32, -) -> Vec> { - sparse_matrix - .iter() - .map(|row| { - row.iter() - .map(|(j, value)| { - if *value < threshold { - (*j, lower) - } else { - (*j, upper) - } - }) - .collect() - }) - .collect() + result } // Stake-weighted median score finding algorithm, based on a mid pivot binary search. @@ -995,144 +972,204 @@ pub fn clip_sparse( // * 'median': ( I32F32 ): // - median via random pivot binary search. 
// -#[allow(dead_code, clippy::indexing_slicing)] pub fn weighted_median( stake: &[I32F32], score: &[I32F32], partition_idx: &[usize], minority: I32F32, - partition_lo: I32F32, - partition_hi: I32F32, + mut partition_lo: I32F32, + mut partition_hi: I32F32, ) -> I32F32 { - let n = partition_idx.len(); - if n == 0 { - return I32F32::saturating_from_num(0); - } - if n == 1 { - return score[partition_idx[0]]; + let zero = I32F32::saturating_from_num(0.0); + if stake.len() != score.len() { + log::error!( + "math error: weighted_median stake and score have different lengths: {:?} != {:?}", + stake.len(), + score.len() + ); + return zero; } - assert!(stake.len() == score.len()); - let mid_idx: usize = n.safe_div(2); - let pivot: I32F32 = score[partition_idx[mid_idx]]; - let mut lo_stake: I32F32 = I32F32::saturating_from_num(0); - let mut hi_stake: I32F32 = I32F32::saturating_from_num(0); + let mut current_partition_index: Vec = partition_idx.to_vec(); + let mut iteration_counter: usize = 0; + let iteration_limit = partition_idx.len(); let mut lower: Vec = vec![]; let mut upper: Vec = vec![]; - for &idx in partition_idx { - if score[idx] == pivot { - continue; + + loop { + let n = current_partition_index.len(); + if n == 0 { + return zero; } - if score[idx] < pivot { - lo_stake = lo_stake.saturating_add(stake[idx]); - lower.push(idx); - } else { - hi_stake = hi_stake.saturating_add(stake[idx]); - upper.push(idx); + if n == 1 { + if let Some(&only_idx) = current_partition_index.first() { + return get_safe::(score, only_idx); + } else { + return zero; + } } - } - if (partition_lo.saturating_add(lo_stake) <= minority) - && (minority < partition_hi.saturating_sub(hi_stake)) - { - return pivot; - } else if (minority < partition_lo.saturating_add(lo_stake)) && (!lower.is_empty()) { - return weighted_median( - stake, - score, - &lower, - minority, - partition_lo, - partition_lo.saturating_add(lo_stake), - ); - } else if (partition_hi.saturating_sub(hi_stake) <= minority) && (!upper.is_empty()) { - return weighted_median( - stake, + let mid_idx: usize = n.safe_div(2); + let pivot: I32F32 = get_safe::( score, - &upper, - minority, - partition_hi.saturating_sub(hi_stake), - partition_hi, + current_partition_index.get(mid_idx).copied().unwrap_or(0), ); + let mut lo_stake: I32F32 = I32F32::saturating_from_num(0); + let mut hi_stake: I32F32 = I32F32::saturating_from_num(0); + + for idx in current_partition_index.clone() { + if get_safe::(score, idx) == pivot { + continue; + } + if get_safe::(score, idx) < pivot { + lo_stake = lo_stake.saturating_add(get_safe::(stake, idx)); + lower.push(idx); + } else { + hi_stake = hi_stake.saturating_add(get_safe::(stake, idx)); + upper.push(idx); + } + } + if (minority < partition_lo.saturating_add(lo_stake)) && (!lower.is_empty()) { + current_partition_index = lower.clone(); + partition_hi = partition_lo.saturating_add(lo_stake); + } else if (partition_hi.saturating_sub(hi_stake) <= minority) && (!upper.is_empty()) { + current_partition_index = upper.clone(); + partition_lo = partition_hi.saturating_sub(hi_stake); + } else { + return pivot; + } + + lower.clear(); + upper.clear(); + + // Safety limit: We should never need more than iteration_limit iterations. + iteration_counter = iteration_counter.saturating_add(1); + if iteration_counter > iteration_limit { + break; + } } - pivot + zero } /// Column-wise weighted median, e.g. stake-weighted median scores per server (column) over all validators (rows). 
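+// Worked example for `weighted_median` above (illustrative): stake =
+// [0.3, 0.3, 0.4], score = [10, 20, 30], minority = 0.5. The pivot 20 has
+// 0.3 stake strictly below it (which does not exceed the minority) and 0.4
+// strictly above it (1 - 0.4 = 0.6 > 0.5 still covers the minority), so the
+// loop terminates and returns 20 as the stake-weighted median.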
-#[allow(dead_code, clippy::indexing_slicing)] pub fn weighted_median_col( stake: &[I32F32], score: &[Vec], majority: I32F32, ) -> Vec { - let rows = stake.len(); - let columns = score[0].len(); - let zero: I32F32 = I32F32::saturating_from_num(0); - let mut median: Vec = vec![zero; columns]; - - #[allow(clippy::needless_range_loop)] - for c in 0..columns { - let mut use_stake: Vec = vec![]; - let mut use_score: Vec = vec![]; - for r in 0..rows { - assert_eq!(columns, score[r].len()); - if stake[r] > zero { - use_stake.push(stake[r]); - use_score.push(score[r][c]); + let zero = I32F32::saturating_from_num(0.0); + + // Determine number of columns from the first row. + let columns = score.first().map(|r| r.len()).unwrap_or(0); + let mut median = vec![zero; columns]; + + // Iterate columns into `median`. + let mut c = 0usize; + for med_cell in median.iter_mut() { + let mut use_stake: Vec = Vec::new(); + let mut use_score: Vec = Vec::new(); + + // Iterate rows aligned with `stake` length. + let mut r = 0usize; + while r < stake.len() { + let st = get_safe::(stake, r); + if st > zero { + // Fetch row safely; if it's missing or has wrong width, push zeros to both. + if let Some(row) = score.get(r) { + if row.len() == columns { + let val = row.get(c).copied().unwrap_or(zero); + use_stake.push(st); + use_score.push(val); + } else { + use_stake.push(zero); + use_score.push(zero); + log::error!( + "math error: weighted_median_col row.len() != columns: {:?} != {:?}", + row.len(), + columns + ); + } + } else { + // Missing row: insert zeroes. + use_stake.push(zero); + use_score.push(zero); + } } + r = r.saturating_add(1); } + if !use_stake.is_empty() { inplace_normalize(&mut use_stake); let stake_sum: I32F32 = use_stake.iter().sum(); let minority: I32F32 = stake_sum.saturating_sub(majority); - median[c] = weighted_median( + + let idxs: Vec = (0..use_stake.len()).collect(); + *med_cell = weighted_median( &use_stake, &use_score, - (0..use_stake.len()).collect::>().as_slice(), + idxs.as_slice(), minority, zero, stake_sum, ); } + + c = c.saturating_add(1); } median } /// Column-wise weighted median, e.g. stake-weighted median scores per server (column) over all validators (rows). -#[allow(dead_code, clippy::indexing_slicing)] pub fn weighted_median_col_sparse( stake: &[I32F32], score: &[Vec<(u16, I32F32)>], columns: u16, majority: I32F32, ) -> Vec { - let rows = stake.len(); - let zero: I32F32 = I32F32::saturating_from_num(0); + let zero = I32F32::saturating_from_num(0.0); + + // Keep only positive-stake rows; normalize them. let mut use_stake: Vec = stake.iter().copied().filter(|&s| s > zero).collect(); inplace_normalize(&mut use_stake); + let stake_sum: I32F32 = use_stake.iter().sum(); - let stake_idx: Vec = (0..use_stake.len()).collect(); let minority: I32F32 = stake_sum.saturating_sub(majority); - let mut use_score: Vec> = vec![vec![zero; use_stake.len()]; columns as usize]; - let mut median: Vec = vec![zero; columns as usize]; + let stake_idx: Vec = (0..use_stake.len()).collect(); + + // use_score: columns x use_stake.len(), prefilled with zeros. + let mut use_score: Vec> = (0..columns as usize) + .map(|_| vec![zero; use_stake.len()]) + .collect(); + + // Fill use_score by walking stake and score together, counting positives with k. 
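+    // For example (illustrative): stake = [0, 0.4, 0.6] keeps only rows 1 and
+    // 2, so k runs over 0..2; row 1's sparse entries land in column slot k = 0
+    // and row 2's in k = 1, while the zero-stake row is skipped entirely.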
let mut k: usize = 0; - for r in 0..rows { - if stake[r] <= zero { - continue; - } - for (c, val) in score[r].iter() { - use_score[*c as usize][k] = *val; + let mut stake_it = stake.iter(); + let mut score_it = score.iter(); + + while let (Some(&s), Some(sparse_row)) = (stake_it.next(), score_it.next()) { + if s > zero { + for &(c, val) in sparse_row.iter() { + if let Some(col_vec) = use_score.get_mut(c as usize) { + if let Some(cell) = col_vec.get_mut(k) { + *cell = val; + } + } + } + k = k.saturating_add(1); } - k.saturating_inc(); } - for c in 0..columns as usize { - median[c] = weighted_median( + + // Compute weighted median per column. + let mut median: Vec = Vec::with_capacity(columns as usize); + for col_vec in use_score.iter() { + median.push(weighted_median( &use_stake, - &use_score[c], - &stake_idx, + col_vec, + stake_idx.as_slice(), minority, zero, stake_sum, - ); + )); } + median } @@ -1140,34 +1177,66 @@ pub fn weighted_median_col_sparse( // ratio has intended range [0, 1] // ratio=0: Result = A // ratio=1: Result = B -#[allow(dead_code)] pub fn interpolate(mat1: &[Vec], mat2: &[Vec], ratio: I32F32) -> Vec> { - if ratio == I32F32::saturating_from_num(0) { + if ratio == I32F32::saturating_from_num(0.0) { return mat1.to_owned(); } - if ratio == I32F32::saturating_from_num(1) { + if ratio == I32F32::saturating_from_num(1.0) { return mat2.to_owned(); } - assert!(mat1.len() == mat2.len()); - if mat1.is_empty() { - return vec![vec![]; 1]; + if mat1.is_empty() || mat1.first().map(|r| r.is_empty()).unwrap_or(true) { + return vec![vec![]]; } - if mat1.first().unwrap_or(&vec![]).is_empty() { - return vec![vec![]; 1]; + if mat1.len() != mat2.len() { + log::error!( + "math error: interpolate mat1.len() != mat2.len(): {:?} != {:?}", + mat1.len(), + mat2.len() + ); } - let mut result: Vec> = - vec![ - vec![I32F32::saturating_from_num(0); mat1.first().unwrap_or(&vec![]).len()]; - mat1.len() - ]; - for (i, (row1, row2)) in mat1.iter().zip(mat2.iter()).enumerate() { - assert!(row1.len() == row2.len()); - for (j, (&v1, &v2)) in row1.iter().zip(row2.iter()).enumerate() { - if let Some(res) = result.get_mut(i).unwrap_or(&mut vec![]).get_mut(j) { - *res = v1.saturating_add(ratio.saturating_mul(v2.saturating_sub(v1))); - } + + let zero = I32F32::saturating_from_num(0.0); + let cols = mat1.first().map(|r| r.len()).unwrap_or(0); + + // Pre-size result to mat1's shape (row count = mat1.len(), col count = first row of mat1). + let mut result: Vec> = { + let mut out = Vec::with_capacity(mat1.len()); + for _ in mat1.iter() { + out.push(vec![zero; cols]); + } + out + }; + + // Walk rows of mat1, mat2, and result in lockstep; stop when any iterator ends. + let mut m2_it = mat2.iter(); + let mut out_it = result.iter_mut(); + + for row1 in mat1.iter() { + let (Some(row2), Some(out_row)) = (m2_it.next(), out_it.next()) else { + log::error!("math error: interpolate: No more rows in mat2"); + break; + }; + if row1.len() != row2.len() { + log::error!( + "math error: interpolate row1.len() != row2.len(): {:?} != {:?}", + row1.len(), + row2.len() + ); } + + // Walk elements of row1, row2, and out_row in lockstep; stop at the shortest. 
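+        // E.g. (illustrative) row1 = [0, 4], row2 = [2], ratio = 0.5: the
+        // first cell becomes 0 + 0.5 * (2 - 0) = 1, and the second keeps its
+        // pre-filled zero because row2 has no matching cell.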
+ let mut r1_it = row1.iter(); + let mut r2_it = row2.iter(); + let mut out_cell_it = out_row.iter_mut(); + + while let (Some(v1), Some(v2), Some(out_cell)) = + (r1_it.next(), r2_it.next(), out_cell_it.next()) + { + *out_cell = (*v1).saturating_add(ratio.saturating_mul((*v2).saturating_sub(*v1))); + } + // Any remaining cells in `out_row` (beyond min row length) stay as zero (pre-filled). } + result } @@ -1175,7 +1244,6 @@ pub fn interpolate(mat1: &[Vec], mat2: &[Vec], ratio: I32F32) -> // ratio has intended range [0, 1] // ratio=0: Result = A // ratio=1: Result = B -#[allow(dead_code)] pub fn interpolate_sparse( mat1: &[Vec<(u16, I32F32)>], mat2: &[Vec<(u16, I32F32)>], @@ -1188,7 +1256,15 @@ pub fn interpolate_sparse( if ratio == I32F32::saturating_from_num(1) { return mat2.to_owned(); } - assert!(mat1.len() == mat2.len()); + if mat1.len() != mat2.len() { + // In case if sizes mismatch, return clipped weights + log::error!( + "math error: interpolate_sparse: mat1.len() != mat2.len(): {:?} != {:?}", + mat1.len(), + mat2.len() + ); + return mat2.to_owned(); + } let rows = mat1.len(); let zero: I32F32 = I32F32::saturating_from_num(0); let mut result: Vec> = vec![vec![]; rows]; @@ -1224,12 +1300,16 @@ pub fn interpolate_sparse( } // Element-wise product of two vectors. -#[allow(dead_code)] pub fn vec_mul(a: &[I32F32], b: &[I32F32]) -> Vec { - a.iter() - .zip(b.iter()) - .map(|(x, y)| x.checked_mul(*y).unwrap_or_default()) - .collect() + let mut out = Vec::with_capacity(core::cmp::min(a.len(), b.len())); + let mut ai = a.iter(); + let mut bi = b.iter(); + + while let (Some(x), Some(y)) = (ai.next(), bi.next()) { + out.push(x.checked_mul(*y).unwrap_or_default()); + } + + out } // Element-wise product of matrix and vector @@ -1240,11 +1320,15 @@ pub fn mat_vec_mul(matrix: &[Vec], vector: &[I32F32]) -> Vec if first_row.is_empty() { return vec![vec![]]; } - matrix.iter().map(|row| vec_mul(row, vector)).collect() + + let mut out = Vec::with_capacity(matrix.len()); + for row in matrix.iter() { + out.push(vec_mul(row, vector)); + } + out } // Element-wise product of matrix and vector -#[allow(dead_code)] pub fn mat_vec_mul_sparse( matrix: &[Vec<(u16, I32F32)>], vector: &[I32F32], @@ -1265,58 +1349,6 @@ pub fn mat_vec_mul_sparse( result } -// Element-wise product of two matrices. -#[allow(dead_code)] -pub fn hadamard(mat1: &[Vec], mat2: &[Vec]) -> Vec> { - assert!(mat1.len() == mat2.len()); - let Some(first_row) = mat1.first() else { - return vec![vec![]]; - }; - if first_row.is_empty() { - return vec![vec![]]; - } - mat1.iter() - .zip(mat2) - .map(|(row1, row2)| { - assert!(row1.len() == row2.len()); - row1.iter() - .zip(row2) - .map(|(elem1, elem2)| elem1.saturating_mul(*elem2)) - .collect() - }) - .collect() -} - -// Element-wise product of two sparse matrices. 
-#[allow(dead_code, clippy::indexing_slicing)]
-pub fn hadamard_sparse(
-    mat1: &[Vec<(u16, I32F32)>],
-    mat2: &[Vec<(u16, I32F32)>],
-    columns: u16,
-) -> Vec<Vec<(u16, I32F32)>> {
-    assert!(mat1.len() == mat2.len());
-    let rows = mat1.len();
-    let zero: I32F32 = I32F32::saturating_from_num(0);
-    let mut result: Vec<Vec<(u16, I32F32)>> = vec![vec![]; rows];
-    for i in 0..rows {
-        let mut row1: Vec<I32F32> = vec![zero; columns as usize];
-        for (j, value) in mat1[i].iter() {
-            row1[*j as usize] = row1[*j as usize].saturating_add(*value);
-        }
-        let mut row2: Vec<I32F32> = vec![zero; columns as usize];
-        for (j, value) in mat2[i].iter() {
-            row2[*j as usize] = row2[*j as usize].saturating_add(*value);
-        }
-        for j in 0..columns as usize {
-            let prod: I32F32 = row1[j].saturating_mul(row2[j]);
-            if zero < prod {
-                result[i].push((j as u16, prod))
-            }
-        }
-    }
-    result
-}
-
/// Clamp the input value between high and low.
/// Note: assumes high > low
pub fn clamp_value(value: I32F32, low: I32F32, high: I32F32) -> I32F32 {
@@ -1334,7 +1366,6 @@
// Return matrix exponential moving average: `alpha * a_ij + one_minus_alpha * b_ij`.
// `alpha` is the EMA coefficient, how much to add of the new observation, typically small,
// higher alpha discounts older observations faster.
-#[allow(dead_code)]
pub fn mat_ema(new: &[Vec<I32F32>], old: &[Vec<I32F32>], alpha: I32F32) -> Vec<Vec<I32F32>> {
    let Some(first_row) = new.first() else {
        return vec![vec![]];
@@ -1342,214 +1373,219 @@ pub fn mat_ema(new: &[Vec<I32F32>], old: &[Vec<I32F32>], alpha: I32F32) -> Vec<
pub fn mat_ema_sparse(
    new: &[Vec<(u16, I32F32)>],
    old: &[Vec<(u16, I32F32)>],
    alpha: I32F32,
) -> Vec<Vec<(u16, I32F32)>> {
-    assert!(new.len() == old.len());
-    let n = new.len(); // assume square matrix, rows=cols
-    let zero: I32F32 = I32F32::saturating_from_num(0.0);
-    let one_minus_alpha: I32F32 = I32F32::saturating_from_num(1.0).saturating_sub(alpha);
-    let mut result: Vec<Vec<(u16, I32F32)>> = vec![vec![]; n];
-    for i in 0..new.len() {
-        let mut row: Vec<I32F32> = vec![zero; n];
-        for (j, value) in new[i].iter() {
-            row[*j as usize] = row[*j as usize].saturating_add(alpha.saturating_mul(*value));
+    if new.len() != old.len() {
+        log::error!(
+            "math error: mat_ema_sparse: new.len() != old.len(): {:?} != {:?}",
+            new.len(),
+            old.len()
+        );
+    }
+
+    let zero = I32F32::saturating_from_num(0.0);
+    let one_minus_alpha = I32F32::saturating_from_num(1.0).saturating_sub(alpha);
+
+    let n = new.len(); // assume square (rows = cols)
+    if n == 0 {
+        return Vec::new();
+    }
+
+    let mut result: Vec<Vec<(u16, I32F32)>> = Vec::with_capacity(n);
+    let mut old_it = old.iter();
+
+    for new_row in new.iter() {
+        let mut acc_row = vec![zero; n];
+
+        // Add alpha * new
+        for &(j, v) in new_row.iter() {
+            if let Some(cell) = acc_row.get_mut(j as usize) {
+                *cell = cell.saturating_add(alpha.saturating_mul(v));
+            }
        }
-        for (j, value) in old[i].iter() {
-            row[*j as usize] =
-                row[*j as usize].saturating_add(one_minus_alpha.saturating_mul(*value));
+
+        // Add (1 - alpha) * old
+        if let Some(orow) = old_it.next() {
+            for &(j, v) in orow.iter() {
+                if let Some(cell) = acc_row.get_mut(j as usize) {
+                    *cell = cell.saturating_add(one_minus_alpha.saturating_mul(v));
+                }
+            }
        }
-        for (j, value) in row.iter().enumerate() {
-            if *value > zero {
-                result[i].push((j as u16, *value))
+
+        // Densified row -> sparse (keep positives)
+        let mut out_row: Vec<(u16, I32F32)> = Vec::new();
+        for (j, &val) in acc_row.iter().enumerate() {
+            if val > zero {
+                out_row.push((j as u16, val));
            }
        }
+
+        result.push(out_row);
    }
+
    result
}

/// Calculates the exponential moving average (EMA) for a sparse matrix using dynamic alpha values.
-#[allow(dead_code)] pub fn mat_ema_alpha_sparse( new: &[Vec<(u16, I32F32)>], old: &[Vec<(u16, I32F32)>], alpha: &[Vec], ) -> Vec> { - // Ensure dimensions match. - assert!(new.len() == old.len()); - assert!(new.len() == alpha.len()); + // If shapes don't match, just return `new` + if new.len() != old.len() || new.len() != alpha.len() { + log::error!( + "math error: mat_ema_alpha_sparse shapes don't match: {:?} vs. {:?} vs. {:?}", + old.len(), + new.len(), + alpha.len() + ); + return new.to_owned(); + } - // The output vector of rows. - let mut result: Vec> = Vec::with_capacity(new.len()); - let zero: I32F32 = I32F32::saturating_from_num(0.0); + let zero = I32F32::saturating_from_num(0.0); let one = I32F32::saturating_from_num(1.0); - // Iterate over each row of the matrices. - for ((new_row, old_row), alpha_row) in new.iter().zip(old).zip(alpha) { - // Initialize a row of zeros for the result matrix. - let mut decayed_values: Vec = vec![zero; alpha_row.len()]; + let mut result: Vec> = Vec::with_capacity(new.len()); + let mut old_it = old.iter(); + let mut alf_it = alpha.iter(); + + for new_row in new.iter() { + let Some(old_row) = old_it.next() else { break }; + let Some(alpha_row) = alf_it.next() else { + break; + }; - let mut result_row: Vec<(u16, I32F32)> = Vec::new(); + // Densified accumulator sized to alpha_row length (columns outside are ignored). + let mut decayed_values = vec![zero; alpha_row.len()]; - // Process the old matrix values. - for (j, old_val) in old_row.iter() { - if let (Some(alpha_val), Some(decayed_val)) = ( - alpha_row.get(*j as usize), - decayed_values.get_mut(*j as usize), + // Apply (1 - alpha_j) * old_ij into accumulator. + for &(j, old_val) in old_row.iter() { + if let (Some(&a), Some(cell)) = ( + alpha_row.get(j as usize), + decayed_values.get_mut(j as usize), ) { - // Calculate the complement of the alpha value - let one_minus_alpha = one.saturating_sub(*alpha_val); - // Bonds_decayed = Bonds * (1 - alpha) - *decayed_val = one_minus_alpha.saturating_mul(*old_val); + *cell = one.saturating_sub(a).saturating_mul(old_val); } } - // Process the new matrix values. - for (j, new_val) in new_row.iter() { - if let (Some(alpha_val), Some(decayed_val)) = - (alpha_row.get(*j as usize), decayed_values.get(*j as usize)) + // Add alpha_j * new_ij, clamp to [0, 1], and emit sparse entries > 0. + let mut out_row: Vec<(u16, I32F32)> = Vec::new(); + for &(j, new_val) in new_row.iter() { + if let (Some(&a), Some(&decayed)) = + (alpha_row.get(j as usize), decayed_values.get(j as usize)) { - // Each validator can increase bonds by at most clamped_alpha per epoch towards the cap - // Validators allocate their purchase across miners based on weights - let purchase_increment = alpha_val.saturating_mul(*new_val).max(zero); - let result_val = decayed_val.saturating_add(purchase_increment).min(one); - - if result_val > zero { - result_row.push((*j, result_val)); + let inc = a.saturating_mul(new_val).max(zero); + let val = decayed.saturating_add(inc).min(one); + if val > zero { + out_row.push((j, val)); } } } - result.push(result_row); + + result.push(out_row); } - // Return the computed EMA sparse matrix. result } /// Calculates the exponential moving average (EMA) for a dense matrix using dynamic alpha values. -#[allow(dead_code)] pub fn mat_ema_alpha( new: &[Vec], // Weights old: &[Vec], // Bonds alpha: &[Vec], ) -> Vec> { - // Check if the new matrix is empty or its first row is empty. 
-    if new.is_empty() || new.first().is_none_or(|row| row.is_empty()) {
-        return vec![vec![]; 1];
+    // Empty or degenerate input
+    if new.is_empty() || new.first().map(|r| r.is_empty()).unwrap_or(true) {
+        return vec![vec![]];
     }
 
-    // Ensure the dimensions of the new, old and alpha matrices match.
-    assert!(new.len() == old.len());
-    assert!(new.len() == alpha.len());
-
-    // Initialize the result matrix with zeros, having the same dimensions as the new matrix.
-    let zero: I32F32 = I32F32::saturating_from_num(0.0);
-    let one = I32F32::saturating_from_num(1.0);
-
-    let mut result: Vec<Vec<I32F32>> = Vec::with_capacity(new.len());
-
-    // Iterate over each row of the matrices.
-    for ((new_row, old_row), alpha_row) in new.iter().zip(old).zip(alpha) {
-        assert!(new_row.len() == old_row.len());
-        assert!(new_row.len() == alpha_row.len());
-        let mut result_row: Vec<I32F32> = Vec::new();
-
-        // Iterate over each column of the current row.
-        for j in 0..new_row.len() {
-            // Compute the EMA for the current element using saturating operations.
-            if let (Some(new_val), Some(old_val), Some(alpha_val)) =
-                (new_row.get(j), old_row.get(j), alpha_row.get(j))
-            {
-                // Calculate the complement of the alpha value
-                let one_minus_alpha = one.saturating_sub(*alpha_val);
-
-                // Bonds_decayed = Bonds * (1 - alpha)
-                let decayed_val = one_minus_alpha.saturating_mul(*old_val);
+    // If outer dimensions don't match, return bonds unchanged
+    if new.len() != old.len() || new.len() != alpha.len() {
+        log::error!(
+            "math error: mat_ema_alpha shapes don't match: {:?} vs. {:?} vs. {:?}",
+            old.len(),
+            new.len(),
+            alpha.len()
+        );
+        return old.to_owned();
+    }
 
-                // Each validator can increase bonds by at most clamped_alpha per epoch towards the cap
-                // Validators allocate their purchase across miners based on weights
-                let purchase_increment = alpha_val.saturating_mul(*new_val).max(zero);
-                let result_val = decayed_val.saturating_add(purchase_increment).min(one);
-                result_row.push(result_val);
-            }
+    // Ensure each corresponding row has matching length; otherwise return `new` unchanged.
+    let mut old_it = old.iter();
+    let mut alp_it = alpha.iter();
+    for nrow in new.iter() {
+        let (Some(orow), Some(arow)) = (old_it.next(), alp_it.next()) else {
+            return new.to_owned();
+        };
+        if nrow.len() != orow.len() || nrow.len() != arow.len() {
+            return new.to_owned();
         }
-        result.push(result_row);
     }
-    // Return the computed EMA matrix.
-    result
-}
 
-/// Return the quantile of a vector of I32F32 values.
-pub fn quantile(data: &[I32F32], quantile: f64) -> I32F32 {
-    // Clone the input data to avoid modifying the original vector.
-    let mut sorted_data = data.to_owned();
-
-    // Sort the cloned data in ascending order, handling potential NaN values.
-    sorted_data.sort_by(|a, b| a.partial_cmp(b).unwrap_or(Ordering::Equal));
+    let zero = I32F32::saturating_from_num(0.0);
+    let one = I32F32::saturating_from_num(1.0);
 
-    // Get the length of the sorted data.
-    let len = sorted_data.len();
+    // Compute EMA: result = (1 - α) * old + α * new, clamped to [0, 1].
+    let mut out: Vec<Vec<I32F32>> = Vec::with_capacity(new.len());
+    let mut old_it = old.iter();
+    let mut alp_it = alpha.iter();
 
-    // If the data is empty, return 0 as the quantile value.
-    if len == 0 {
-        return I32F32::saturating_from_num(0);
-    }
-
-    // Calculate the position in the sorted array corresponding to the quantile.
-    let pos = quantile * (len.saturating_sub(1)) as f64;
+    for nrow in new.iter() {
+        let (Some(orow), Some(arow)) = (old_it.next(), alp_it.next()) else {
+            break;
+        };
 
-    // Determine the lower index by flooring the position.
-    let low = pos.floor() as usize;
+        let mut r: Vec<I32F32> = Vec::with_capacity(nrow.len());
+        let mut n_it = nrow.iter();
+        let mut o_it = orow.iter();
+        let mut a_it = arow.iter();
 
-    // Determine the higher index by ceiling the position.
-    let high = pos.ceil() as usize;
+        while let (Some(&n), Some(&o), Some(&a)) = (n_it.next(), o_it.next(), a_it.next()) {
+            let one_minus_a = one.saturating_sub(a);
+            let decayed = one_minus_a.saturating_mul(o);
+            let inc = a.saturating_mul(n).max(zero);
+            r.push(decayed.saturating_add(inc).min(one));
+        }
 
-    // If the low and high indices are the same, return the value at that index.
-    if low == high {
-        sorted_data
-            .get(low)
-            .copied()
-            .unwrap_or_else(|| I32F32::saturating_from_num(0))
-    } else {
-        // Otherwise, perform linear interpolation between the low and high values.
-        let low_value = sorted_data
-            .get(low)
-            .copied()
-            .unwrap_or_else(|| I32F32::saturating_from_num(0));
-        let high_value = sorted_data
-            .get(high)
-            .copied()
-            .unwrap_or_else(|| I32F32::saturating_from_num(0));
-
-        // Calculate the weight for interpolation.
-        let weight = I32F32::saturating_from_num(pos - low as f64);
-
-        // Return the interpolated value using saturating operations.
-        low_value.saturating_add((high_value.saturating_sub(low_value)).saturating_mul(weight))
+        out.push(r);
     }
+
+    out
 }
 
 /// Safe ln function, returns 0 if value is 0.
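For intuition, the per-element update both EMA functions above perform is b' = min((1 - α)·b + max(α·w, 0), 1): decay the old bond, add the alpha-scaled new weight, clamp into [0, 1]. A minimal standalone sketch of one such step (the helper name is hypothetical; only `substrate_fixed` is assumed):

    use substrate_fixed::types::I32F32;

    /// One EMA step: decay the old bond by (1 - alpha), add the alpha-scaled
    /// new weight, and clamp the result into [0, 1].
    fn ema_step(old: I32F32, new: I32F32, alpha: I32F32) -> I32F32 {
        let zero = I32F32::saturating_from_num(0.0);
        let one = I32F32::saturating_from_num(1.0);
        let decayed = one.saturating_sub(alpha).saturating_mul(old);
        let inc = alpha.saturating_mul(new).max(zero);
        decayed.saturating_add(inc).min(one)
    }

    // e.g. old = 0.8, new = 0.4, alpha = 0.25  =>  0.75*0.8 + 0.25*0.4 = 0.7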
diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs
index 7906064780..cf7d4dc050 100644
--- a/pallets/subtensor/src/epoch/run_epoch.rs
+++ b/pallets/subtensor/src/epoch/run_epoch.rs
@@ -1,19 +1,163 @@
 use super::*;
 use crate::epoch::math::*;
+use alloc::collections::BTreeMap;
 use frame_support::IterableStorageDoubleMap;
 use safe_math::*;
+use sp_std::collections::btree_map::IntoIter;
 use sp_std::vec;
 use substrate_fixed::types::{I32F32, I64F64, I96F32};
-use subtensor_runtime_common::{AlphaCurrency, NetUid};
+use subtensor_runtime_common::{AlphaCurrency, MechId, NetUid, NetUidStorageIndex};
+
+#[derive(Debug, Default)]
+pub struct EpochTerms {
+    pub uid: usize,
+    pub dividend: u16,
+    pub incentive: u16,
+    pub validator_emission: AlphaCurrency,
+    pub server_emission: AlphaCurrency,
+    pub stake_weight: u16,
+    pub active: bool,
+    pub emission: AlphaCurrency,
+    pub rank: u16,
+    pub trust: u16,
+    pub consensus: u16,
+    pub pruning_score: u16,
+    pub validator_trust: u16,
+    pub new_validator_permit: bool,
+    pub bond: Vec<(u16, u16)>,
+}
+
+pub struct EpochOutput<T: frame_system::Config>(pub BTreeMap<T::AccountId, EpochTerms>);
+
+impl<T: frame_system::Config> EpochOutput<T> {
+    pub fn as_map(&self) -> &BTreeMap<T::AccountId, EpochTerms> {
+        &self.0
+    }
+}
+
+impl<T> IntoIterator for EpochOutput<T>
+where
+    T: frame_system::Config,
+    T::AccountId: Ord,
+{
+    type Item = (T::AccountId, EpochTerms);
+    type IntoIter = IntoIter<T::AccountId, EpochTerms>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.0.into_iter()
+    }
+}
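Since `EpochOutput` implements `IntoIterator`, callers can walk the per-hotkey terms directly; a sketch of the consumption pattern (hypothetical call site inside the pallet, mirroring how the legacy wrapper below remaps the output):

    // Assumes pallet context: `T: Config`, `netuid`, and `rao_emission` in scope.
    let output = Self::epoch_mechanism(netuid, MechId::MAIN, rao_emission);
    for (hotkey, terms) in output {
        // Each hotkey's terms carry both emission components for this epoch.
        log::trace!(
            "{:?}: server={:?} validator={:?}",
            hotkey,
            terms.server_emission,
            terms.validator_emission
        );
    }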
+#[macro_export]
+macro_rules! extract_from_sorted_terms {
+    ($sorted:expr, $field:ident) => {{
+        ($sorted)
+            .iter()
+            .copied()
+            .map(|t| t.$field)
+            .collect::<sp_std::vec::Vec<_>>()
+    }};
+}
 
 impl<T: Config> Pallet<T> {
+    /// Legacy epoch function interface (TODO: is only used for tests, remove)
+    pub fn epoch(
+        netuid: NetUid,
+        rao_emission: AlphaCurrency,
+    ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> {
+        // Run mechanism-style epoch
+        let output = Self::epoch_mechanism(netuid, MechId::MAIN, rao_emission);
+
+        // Persist values in legacy format
+        Self::persist_mechanism_epoch_terms(netuid, MechId::MAIN, output.as_map());
+        Self::persist_netuid_epoch_terms(netuid, output.as_map());
+
+        // Remap and return
+        output
+            .into_iter()
+            .map(|(hotkey, terms)| (hotkey, terms.server_emission, terms.validator_emission))
+            .collect()
+    }
+
+    /// Legacy epoch_dense function interface (TODO: is only used for tests, remove)
+    pub fn epoch_dense(
+        netuid: NetUid,
+        rao_emission: AlphaCurrency,
+    ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> {
+        Self::epoch_dense_mechanism(netuid, MechId::MAIN, rao_emission)
+    }
+
+    /// Persists per-mechanism epoch output in state
+    pub fn persist_mechanism_epoch_terms(
+        netuid: NetUid,
+        mecid: MechId,
+        output: &BTreeMap<T::AccountId, EpochTerms>,
+    ) {
+        let netuid_index = Self::get_mechanism_storage_index(netuid, mecid);
+        let mut terms_sorted: sp_std::vec::Vec<&EpochTerms> = output.values().collect();
+        terms_sorted.sort_unstable_by_key(|t| t.uid);
+
+        let incentive = extract_from_sorted_terms!(terms_sorted, incentive);
+        let bonds: Vec<Vec<(u16, u16)>> = terms_sorted
+            .iter()
+            .cloned()
+            .map(|t| t.bond.clone())
+            .collect::<Vec<_>>();
+
+        Incentive::<T>::insert(netuid_index, incentive);
+
+        let server_emission = extract_from_sorted_terms!(terms_sorted, server_emission);
+        Self::deposit_event(Event::IncentiveAlphaEmittedToMiners {
+            netuid: netuid_index,
+            emissions: server_emission,
+        });
+
+        bonds
+            .into_iter()
+            .enumerate()
+            .for_each(|(uid_usize, bond_vec)| {
+                let uid: u16 = uid_usize.try_into().unwrap_or_default();
+                Bonds::<T>::insert(netuid_index, uid, bond_vec);
+            });
+    }
+
+    /// Persists per-netuid epoch output in state
+    pub fn persist_netuid_epoch_terms(netuid: NetUid, output: &BTreeMap<T::AccountId, EpochTerms>) {
+        let mut terms_sorted: sp_std::vec::Vec<&EpochTerms> = output.values().collect();
+        terms_sorted.sort_unstable_by_key(|t| t.uid);
+
+        let active = extract_from_sorted_terms!(terms_sorted, active);
+        let emission = extract_from_sorted_terms!(terms_sorted, emission);
+        let rank = extract_from_sorted_terms!(terms_sorted, rank);
+        let trust = extract_from_sorted_terms!(terms_sorted, trust);
+        let consensus = extract_from_sorted_terms!(terms_sorted, consensus);
+        let dividend = extract_from_sorted_terms!(terms_sorted, dividend);
+        let pruning_score = extract_from_sorted_terms!(terms_sorted, pruning_score);
+        let validator_trust = extract_from_sorted_terms!(terms_sorted, validator_trust);
+        let new_validator_permit = extract_from_sorted_terms!(terms_sorted, new_validator_permit);
+
+        Active::<T>::insert(netuid, active.clone());
+        Emission::<T>::insert(netuid, emission);
+        Rank::<T>::insert(netuid, rank);
+        Trust::<T>::insert(netuid, trust);
+        Consensus::<T>::insert(netuid, consensus);
+        Dividends::<T>::insert(netuid, dividend);
+        PruningScores::<T>::insert(netuid, pruning_score);
+        ValidatorTrust::<T>::insert(netuid, validator_trust);
+        ValidatorPermit::<T>::insert(netuid, new_validator_permit);
+    }
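The `extract_from_sorted_terms!` macro above just projects one field out of the uid-sorted term list; expanded by hand it is equivalent to this sketch:

    // Equivalent hand-written form of `extract_from_sorted_terms!(terms_sorted, rank)`,
    // where `terms_sorted: Vec<&EpochTerms>` is already sorted by uid:
    let rank: sp_std::vec::Vec<u16> = terms_sorted
        .iter()
        .copied()          // &&EpochTerms -> &EpochTerms
        .map(|t| t.rank)   // project one field per uid
        .collect();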
 
     /// Calculates reward consensus and returns the emissions for uids/hotkeys in a given `netuid`.
     /// (Dense version used only for testing purposes.)
     #[allow(clippy::indexing_slicing)]
-    pub fn epoch_dense(
+    pub fn epoch_dense_mechanism(
         netuid: NetUid,
+        mecid: MechId,
         rao_emission: AlphaCurrency,
     ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> {
+        // Calculate netuid storage index
+        let netuid_index = Self::get_mechanism_storage_index(netuid, mecid);
+
         // Get subnetwork size.
         let n: u16 = Self::get_subnetwork_n(netuid);
         log::trace!("n: {n:?}");
@@ -35,7 +179,7 @@ impl<T: Config> Pallet<T> {
         log::trace!("activity_cutoff: {activity_cutoff:?}");
 
         // Last update vector.
-        let last_update: Vec<u64> = Self::get_last_update(netuid);
+        let last_update: Vec<u64> = Self::get_last_update(netuid_index);
         log::trace!("Last update: {:?}", &last_update);
 
         // Inactive mask.
@@ -150,7 +294,7 @@ impl<T: Config> Pallet<T> {
         let owner_uid: Option<u16> = Self::get_owner_uid(netuid);
 
         // Access network weights row unnormalized.
-        let mut weights: Vec<Vec<I32F32>> = Self::get_weights(netuid);
+        let mut weights: Vec<Vec<I32F32>> = Self::get_weights(netuid_index);
         log::trace!("W: {:?}", &weights);
 
         // Mask weights that are not from permitted validators.
@@ -222,7 +366,7 @@ impl<T: Config> Pallet<T> {
         let mut ema_bonds: Vec<Vec<I32F32>>;
         if Yuma3On::<T>::get(netuid) {
             // Access network bonds.
-            let mut bonds: Vec<Vec<I32F32>> = Self::get_bonds_fixed_proportion(netuid);
+            let mut bonds: Vec<Vec<I32F32>> = Self::get_bonds_fixed_proportion(netuid_index);
             inplace_mask_cols(&recently_registered, &mut bonds); // mask outdated bonds
             log::trace!("B: {:?}", &bonds);
 
@@ -249,7 +393,7 @@ impl<T: Config> Pallet<T> {
         } else {
             // original Yuma - liquid alpha disabled
             // Access network bonds.
-            let mut bonds: Vec<Vec<I32F32>> = Self::get_bonds(netuid);
+            let mut bonds: Vec<Vec<I32F32>> = Self::get_bonds(netuid_index);
 
             // Remove bonds referring to neurons that have registered since last tempo.
             inplace_mask_cols(&recently_registered, &mut bonds); // mask recently registered bonds
             inplace_col_normalize(&mut bonds); // sum_i b_ij = 1
@@ -391,7 +535,7 @@ impl<T: Config> Pallet<T> {
         Rank::<T>::insert(netuid, cloned_ranks);
         Trust::<T>::insert(netuid, cloned_trust);
         Consensus::<T>::insert(netuid, cloned_consensus);
-        Incentive::<T>::insert(netuid, cloned_incentive);
+        Incentive::<T>::insert(NetUidStorageIndex::from(netuid), cloned_incentive);
         Dividends::<T>::insert(netuid, cloned_dividends);
         PruningScores::<T>::insert(netuid, cloned_pruning_scores);
         ValidatorTrust::<T>::insert(netuid, cloned_validator_trust);
@@ -408,11 +552,11 @@ impl<T: Config> Pallet<T> {
                 let new_bonds_row: Vec<(u16, u16)> = (0..n)
                     .zip(vec_fixed_proportions_to_u16(ema_bond.clone()))
                     .collect();
-                Bonds::<T>::insert(netuid, i as u16, new_bonds_row);
+                Bonds::<T>::insert(netuid_index, i as u16, new_bonds_row);
             } else if validator_permit {
                 // Only overwrite the intersection.
                 let new_empty_bonds_row: Vec<(u16, u16)> = vec![];
-                Bonds::<T>::insert(netuid, i as u16, new_empty_bonds_row);
+                Bonds::<T>::insert(netuid_index, i as u16, new_empty_bonds_row);
             }
         });
 
@@ -441,11 +585,27 @@ impl<T: Config> Pallet<T> {
     /// * 'debug' ( bool ):
    ///     - Print debugging outputs.
    ///
-    #[allow(clippy::indexing_slicing)]
-    pub fn epoch(
+    pub fn epoch_mechanism(
         netuid: NetUid,
+        mecid: MechId,
         rao_emission: AlphaCurrency,
-    ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> {
+    ) -> EpochOutput<T> {
+        // Calculate netuid storage index
+        let netuid_index = Self::get_mechanism_storage_index(netuid, mecid);
+
+        // Initialize output keys (neuron hotkeys) and UIDs
+        let mut terms_map: BTreeMap<T::AccountId, EpochTerms> = Keys::<T>::iter_prefix(netuid)
+            .map(|(uid, hotkey)| {
+                (
+                    hotkey,
+                    EpochTerms {
+                        uid: uid as usize,
+                        ..Default::default()
+                    },
+                )
+            })
+            .collect();
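Seeding `terms_map` from `Keys` up front means later hotkey-to-uid resolution becomes a `BTreeMap` lookup instead of a linear scan over a `(uid, hotkey)` list, as the commit-block bookkeeping below relies on. A sketch of the pattern (pallet context and a `hotkey` binding assumed):

    // O(log n) uid lookup against the prebuilt map, replacing the old
    // `hotkeys.iter().find(...)` O(n) scan.
    let uid_of = |acct: &T::AccountId| terms_map.get(acct).map(|t| t.uid);
    if let Some(cell) = uid_of(&hotkey).and_then(|i| commit_blocks.get_mut(i)) {
        // Bounds-checked: out-of-range uids are silently skipped.
        *cell = (*cell).min(current_block);
    }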
- let mut bonds: Vec> = Self::get_bonds_sparse(netuid); + let mut bonds: Vec> = Self::get_bonds_sparse(netuid_index); log::trace!("B: {:?}", &bonds); // Remove bonds referring to neurons that have registered since last tempo. @@ -756,7 +907,7 @@ impl Pallet { log::trace!("ΔB (norm): {:?}", &bonds_delta); // Compute the Exponential Moving Average (EMA) of bonds. - ema_bonds = Self::compute_ema_bonds_normal_sparse(&bonds_delta, &bonds, netuid); + ema_bonds = Self::compute_ema_bonds_normal_sparse(&bonds_delta, &bonds, netuid_index); // Normalize EMA bonds. inplace_col_normalize_sparse(&mut ema_bonds, n); // sum_i b_ij = 1 log::trace!("Exponential Moving Average Bonds: {:?}", &ema_bonds); @@ -855,9 +1006,9 @@ impl Pallet { let pruning_scores: Vec = normalized_combined_emission.clone(); log::trace!("Pruning Scores: {:?}", &pruning_scores); - // =================== - // == Value storage == - // =================== + // =========================== + // == Populate epoch output == + // =========================== let cloned_stake_weight: Vec = stake .iter() .map(|xi| fixed_proportion_to_u16(*xi)) @@ -888,54 +1039,55 @@ impl Pallet { .iter() .map(|xi| fixed_proportion_to_u16(*xi)) .collect::>(); - StakeWeight::::insert(netuid, cloned_stake_weight.clone()); - Active::::insert(netuid, active.clone()); - Emission::::insert(netuid, cloned_emission); - Rank::::insert(netuid, cloned_ranks); - Trust::::insert(netuid, cloned_trust); - Consensus::::insert(netuid, cloned_consensus); - Incentive::::insert(netuid, cloned_incentive); - Dividends::::insert(netuid, cloned_dividends); - PruningScores::::insert(netuid, cloned_pruning_scores); - ValidatorTrust::::insert(netuid, cloned_validator_trust); - ValidatorPermit::::insert(netuid, new_validator_permits.clone()); - new_validator_permits - .iter() - .zip(validator_permits) - .zip(ema_bonds) - .enumerate() - .for_each(|(i, ((new_permit, validator_permit), ema_bond))| { - // Set bonds only if uid retains validator permit, otherwise clear bonds. - if *new_permit { - let new_bonds_row: Vec<(u16, u16)> = ema_bond - .iter() - .map(|(j, value)| (*j, fixed_proportion_to_u16(*value))) - .collect(); - Bonds::::insert(netuid, i as u16, new_bonds_row); - } else if validator_permit { - // Only overwrite the intersection. 
- let new_empty_bonds_row: Vec<(u16, u16)> = vec![]; - Bonds::::insert(netuid, i as u16, new_empty_bonds_row); - } - }); - - Self::deposit_event(Event::IncentiveAlphaEmittedToMiners { - netuid, - emissions: server_emission.clone(), - }); + for (_hotkey, terms) in terms_map.iter_mut() { + terms.dividend = cloned_dividends.get(terms.uid).copied().unwrap_or_default(); + terms.incentive = cloned_incentive.get(terms.uid).copied().unwrap_or_default(); + terms.validator_emission = validator_emission + .get(terms.uid) + .copied() + .unwrap_or_default(); + terms.server_emission = server_emission.get(terms.uid).copied().unwrap_or_default(); + terms.stake_weight = cloned_stake_weight + .get(terms.uid) + .copied() + .unwrap_or_default(); + terms.active = active.get(terms.uid).copied().unwrap_or_default(); + terms.emission = cloned_emission.get(terms.uid).copied().unwrap_or_default(); + terms.rank = cloned_ranks.get(terms.uid).copied().unwrap_or_default(); + terms.trust = cloned_trust.get(terms.uid).copied().unwrap_or_default(); + terms.consensus = cloned_consensus.get(terms.uid).copied().unwrap_or_default(); + terms.pruning_score = cloned_pruning_scores + .get(terms.uid) + .copied() + .unwrap_or_default(); + terms.validator_trust = cloned_validator_trust + .get(terms.uid) + .copied() + .unwrap_or_default(); + terms.new_validator_permit = new_validator_permits + .get(terms.uid) + .copied() + .unwrap_or_default(); + let old_validator_permit = validator_permits + .get(terms.uid) + .copied() + .unwrap_or_default(); + + // Bonds + if terms.new_validator_permit { + let ema_bond = ema_bonds.get(terms.uid).cloned().unwrap_or_default(); + terms.bond = ema_bond + .iter() + .map(|(j, value)| (*j, fixed_proportion_to_u16(*value))) + .collect(); + } else if old_validator_permit { + // Only overwrite the intersection. + terms.bond = vec![]; + } + } - // Emission tuples ( hotkeys, server_emission, validator_emission ) - hotkeys - .into_iter() - .map(|(uid_i, hotkey)| { - ( - hotkey, - server_emission[uid_i as usize], - validator_emission[uid_i as usize], - ) - }) - .collect() + EpochOutput(terms_map) } pub fn get_float_rho(netuid: NetUid) -> I32F32 { @@ -965,59 +1117,54 @@ impl Pallet { } /// Output unnormalized sparse weights, input weights are assumed to be row max-upscaled in u16. - pub fn get_weights_sparse(netuid: NetUid) -> Vec> { + pub fn get_weights_sparse(netuid_index: NetUidStorageIndex) -> Vec> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); let n = Self::get_subnetwork_n(netuid) as usize; let mut weights: Vec> = vec![vec![]; n]; for (uid_i, weights_i) in - as IterableStorageDoubleMap>>::iter_prefix( - netuid, - ) - .filter(|(uid_i, _)| *uid_i < n as u16) + Weights::::iter_prefix(netuid_index).filter(|(uid_i, _)| *uid_i < n as u16) { for (uid_j, weight_ij) in weights_i.iter().filter(|(uid_j, _)| *uid_j < n as u16) { - weights - .get_mut(uid_i as usize) - .expect("uid_i is filtered to be less than n; qed") - .push((*uid_j, I32F32::saturating_from_num(*weight_ij))); + if let Some(row) = weights.get_mut(uid_i as usize) { + row.push((*uid_j, I32F32::saturating_from_num(*weight_ij))); + } else { + log::error!("math error: uid_i {uid_i:?} is filtered to be less than n"); + } } } weights } /// Output unnormalized weights in [n, n] matrix, input weights are assumed to be row max-upscaled in u16. 
- pub fn get_weights(netuid: NetUid) -> Vec> { + pub fn get_weights(netuid_index: NetUidStorageIndex) -> Vec> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); let n = Self::get_subnetwork_n(netuid) as usize; let mut weights: Vec> = vec![vec![I32F32::saturating_from_num(0.0); n]; n]; for (uid_i, weights_vec) in - as IterableStorageDoubleMap>>::iter_prefix( - netuid, - ) - .filter(|(uid_i, _)| *uid_i < n as u16) + Weights::::iter_prefix(netuid_index).filter(|(uid_i, _)| *uid_i < n as u16) { for (uid_j, weight_ij) in weights_vec .into_iter() .filter(|(uid_j, _)| *uid_j < n as u16) { - *weights + if let Some(cell) = weights .get_mut(uid_i as usize) - .expect("uid_i is filtered to be less than n; qed") - .get_mut(uid_j as usize) - .expect("uid_j is filtered to be less than n; qed") = - I32F32::saturating_from_num(weight_ij); + .and_then(|row| row.get_mut(uid_j as usize)) + { + *cell = I32F32::saturating_from_num(weight_ij); + } } } weights } /// Output unnormalized sparse bonds, input bonds are assumed to be column max-upscaled in u16. - pub fn get_bonds_sparse(netuid: NetUid) -> Vec> { + pub fn get_bonds_sparse(netuid_index: NetUidStorageIndex) -> Vec> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); let n = Self::get_subnetwork_n(netuid) as usize; let mut bonds: Vec> = vec![vec![]; n]; for (uid_i, bonds_vec) in - as IterableStorageDoubleMap>>::iter_prefix( - netuid, - ) - .filter(|(uid_i, _)| *uid_i < n as u16) + Bonds::::iter_prefix(netuid_index).filter(|(uid_i, _)| *uid_i < n as u16) { for (uid_j, bonds_ij) in bonds_vec { bonds @@ -1030,14 +1177,12 @@ impl Pallet { } /// Output unnormalized bonds in [n, n] matrix, input bonds are assumed to be column max-upscaled in u16. - pub fn get_bonds(netuid: NetUid) -> Vec> { + pub fn get_bonds(netuid_index: NetUidStorageIndex) -> Vec> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); let n: usize = Self::get_subnetwork_n(netuid) as usize; let mut bonds: Vec> = vec![vec![I32F32::saturating_from_num(0.0); n]; n]; for (uid_i, bonds_vec) in - as IterableStorageDoubleMap>>::iter_prefix( - netuid, - ) - .filter(|(uid_i, _)| *uid_i < n as u16) + Bonds::::iter_prefix(netuid_index).filter(|(uid_i, _)| *uid_i < n as u16) { for (uid_j, bonds_ij) in bonds_vec.into_iter().filter(|(uid_j, _)| *uid_j < n as u16) { *bonds @@ -1051,7 +1196,7 @@ impl Pallet { bonds } - pub fn get_bonds_fixed_proportion(netuid: NetUid) -> Vec> { + pub fn get_bonds_fixed_proportion(netuid: NetUidStorageIndex) -> Vec> { let mut bonds = Self::get_bonds(netuid); bonds.iter_mut().for_each(|bonds_row| { bonds_row @@ -1061,7 +1206,9 @@ impl Pallet { bonds } - pub fn get_bonds_sparse_fixed_proportion(netuid: NetUid) -> Vec> { + pub fn get_bonds_sparse_fixed_proportion( + netuid: NetUidStorageIndex, + ) -> Vec> { let mut bonds = Self::get_bonds_sparse(netuid); bonds.iter_mut().for_each(|bonds_row| { bonds_row @@ -1083,8 +1230,10 @@ impl Pallet { pub fn compute_ema_bonds_normal_sparse( bonds_delta: &[Vec<(u16, I32F32)>], bonds: &[Vec<(u16, I32F32)>], - netuid: NetUid, + netuid_index: NetUidStorageIndex, ) -> Vec> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); + // Retrieve the bonds moving average for the given network ID and scale it down. let bonds_moving_average: I64F64 = I64F64::saturating_from_num(Self::get_bonds_moving_average(netuid)) @@ -1191,11 +1340,13 @@ impl Pallet { /// # Returns: /// A vector of EMA bonds. 
pub fn compute_bonds_sparse( - netuid: NetUid, + netuid_index: NetUidStorageIndex, weights: &[Vec<(u16, I32F32)>], bonds: &[Vec<(u16, I32F32)>], consensus: &[I32F32], ) -> Vec> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); + // Check if Liquid Alpha is enabled, consensus is not empty, and contains non-zero values. if LiquidAlphaOn::::get(netuid) && !consensus.is_empty() @@ -1236,14 +1387,21 @@ impl Pallet { bonds: &[Vec], // previous epoch bonds consensus: &[I32F32], // previous epoch consensus weights ) -> Vec> { - assert!(weights.len() == bonds.len()); + let mut alphas = Vec::new(); + + if weights.len() != bonds.len() { + log::error!( + "math error: compute_liquid_alpha_values: weights and bonds have different lengths: {:?} != {:?}", + weights.len(), + bonds.len() + ); + return alphas; + } // Get the high and low alpha values for the network. let alpha_sigmoid_steepness: I32F32 = Self::get_alpha_sigmoid_steepness(netuid); let (alpha_low, alpha_high): (I32F32, I32F32) = Self::get_alpha_values_32(netuid); - let mut alphas = Vec::new(); - for (w_row, b_row) in weights.iter().zip(bonds.iter()) { let mut row_alphas = Vec::new(); @@ -1282,12 +1440,20 @@ impl Pallet { bonds: &[Vec<(u16, I32F32)>], // previous epoch bonds consensus: &[I32F32], // previous epoch consensus weights ) -> Vec> { - assert!(weights.len() == bonds.len()); + let mut alphas = Vec::with_capacity(consensus.len()); + + if weights.len() != bonds.len() { + log::error!( + "math error: compute_liquid_alpha_values: weights and bonds have different lengths: {:?} != {:?}", + weights.len(), + bonds.len() + ); + return alphas; + } let alpha_sigmoid_steepness: I32F32 = Self::get_alpha_sigmoid_steepness(netuid); let (alpha_low, alpha_high): (I32F32, I32F32) = Self::get_alpha_values_32(netuid); - let mut alphas = Vec::with_capacity(consensus.len()); let zero = I32F32::from_num(0.0); // iterate over rows @@ -1418,7 +1584,12 @@ impl Pallet { Ok(()) } - pub fn do_reset_bonds(netuid: NetUid, account_id: &T::AccountId) -> Result<(), DispatchError> { + pub fn do_reset_bonds( + netuid_index: NetUidStorageIndex, + account_id: &T::AccountId, + ) -> Result<(), DispatchError> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); + // check bonds reset enabled for this subnet let bonds_reset_enabled: bool = Self::get_bonds_reset(netuid); if !bonds_reset_enabled { @@ -1426,9 +1597,9 @@ impl Pallet { } if let Ok(uid) = Self::get_uid_for_net_and_hotkey(netuid, account_id) { - for (i, bonds_vec) in Bonds::::iter_prefix(netuid) { + for (i, bonds_vec) in Bonds::::iter_prefix(netuid_index) { Bonds::::insert( - netuid, + netuid_index, i, bonds_vec .clone() diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index db4c385812..7de32221a0 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -42,7 +42,7 @@ pub mod staking; pub mod subnets; pub mod swap; pub mod utils; -use crate::utils::rate_limiting::TransactionType; +use crate::utils::rate_limiting::{Hyperparameter, TransactionType}; use macros::{config, dispatches, errors, events, genesis, hooks}; #[cfg(test)] @@ -86,7 +86,9 @@ pub mod pallet { use sp_std::vec::Vec; use substrate_fixed::types::{I96F32, U64F64}; use subtensor_macros::freeze_struct; - use subtensor_runtime_common::{AlphaCurrency, Currency, NetUid, TaoCurrency}; + use subtensor_runtime_common::{ + AlphaCurrency, Currency, MechId, NetUid, NetUidStorageIndex, TaoCurrency, + }; #[cfg(not(feature = "std"))] use alloc::boxed::Box; @@ 
-303,6 +305,15 @@ pub mod pallet { pub additional: Vec, } + /// Enum for recycle or burn for the owner_uid(s) + #[derive(TypeInfo, Encode, Decode, DecodeWithMemTracking, Clone, PartialEq, Eq, Debug)] + pub enum RecycleOrBurnEnum { + /// Burn the miner emission sent to the burn UID + Burn, + /// Recycle the miner emission sent to the recycle UID + Recycle, + } + /// ============================ /// ==== Staking + Accounts ==== /// ============================ @@ -521,11 +532,6 @@ pub mod pallet { T::InitialNetworkImmunityPeriod::get() } #[pallet::type_value] - /// Default value for network min allowed UIDs. - pub fn DefaultNetworkMinAllowedUids() -> u16 { - T::InitialNetworkMinAllowedUids::get() - } - #[pallet::type_value] /// Default value for network min lock cost. pub fn DefaultNetworkMinLockCost() -> TaoCurrency { T::InitialNetworkMinLockCost::get().into() @@ -541,6 +547,11 @@ pub mod pallet { T::InitialSubnetOwnerCut::get() } #[pallet::type_value] + /// Default value for recycle or burn. + pub fn DefaultRecycleOrBurn() -> RecycleOrBurnEnum { + RecycleOrBurnEnum::Burn // default to burn + } + #[pallet::type_value] /// Default value for network rate limit. pub fn DefaultNetworkRateLimit() -> u64 { if cfg!(feature = "pow-faucet") { @@ -549,6 +560,11 @@ pub mod pallet { T::InitialNetworkRateLimit::get() } #[pallet::type_value] + /// Default value for network rate limit. + pub fn DefaultNetworkRegistrationStartBlock() -> u64 { + 0 + } + #[pallet::type_value] /// Default value for weights version key rate limit. /// In units of tempos. pub fn DefaultWeightsVersionKeyRateLimit() -> u64 { @@ -611,11 +627,21 @@ pub mod pallet { T::InitialKappa::get() } #[pallet::type_value] + /// Default value for network min allowed UIDs. + pub fn DefaultMinAllowedUids() -> u16 { + T::InitialMinAllowedUids::get() + } + #[pallet::type_value] /// Default maximum allowed UIDs. pub fn DefaultMaxAllowedUids() -> u16 { T::InitialMaxAllowedUids::get() } #[pallet::type_value] + /// -- Rate limit for set max allowed UIDs + pub fn MaxUidsTrimmingRateLimit() -> u64 { + prod_or_fast!(30 * 7200, 1) + } + #[pallet::type_value] /// Default immunity period. pub fn DefaultImmunityPeriod() -> u16 { T::InitialImmunityPeriod::get() @@ -858,16 +884,44 @@ pub mod pallet { 50400 } + #[pallet::type_value] + /// Default number of terminal blocks in a tempo during which admin operations are prohibited + pub fn DefaultAdminFreezeWindow() -> u16 { + 10 + } + + #[pallet::type_value] + /// Default number of tempos for owner hyperparameter update rate limit + pub fn DefaultOwnerHyperparamRateLimit() -> u16 { + 2 + } + #[pallet::type_value] /// Default value for ck burn, 18%. pub fn DefaultCKBurn() -> u64 { 0 } + #[pallet::type_value] + /// Default value for subnet limit. 
+ pub fn DefaultSubnetLimit() -> u16 { + 128 + } + #[pallet::storage] pub type MinActivityCutoff = StorageValue<_, u16, ValueQuery, DefaultMinActivityCutoff>; + #[pallet::storage] + /// Global window (in blocks) at the end of each tempo where admin ops are disallowed + pub type AdminFreezeWindow = + StorageValue<_, u16, ValueQuery, DefaultAdminFreezeWindow>; + + #[pallet::storage] + /// Global number of epochs used to rate limit subnet owner hyperparameter updates + pub type OwnerHyperparamRateLimit = + StorageValue<_, u16, ValueQuery, DefaultOwnerHyperparamRateLimit>; + #[pallet::storage] pub type ColdkeySwapScheduleDuration = StorageValue<_, BlockNumberFor, ValueQuery, DefaultColdkeySwapScheduleDuration>; @@ -1040,6 +1094,9 @@ pub mod pallet { /// /// Eventually, Bittensor should migrate to using Holds afterwhich time we will not require this /// separate accounting. + + #[pallet::storage] // --- ITEM ( maximum_number_of_networks ) + pub type SubnetLimit = StorageValue<_, u16, ValueQuery, DefaultSubnetLimit>; #[pallet::storage] // --- ITEM ( total_issuance ) pub type TotalIssuance = StorageValue<_, TaoCurrency, ValueQuery, DefaultTotalIssuance>; #[pallet::storage] // --- ITEM ( total_stake ) @@ -1292,6 +1349,10 @@ pub mod pallet { pub type SubnetOwnerHotkey = StorageMap<_, Identity, NetUid, T::AccountId, ValueQuery, DefaultSubnetOwner>; #[pallet::storage] + /// --- MAP ( netuid ) --> recycle_or_burn + pub type RecycleOrBurn = + StorageMap<_, Identity, NetUid, RecycleOrBurnEnum, ValueQuery, DefaultRecycleOrBurn>; + #[pallet::storage] /// --- MAP ( netuid ) --> serving_rate_limit pub type ServingRateLimit = StorageMap<_, Identity, NetUid, u64, ValueQuery, DefaultServingRateLimit>; @@ -1318,6 +1379,10 @@ pub mod pallet { pub type BurnRegistrationsThisInterval = StorageMap<_, Identity, NetUid, u16, ValueQuery>; #[pallet::storage] + /// --- MAP ( netuid ) --> min_allowed_uids + pub type MinAllowedUids = + StorageMap<_, Identity, NetUid, u16, ValueQuery, DefaultMinAllowedUids>; + #[pallet::storage] /// --- MAP ( netuid ) --> max_allowed_uids pub type MaxAllowedUids = StorageMap<_, Identity, NetUid, u16, ValueQuery, DefaultMaxAllowedUids>; @@ -1481,7 +1546,7 @@ pub mod pallet { /// ==== Subnetwork Consensus Storage ==== /// ======================================= #[pallet::storage] // --- DMAP ( netuid ) --> stake_weight | weight for stake used in YC. 
- pub(super) type StakeWeight = + pub type StakeWeight = StorageMap<_, Identity, NetUid, Vec, ValueQuery, EmptyU16Vec>; #[pallet::storage] /// --- DMAP ( netuid, hotkey ) --> uid @@ -1522,7 +1587,7 @@ pub mod pallet { #[pallet::storage] /// --- MAP ( netuid ) --> incentive pub type Incentive = - StorageMap<_, Identity, NetUid, Vec, ValueQuery, EmptyU16Vec>; + StorageMap<_, Identity, NetUidStorageIndex, Vec, ValueQuery, EmptyU16Vec>; #[pallet::storage] /// --- MAP ( netuid ) --> dividends pub type Dividends = @@ -1533,7 +1598,7 @@ pub mod pallet { #[pallet::storage] /// --- MAP ( netuid ) --> last_update pub type LastUpdate = - StorageMap<_, Identity, NetUid, Vec, ValueQuery, EmptyU64Vec>; + StorageMap<_, Identity, NetUidStorageIndex, Vec, ValueQuery, EmptyU64Vec>; #[pallet::storage] /// --- MAP ( netuid ) --> validator_trust pub type ValidatorTrust = @@ -1551,7 +1616,7 @@ pub mod pallet { pub type Weights = StorageDoubleMap< _, Identity, - NetUid, + NetUidStorageIndex, Identity, u16, Vec<(u16, u16)>, @@ -1563,7 +1628,7 @@ pub mod pallet { pub type Bonds = StorageDoubleMap< _, Identity, - NetUid, + NetUidStorageIndex, Identity, u16, Vec<(u16, u16)>, @@ -1673,7 +1738,7 @@ pub mod pallet { pub type WeightCommits = StorageDoubleMap< _, Twox64Concat, - NetUid, + NetUidStorageIndex, Twox64Concat, T::AccountId, VecDeque<(H256, u64, u64, u64)>, @@ -1685,7 +1750,7 @@ pub mod pallet { pub type TimelockedWeightCommits = StorageDoubleMap< _, Twox64Concat, - NetUid, + NetUidStorageIndex, Twox64Concat, u64, // epoch key VecDeque<( @@ -1702,7 +1767,7 @@ pub mod pallet { pub type CRV3WeightCommits = StorageDoubleMap< _, Twox64Concat, - NetUid, + NetUidStorageIndex, Twox64Concat, u64, // epoch key VecDeque<( @@ -1718,7 +1783,7 @@ pub mod pallet { pub type CRV3WeightCommitsV2 = StorageDoubleMap< _, Twox64Concat, - NetUid, + NetUidStorageIndex, Twox64Concat, u64, // epoch key VecDeque<( @@ -1800,6 +1865,43 @@ pub mod pallet { pub type CommitRevealWeightsVersion = StorageValue<_, u16, ValueQuery, DefaultCommitRevealWeightsVersion>; + #[pallet::storage] + /// ITEM( NetworkRegistrationStartBlock ) + pub type NetworkRegistrationStartBlock = + StorageValue<_, u64, ValueQuery, DefaultNetworkRegistrationStartBlock>; + + /// ============================ + /// ==== Subnet Mechanisms ===== + /// ============================ + #[pallet::type_value] + /// -- ITEM (Default number of sub-subnets) + pub fn DefaultMechanismCount() -> MechId { + MechId::from(1) + } + #[pallet::type_value] + /// -- ITEM (Maximum number of sub-subnets) + pub fn MaxMechanismCount() -> MechId { + MechId::from(2) + } + #[pallet::type_value] + /// -- ITEM (Rate limit for mechanism count updates) + pub fn MechanismCountSetRateLimit() -> u64 { + prod_or_fast!(7_200, 1) + } + #[pallet::type_value] + /// -- ITEM (Rate limit for mechanism emission distribution updates) + pub fn MechanismEmissionRateLimit() -> u64 { + prod_or_fast!(7_200, 1) + } + #[pallet::storage] + /// --- MAP ( netuid ) --> Current number of subnet mechanisms + pub type MechanismCountCurrent = + StorageMap<_, Twox64Concat, NetUid, MechId, ValueQuery, DefaultMechanismCount>; + #[pallet::storage] + /// --- MAP ( netuid ) --> Normalized vector of emission split proportion between subnet mechanisms + pub type MechanismEmissionSplit = + StorageMap<_, Twox64Concat, NetUid, Vec, OptionQuery>; + /// ================== /// ==== Genesis ===== /// ================== @@ -1883,7 +1985,7 @@ pub enum CustomTransactionError { ColdkeyInSwapSchedule, StakeAmountTooLow, BalanceTooLow, - 
SubnetDoesntExist, + SubnetNotExists, HotkeyAccountDoesntExist, NotEnoughStakeToWithdraw, RateLimitExceeded, @@ -1908,7 +2010,7 @@ impl From for u8 { CustomTransactionError::ColdkeyInSwapSchedule => 0, CustomTransactionError::StakeAmountTooLow => 1, CustomTransactionError::BalanceTooLow => 2, - CustomTransactionError::SubnetDoesntExist => 3, + CustomTransactionError::SubnetNotExists => 3, CustomTransactionError::HotkeyAccountDoesntExist => 4, CustomTransactionError::NotEnoughStakeToWithdraw => 5, CustomTransactionError::RateLimitExceeded => 6, @@ -2044,6 +2146,18 @@ impl> fn is_subtoken_enabled(netuid: NetUid) -> bool { SubtokenEnabled::::get(netuid) } + + fn get_validator_trust(netuid: NetUid) -> Vec { + ValidatorTrust::::get(netuid) + } + + fn get_validator_permit(netuid: NetUid) -> Vec { + ValidatorPermit::::get(netuid) + } + + fn hotkey_of_uid(netuid: NetUid, uid: u16) -> Option { + Keys::::try_get(netuid, uid).ok() + } } impl> @@ -2137,6 +2251,8 @@ impl> pub enum RateLimitKey { // The setting sn owner hotkey operation is rate limited per netuid SetSNOwnerHotkey(NetUid), + // Generic rate limit for subnet-owner hyperparameter updates (per netuid) + OwnerHyperparamUpdate(NetUid, Hyperparameter), // Subnet registration rate limit NetworkLastRegistered, // Last tx block limit per account ID @@ -2162,3 +2278,8 @@ impl ProxyInterface for () { Ok(()) } } + +/// Pallets that hold per-subnet commitments implement this to purge all state for `netuid`. +pub trait CommitmentsInterface { + fn purge_netuid(netuid: NetUid); +} diff --git a/pallets/subtensor/src/macros/config.rs b/pallets/subtensor/src/macros/config.rs index eebff46c6e..1a64826ed4 100644 --- a/pallets/subtensor/src/macros/config.rs +++ b/pallets/subtensor/src/macros/config.rs @@ -6,6 +6,7 @@ use frame_support::pallet_macros::pallet_section; #[pallet_section] mod config { + use crate::CommitmentsInterface; use pallet_commitments::GetCommitments; use subtensor_swap_interface::SwapHandler; @@ -62,6 +63,9 @@ mod config { /// Interface to get commitments. type GetCommitments: GetCommitments; + /// Interface to clean commitments on network dissolution. + type CommitmentsInterface: CommitmentsInterface; + /// ================================= /// ==== Initial Value Constants ==== /// ================================= @@ -132,7 +136,10 @@ mod config { /// Kappa constant. #[pallet::constant] type InitialKappa: Get; - /// Max UID constant. + /// Initial minimum allowed network UIDs + #[pallet::constant] + type InitialMinAllowedUids: Get; + /// Initial maximum allowed network UIDs #[pallet::constant] type InitialMaxAllowedUids: Get; /// Initial validator context pruning length. @@ -195,9 +202,6 @@ mod config { /// Initial network immunity period #[pallet::constant] type InitialNetworkImmunityPeriod: Get; - /// Initial minimum allowed network UIDs - #[pallet::constant] - type InitialNetworkMinAllowedUids: Get; /// Initial network minimum burn cost #[pallet::constant] type InitialNetworkMinLockCost: Get; @@ -254,5 +258,8 @@ mod config { /// Number of blocks between dividends distribution. #[pallet::constant] type LeaseDividendsDistributionInterval: Get>; + /// Maximum percentage of immune UIDs. 
+ #[pallet::constant] + type MaxImmuneUidsPercentage: Get; } } diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index f069ff0aa5..e1bb3311ca 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -53,7 +53,7 @@ mod dispatches { /// - On successfully setting the weights on chain. /// /// # Raises: - /// * 'SubNetworkDoesNotExist': + /// * 'MechanismDoesNotExist': /// - Attempting to set weights on a non-existent network. /// /// * 'NotRegistered': @@ -78,7 +78,7 @@ mod dispatches { /// - Attempting to set weights with max value exceeding limit. #[pallet::call_index(0)] #[pallet::weight((Weight::from_parts(15_540_000_000, 0) - .saturating_add(T::DbWeight::get().reads(4111)) + .saturating_add(T::DbWeight::get().reads(4112_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn set_weights( origin: OriginFor, @@ -94,6 +94,86 @@ mod dispatches { } } + /// --- Sets the caller weights for the incentive mechanism for mechanisms. The call + /// can be made from the hotkey account so is potentially insecure, however, the damage + /// of changing weights is minimal if caught early. This function includes all the + /// checks that the passed weights meet the requirements. Stored as u16s they represent + /// rational values in the range [0,1] which sum to 1 and can be interpreted as + /// probabilities. The specific weights determine how inflation propagates outward + /// from this peer. + /// + /// Note: The 16 bit integers weights should represent 1.0 as the max u16. + /// However, the function normalizes all integers to u16_max anyway. This means that if the sum of all + /// elements is larger or smaller than the amount of elements * u16_max, all elements + /// will be corrected for this deviation. + /// + /// # Args: + /// * `origin`: (Origin): + /// - The caller, a hotkey who wishes to set their weights. + /// + /// * `netuid` (u16): + /// - The network uid we are setting these weights on. + /// + /// * `mecid` (`u8`): + /// - The u8 mechnism identifier. + /// + /// * `dests` (Vec): + /// - The edge endpoint for the weight, i.e. j for w_ij. + /// + /// * 'weights' (Vec): + /// - The u16 integer encoded weights. Interpreted as rational + /// values in the range [0,1]. They must sum to in32::MAX. + /// + /// * 'version_key' ( u64 ): + /// - The network version key to check if the validator is up to date. + /// + /// # Event: + /// * WeightsSet; + /// - On successfully setting the weights on chain. + /// + /// # Raises: + /// * 'MechanismDoesNotExist': + /// - Attempting to set weights on a non-existent network. + /// + /// * 'NotRegistered': + /// - Attempting to set weights from a non registered account. + /// + /// * 'WeightVecNotEqualSize': + /// - Attempting to set weights with uids not of same length. + /// + /// * 'DuplicateUids': + /// - Attempting to set weights with duplicate uids. + /// + /// * 'UidsLengthExceedUidsInSubNet': + /// - Attempting to set weights above the max allowed uids. + /// + /// * 'UidVecContainInvalidOne': + /// - Attempting to set weights with invalid uids. + /// + /// * 'WeightVecLengthIsLow': + /// - Attempting to set weights with fewer weights than min. + /// + /// * 'MaxWeightExceeded': + /// - Attempting to set weights with max value exceeding limit. 
+ #[pallet::call_index(119)] + #[pallet::weight((Weight::from_parts(15_540_000_000, 0) + .saturating_add(T::DbWeight::get().reads(4111)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + pub fn set_mechanism_weights( + origin: OriginFor, + netuid: NetUid, + mecid: MechId, + dests: Vec, + weights: Vec, + version_key: u64, + ) -> DispatchResult { + if Self::get_commit_reveal_weights_enabled(netuid) { + Err(Error::::CommitRevealEnabled.into()) + } else { + Self::do_set_mechanism_weights(origin, netuid, mecid, dests, weights, version_key) + } + } + /// --- Allows a hotkey to set weights for multiple netuids as a batch. /// /// # Args: @@ -121,7 +201,7 @@ mod dispatches { /// #[pallet::call_index(80)] #[pallet::weight((Weight::from_parts(95_460_000, 0) - .saturating_add(T::DbWeight::get().reads(14_u64)) + .saturating_add(T::DbWeight::get().reads(15_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)), DispatchClass::Normal, Pays::No))] pub fn batch_set_weights( origin: OriginFor, @@ -152,8 +232,8 @@ mod dispatches { /// - Attempting to commit when the user has more than the allowed limit of unrevealed commits. /// #[pallet::call_index(96)] - #[pallet::weight((Weight::from_parts(55_130_000, 0) - .saturating_add(T::DbWeight::get().reads(7)) + #[pallet::weight((Weight::from_parts(67_770_000, 0) + .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn commit_weights( origin: T::RuntimeOrigin, @@ -163,6 +243,41 @@ mod dispatches { Self::do_commit_weights(origin, netuid, commit_hash) } + /// ---- Used to commit a hash of your weight values to later be revealed for mechanisms. + /// + /// # Args: + /// * `origin`: (`::RuntimeOrigin`): + /// - The signature of the committing hotkey. + /// + /// * `netuid` (`u16`): + /// - The u16 network identifier. + /// + /// * `mecid` (`u8`): + /// - The u8 mechanism identifier. + /// + /// * `commit_hash` (`H256`): + /// - The hash representing the committed weights. + /// + /// # Raises: + /// * `CommitRevealDisabled`: + /// - Attempting to commit when the commit-reveal mechanism is disabled. + /// + /// * `TooManyUnrevealedCommits`: + /// - Attempting to commit when the user has more than the allowed limit of unrevealed commits. + /// + #[pallet::call_index(115)] + #[pallet::weight((Weight::from_parts(55_130_000, 0) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + pub fn commit_mechanism_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + mecid: MechId, + commit_hash: H256, + ) -> DispatchResult { + Self::do_commit_mechanism_weights(origin, netuid, mecid, commit_hash) + } + /// --- Allows a hotkey to commit weight hashes for multiple netuids as a batch. 
/// /// # Args: @@ -187,7 +302,7 @@ mod dispatches { /// #[pallet::call_index(100)] #[pallet::weight((Weight::from_parts(100_500_000, 0) - .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn batch_commit_weights( origin: OriginFor, @@ -236,7 +351,7 @@ mod dispatches { /// #[pallet::call_index(97)] #[pallet::weight((Weight::from_parts(122_000_000, 0) - .saturating_add(T::DbWeight::get().reads(16)) + .saturating_add(T::DbWeight::get().reads(17_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn reveal_weights( origin: T::RuntimeOrigin, @@ -249,6 +364,165 @@ mod dispatches { Self::do_reveal_weights(origin, netuid, uids, values, salt, version_key) } + /// ---- Used to reveal the weights for a previously committed hash for mechanisms. + /// + /// # Args: + /// * `origin`: (`::RuntimeOrigin`): + /// - The signature of the revealing hotkey. + /// + /// * `netuid` (`u16`): + /// - The u16 network identifier. + /// + /// * `mecid` (`u8`): + /// - The u8 mechanism identifier. + /// + /// * `uids` (`Vec`): + /// - The uids for the weights being revealed. + /// + /// * `values` (`Vec`): + /// - The values of the weights being revealed. + /// + /// * `salt` (`Vec`): + /// - The salt used to generate the commit hash. + /// + /// * `version_key` (`u64`): + /// - The network version key. + /// + /// # Raises: + /// * `CommitRevealDisabled`: + /// - Attempting to reveal weights when the commit-reveal mechanism is disabled. + /// + /// * `NoWeightsCommitFound`: + /// - Attempting to reveal weights without an existing commit. + /// + /// * `ExpiredWeightCommit`: + /// - Attempting to reveal a weight commit that has expired. + /// + /// * `RevealTooEarly`: + /// - Attempting to reveal weights outside the valid reveal period. + /// + /// * `InvalidRevealCommitHashNotMatch`: + /// - The revealed hash does not match any committed hash. + /// + #[pallet::call_index(116)] + #[pallet::weight((Weight::from_parts(122_000_000, 0) + .saturating_add(T::DbWeight::get().reads(16)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + pub fn reveal_mechanism_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + mecid: MechId, + uids: Vec, + values: Vec, + salt: Vec, + version_key: u64, + ) -> DispatchResult { + Self::do_reveal_mechanism_weights( + origin, + netuid, + mecid, + uids, + values, + salt, + version_key, + ) + } + + /// ---- Used to commit encrypted commit-reveal v3 weight values to later be revealed. + /// + /// # Args: + /// * `origin`: (`::RuntimeOrigin`): + /// - The committing hotkey. + /// + /// * `netuid` (`u16`): + /// - The u16 network identifier. + /// + /// * `commit` (`Vec`): + /// - The encrypted compressed commit. + /// The steps for this are: + /// 1. Instantiate [`WeightsTlockPayload`] + /// 2. Serialize it using the `parity_scale_codec::Encode` trait + /// 3. Encrypt it following the steps (here)[https://github.com/ideal-lab5/tle/blob/f8e6019f0fb02c380ebfa6b30efb61786dede07b/timelock/src/tlock.rs#L283-L336] + /// to produce a [`TLECiphertext`] type. + /// 4. Serialize and compress using the `ark-serialize` `CanonicalSerialize` trait. + /// + /// * reveal_round (`u64`): + /// - The drand reveal round which will be avaliable during epoch `n+1` from the current + /// epoch. 
+ /// + /// # Raises: + /// * `CommitRevealV3Disabled`: + /// - Attempting to commit when the commit-reveal mechanism is disabled. + /// + /// * `TooManyUnrevealedCommits`: + /// - Attempting to commit when the user has more than the allowed limit of unrevealed commits. + /// + // #[pallet::call_index(99)] + // #[pallet::weight((Weight::from_parts(77_750_000, 0) + // .saturating_add(T::DbWeight::get().reads(9_u64)) + // .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + // pub fn commit_crv3_weights( + // origin: T::RuntimeOrigin, + // netuid: NetUid, + // commit: BoundedVec>, + // reveal_round: u64, + // ) -> DispatchResult { + // Self::do_commit_timelocked_weights(origin, netuid, commit, reveal_round, 4) + // } + + /// ---- Used to commit encrypted commit-reveal v3 weight values to later be revealed for mechanisms. + /// + /// # Args: + /// * `origin`: (`::RuntimeOrigin`): + /// - The committing hotkey. + /// + /// * `netuid` (`u16`): + /// - The u16 network identifier. + /// + /// * `mecid` (`u8`): + /// - The u8 mechanism identifier. + /// + /// * `commit` (`Vec`): + /// - The encrypted compressed commit. + /// The steps for this are: + /// 1. Instantiate [`WeightsTlockPayload`] + /// 2. Serialize it using the `parity_scale_codec::Encode` trait + /// 3. Encrypt it following the steps (here)[https://github.com/ideal-lab5/tle/blob/f8e6019f0fb02c380ebfa6b30efb61786dede07b/timelock/src/tlock.rs#L283-L336] + /// to produce a [`TLECiphertext`] type. + /// 4. Serialize and compress using the `ark-serialize` `CanonicalSerialize` trait. + /// + /// * reveal_round (`u64`): + /// - The drand reveal round which will be avaliable during epoch `n+1` from the current + /// epoch. + /// + /// # Raises: + /// * `CommitRevealV3Disabled`: + /// - Attempting to commit when the commit-reveal mechanism is disabled. + /// + /// * `TooManyUnrevealedCommits`: + /// - Attempting to commit when the user has more than the allowed limit of unrevealed commits. + /// + #[pallet::call_index(117)] + #[pallet::weight((Weight::from_parts(77_750_000, 0) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + pub fn commit_crv3_mechanism_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + mecid: MechId, + commit: BoundedVec>, + reveal_round: u64, + ) -> DispatchResult { + Self::do_commit_timelocked_mechanism_weights( + origin, + netuid, + mecid, + commit, + reveal_round, + 4, + ) + } + /// ---- The implementation for batch revealing committed weights. /// /// # Args: @@ -290,7 +564,7 @@ mod dispatches { /// - The input vectors are of mismatched lengths. #[pallet::call_index(98)] #[pallet::weight((Weight::from_parts(412_000_000, 0) - .saturating_add(T::DbWeight::get().reads(16)) + .saturating_add(T::DbWeight::get().reads(17_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)), DispatchClass::Normal, Pays::No))] pub fn batch_reveal_weights( origin: T::RuntimeOrigin, @@ -431,7 +705,7 @@ mod dispatches { /// - Errors stemming from transaction pallet. /// #[pallet::call_index(2)] - #[pallet::weight((Weight::from_parts(340_400_000, 0) + #[pallet::weight((Weight::from_parts(340_800_000, 0) .saturating_add(T::DbWeight::get().reads(26)) .saturating_add(T::DbWeight::get().writes(15)), DispatchClass::Normal, Pays::Yes))] pub fn add_stake( @@ -523,7 +797,7 @@ mod dispatches { /// - On successfully serving the axon info. 
/// /// # Raises: - /// * 'SubNetworkDoesNotExist': + /// * 'MechanismDoesNotExist': /// - Attempting to set weights on a non-existent network. /// /// * 'NotRegistered': @@ -607,7 +881,7 @@ mod dispatches { /// - On successfully serving the axon info. /// /// # Raises: - /// * 'SubNetworkDoesNotExist': + /// * 'MechanismDoesNotExist': /// - Attempting to set weights on a non-existent network. /// /// * 'NotRegistered': @@ -623,7 +897,7 @@ mod dispatches { /// - Attempting to set prometheus information withing the rate limit min. /// #[pallet::call_index(40)] - #[pallet::weight((Weight::from_parts(41_240_000, 0) + #[pallet::weight((Weight::from_parts(32_510_000, 0) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] pub fn serve_axon_tls( @@ -673,7 +947,7 @@ mod dispatches { /// - The ip type v4 or v6. /// #[pallet::call_index(5)] - #[pallet::weight((Weight::from_parts(28_660_000, 0) + #[pallet::weight((Weight::from_parts(29_760_000, 0) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] pub fn serve_prometheus( @@ -716,7 +990,7 @@ mod dispatches { /// - On successfully registering a uid to a neuron slot on a subnetwork. /// /// # Raises: - /// * 'SubNetworkDoesNotExist': + /// * 'MechanismDoesNotExist': /// - Attempting to register to a non existent network. /// /// * 'TooManyRegistrationsThisBlock': @@ -736,7 +1010,7 @@ mod dispatches { /// #[pallet::call_index(6)] #[pallet::weight((Weight::from_parts(197_900_000, 0) - .saturating_add(T::DbWeight::get().reads(26)) + .saturating_add(T::DbWeight::get().reads(27_u64)) .saturating_add(T::DbWeight::get().writes(23)), DispatchClass::Normal, Pays::Yes))] pub fn register( origin: OriginFor, @@ -752,8 +1026,8 @@ mod dispatches { /// Register the hotkey to root network #[pallet::call_index(62)] - #[pallet::weight((Weight::from_parts(111_700_000, 0) - .saturating_add(T::DbWeight::get().reads(23)) + #[pallet::weight((Weight::from_parts(135_900_000, 0) + .saturating_add(T::DbWeight::get().reads(24_u64)) .saturating_add(T::DbWeight::get().writes(20)), DispatchClass::Normal, Pays::No))] pub fn root_register(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_root_register(origin, hotkey) @@ -771,7 +1045,7 @@ mod dispatches { /// User register a new subnetwork via burning token #[pallet::call_index(7)] #[pallet::weight((Weight::from_parts(354_200_000, 0) - .saturating_add(T::DbWeight::get().reads(49)) + .saturating_add(T::DbWeight::get().reads(50_u64)) .saturating_add(T::DbWeight::get().writes(43)), DispatchClass::Normal, Pays::Yes))] pub fn burned_register( origin: OriginFor, @@ -1040,8 +1314,8 @@ mod dispatches { /// User register a new subnetwork #[pallet::call_index(59)] #[pallet::weight((Weight::from_parts(235_400_000, 0) - .saturating_add(T::DbWeight::get().reads(36)) - .saturating_add(T::DbWeight::get().writes(52)), DispatchClass::Normal, Pays::Yes))] + .saturating_add(T::DbWeight::get().reads(39_u64)) + .saturating_add(T::DbWeight::get().writes(57_u64)), DispatchClass::Normal, Pays::Yes))] pub fn register_network(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_register_network(origin, &hotkey, 1, None) } @@ -1074,11 +1348,11 @@ mod dispatches { .saturating_add(T::DbWeight::get().writes(31)), DispatchClass::Operational, Pays::No))] pub fn dissolve_network( origin: OriginFor, - coldkey: T::AccountId, + _coldkey: T::AccountId, netuid: NetUid, ) -> DispatchResult 
{ ensure_root(origin)?; - Self::user_remove_network(coldkey, netuid) + Self::do_dissolve_network(netuid) } /// Set a single child for a given hotkey on a specified network. @@ -1107,7 +1381,7 @@ mod dispatches { /// - On successfully registering a child to a hotkey. /// /// # Errors: - /// * `SubNetworkDoesNotExist`: + /// * `MechanismDoesNotExist`: /// - Attempting to register to a non-existent network. /// * `RegistrationNotPermittedOnRootSubnet`: /// - Attempting to register a child on the root network. @@ -1327,8 +1601,8 @@ mod dispatches { /// User register a new subnetwork #[pallet::call_index(79)] #[pallet::weight((Weight::from_parts(234_200_000, 0) - .saturating_add(T::DbWeight::get().reads(35)) - .saturating_add(T::DbWeight::get().writes(51)), DispatchClass::Normal, Pays::Yes))] + .saturating_add(T::DbWeight::get().reads(38_u64)) + .saturating_add(T::DbWeight::get().writes(56_u64)), DispatchClass::Normal, Pays::Yes))] pub fn register_network_with_identity( origin: OriginFor, hotkey: T::AccountId, @@ -1789,11 +2063,12 @@ mod dispatches { pub fn associate_evm_key( origin: T::RuntimeOrigin, netuid: NetUid, + hotkey: T::AccountId, evm_key: H160, block_number: u64, signature: Signature, ) -> DispatchResult { - Self::do_associate_evm_key(origin, netuid, evm_key, block_number, signature) + Self::do_associate_evm_key(origin, netuid, hotkey, evm_key, block_number, signature) } /// Recycles alpha from a cold/hot key pair, reducing AlphaOut on a subnet @@ -1990,7 +2265,7 @@ mod dispatches { /// - The client (bittensor-drand) version #[pallet::call_index(113)] #[pallet::weight((Weight::from_parts(80_690_000, 0) - .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn commit_timelocked_weights( origin: T::RuntimeOrigin, @@ -2033,5 +2308,66 @@ mod dispatches { Ok(()) } + + /// ---- Used to commit timelock encrypted commit-reveal weight values to later be revealed for + /// a mechanism. + /// + /// # Args: + /// * `origin`: (`::RuntimeOrigin`): + /// - The committing hotkey. + /// + /// * `netuid` (`u16`): + /// - The u16 network identifier. + /// + /// * `mecid` (`u8`): + /// - The u8 mechanism identifier. + /// + /// * `commit` (`Vec`): + /// - The encrypted compressed commit. + /// The steps for this are: + /// 1. Instantiate [`WeightsTlockPayload`] + /// 2. Serialize it using the `parity_scale_codec::Encode` trait + /// 3. Encrypt it following the steps (here)[https://github.com/ideal-lab5/tle/blob/f8e6019f0fb02c380ebfa6b30efb61786dede07b/timelock/src/tlock.rs#L283-L336] + /// to produce a [`TLECiphertext`] type. + /// 4. Serialize and compress using the `ark-serialize` `CanonicalSerialize` trait. + /// + /// * reveal_round (`u64`): + /// - The drand reveal round which will be avaliable during epoch `n+1` from the current + /// epoch. 
+ /// + /// * commit_reveal_version (`u16`): + /// - The client (bittensor-drand) version + #[pallet::call_index(118)] + #[pallet::weight((Weight::from_parts(84_020_000, 0) + .saturating_add(T::DbWeight::get().reads(9_u64)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + pub fn commit_timelocked_mechanism_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + mecid: MechId, + commit: BoundedVec>, + reveal_round: u64, + commit_reveal_version: u16, + ) -> DispatchResult { + Self::do_commit_timelocked_mechanism_weights( + origin, + netuid, + mecid, + commit, + reveal_round, + commit_reveal_version, + ) + } + + /// Remove a subnetwork + /// The caller must be root + #[pallet::call_index(120)] + #[pallet::weight((Weight::from_parts(119_000_000, 0) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(31)), DispatchClass::Operational, Pays::No))] + pub fn root_dissolve_network(origin: OriginFor, netuid: NetUid) -> DispatchResult { + ensure_root(origin)?; + Self::do_dissolve_network(netuid) + } } } diff --git a/pallets/subtensor/src/macros/errors.rs b/pallets/subtensor/src/macros/errors.rs index e6d9c231d1..e962b661ca 100644 --- a/pallets/subtensor/src/macros/errors.rs +++ b/pallets/subtensor/src/macros/errors.rs @@ -7,8 +7,6 @@ mod errors { #[derive(PartialEq)] #[pallet::error] pub enum Error { - /// The subnet does not exist. - SubNetworkDoesNotExist, /// The root network does not exist. RootNetworkDoesNotExist, /// The user is trying to serve an axon which is not of type 4 (IPv4) or 6 (IPv6). @@ -168,7 +166,7 @@ mod errors { TxChildkeyTakeRateLimitExceeded, /// Invalid identity. InvalidIdentity, - /// Trying to register a subnet into a mechanism that does not exist. + /// Subnet mechanism does not exist. MechanismDoesNotExist, /// Trying to unstake your lock amount. CannotUnstakeLock, @@ -238,6 +236,8 @@ mod errors { BeneficiaryDoesNotOwnHotkey, /// Expected beneficiary origin. ExpectedBeneficiaryOrigin, + /// Admin operation is prohibited during the protected weights window + AdminActionProhibitedDuringWeightsWindow, /// Symbol does not exist. SymbolDoesNotExist, /// Symbol already in use. @@ -250,5 +250,9 @@ mod errors { RevealPeriodTooSmall, /// Generic error for out-of-range parameter value InvalidValue, + /// Subnet limit reached & there is no eligible subnet to prune + SubnetLimitReached, + /// Insufficient funds to meet the subnet lock cost + CannotAffordLockCost, } } diff --git a/pallets/subtensor/src/macros/events.rs b/pallets/subtensor/src/macros/events.rs index 25bf93b730..f2d134c189 100644 --- a/pallets/subtensor/src/macros/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -41,7 +41,7 @@ mod events { TaoCurrency, ), /// a caller successfully sets their weights on a subnetwork. - WeightsSet(NetUid, u16), + WeightsSet(NetUidStorageIndex, u16), /// a new neuron account has been registered to the chain. NeuronRegistered(NetUid, u16, T::AccountId), /// multiple uids have been concurrently registered. @@ -114,6 +114,10 @@ mod events { TxDelegateTakeRateLimitSet(u64), /// setting the childkey take transaction rate limit. TxChildKeyTakeRateLimitSet(u64), + /// setting the admin freeze window length (last N blocks of tempo) + AdminFreezeWindowSet(u16), + /// setting the owner hyperparameter rate limit in epochs + OwnerHyperparamRateLimitSet(u16), /// minimum childkey take set MinChildKeyTakeSet(u16), /// maximum childkey take set @@ -147,7 +151,7 @@ mod events { /// the network minimum locking cost is set. 
NetworkMinLockCostSet(TaoCurrency), /// the maximum number of subnets is set - // SubnetLimitSet(u16), + SubnetLimitSet(u16), /// the lock cost reduction is set NetworkLockCostReductionIntervalSet(u64), /// the take for a delegate is decreased. @@ -242,20 +246,20 @@ mod events { /// - **who**: The account ID of the user committing the weights. /// - **netuid**: The network identifier. /// - **commit_hash**: The hash representing the committed weights. - CRV3WeightsCommitted(T::AccountId, NetUid, H256), + CRV3WeightsCommitted(T::AccountId, NetUidStorageIndex, H256), /// Weights have been successfully committed. /// /// - **who**: The account ID of the user committing the weights. /// - **netuid**: The network identifier. /// - **commit_hash**: The hash representing the committed weights. - WeightsCommitted(T::AccountId, NetUid, H256), + WeightsCommitted(T::AccountId, NetUidStorageIndex, H256), /// Weights have been successfully revealed. /// /// - **who**: The account ID of the user revealing the weights. /// - **netuid**: The network identifier. /// - **commit_hash**: The hash of the revealed weights. - WeightsRevealed(T::AccountId, NetUid, H256), + WeightsRevealed(T::AccountId, NetUidStorageIndex, H256), /// Weights have been successfully batch revealed. /// @@ -406,13 +410,13 @@ mod events { /// - **netuid**: The network identifier. /// - **commit_hash**: The hash representing the committed weights. /// - **reveal_round**: The round at which weights can be revealed. - TimelockedWeightsCommitted(T::AccountId, NetUid, H256, u64), + TimelockedWeightsCommitted(T::AccountId, NetUidStorageIndex, H256, u64), /// Timelocked Weights have been successfully revealed. /// /// - **netuid**: The network identifier. /// - **who**: The account ID of the user revealing the weights. - TimelockedWeightsRevealed(NetUid, T::AccountId), + TimelockedWeightsRevealed(NetUidStorageIndex, T::AccountId), /// Auto-staking hotkey received stake AutoStakeAdded { @@ -431,9 +435,12 @@ mod events { /// End-of-epoch miner incentive alpha by UID IncentiveAlphaEmittedToMiners { /// Subnet identifier. - netuid: NetUid, + netuid: NetUidStorageIndex, /// UID-indexed array of miner incentive alpha; index equals UID. emissions: Vec, }, + + /// The minimum allowed UIDs for a subnet have been set. 
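+    ///
+    /// - **netuid**: The network identifier.
+    /// - **min_allowed**: The new minimum number of allowed UIDs (a hedged
+    ///   description inferred from the `(NetUid, u16)` payload below).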
+ MinAllowedUidsSet(NetUid, u16), } } diff --git a/pallets/subtensor/src/macros/genesis.rs b/pallets/subtensor/src/macros/genesis.rs index e50bf01d7d..b9378e38f6 100644 --- a/pallets/subtensor/src/macros/genesis.rs +++ b/pallets/subtensor/src/macros/genesis.rs @@ -96,9 +96,9 @@ mod genesis { Active::::mutate(netuid, |v| v.push(true)); Emission::::mutate(netuid, |v| v.push(0.into())); Consensus::::mutate(netuid, |v| v.push(0)); - Incentive::::mutate(netuid, |v| v.push(0)); + Incentive::::mutate(NetUidStorageIndex::from(netuid), |v| v.push(0)); Dividends::::mutate(netuid, |v| v.push(0)); - LastUpdate::::mutate(netuid, |v| v.push(block_number)); + LastUpdate::::mutate(NetUidStorageIndex::from(netuid), |v| v.push(block_number)); PruningScores::::mutate(netuid, |v| v.push(0)); ValidatorTrust::::mutate(netuid, |v| v.push(0)); ValidatorPermit::::mutate(netuid, |v| v.push(false)); diff --git a/pallets/subtensor/src/macros/hooks.rs b/pallets/subtensor/src/macros/hooks.rs index b43f9422df..a3cb7a692f 100644 --- a/pallets/subtensor/src/macros/hooks.rs +++ b/pallets/subtensor/src/macros/hooks.rs @@ -139,7 +139,15 @@ mod hooks { // Migrate last block rate limiting storage items .saturating_add(migrations::migrate_rate_limiting_last_blocks::migrate_obsolete_rate_limiting_last_blocks_storage::()) // Migrate remove network modality - .saturating_add(migrations::migrate_remove_network_modality::migrate_remove_network_modality::()); + .saturating_add(migrations::migrate_remove_network_modality::migrate_remove_network_modality::()) + // Migrate Immunity Period + .saturating_add(migrations::migrate_network_immunity_period::migrate_network_immunity_period::()) + // Migrate Subnet Limit + .saturating_add(migrations::migrate_subnet_limit_to_default::migrate_subnet_limit_to_default::()) + // Migrate Lock Reduction Interval + .saturating_add(migrations::migrate_network_lock_reduction_interval::migrate_network_lock_reduction_interval::()) + // Migrate subnet locked balances + .saturating_add(migrations::migrate_subnet_locked::migrate_restore_subnet_locked::()); weight } diff --git a/pallets/subtensor/src/migrations/migrate_crv3_commits_add_block.rs b/pallets/subtensor/src/migrations/migrate_crv3_commits_add_block.rs index 27f2fe6d65..bf5a0bb2b5 100644 --- a/pallets/subtensor/src/migrations/migrate_crv3_commits_add_block.rs +++ b/pallets/subtensor/src/migrations/migrate_crv3_commits_add_block.rs @@ -22,9 +22,10 @@ pub fn migrate_crv3_commits_add_block() -> Weight { log::info!("Running migration '{}'", String::from_utf8_lossy(&mig_name)); // iterate over *all* (netuid, epoch, queue) triples - for (netuid, epoch, old_q) in CRV3WeightCommits::::drain() { + for (netuid_index, epoch, old_q) in CRV3WeightCommits::::drain() { total_weight = total_weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); + let (netuid, _) = Pallet::::get_netuid_and_subid(netuid_index).unwrap_or_default(); let commit_block = Pallet::::get_first_block_of_epoch(netuid, epoch); // convert VecDeque<(who,cipher,rnd)> → VecDeque<(who,cb,cipher,rnd)> @@ -34,7 +35,7 @@ pub fn migrate_crv3_commits_add_block() -> Weight { .collect(); // write back under *new* storage definition - CRV3WeightCommitsV2::::insert(netuid, epoch, new_q); + CRV3WeightCommitsV2::::insert(netuid_index, epoch, new_q); } // mark as done diff --git a/pallets/subtensor/src/migrations/migrate_delete_subnet_21.rs b/pallets/subtensor/src/migrations/migrate_delete_subnet_21.rs index b8b1138b2e..c4e79692ac 100644 --- a/pallets/subtensor/src/migrations/migrate_delete_subnet_21.rs 
+++ b/pallets/subtensor/src/migrations/migrate_delete_subnet_21.rs @@ -6,7 +6,7 @@ use frame_support::{ }; use log::info; use sp_std::vec::Vec; -use subtensor_runtime_common::NetUid; +use subtensor_runtime_common::{NetUid, NetUidStorageIndex}; /// Constant for logging purposes const LOG_TARGET: &str = "migrate_delete_subnet_21"; @@ -73,8 +73,8 @@ pub fn migrate_delete_subnet_21() -> Weight { // Remove incentive mechanism memory let _ = Uids::::clear_prefix(netuid, u32::MAX, None); let _ = Keys::::clear_prefix(netuid, u32::MAX, None); - let _ = Bonds::::clear_prefix(netuid, u32::MAX, None); - let _ = Weights::::clear_prefix(netuid, u32::MAX, None); + let _ = Bonds::::clear_prefix(NetUidStorageIndex::from(netuid), u32::MAX, None); + let _ = Weights::::clear_prefix(NetUidStorageIndex::from(netuid), u32::MAX, None); weight.saturating_accrue(T::DbWeight::get().writes(4)); @@ -83,11 +83,11 @@ pub fn migrate_delete_subnet_21() -> Weight { Trust::::remove(netuid); Active::::remove(netuid); Emission::::remove(netuid); - Incentive::::remove(netuid); + Incentive::::remove(NetUidStorageIndex::from(netuid)); Consensus::::remove(netuid); Dividends::::remove(netuid); PruningScores::::remove(netuid); - LastUpdate::::remove(netuid); + LastUpdate::::remove(NetUidStorageIndex::from(netuid)); ValidatorPermit::::remove(netuid); ValidatorTrust::::remove(netuid); diff --git a/pallets/subtensor/src/migrations/migrate_delete_subnet_3.rs b/pallets/subtensor/src/migrations/migrate_delete_subnet_3.rs index 289ce6cb36..3470004362 100644 --- a/pallets/subtensor/src/migrations/migrate_delete_subnet_3.rs +++ b/pallets/subtensor/src/migrations/migrate_delete_subnet_3.rs @@ -6,7 +6,7 @@ use frame_support::{ }; use log::info; use sp_std::vec::Vec; -use subtensor_runtime_common::NetUid; +use subtensor_runtime_common::{NetUid, NetUidStorageIndex}; /// Constant for logging purposes const LOG_TARGET: &str = "migrate_delete_subnet_3"; @@ -75,8 +75,8 @@ pub fn migrate_delete_subnet_3() -> Weight { // Remove incentive mechanism memory let _ = Uids::::clear_prefix(netuid, u32::MAX, None); let _ = Keys::::clear_prefix(netuid, u32::MAX, None); - let _ = Bonds::::clear_prefix(netuid, u32::MAX, None); - let _ = Weights::::clear_prefix(netuid, u32::MAX, None); + let _ = Bonds::::clear_prefix(NetUidStorageIndex::from(netuid), u32::MAX, None); + let _ = Weights::::clear_prefix(NetUidStorageIndex::from(netuid), u32::MAX, None); weight.saturating_accrue(T::DbWeight::get().writes(4)); @@ -85,11 +85,11 @@ pub fn migrate_delete_subnet_3() -> Weight { Trust::::remove(netuid); Active::::remove(netuid); Emission::::remove(netuid); - Incentive::::remove(netuid); + Incentive::::remove(NetUidStorageIndex::from(netuid)); Consensus::::remove(netuid); Dividends::::remove(netuid); PruningScores::::remove(netuid); - LastUpdate::::remove(netuid); + LastUpdate::::remove(NetUidStorageIndex::from(netuid)); ValidatorPermit::::remove(netuid); ValidatorTrust::::remove(netuid); diff --git a/pallets/subtensor/src/migrations/migrate_init_total_issuance.rs b/pallets/subtensor/src/migrations/migrate_init_total_issuance.rs index 042ad0fe77..6a05dc5a85 100644 --- a/pallets/subtensor/src/migrations/migrate_init_total_issuance.rs +++ b/pallets/subtensor/src/migrations/migrate_init_total_issuance.rs @@ -15,7 +15,7 @@ pub mod deprecated_loaded_emission_format { } pub(crate) fn migrate_init_total_issuance() -> Weight { - let subnets_len = crate::SubnetLocked::::iter().count() as u64; + let subnets_len = crate::NetworksAdded::::iter().count() as u64; // Retrieve the total 
balance of all accounts let total_account_balances = <::Currency as fungible::Inspect< diff --git a/pallets/subtensor/src/migrations/migrate_network_immunity_period.rs b/pallets/subtensor/src/migrations/migrate_network_immunity_period.rs new file mode 100644 index 0000000000..a9fcea21e3 --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_network_immunity_period.rs @@ -0,0 +1,40 @@ +use crate::{Config, Event, HasMigrationRun, NetworkImmunityPeriod, Pallet, Weight}; +use scale_info::prelude::string::String; + +pub fn migrate_network_immunity_period() -> Weight { + use frame_support::traits::Get; + + const NEW_VALUE: u64 = 864_000; + + let migration_name = b"migrate_network_immunity_period".to_vec(); + let mut weight = T::DbWeight::get().reads(1); + + // Skip if already executed + if HasMigrationRun::::get(&migration_name) { + log::info!( + target: "runtime", + "Migration '{}' already run - skipping.", + String::from_utf8_lossy(&migration_name) + ); + return weight; + } + + // ── 1) Set new value ───────────────────────────────────────────────────── + NetworkImmunityPeriod::::put(NEW_VALUE); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + Pallet::::deposit_event(Event::NetworkImmunityPeriodSet(NEW_VALUE)); + + // ── 2) Mark migration done ─────────────────────────────────────────────── + HasMigrationRun::::insert(&migration_name, true); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!( + target: "runtime", + "Migration '{}' completed - NetworkImmunityPeriod => {}.", + String::from_utf8_lossy(&migration_name), + NEW_VALUE + ); + + weight +} diff --git a/pallets/subtensor/src/migrations/migrate_network_lock_reduction_interval.rs b/pallets/subtensor/src/migrations/migrate_network_lock_reduction_interval.rs new file mode 100644 index 0000000000..99bb5b6e97 --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_network_lock_reduction_interval.rs @@ -0,0 +1,55 @@ +use super::*; +use frame_support::{traits::Get, weights::Weight}; +use log; +use scale_info::prelude::string::String; + +pub fn migrate_network_lock_reduction_interval() -> Weight { + const FOUR_DAYS: u64 = 28_800; + const EIGHT_DAYS: u64 = 57_600; + const ONE_WEEK_BLOCKS: u64 = 50_400; + + let migration_name = b"migrate_network_lock_reduction_interval".to_vec(); + let mut weight = T::DbWeight::get().reads(1); + + // Skip if already executed + if HasMigrationRun::::get(&migration_name) { + log::info!( + target: "runtime", + "Migration '{}' already run - skipping.", + String::from_utf8_lossy(&migration_name) + ); + return weight; + } + + let current_block = Pallet::::get_current_block_as_u64(); + + // ── 1) Set new values ───────────────────────────────────────────────── + NetworkLockReductionInterval::::put(EIGHT_DAYS); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + NetworkRateLimit::::put(FOUR_DAYS); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + Pallet::::set_network_last_lock(TaoCurrency::from(1_000_000_000_000)); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + // Hold price at 2000 TAO until day 7, then begin linear decay + Pallet::::set_network_last_lock_block(current_block.saturating_add(ONE_WEEK_BLOCKS)); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + // Allow registrations starting at day 7 + NetworkRegistrationStartBlock::::put(current_block.saturating_add(ONE_WEEK_BLOCKS)); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + // ── 2) Mark migration done 
─────────────────────────────────────────── + HasMigrationRun::::insert(&migration_name, true); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!( + target: "runtime", + "Migration '{}' completed.", + String::from_utf8_lossy(&migration_name), + ); + + weight +} diff --git a/pallets/subtensor/src/migrations/migrate_subnet_limit_to_default.rs b/pallets/subtensor/src/migrations/migrate_subnet_limit_to_default.rs new file mode 100644 index 0000000000..3d88337a24 --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_subnet_limit_to_default.rs @@ -0,0 +1,44 @@ +use super::*; +use frame_support::{traits::Get, weights::Weight}; +use log; +use scale_info::prelude::string::String; + +pub fn migrate_subnet_limit_to_default() -> Weight { + let mig_name: Vec = b"subnet_limit_to_default".to_vec(); + + // 1 read: HasMigrationRun flag + let mut total_weight = T::DbWeight::get().reads(1); + + // Run once guard + if HasMigrationRun::::get(&mig_name) { + log::info!( + "Migration '{}' already executed - skipping", + String::from_utf8_lossy(&mig_name) + ); + return total_weight; + } + log::info!("Running migration '{}'", String::from_utf8_lossy(&mig_name)); + + // Read current and compute target default + let current: u16 = SubnetLimit::::get(); + let target: u16 = DefaultSubnetLimit::::get(); + + if current != target { + total_weight = total_weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); + SubnetLimit::::put(target); + log::info!("SubnetLimit updated: {current} -> {target}"); + } else { + total_weight = total_weight.saturating_add(T::DbWeight::get().reads(1)); + log::info!("SubnetLimit already equals default ({target}), no update performed."); + } + + // Mark as done + HasMigrationRun::::insert(&mig_name, true); + total_weight = total_weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!( + "Migration '{}' completed", + String::from_utf8_lossy(&mig_name) + ); + total_weight +} diff --git a/pallets/subtensor/src/migrations/migrate_subnet_locked.rs b/pallets/subtensor/src/migrations/migrate_subnet_locked.rs new file mode 100644 index 0000000000..a430993f9d --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_subnet_locked.rs @@ -0,0 +1,119 @@ +use super::*; +use crate::{Config, HasMigrationRun, SubnetLocked, TaoCurrency}; +use frame_support::weights::Weight; +use log; +use scale_info::prelude::string::String; +use subtensor_runtime_common::NetUid; + +pub fn migrate_restore_subnet_locked() -> Weight { + // Track whether we've already run this migration + let migration_name = b"migrate_restore_subnet_locked".to_vec(); + let mut weight = T::DbWeight::get().reads(1); + + if HasMigrationRun::::get(&migration_name) { + log::info!( + target: "runtime", + "Migration '{}' already run - skipping.", + String::from_utf8_lossy(&migration_name) + ); + return weight; + } + + // Snapshot: NetworkLastLockCost at (registration_block + 1) for each netuid. 
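+    // Hedged aside: a tiny standalone helper (illustrative only, not pallet
+    // API) for sanity-checking such a snapshot before shipping it in a
+    // migration; the log line at the end of this function reports the same
+    // total.
+    fn snapshot_total_rao(snapshot: &[(u16, u64)]) -> u128 {
+        // Sum in u128 so the grand total cannot overflow u64 arithmetic.
+        snapshot.iter().map(|&(_, rao)| rao as u128).sum()
+    }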
+ const SUBNET_LOCKED: &[(u16, u64)] = &[ + (65, 37_274_536_408), + (66, 65_230_444_016), + (67, 114_153_284_032), + (68, 199_768_252_064), + (69, 349_594_445_728), + (70, 349_412_366_216), + (71, 213_408_488_702), + (72, 191_341_473_067), + (73, 246_711_333_592), + (74, 291_874_466_228), + (75, 247_485_227_056), + (76, 291_241_991_316), + (77, 303_154_601_714), + (78, 287_407_417_932), + (79, 254_935_051_664), + (80, 255_413_055_349), + (81, 249_790_431_509), + (82, 261_343_249_180), + (83, 261_361_408_796), + (84, 201_938_003_214), + (85, 264_805_234_604), + (86, 223_171_973_880), + (87, 180_397_358_280), + (88, 270_596_039_760), + (89, 286_399_608_951), + (90, 267_684_201_301), + (91, 284_637_542_762), + (92, 288_373_410_868), + (93, 290_836_604_849), + (94, 270_861_792_144), + (95, 210_595_055_304), + (96, 315_263_727_200), + (97, 158_244_884_792), + (98, 168_102_223_900), + (99, 252_153_339_800), + (100, 378_230_014_000), + (101, 205_977_765_866), + (102, 149_434_017_849), + (103, 135_476_471_008), + (104, 147_970_415_680), + (105, 122_003_668_139), + (106, 133_585_556_570), + (107, 200_137_144_216), + (108, 106_767_623_816), + (109, 124_280_483_748), + (110, 186_420_726_696), + (111, 249_855_564_892), + (112, 196_761_272_984), + (113, 147_120_048_727), + (114, 84_021_895_534), + (115, 98_002_215_656), + (116, 89_944_262_256), + (117, 107_183_582_952), + (118, 110_644_724_664), + (119, 99_380_483_902), + (120, 138_829_019_156), + (121, 111_988_743_976), + (122, 130_264_686_152), + (123, 118_034_291_488), + (124, 79_312_501_676), + (125, 43_214_310_704), + (126, 64_755_449_962), + (127, 97_101_698_382), + (128, 145_645_807_991), + ]; + + let mut inserted: u32 = 0; + let mut total_rao: u128 = 0; + + // ── 1) Re-insert the historical values ──────────────────────────────── + for &(netuid_u16, amount_rao_u64) in SUBNET_LOCKED.iter() { + let key: NetUid = NetUid::from(netuid_u16); + let amount: TaoCurrency = TaoCurrency::from(amount_rao_u64); + + SubnetLocked::::insert(key, amount); + + inserted = inserted.saturating_add(1); + total_rao = total_rao.saturating_add(amount_rao_u64 as u128); + + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + } + + // ── 2) Mark migration done ──────────────────────────────────────────── + HasMigrationRun::::insert(&migration_name, true); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!( + target: "runtime", + "Migration '{}' completed - inserted {} SubnetLocked entries; total≈{} RAO.", + String::from_utf8_lossy(&migration_name), + inserted, + total_rao + ); + + weight +} diff --git a/pallets/subtensor/src/migrations/mod.rs b/pallets/subtensor/src/migrations/mod.rs index b7265cc6d0..e7c50c0080 100644 --- a/pallets/subtensor/src/migrations/mod.rs +++ b/pallets/subtensor/src/migrations/mod.rs @@ -19,6 +19,8 @@ pub mod migrate_fix_root_subnet_tao; pub mod migrate_fix_root_tao_and_alpha_in; pub mod migrate_identities_v2; pub mod migrate_init_total_issuance; +pub mod migrate_network_immunity_period; +pub mod migrate_network_lock_reduction_interval; pub mod migrate_orphaned_storage_items; pub mod migrate_populate_owned_hotkeys; pub mod migrate_rao; @@ -39,6 +41,8 @@ pub mod migrate_set_registration_enable; pub mod migrate_set_subtoken_enabled; pub mod migrate_stake_threshold; pub mod migrate_subnet_identities_to_v3; +pub mod migrate_subnet_limit_to_default; +pub mod migrate_subnet_locked; pub mod migrate_subnet_symbols; pub mod migrate_subnet_volume; pub mod migrate_to_v1_separate_emission; diff --git 
a/pallets/subtensor/src/rpc_info/metagraph.rs b/pallets/subtensor/src/rpc_info/metagraph.rs index 1ad09d4bbb..57c2e15c7f 100644 --- a/pallets/subtensor/src/rpc_info/metagraph.rs +++ b/pallets/subtensor/src/rpc_info/metagraph.rs @@ -8,7 +8,7 @@ use pallet_commitments::GetCommitments; use substrate_fixed::types::I64F64; use substrate_fixed::types::I96F32; use subtensor_macros::freeze_struct; -use subtensor_runtime_common::{AlphaCurrency, NetUid, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, MechId, NetUid, NetUidStorageIndex, TaoCurrency}; #[freeze_struct("6fc49d5a7dc0e339")] #[derive(Decode, Encode, PartialEq, Eq, Clone, Debug, TypeInfo)] @@ -749,7 +749,7 @@ impl Pallet { .into_iter() .map(Compact::from) .collect(), // Pruning per UID - last_update: LastUpdate::::get(netuid) + last_update: LastUpdate::::get(NetUidStorageIndex::from(netuid)) .into_iter() .map(Compact::from) .collect(), // Last update per UID @@ -761,7 +761,7 @@ impl Pallet { .into_iter() .map(Compact::from) .collect(), // Dividends per UID - incentives: Incentive::::get(netuid) + incentives: Incentive::::get(NetUidStorageIndex::from(netuid)) .into_iter() .map(Compact::from) .collect(), // Mining incentives per UID @@ -805,6 +805,45 @@ impl Pallet { metagraphs } + pub fn get_mechagraph(netuid: NetUid, mecid: MechId) -> Option> { + if Self::ensure_mechanism_exists(netuid, mecid).is_err() { + return None; + } + + // Get netuid metagraph + let maybe_meta = Self::get_metagraph(netuid); + if let Some(mut meta) = maybe_meta { + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid); + + // Update with mechanism information + meta.netuid = NetUid::from(u16::from(netuid_index)).into(); + meta.last_update = LastUpdate::::get(netuid_index) + .into_iter() + .map(Compact::from) + .collect(); + meta.incentives = Incentive::::get(netuid_index) + .into_iter() + .map(Compact::from) + .collect(); + + Some(meta) + } else { + None + } + } + + pub fn get_all_mechagraphs() -> Vec>> { + let netuids = Self::get_all_subnet_netuids(); + let mut metagraphs = Vec::>>::new(); + for netuid in netuids.clone().iter() { + let mechanism_count = u8::from(MechanismCountCurrent::::get(netuid)); + for mecid in 0..mechanism_count { + metagraphs.push(Self::get_mechagraph(*netuid, MechId::from(mecid))); + } + } + metagraphs + } + pub fn get_selective_metagraph( netuid: NetUid, metagraph_indexes: Vec, @@ -821,6 +860,23 @@ impl Pallet { } } + pub fn get_selective_mechagraph( + netuid: NetUid, + mecid: MechId, + metagraph_indexes: Vec, + ) -> Option> { + if !Self::if_subnet_exist(netuid) { + None + } else { + let mut result = SelectiveMetagraph::default(); + for index in metagraph_indexes.iter() { + let value = Self::get_single_selective_mechagraph(netuid, mecid, *index); + result.merge_value(&value, *index as usize); + } + Some(result) + } + } + fn get_single_selective_metagraph( netuid: NetUid, metagraph_index: u16, @@ -1207,7 +1263,7 @@ impl Pallet { Some(SelectiveMetagraphIndex::LastUpdate) => SelectiveMetagraph { netuid: netuid.into(), last_update: Some( - LastUpdate::::get(netuid) + LastUpdate::::get(NetUidStorageIndex::from(netuid)) .into_iter() .map(Compact::from) .collect(), @@ -1240,7 +1296,7 @@ impl Pallet { Some(SelectiveMetagraphIndex::Incentives) => SelectiveMetagraph { netuid: netuid.into(), incentives: Some( - Incentive::::get(netuid) + Incentive::::get(NetUidStorageIndex::from(netuid)) .into_iter() .map(Compact::from) .collect(), @@ -1385,6 +1441,46 @@ impl Pallet { } } + fn get_single_selective_mechagraph( + netuid: 
NetUid, + mecid: MechId, + metagraph_index: u16, + ) -> SelectiveMetagraph { + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid); + + // Default to netuid, replace as needed for mecid + match SelectiveMetagraphIndex::from_index(metagraph_index as usize) { + Some(SelectiveMetagraphIndex::Incentives) => SelectiveMetagraph { + netuid: netuid.into(), + incentives: Some( + Incentive::::get(netuid_index) + .into_iter() + .map(Compact::from) + .collect(), + ), + ..Default::default() + }, + + Some(SelectiveMetagraphIndex::LastUpdate) => SelectiveMetagraph { + netuid: netuid.into(), + last_update: Some( + LastUpdate::::get(netuid_index) + .into_iter() + .map(Compact::from) + .collect(), + ), + ..Default::default() + }, + + _ => { + let mut meta = Self::get_single_selective_metagraph(netuid, metagraph_index); + // Replace netuid with index + meta.netuid = NetUid::from(u16::from(netuid_index)).into(); + meta + } + } + } + fn get_validators(netuid: NetUid) -> SelectiveMetagraph { let stake_threshold = Self::get_stake_threshold(); let hotkeys: Vec<(u16, T::AccountId)> = diff --git a/pallets/subtensor/src/rpc_info/neuron_info.rs b/pallets/subtensor/src/rpc_info/neuron_info.rs index 8eae264c6e..6e29a51ef5 100644 --- a/pallets/subtensor/src/rpc_info/neuron_info.rs +++ b/pallets/subtensor/src/rpc_info/neuron_info.rs @@ -2,7 +2,7 @@ use super::*; use frame_support::pallet_prelude::{Decode, Encode}; extern crate alloc; use codec::Compact; -use subtensor_runtime_common::{AlphaCurrency, NetUid}; +use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex}; #[freeze_struct("9e5a291e7e71482d")] #[derive(Decode, Encode, PartialEq, Eq, Clone, Debug, TypeInfo)] @@ -87,16 +87,16 @@ impl Pallet { let active = Self::get_active_for_uid(netuid, uid); let rank = Self::get_rank_for_uid(netuid, uid); let emission = Self::get_emission_for_uid(netuid, uid); - let incentive = Self::get_incentive_for_uid(netuid, uid); + let incentive = Self::get_incentive_for_uid(netuid.into(), uid); let consensus = Self::get_consensus_for_uid(netuid, uid); let trust = Self::get_trust_for_uid(netuid, uid); let validator_trust = Self::get_validator_trust_for_uid(netuid, uid); let dividends = Self::get_dividends_for_uid(netuid, uid); let pruning_score = Self::get_pruning_score_for_uid(netuid, uid); - let last_update = Self::get_last_update_for_uid(netuid, uid); + let last_update = Self::get_last_update_for_uid(NetUidStorageIndex::from(netuid), uid); let validator_permit = Self::get_validator_permit_for_uid(netuid, uid); - let weights = Weights::::get(netuid, uid) + let weights = Weights::::get(NetUidStorageIndex::from(netuid), uid) .into_iter() .filter_map(|(i, w)| { if w > 0 { @@ -107,7 +107,7 @@ impl Pallet { }) .collect::, Compact)>>(); - let bonds = >::get(netuid, uid) + let bonds = Bonds::::get(NetUidStorageIndex::from(netuid), uid) .iter() .filter_map(|(i, b)| { if *b > 0 { @@ -173,13 +173,13 @@ impl Pallet { let active = Self::get_active_for_uid(netuid, uid); let rank = Self::get_rank_for_uid(netuid, uid); let emission = Self::get_emission_for_uid(netuid, uid); - let incentive = Self::get_incentive_for_uid(netuid, uid); + let incentive = Self::get_incentive_for_uid(netuid.into(), uid); let consensus = Self::get_consensus_for_uid(netuid, uid); let trust = Self::get_trust_for_uid(netuid, uid); let validator_trust = Self::get_validator_trust_for_uid(netuid, uid); let dividends = Self::get_dividends_for_uid(netuid, uid); let pruning_score = Self::get_pruning_score_for_uid(netuid, uid); - let last_update = 
Self::get_last_update_for_uid(netuid, uid); + let last_update = Self::get_last_update_for_uid(NetUidStorageIndex::from(netuid), uid); let validator_permit = Self::get_validator_permit_for_uid(netuid, uid); let stake: Vec<(T::AccountId, Compact)> = vec![( diff --git a/pallets/subtensor/src/rpc_info/show_subnet.rs b/pallets/subtensor/src/rpc_info/show_subnet.rs index 2123345a4e..abd9670bb8 100644 --- a/pallets/subtensor/src/rpc_info/show_subnet.rs +++ b/pallets/subtensor/src/rpc_info/show_subnet.rs @@ -4,7 +4,7 @@ use crate::epoch::math::*; use codec::Compact; use frame_support::pallet_prelude::{Decode, Encode}; use substrate_fixed::types::I64F64; -use subtensor_runtime_common::{AlphaCurrency, NetUid, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, TaoCurrency}; #[freeze_struct("9354762261420485")] #[derive(Decode, Encode, PartialEq, Eq, Clone, Debug, TypeInfo)] @@ -103,7 +103,7 @@ impl Pallet { .into_iter() .map(Compact::from) .collect(); - let last_update: Vec> = LastUpdate::::get(netuid) + let last_update: Vec> = LastUpdate::::get(NetUidStorageIndex::from(netuid)) .into_iter() .map(Compact::from) .collect(); @@ -115,7 +115,7 @@ impl Pallet { .into_iter() .map(Compact::from) .collect(); - let incentives: Vec> = Incentive::::get(netuid) + let incentives: Vec> = Incentive::::get(NetUidStorageIndex::from(netuid)) .into_iter() .map(Compact::from) .collect(); diff --git a/pallets/subtensor/src/staking/helpers.rs b/pallets/subtensor/src/staking/helpers.rs index 7b1b644e85..1625afa811 100644 --- a/pallets/subtensor/src/staking/helpers.rs +++ b/pallets/subtensor/src/staking/helpers.rs @@ -323,8 +323,13 @@ impl Pallet { } pub fn recycle_subnet_alpha(netuid: NetUid, amount: AlphaCurrency) { + // TODO: record recycled alpha in a tracker SubnetAlphaOut::::mutate(netuid, |total| { *total = total.saturating_sub(amount); }); } + + pub fn burn_subnet_alpha(_netuid: NetUid, _amount: AlphaCurrency) { + // Do nothing; TODO: record burned alpha in a tracker + } } diff --git a/pallets/subtensor/src/staking/recycle_alpha.rs b/pallets/subtensor/src/staking/recycle_alpha.rs index 4c1bbd0b9f..7334c8126a 100644 --- a/pallets/subtensor/src/staking/recycle_alpha.rs +++ b/pallets/subtensor/src/staking/recycle_alpha.rs @@ -23,10 +23,7 @@ impl Pallet { ) -> DispatchResult { let coldkey: T::AccountId = ensure_signed(origin)?; - ensure!( - Self::if_subnet_exist(netuid), - Error::::SubNetworkDoesNotExist - ); + ensure!(Self::if_subnet_exist(netuid), Error::::SubnetNotExists); ensure!( !netuid.is_root(), @@ -91,10 +88,7 @@ impl Pallet { ) -> DispatchResult { let coldkey = ensure_signed(origin)?; - ensure!( - Self::if_subnet_exist(netuid), - Error::::SubNetworkDoesNotExist - ); + ensure!(Self::if_subnet_exist(netuid), Error::::SubnetNotExists); ensure!( !netuid.is_root(), @@ -126,7 +120,7 @@ impl Pallet { &hotkey, &coldkey, netuid, amount, ); - // This is a burn, so we don't need to update AlphaOut. 
+ Self::burn_subnet_alpha(netuid, amount); // Deposit event Self::deposit_event(Event::AlphaBurned( diff --git a/pallets/subtensor/src/staking/remove_stake.rs b/pallets/subtensor/src/staking/remove_stake.rs index fd9a974645..9d610ea88f 100644 --- a/pallets/subtensor/src/staking/remove_stake.rs +++ b/pallets/subtensor/src/staking/remove_stake.rs @@ -1,6 +1,7 @@ use subtensor_swap_interface::{OrderType, SwapHandler}; use super::*; +use substrate_fixed::types::U96F32; use subtensor_runtime_common::{AlphaCurrency, Currency, NetUid, TaoCurrency}; impl Pallet { @@ -439,4 +440,171 @@ impl Pallet { Self::do_remove_stake(origin, hotkey, netuid, alpha_unstaked) } } + + pub fn destroy_alpha_in_out_stakes(netuid: NetUid) -> DispatchResult { + // 1) Ensure the subnet exists. + ensure!(Self::if_subnet_exist(netuid), Error::::SubnetNotExists); + + // 2) Owner / lock cost. + let owner_coldkey: T::AccountId = SubnetOwner::::get(netuid); + let lock_cost: TaoCurrency = Self::get_subnet_locked_balance(netuid); + + // 3) Compute owner's received emission in TAO at current price. + // Emission:: is Vec. We: + // - sum emitted α, + // - apply owner fraction to get owner α, + // - price that α using a *simulated* AMM swap. + let total_emitted_alpha_u128: u128 = + Emission::::get(netuid) + .into_iter() + .fold(0u128, |acc, e_alpha| { + let e_u64: u64 = Into::::into(e_alpha); + acc.saturating_add(e_u64 as u128) + }); + + let owner_fraction: U96F32 = Self::get_float_subnet_owner_cut(); + let owner_alpha_u64: u64 = U96F32::from_num(total_emitted_alpha_u128) + .saturating_mul(owner_fraction) + .floor() + .saturating_to_num::(); + + let owner_emission_tao: TaoCurrency = if owner_alpha_u64 > 0 { + match T::SwapInterface::sim_swap(netuid.into(), OrderType::Sell, owner_alpha_u64) { + Ok(sim) => TaoCurrency::from(sim.amount_paid_out), + Err(e) => { + log::debug!( + "destroy_alpha_in_out_stakes: sim_swap owner α→τ failed (netuid={netuid:?}, alpha={owner_alpha_u64}, err={e:?}); falling back to price multiply.", + ); + let cur_price: U96F32 = T::SwapInterface::current_alpha_price(netuid.into()); + let val_u64: u64 = U96F32::from_num(owner_alpha_u64) + .saturating_mul(cur_price) + .floor() + .saturating_to_num::(); + TaoCurrency::from(val_u64) + } + } + } else { + TaoCurrency::ZERO + }; + + // 4) Enumerate all α entries on this subnet to build distribution weights and cleanup lists. + // - collect keys to remove, + // - per (hot,cold) α VALUE (not shares) with fallback to raw share if pool uninitialized, + // - track hotkeys to clear pool totals. + let mut keys_to_remove: Vec<(T::AccountId, T::AccountId)> = Vec::new(); + let mut hotkeys_seen: Vec = Vec::new(); + let mut stakers: Vec<(T::AccountId, T::AccountId, u128)> = Vec::new(); + let mut total_alpha_value_u128: u128 = 0; + + for ((hot, cold, this_netuid), share_u64f64) in Alpha::::iter() { + if this_netuid != netuid { + continue; + } + + keys_to_remove.push((hot.clone(), cold.clone())); + if !hotkeys_seen.contains(&hot) { + hotkeys_seen.push(hot.clone()); + } + + // Primary: actual α value via share pool. + let pool = Self::get_alpha_share_pool(hot.clone(), netuid); + let actual_val_u64 = pool.try_get_value(&cold).unwrap_or(0); + + // Fallback: if pool uninitialized, treat raw Alpha share as value. 
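+            // Hedged aside: `share_u64f64` is fixed-point (substrate_fixed::U64F64);
+            // the fallback below truncates its fractional part, so a share of 1.75
+            // becomes 1 and a share below 1.0 becomes 0 and drops out. A standalone
+            // sketch of that conversion (illustrative only):
+            fn truncate_share(share: substrate_fixed::types::U64F64) -> u64 {
+                share.saturating_to_num::<u64>() // keeps the integer bits only
+            }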
+ let val_u64 = if actual_val_u64 == 0 { + share_u64f64.saturating_to_num::() + } else { + actual_val_u64 + }; + + if val_u64 > 0 { + let val_u128 = val_u64 as u128; + total_alpha_value_u128 = total_alpha_value_u128.saturating_add(val_u128); + stakers.push((hot, cold, val_u128)); + } + } + + // 5) Determine the TAO pot and pre-adjust accounting to avoid double counting. + let pot_tao: TaoCurrency = SubnetTAO::::get(netuid); + let pot_u64: u64 = pot_tao.into(); + + if pot_u64 > 0 { + SubnetTAO::::remove(netuid); + TotalStake::::mutate(|total| *total = total.saturating_sub(pot_tao)); + } + + // 6) Pro‑rata distribution of the pot by α value (largest‑remainder), + // **credited directly to each staker's COLDKEY free balance**. + if pot_u64 > 0 && total_alpha_value_u128 > 0 && !stakers.is_empty() { + struct Portion { + _hot: A, + cold: C, + share: u64, // TAO to credit to coldkey balance + rem: u128, // remainder for largest‑remainder method + } + + let pot_u128: u128 = pot_u64 as u128; + let mut portions: Vec> = Vec::with_capacity(stakers.len()); + let mut distributed: u128 = 0; + + for (hot, cold, alpha_val) in &stakers { + let prod: u128 = pot_u128.saturating_mul(*alpha_val); + let share_u128: u128 = prod.checked_div(total_alpha_value_u128).unwrap_or_default(); + let share_u64: u64 = share_u128.min(u128::from(u64::MAX)) as u64; + distributed = distributed.saturating_add(u128::from(share_u64)); + + let rem: u128 = prod.checked_rem(total_alpha_value_u128).unwrap_or_default(); + portions.push(Portion { + _hot: hot.clone(), + cold: cold.clone(), + share: share_u64, + rem, + }); + } + + let leftover: u128 = pot_u128.saturating_sub(distributed); + if leftover > 0 { + portions.sort_by(|a, b| b.rem.cmp(&a.rem)); + let give: usize = core::cmp::min(leftover, portions.len() as u128) as usize; + for p in portions.iter_mut().take(give) { + p.share = p.share.saturating_add(1); + } + } + + // Credit each share directly to coldkey free balance. + for p in portions { + if p.share > 0 { + Self::add_balance_to_coldkey_account(&p.cold, p.share); + } + } + } + + // 7) Destroy all α-in/α-out state for this subnet. + // 7.a) Remove every (hot, cold, netuid) α entry. + for (hot, cold) in keys_to_remove { + Alpha::::remove((hot, cold, netuid)); + } + // 7.b) Clear share‑pool totals for each hotkey on this subnet. + for hot in hotkeys_seen { + TotalHotkeyAlpha::::remove(&hot, netuid); + TotalHotkeyShares::::remove(&hot, netuid); + } + // 7.c) Remove α‑in/α‑out counters (fully destroyed). + SubnetAlphaIn::::remove(netuid); + SubnetAlphaInProvided::::remove(netuid); + SubnetAlphaOut::::remove(netuid); + + // 8) Refund remaining lock to subnet owner: + // refund = max(0, lock_cost(τ) − owner_received_emission_in_τ). + let refund: TaoCurrency = lock_cost.saturating_sub(owner_emission_tao); + + // Clear the locked balance on the subnet. + Self::set_subnet_locked_balance(netuid, TaoCurrency::ZERO); + + if !refund.is_zero() { + Self::add_balance_to_coldkey_account(&owner_coldkey, refund.to_u64()); + } + + Ok(()) + } } diff --git a/pallets/subtensor/src/staking/set_children.rs b/pallets/subtensor/src/staking/set_children.rs index 139ea82c5d..cf7103b7ab 100644 --- a/pallets/subtensor/src/staking/set_children.rs +++ b/pallets/subtensor/src/staking/set_children.rs @@ -23,7 +23,7 @@ impl Pallet { /// - If all checks pass and setting the childkeys is scheduled. /// /// # Errors: - /// * `SubNetworkDoesNotExist`: + /// * `MechanismDoesNotExist`: /// - Attempting to register to a non-existent network. 
/// * `RegistrationNotPermittedOnRootSubnet`: /// - Attempting to register a child on the root network. @@ -48,10 +48,9 @@ impl Pallet { // Ensure the hotkey passes the rate limit. ensure!( - Self::passes_rate_limit_on_subnet( - &TransactionType::SetChildren, // Set children. - &hotkey, // Specific to a hotkey. - netuid, // Specific to a subnet. + TransactionType::SetChildren.passes_rate_limit_on_subnet::( + &hotkey, // Specific to a hotkey. + netuid, // Specific to a subnet. ), Error::::TxRateLimitExceeded ); @@ -63,10 +62,7 @@ impl Pallet { ); // Check that the network we are trying to create the child on exists. - ensure!( - Self::if_subnet_exist(netuid), - Error::::SubNetworkDoesNotExist - ); + ensure!(Self::if_subnet_exist(netuid), Error::::SubnetNotExists); // Check that the coldkey owns the hotkey. ensure!( @@ -111,12 +107,7 @@ impl Pallet { // Set last transaction block let current_block = Self::get_current_block_as_u64(); - Self::set_last_transaction_block_on_subnet( - &hotkey, - netuid, - &TransactionType::SetChildren, - current_block, - ); + TransactionType::SetChildren.set_last_block_on_subnet::(&hotkey, netuid, current_block); // Calculate cool-down block let cooldown_block = @@ -154,7 +145,7 @@ impl Pallet { /// - On successfully registering children to a hotkey. /// /// # Errors: - /// * `SubNetworkDoesNotExist`: + /// * `MechanismDoesNotExist`: /// - Attempting to register to a non-existent network. /// * `RegistrationNotPermittedOnRootSubnet`: /// - Attempting to register a child on the root network. @@ -319,10 +310,9 @@ impl Pallet { if take > current_take { // Ensure the hotkey passes the rate limit. ensure!( - Self::passes_rate_limit_on_subnet( - &TransactionType::SetChildkeyTake, // Set childkey take. - &hotkey, // Specific to a hotkey. - netuid, // Specific to a subnet. + TransactionType::SetChildkeyTake.passes_rate_limit_on_subnet::( + &hotkey, // Specific to a hotkey. + netuid, // Specific to a subnet. ), Error::::TxChildkeyTakeRateLimitExceeded ); @@ -330,10 +320,9 @@ impl Pallet { // Set last transaction block let current_block = Self::get_current_block_as_u64(); - Self::set_last_transaction_block_on_subnet( + TransactionType::SetChildkeyTake.set_last_block_on_subnet::( &hotkey, netuid, - &TransactionType::SetChildkeyTake, current_block, ); @@ -341,10 +330,9 @@ impl Pallet { ChildkeyTake::::insert(hotkey.clone(), netuid, take); // Update the last transaction block - Self::set_last_transaction_block_on_subnet( + TransactionType::SetChildkeyTake.set_last_block_on_subnet::( &hotkey, netuid, - &TransactionType::SetChildkeyTake, current_block, ); diff --git a/pallets/subtensor/src/subnets/mechanism.rs b/pallets/subtensor/src/subnets/mechanism.rs new file mode 100644 index 0000000000..6598c308f2 --- /dev/null +++ b/pallets/subtensor/src/subnets/mechanism.rs @@ -0,0 +1,385 @@ +//! This file contains all tooling to work with sub-subnets +//! + +use super::*; +use crate::epoch::run_epoch::EpochTerms; +use alloc::collections::BTreeMap; +use safe_math::*; +use substrate_fixed::types::U64F64; +use subtensor_runtime_common::{AlphaCurrency, MechId, NetUid, NetUidStorageIndex}; + +pub type LeaseId = u32; + +pub type CurrencyOf = ::Currency; + +pub type BalanceOf = + as fungible::Inspect<::AccountId>>::Balance; + +/// Theoretical maximum of subnets on bittensor. 
This value is used in indexed /// storage of epoch values for sub-subnets as /// /// `storage_index = netuid + sub_id * GLOBAL_MAX_SUBNET_COUNT` /// /// For sub_id = 0 this index results in netuid and provides backward compatibility /// for subnets with the default sub-subnet count of 1. /// /// Changing this value will require a migration of all epoch maps. /// +pub const GLOBAL_MAX_SUBNET_COUNT: u16 = 4096; + +// Theoretical maximum number of mechanisms per subnet +// GLOBAL_MAX_SUBNET_COUNT * MAX_MECHANISM_COUNT_PER_SUBNET should be 0x10000 +pub const MAX_MECHANISM_COUNT_PER_SUBNET: u8 = 16; + +impl<T: Config> Pallet<T> { + pub fn get_mechanism_storage_index(netuid: NetUid, sub_id: MechId) -> NetUidStorageIndex { + u16::from(sub_id) + .saturating_mul(GLOBAL_MAX_SUBNET_COUNT) + .saturating_add(u16::from(netuid)) + .into() + } + + pub fn get_netuid(netuid_index: NetUidStorageIndex) -> NetUid { + if let Some(netuid) = u16::from(netuid_index).checked_rem(GLOBAL_MAX_SUBNET_COUNT) { + NetUid::from(netuid) + } else { + // Because GLOBAL_MAX_SUBNET_COUNT is not zero, this never happens + NetUid::ROOT + } + } + + pub fn get_netuid_and_subid( + netuid_index: NetUidStorageIndex, + ) -> Result<(NetUid, MechId), Error<T>> { + let maybe_netuid = u16::from(netuid_index).checked_rem(GLOBAL_MAX_SUBNET_COUNT); + if let Some(netuid_u16) = maybe_netuid { + let netuid = NetUid::from(netuid_u16); + + // Make sure the base subnet exists + ensure!( + Self::if_subnet_exist(netuid), + Error::<T>::MechanismDoesNotExist + ); + + // Extract sub_id + let sub_id_u8 = u8::try_from(u16::from(netuid_index).safe_div(GLOBAL_MAX_SUBNET_COUNT)) + .map_err(|_| Error::<T>::MechanismDoesNotExist)?; + let sub_id = MechId::from(sub_id_u8); + + if MechanismCountCurrent::<T>::get(netuid) > sub_id { + Ok((netuid, sub_id)) + } else { + Err(Error::<T>::MechanismDoesNotExist.into()) + } + } else { + Err(Error::<T>::MechanismDoesNotExist.into()) + } + } + + pub fn get_current_mechanism_count(netuid: NetUid) -> MechId { + MechanismCountCurrent::<T>::get(netuid) + } + + pub fn ensure_mechanism_exists(netuid: NetUid, sub_id: MechId) -> DispatchResult { + // Make sure the base subnet exists + ensure!( + Self::if_subnet_exist(netuid), + Error::<T>::MechanismDoesNotExist + ); + + // Make sure the mechanism limit is not exceeded + ensure!( + MechanismCountCurrent::<T>::get(netuid) > sub_id, + Error::<T>::MechanismDoesNotExist + ); + Ok(()) + } + + /// Set the desired value of the sub-subnet count for a subnet identified + /// by netuid + pub fn do_set_mechanism_count(netuid: NetUid, mechanism_count: MechId) -> DispatchResult { + // Make sure the subnet exists + ensure!( + Self::if_subnet_exist(netuid), + Error::<T>::MechanismDoesNotExist + ); + + // Count cannot be zero + ensure!(mechanism_count > 0.into(), Error::<T>::InvalidValue); + + // Make sure we are not exceeding the max sub-subnet count + ensure!( + mechanism_count <= MaxMechanismCount::<T>::get(), + Error::<T>::InvalidValue + ); + + // Make sure we are not allowing numbers that will break the math + ensure!( + mechanism_count <= MechId::from(MAX_MECHANISM_COUNT_PER_SUBNET), + Error::<T>::InvalidValue + ); + + Self::update_mechanism_counts_if_needed(netuid, mechanism_count); + + Ok(()) + } + + /// Update the current count for a subnet identified by netuid + /// - Cleans up all sub-subnet maps if the count is reduced + /// + pub fn update_mechanism_counts_if_needed(netuid: NetUid, new_count: MechId) { + let old_count = u8::from(MechanismCountCurrent::<T>::get(netuid)); + let new_count_u8 = u8::from(new_count); + if old_count != new_count_u8 { + if
old_count > new_count_u8 { + for mecid in new_count_u8..old_count { + let netuid_index = + Self::get_mechanism_storage_index(netuid, MechId::from(mecid)); + + // Cleanup Weights + let _ = Weights::<T>::clear_prefix(netuid_index, u32::MAX, None); + + // Cleanup Incentive + Incentive::<T>::remove(netuid_index); + + // Cleanup LastUpdate + LastUpdate::<T>::remove(netuid_index); + + // Cleanup Bonds + let _ = Bonds::<T>::clear_prefix(netuid_index, u32::MAX, None); + + // Cleanup WeightCommits + let _ = WeightCommits::<T>::clear_prefix(netuid_index, u32::MAX, None); + + // Cleanup TimelockedWeightCommits + let _ = + TimelockedWeightCommits::<T>::clear_prefix(netuid_index, u32::MAX, None); + } + } + + MechanismCountCurrent::<T>::insert(netuid, MechId::from(new_count)); + + // Reset split back to even + MechanismEmissionSplit::<T>::remove(netuid); + } + } + + pub fn do_set_emission_split(netuid: NetUid, maybe_split: Option<Vec<u16>>) -> DispatchResult { + // Make sure the subnet exists + ensure!( + Self::if_subnet_exist(netuid), + Error::<T>::MechanismDoesNotExist + ); + + if let Some(split) = maybe_split { + // Check the length + ensure!(!split.is_empty(), Error::<T>::InvalidValue); + ensure!( + split.len() <= u8::from(MechanismCountCurrent::<T>::get(netuid)) as usize, + Error::<T>::InvalidValue + ); + + // Check that values add up to 65535 + let total: u64 = split.iter().map(|s| *s as u64).sum(); + ensure!(total == u16::MAX as u64, Error::<T>::InvalidValue); + + MechanismEmissionSplit::<T>::insert(netuid, split); + } else { + MechanismEmissionSplit::<T>::remove(netuid); + } + + Ok(()) + } + + /// Split alpha emission between sub-subnets in the proportions + /// stored in MechanismEmissionSplit + /// + pub fn split_emissions(netuid: NetUid, alpha: AlphaCurrency) -> Vec<AlphaCurrency> { + let mechanism_count = u64::from(MechanismCountCurrent::<T>::get(netuid)); + let maybe_split = MechanismEmissionSplit::<T>::get(netuid); + + // Unset split means even distribution + let mut result: Vec<AlphaCurrency> = if let Some(split) = maybe_split { + split + .iter() + .map(|s| { + AlphaCurrency::from( + (u64::from(alpha) as u128) + .saturating_mul(*s as u128) + .safe_div(u16::MAX as u128) as u64, + ) + }) + .collect() + } else { + let per_mechanism = u64::from(alpha).safe_div(mechanism_count); + vec![AlphaCurrency::from(per_mechanism); mechanism_count as usize] + }; + + // Trim or zero-pad so the result length matches mechanism_count + if result.len() != mechanism_count as usize { + result.resize(mechanism_count as usize, 0u64.into()); // pad with AlphaCurrency::from(0) + } + + // Credit any emission lost to rounding or truncation to mechanism 0 + let rounding_err = + u64::from(alpha).saturating_sub(result.iter().map(|s| u64::from(*s)).sum()); + if let Some(cell) = result.first_mut() { + *cell = cell.saturating_add(AlphaCurrency::from(rounding_err)); + } + result + } + + fn weighted_acc_u16(existing: u16, added: u16, weight: U64F64) -> u16 { + U64F64::saturating_from_num(existing) + .saturating_add(U64F64::saturating_from_num(added).saturating_mul(weight)) + .saturating_to_num::<u16>() + } + + fn weighted_acc_alpha( + existing: AlphaCurrency, + added: AlphaCurrency, + weight: U64F64, + ) -> AlphaCurrency { + U64F64::saturating_from_num(existing) + .saturating_add(U64F64::saturating_from_num(added).saturating_mul(weight)) + .saturating_to_num::<u64>() + .into() + } + + /// Splits rao_emission between different sub-subnets using the `split_emissions` function. + /// + /// Runs the epoch function for each sub-subnet and consolidates hotkey_emission + /// into a single vector.
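+    /// A hedged doc-sketch of the split this builds on (values illustrative):
+    /// with `MechanismEmissionSplit` unset and 3 mechanisms, splitting 10 α
+    /// yields an even three-way split with the remainder credited to
+    /// mechanism 0:
+    ///
+    /// ```ignore
+    /// let shares = Pallet::<T>::split_emissions(netuid, AlphaCurrency::from(10u64));
+    /// // 10 / 3 = 3 per mechanism; the 1 α remainder goes to mechanism 0.
+    /// assert_eq!(shares, vec![4u64.into(), 3u64.into(), 3u64.into()]);
+    /// ```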
+ /// + pub fn epoch_with_mechanisms( + netuid: NetUid, + rao_emission: AlphaCurrency, + ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> { + let aggregated: BTreeMap = + Self::split_emissions(netuid, rao_emission) + .into_iter() + .enumerate() + // Run epoch function for each mechanism to distribute its portion of emissions + .flat_map(|(sub_id_usize, sub_emission)| { + let sub_id_u8: u8 = sub_id_usize.try_into().unwrap_or_default(); + let sub_id = MechId::from(sub_id_u8); + + // Run epoch function on the mechanism emission + let epoch_output = Self::epoch_mechanism(netuid, sub_id, sub_emission); + Self::persist_mechanism_epoch_terms(netuid, sub_id, epoch_output.as_map()); + + // Calculate mechanism weight from the split emission (not the other way because preserving + // emission accuracy is the priority) + // For zero emission the first mechanism gets full weight + let sub_weight = U64F64::saturating_from_num(sub_emission).safe_div_or( + U64F64::saturating_from_num(rao_emission), + U64F64::saturating_from_num(if sub_id_u8 == 0 { 1 } else { 0 }), + ); + + // Produce an iterator of (hotkey, (terms, sub_weight)) tuples + epoch_output + .0 + .into_iter() + .map(move |(hotkey, terms)| (hotkey, (terms, sub_weight))) + }) + // Consolidate the hotkey emissions into a single BTreeMap + .fold(BTreeMap::new(), |mut acc, (hotkey, (terms, sub_weight))| { + acc.entry(hotkey) + .and_modify(|acc_terms| { + // Server and validator emission come from mechanism emission and need to be added up + acc_terms.validator_emission = acc_terms + .validator_emission + .saturating_add(terms.validator_emission); + acc_terms.server_emission = acc_terms + .server_emission + .saturating_add(terms.server_emission); + + // The rest of the terms need to be aggregated as weighted sum + acc_terms.dividend = Self::weighted_acc_u16( + acc_terms.dividend, + terms.dividend, + sub_weight, + ); + acc_terms.stake_weight = Self::weighted_acc_u16( + acc_terms.stake_weight, + terms.stake_weight, + sub_weight, + ); + acc_terms.active |= terms.active; + acc_terms.emission = Self::weighted_acc_alpha( + acc_terms.emission, + terms.emission, + sub_weight, + ); + acc_terms.rank = + Self::weighted_acc_u16(acc_terms.rank, terms.rank, sub_weight); + acc_terms.trust = + Self::weighted_acc_u16(acc_terms.trust, terms.trust, sub_weight); + acc_terms.consensus = Self::weighted_acc_u16( + acc_terms.consensus, + terms.consensus, + sub_weight, + ); + acc_terms.pruning_score = Self::weighted_acc_u16( + acc_terms.pruning_score, + terms.pruning_score, + sub_weight, + ); + acc_terms.validator_trust = Self::weighted_acc_u16( + acc_terms.validator_trust, + terms.validator_trust, + sub_weight, + ); + acc_terms.new_validator_permit |= terms.new_validator_permit; + }) + .or_insert_with(|| { + // weighted insert for the first sub-subnet seen for this hotkey + EpochTerms { + uid: terms.uid, + dividend: Self::weighted_acc_u16(0, terms.dividend, sub_weight), + incentive: Self::weighted_acc_u16(0, terms.incentive, sub_weight), + validator_emission: terms.validator_emission, + server_emission: terms.server_emission, + stake_weight: Self::weighted_acc_u16( + 0, + terms.stake_weight, + sub_weight, + ), + active: terms.active, // booleans are ORed across subs + emission: Self::weighted_acc_alpha( + 0u64.into(), + terms.emission, + sub_weight, + ), + rank: Self::weighted_acc_u16(0, terms.rank, sub_weight), + trust: Self::weighted_acc_u16(0, terms.trust, sub_weight), + consensus: Self::weighted_acc_u16(0, terms.consensus, sub_weight), + pruning_score: 
Self::weighted_acc_u16( + 0, + terms.pruning_score, + sub_weight, + ), + validator_trust: Self::weighted_acc_u16( + 0, + terms.validator_trust, + sub_weight, + ), + new_validator_permit: terms.new_validator_permit, + bond: Vec::new(), // aggregated map doesn’t use bonds; keep empty + } + }); + acc + }); + + // State updates from epoch function + Self::persist_netuid_epoch_terms(netuid, &aggregated); + + // Remap BTreeMap back to Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> format + // for processing emissions in run_coinbase + // Emission tuples (hotkey, server_emission, validator_emission) + aggregated + .into_iter() + .map(|(hotkey, terms)| (hotkey, terms.server_emission, terms.validator_emission)) + .collect() + } +} diff --git a/pallets/subtensor/src/subnets/mod.rs b/pallets/subtensor/src/subnets/mod.rs index a823773395..e93628eef4 100644 --- a/pallets/subtensor/src/subnets/mod.rs +++ b/pallets/subtensor/src/subnets/mod.rs @@ -1,5 +1,6 @@ use super::*; pub mod leasing; +pub mod mechanism; pub mod registration; pub mod serving; pub mod subnet; diff --git a/pallets/subtensor/src/subnets/registration.rs b/pallets/subtensor/src/subnets/registration.rs index c8f6b04cb6..bd7bdeed57 100644 --- a/pallets/subtensor/src/subnets/registration.rs +++ b/pallets/subtensor/src/subnets/registration.rs @@ -55,7 +55,7 @@ impl Pallet { /// - On successfully registering a uid to a neuron slot on a subnetwork. /// /// # Raises: - /// * 'SubNetworkDoesNotExist': + /// * 'MechanismDoesNotExist': /// - Attempting to register to a non-existent network. /// /// * 'TooManyRegistrationsThisBlock': @@ -78,10 +78,7 @@ impl Pallet { !netuid.is_root(), Error::<T>::RegistrationNotPermittedOnRootSubnet ); - ensure!( - Self::if_subnet_exist(netuid), - Error::<T>::SubNetworkDoesNotExist - ); + ensure!(Self::if_subnet_exist(netuid), Error::<T>::SubnetNotExists); // --- 3. Ensure the passed network allows registrations. ensure!( @@ -193,7 +190,7 @@ impl Pallet { /// - On successfully registering a uid to a neuron slot on a subnetwork. /// /// # Raises: - /// *'SubNetworkDoesNotExist': + /// *'MechanismDoesNotExist': /// - Attempting to register to a non-existent network. /// /// *'TooManyRegistrationsThisBlock': @@ -236,10 +233,7 @@ impl Pallet { !netuid.is_root(), Error::<T>::RegistrationNotPermittedOnRootSubnet ); - ensure!( - Self::if_subnet_exist(netuid), - Error::<T>::SubNetworkDoesNotExist - ); + ensure!(Self::if_subnet_exist(netuid), Error::<T>::SubnetNotExists); // --- 3. Ensure the passed network allows registrations. ensure!( @@ -388,6 +382,63 @@ impl Pallet { real_hash } + fn get_immune_owner_hotkeys(netuid: NetUid, coldkey: &T::AccountId) -> Vec<T::AccountId> { + Self::get_immune_owner_tuples(netuid, coldkey) + .into_iter() + .map(|(_, hk)| hk) + .collect() + } + + pub fn get_immune_owner_uids(netuid: NetUid, coldkey: &T::AccountId) -> Vec<u16> { + Self::get_immune_owner_tuples(netuid, coldkey) + .into_iter() + .map(|(uid, _)| uid) + .collect() + } + + fn get_immune_owner_tuples(netuid: NetUid, coldkey: &T::AccountId) -> Vec<(u16, T::AccountId)> {
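+        // Hedged aside: the ordering used below, shown standalone on plain
+        // (block, uid) pairs (illustrative only): registration block
+        // descending, then uid ascending.
+        fn sort_newest_first(pairs: &mut [(u64, u16)]) {
+            pairs.sort_by(|(b1, u1), (b2, u2)| b2.cmp(b1).then(u1.cmp(u2)));
+        }
+        // e.g. [(5, 2), (9, 1), (5, 1)] sorts to [(9, 1), (5, 1), (5, 2)].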
+ fn get_immune_owner_tuples(netuid: NetUid, coldkey: &T::AccountId) -> Vec<(u16, T::AccountId)> { + // Gather (block, uid, hotkey) only for hotkeys that have a UID and a registration block. + let mut triples: Vec<(u64, u16, T::AccountId)> = OwnedHotkeys::::get(coldkey) + .into_iter() + .filter_map(|hotkey| { + // Uids must exist; filter_map ignores hotkeys without a UID + Uids::::get(netuid, &hotkey).map(|uid| { + let block = BlockAtRegistration::::get(netuid, uid); + (block, uid, hotkey) + }) + }) + .collect(); + + // Sort by BlockAtRegistration (descending), then by uid (ascending). + // The most recent registrations take priority so that older keys can age out of immunity. + triples.sort_by(|(b1, u1, _), (b2, u2, _)| b2.cmp(b1).then(u1.cmp(u2))); + + // Keep the first ImmuneOwnerUidsLimit entries + let limit = ImmuneOwnerUidsLimit::::get(netuid).into(); + if triples.len() > limit { + triples.truncate(limit); + } + + // Project to uid/hotkey tuples + let mut immune_tuples: Vec<(u16, T::AccountId)> = + triples.into_iter().map(|(_, uid, hk)| (uid, hk)).collect(); + + // Insert the subnet owner hotkey at the beginning of the list if valid and not + // already present + if let Ok(owner_hk) = SubnetOwnerHotkey::::try_get(netuid) { + if let Some(owner_uid) = Uids::::get(netuid, &owner_hk) { + if !immune_tuples.contains(&(owner_uid, owner_hk.clone())) { + immune_tuples.insert(0, (owner_uid, owner_hk.clone())); + if immune_tuples.len() > limit { + immune_tuples.truncate(limit); + } + } + } + } + + immune_tuples + } + /// Determine which peer to prune from the network by finding the element with the lowest pruning score out of /// immunity period. If there is a tie for lowest pruning score, the neuron registered earliest is pruned. /// If all neurons are in immunity period, the neuron with the lowest pruning score is pruned. If there is a tie for @@ -411,13 +462,14 @@ impl Pallet { return 0; // If there are no neurons in this network. } + // Get the list of immune hotkeys (the subnet owner's keys, top-k by registration recency) + let subnet_owner_coldkey = SubnetOwner::::get(netuid); + let immune_hotkeys = Self::get_immune_owner_hotkeys(netuid, &subnet_owner_coldkey); for neuron_uid in 0..neurons_n { - // Do not deregister the owner's hotkey from the `SubnetOwnerHotkey` map + // Do not deregister the owner's owned hotkeys if let Ok(hotkey) = Self::get_hotkey_for_net_and_uid(netuid, neuron_uid) { - if let Ok(top_sn_owner_hotkey) = SubnetOwnerHotkey::::try_get(netuid) { - if top_sn_owner_hotkey == hotkey { - continue; - } + if immune_hotkeys.contains(&hotkey) { + continue; } } diff --git a/pallets/subtensor/src/subnets/serving.rs b/pallets/subtensor/src/subnets/serving.rs index ae1c97cc7c..cdaf39e51b 100644 --- a/pallets/subtensor/src/subnets/serving.rs +++ b/pallets/subtensor/src/subnets/serving.rs @@ -40,7 +40,7 @@ impl Pallet { /// - On successfully serving the axon info. /// /// # Raises: - /// * 'SubNetworkDoesNotExist': + /// * 'SubnetNotExists': /// - Attempting to serve axon info on a non-existent network. /// /// * 'NotRegistered': @@ -144,7 +144,7 @@ impl Pallet { /// - On successfully serving the axon info. /// /// # Raises: - /// * 'SubNetworkDoesNotExist': + /// * 'SubnetNotExists': /// - Attempting to serve axon info on a non-existent network. /// /// * 'NotRegistered': diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index 38be89cba0..8439297e14 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -1,19 +1,9 @@ use super::*; use sp_core::Get; use subtensor_runtime_common::{NetUid, TaoCurrency}; +use subtensor_swap_interface::SwapHandler; impl Pallet { - /// Fetches the total count of subnets.
- /// - /// This function retrieves the total number of subnets present on the chain. - /// - /// # Returns: - /// * 'u16': The total number of subnets. - /// - pub fn get_num_subnets() -> u16 { - TotalNetworks::::get() - } - /// Returns true if the subnetwork exists. /// /// This function checks if a subnetwork with the given UID exists. @@ -100,19 +90,25 @@ impl Pallet { /// Facilitates user registration of a new subnetwork. /// - /// # Args: - /// * 'origin': ('T::RuntimeOrigin'): The calling origin. Must be signed. - /// * `identity` (`Option`): Optional identity to be associated with the new subnetwork. - /// - /// # Event: - /// * 'NetworkAdded': Emitted when a new network is successfully added. - /// - /// # Raises: - /// * 'TxRateLimitExceeded': If the rate limit for network registration is exceeded. - /// * 'NotEnoughBalanceToStake': If there isn't enough balance to stake for network registration. - /// * 'BalanceWithdrawalError': If an error occurs during balance withdrawal for network registration. - /// * `SubnetIdentitySet(netuid)`: Emitted when a custom identity is set for a new subnetwork. - /// * `SubnetIdentityRemoved(netuid)`: Emitted when the identity of a removed network is also deleted. + /// ### Args + /// * **`origin`** – `T::RuntimeOrigin`  Must be **signed** by the coldkey. + /// * **`hotkey`** – `&T::AccountId`  First neuron of the new subnet. + /// * **`mechid`** – `u16`  Only the dynamic mechanism (`1`) is currently supported. + /// * **`identity`** – `Option`  Optional metadata for the subnet. + /// + /// ### Events + /// * `NetworkAdded(netuid, mechid)` – always. + /// * `SubnetIdentitySet(netuid)` – when a custom identity is supplied. + /// * `NetworkRemoved(netuid)` – when a subnet is pruned to make room. + /// + /// ### Errors + /// * `NonAssociatedColdKey` – `hotkey` already belongs to another coldkey. + /// * `MechanismDoesNotExist` – unsupported `mechid`. + /// * `NetworkTxRateLimitExceeded` – caller hit the register-network rate limit. + /// * `SubnetLimitReached` – limit hit **and** no eligible subnet to prune. + /// * `CannotAffordLockCost` – caller lacks the lock cost. + /// * `BalanceWithdrawalError` – failed to lock balance. + /// * `InvalidIdentity` – supplied `identity` failed validation. /// pub fn do_register_network( origin: T::RuntimeOrigin, @@ -132,24 +128,43 @@ impl Pallet { // --- 3. Ensure the mechanism is Dynamic. ensure!(mechid == 1, Error::::MechanismDoesNotExist); - // --- 4. Rate limit for network registrations. let current_block = Self::get_current_block_as_u64(); + + ensure!( + current_block >= NetworkRegistrationStartBlock::::get(), + Error::::SubNetRegistrationDisabled + ); + + // --- 4. Rate limit for network registrations. ensure!( - Self::passes_rate_limit(&TransactionType::RegisterNetwork, &coldkey), + TransactionType::RegisterNetwork.passes_rate_limit::(&coldkey), Error::::NetworkTxRateLimitExceeded ); - // --- 5. Calculate and lock the required tokens. + // --- 5. Check if we need to prune a subnet (if at SubnetLimit). + // But do not prune yet; we only do it after all checks pass. + let subnet_limit = Self::get_max_subnets(); + let current_count: u16 = NetworksAdded::::iter() + .filter(|(netuid, added)| *added && *netuid != NetUid::ROOT) + .count() as u16; + + let mut recycle_netuid: Option = None; + if current_count >= subnet_limit { + if let Some(netuid) = Self::get_network_to_prune() { + recycle_netuid = Some(netuid); + } else { + return Err(Error::::SubnetLimitReached.into()); + } + } + + // --- 6. 
Calculate and lock the required tokens. let lock_amount = Self::get_network_lock_cost(); log::debug!("network lock_amount: {lock_amount:?}"); ensure!( Self::can_remove_balance_from_coldkey_account(&coldkey, lock_amount.into()), - Error::::NotEnoughBalanceToStake + Error::::CannotAffordLockCost ); - // --- 6. Determine the netuid to register. - let netuid_to_register = Self::get_next_netuid(); - // --- 7. Perform the lock operation. let actual_tao_lock_amount = Self::remove_balance_from_coldkey_account(&coldkey, lock_amount.into())?; @@ -157,44 +172,64 @@ impl Pallet { // --- 8. Set the lock amount for use to determine pricing. Self::set_network_last_lock(actual_tao_lock_amount); + Self::set_network_last_lock_block(current_block); + + // --- 9. If we identified a subnet to prune, do it now. + if let Some(prune_netuid) = recycle_netuid { + Self::do_dissolve_network(prune_netuid)?; + } + + // --- 10. Determine netuid to register. If we pruned a subnet, reuse that netuid. + let netuid_to_register: NetUid = match recycle_netuid { + Some(prune_netuid) => prune_netuid, + None => Self::get_next_netuid(), + }; - // --- 9. Set initial and custom parameters for the network. + // --- 11. Set initial and custom parameters for the network. let default_tempo = DefaultTempo::::get(); Self::init_new_network(netuid_to_register, default_tempo); log::debug!("init_new_network: {netuid_to_register:?}"); - // --- 10. Add the caller to the neuron set. + // --- 12. Add the caller to the neuron set. Self::create_account_if_non_existent(&coldkey, hotkey); Self::append_neuron(netuid_to_register, hotkey, current_block); log::debug!("Appended neuron for netuid {netuid_to_register:?}, hotkey: {hotkey:?}"); - // --- 11. Set the mechanism. + // --- 13. Set the mechanism. SubnetMechanism::::insert(netuid_to_register, mechid); log::debug!("SubnetMechanism for netuid {netuid_to_register:?} set to: {mechid:?}"); - // --- 12. Set the creation terms. - Self::set_network_last_lock_block(current_block); + // --- 14. Set the creation terms. NetworkRegisteredAt::::insert(netuid_to_register, current_block); - // --- 13. Set the symbol. + // --- 15. Set the symbol. let symbol = Self::get_next_available_symbol(netuid_to_register); TokenSymbol::::insert(netuid_to_register, symbol); - // --- 14. Init the pool by putting the lock as the initial alpha. - // Put initial TAO from lock into subnet TAO and produce numerically equal amount of Alpha - // The initial TAO is the locked amount, with a minimum of 1 RAO and a cap of 100 TAO. - let pool_initial_tao = Self::get_network_min_lock(); - // FIXME: the result from function is used as a mixed type alpha/tao - let pool_initial_alpha = AlphaCurrency::from(Self::get_network_min_lock().to_u64()); + // The initial TAO is the locked amount + // Put initial TAO from lock into subnet TAO and produce numerically equal amount of Alpha. 
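The pool seeding that follows splits the actual locked amount: the network minimum lock seeds `SubnetTAO` (and a numerically equal `SubnetAlphaIn`), and the remainder is recycled. A worked sketch of that arithmetic, with plain u64 RAO values standing in for the `TaoCurrency`/`AlphaCurrency` wrappers:

```rust
fn split_lock(actual_lock_rao: u64, min_lock_rao: u64) -> (u64, u64) {
    let pool_initial_tao = min_lock_rao; // seeds SubnetTAO and, numerically, SubnetAlphaIn
    let recycled = actual_lock_rao.saturating_sub(pool_initial_tao); // goes to recycle_tao
    (pool_initial_tao, recycled)
}

fn main() {
    // Locking 1_500 RAO against a 1_000 RAO minimum: 1_000 seeds the pool,
    // the remaining 500 is recycled.
    assert_eq!(split_lock(1_500, 1_000), (1_000, 500));
}
```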
+ let pool_initial_tao: TaoCurrency = Self::get_network_min_lock(); + let pool_initial_alpha: AlphaCurrency = pool_initial_tao.to_u64().into(); let actual_tao_lock_amount_less_pool_tao = actual_tao_lock_amount.saturating_sub(pool_initial_tao); + + // Core pool + ownership SubnetTAO::::insert(netuid_to_register, pool_initial_tao); SubnetAlphaIn::::insert(netuid_to_register, pool_initial_alpha); SubnetOwner::::insert(netuid_to_register, coldkey.clone()); SubnetOwnerHotkey::::insert(netuid_to_register, hotkey.clone()); + SubnetLocked::::insert(netuid_to_register, actual_tao_lock_amount); + SubnetTaoProvided::::insert(netuid_to_register, TaoCurrency::ZERO); + SubnetAlphaInProvided::::insert(netuid_to_register, AlphaCurrency::ZERO); + SubnetAlphaOut::::insert(netuid_to_register, AlphaCurrency::ZERO); + SubnetVolume::::insert(netuid_to_register, 0u128); + RAORecycledForRegistration::::insert( + netuid_to_register, + actual_tao_lock_amount_less_pool_tao, + ); if actual_tao_lock_amount_less_pool_tao > TaoCurrency::ZERO { - Self::burn_tokens(actual_tao_lock_amount_less_pool_tao); + Self::recycle_tao(actual_tao_lock_amount_less_pool_tao); } if actual_tao_lock_amount > TaoCurrency::ZERO && pool_initial_tao > TaoCurrency::ZERO { @@ -202,7 +237,7 @@ impl Pallet { Self::increase_total_stake(pool_initial_tao); } - // --- 15. Add the identity if it exists + // --- 17. Add the identity if it exists if let Some(identity_value) = identity { ensure!( Self::is_valid_subnet_identity(&identity_value), @@ -213,15 +248,12 @@ impl Pallet { Self::deposit_event(Event::SubnetIdentitySet(netuid_to_register)); } - // --- 16. Enable registration for new subnet - NetworkRegistrationAllowed::::set(netuid_to_register, true); - NetworkPowRegistrationAllowed::::set(netuid_to_register, true); - - // --- 17. Emit the NetworkAdded event. + T::SwapInterface::toggle_user_liquidity(netuid_to_register, true); + // --- 18. Emit the NetworkAdded event. log::info!("NetworkAdded( netuid:{netuid_to_register:?}, mechanism:{mechid:?} )"); Self::deposit_event(Event::NetworkAdded(netuid_to_register, mechid)); - // --- 18. Return success. + // --- 19. Return success. Ok(()) } @@ -311,7 +343,7 @@ impl Pallet { /// /// # Raises /// - /// * `Error::::SubNetworkDoesNotExist`: If the subnet does not exist. + /// * `Error::::SubnetNotExists`: If the subnet does not exist. /// * `DispatchError::BadOrigin`: If the caller is not the subnet owner. /// * `Error::::FirstEmissionBlockNumberAlreadySet`: If the last emission block number has already been set. /// @@ -319,10 +351,7 @@ impl Pallet { /// /// * `DispatchResult`: A result indicating the success or failure of the operation. pub fn do_start_call(origin: T::RuntimeOrigin, netuid: NetUid) -> DispatchResult { - ensure!( - Self::if_subnet_exist(netuid), - Error::::SubNetworkDoesNotExist - ); + ensure!(Self::if_subnet_exist(netuid), Error::::SubnetNotExists); Self::ensure_subnet_owner(origin, netuid)?; ensure!( FirstEmissionBlockNumber::::get(netuid).is_none(), @@ -391,8 +420,7 @@ impl Pallet { // Rate limit: 1 call per week ensure!( - Self::passes_rate_limit_on_subnet( - &TransactionType::SetSNOwnerHotkey, + TransactionType::SetSNOwnerHotkey.passes_rate_limit_on_subnet::( hotkey, // ignored netuid, // Specific to a subnet. 
), @@ -401,10 +429,9 @@ impl Pallet { // Set last transaction block let current_block = Self::get_current_block_as_u64(); - Self::set_last_transaction_block_on_subnet( + TransactionType::SetSNOwnerHotkey.set_last_block_on_subnet::( hotkey, netuid, - &TransactionType::SetSNOwnerHotkey, current_block, ); diff --git a/pallets/subtensor/src/subnets/symbols.rs b/pallets/subtensor/src/subnets/symbols.rs index 4b37e9b6b0..6fd3e82f56 100644 --- a/pallets/subtensor/src/subnets/symbols.rs +++ b/pallets/subtensor/src/subnets/symbols.rs @@ -96,7 +96,7 @@ pub static SYMBOLS: [&[u8]; 439] = [ b"\xE1\x9A\xA6", // ᚦ (Thurisaz, giant, 83) b"\xE1\x9A\xA8", // ᚨ (Ansuz, god, 84) b"\xE1\x9A\xB1", // ᚱ (Raidho, ride, 85) - b"\xE1\x9A\xB3", // ᚲ (Kaunan, ulcer, 86) + b"\xE1\x9A\xB3", // ᚳ (Kaunan, ulcer, 86) b"\xD0\xAB", // Ы (Cyrillic Yeru, 87) b"\xE1\x9B\x89", // ᛉ (Algiz, protection, 88) b"\xE1\x9B\x92", // ᛒ (Berkanan, birch, 89) @@ -130,7 +130,7 @@ pub static SYMBOLS: [&[u8]; 439] = [ b"\xD0\x83", // Ѓ (Gje, 113) b"\xD0\x84", // Є (Ukrainian Ie, 114) b"\xD0\x85", // Ѕ (Dze, 115) - b"\xD1\x8A", // Ъ (Hard sign, 116) + b"\xD1\x8A", // ъ (Hard sign, 116) // Coptic Alphabet b"\xE2\xB2\x80", // Ⲁ (Alfa, 117) b"\xE2\xB2\x81", // ⲁ (Small Alfa, 118) @@ -145,12 +145,12 @@ pub static SYMBOLS: [&[u8]; 439] = [ b"\xF0\x91\x80\x83", // 𑀃 (Ii, 126) b"\xF0\x91\x80\x85", // 𑀅 (U, 127) // End of Sinhala Alphabet - b"\xE0\xB6\xB1", // ඲ (La, 128) - b"\xE0\xB6\xB2", // ඳ (Va, 129) - b"\xE0\xB6\xB3", // ප (Sha, 130) - b"\xE0\xB6\xB4", // ඵ (Ssa, 131) - b"\xE0\xB6\xB5", // බ (Sa, 132) - b"\xE0\xB6\xB6", // භ (Ha, 133) + b"\xE0\xB6\xB1", // න (La, 128) + b"\xE0\xB6\xB3", // ඳ (Va, 129) + b"\xE0\xB6\xB4", // ප (Sha, 130) + b"\xE0\xB6\xB5", // ඵ (Ssa, 131) + b"\xE0\xB6\xB6", // බ (Sa, 132) + b"\xE0\xB6\xB7", // භ (Ha, 133) // Glagolitic Alphabet b"\xE2\xB0\x80", // Ⰰ (Az, 134) b"\xE2\xB0\x81", // Ⰱ (Buky, 135) @@ -231,42 +231,42 @@ pub static SYMBOLS: [&[u8]; 439] = [ b"\xE0\xB8\xAD", // อ (O Ang, 209) b"\xE0\xB8\xAE", // ฮ (Ho Nokhuk, 210) // Hangul Alphabet (Korean) - b"\xE1\x84\x80", // ㄱ (Giyeok, 211) - b"\xE1\x84\x81", // ㄴ (Nieun, 212) - b"\xE1\x84\x82", // ㄷ (Digeut, 213) - b"\xE1\x84\x83", // ㄹ (Rieul, 214) - b"\xE1\x84\x84", // ㅁ (Mieum, 215) - b"\xE1\x84\x85", // ㅂ (Bieup, 216) - b"\xE1\x84\x86", // ㅅ (Siot, 217) - b"\xE1\x84\x87", // ㅇ (Ieung, 218) - b"\xE1\x84\x88", // ㅈ (Jieut, 219) - b"\xE1\x84\x89", // ㅊ (Chieut, 220) - b"\xE1\x84\x8A", // ㅋ (Kieuk, 221) - b"\xE1\x84\x8B", // ㅌ (Tieut, 222) - b"\xE1\x84\x8C", // ㅍ (Pieup, 223) - b"\xE1\x84\x8D", // ㅎ (Hieut, 224) + b"\xE3\x84\xB1", // ㄱ (Giyeok, 211) + b"\xE3\x84\xB4", // ㄴ (Nieun, 212) + b"\xE3\x84\xB7", // ㄷ (Digeut, 213) + b"\xE3\x84\xB9", // ㄹ (Rieul, 214) + b"\xE3\x85\x81", // ㅁ (Mieum, 215) + b"\xE3\x85\x82", // ㅂ (Bieup, 216) + b"\xE3\x85\x85", // ㅅ (Siot, 217) + b"\xE3\x85\x87", // ㅇ (Ieung, 218) + b"\xE3\x85\x88", // ㅈ (Jieut, 219) + b"\xE3\x85\x8A", // ㅊ (Chieut, 220) + b"\xE3\x85\x8B", // ㅋ (Kieuk, 221) + b"\xE3\x85\x8C", // ㅌ (Tieut, 222) + b"\xE3\x85\x8D", // ㅍ (Pieup, 223) + b"\xE3\x85\x8E", // ㅎ (Hieut, 224) // Hangul Vowels - b"\xE1\x85\xA1", // ㅏ (A, 225) - b"\xE1\x85\xA2", // ㅐ (Ae, 226) - b"\xE1\x85\xA3", // ㅑ (Ya, 227) - b"\xE1\x85\xA4", // ㅒ (Yae, 228) - b"\xE1\x85\xA5", // ㅓ (Eo, 229) - b"\xE1\x85\xA6", // ㅔ (E, 230) - b"\xE1\x85\xA7", // ㅕ (Yeo, 231) - b"\xE1\x85\xA8", // ㅖ (Ye, 232) - b"\xE1\x85\xA9", // ㅗ (O, 233) - b"\xE1\x85\xAA", // ㅘ (Wa, 234) - b"\xE1\x85\xAB", // ㅙ (Wae, 235) - b"\xE1\x85\xAC", // ㅚ (Oe, 236) - b"\xE1\x85\xAD", // ㅛ 
(Yo, 237) - b"\xE1\x85\xAE", // ㅜ (U, 238) - b"\xE1\x85\xAF", // ㅝ (Weo, 239) - b"\xE1\x85\xB0", // ㅞ (We, 240) - b"\xE1\x85\xB1", // ㅟ (Wi, 241) - b"\xE1\x85\xB2", // ㅠ (Yu, 242) - b"\xE1\x85\xB3", // ㅡ (Eu, 243) - b"\xE1\x85\xB4", // ㅢ (Ui, 244) - b"\xE1\x85\xB5", // ㅣ (I, 245) + b"\xE3\x85\x8F", // ㅏ (A, 225) + b"\xE3\x85\x90", // ㅐ (Ae, 226) + b"\xE3\x85\x91", // ㅑ (Ya, 227) + b"\xE3\x85\x92", // ㅒ (Yae, 228) + b"\xE3\x85\x93", // ㅓ (Eo, 229) + b"\xE3\x85\x94", // ㅔ (E, 230) + b"\xE3\x85\x95", // ㅕ (Yeo, 231) + b"\xE3\x85\x96", // ㅖ (Ye, 232) + b"\xE3\x85\x97", // ㅗ (O, 233) + b"\xE3\x85\x98", // ㅘ (Wa, 234) + b"\xE3\x85\x99", // ㅙ (Wae, 235) + b"\xE3\x85\x9A", // ㅚ (Oe, 236) + b"\xE3\x85\x9B", // ㅛ (Yo, 237) + b"\xE3\x85\x9C", // ㅜ (U, 238) + b"\xE3\x85\x9D", // ㅝ (Weo, 239) + b"\xE3\x85\x9E", // ㅞ (We, 240) + b"\xE3\x85\x9F", // ㅟ (Wi, 241) + b"\xE3\x85\xA0", // ㅠ (Yu, 242) + b"\xE3\x85\xA1", // ㅡ (Eu, 243) + b"\xE3\x85\xA2", // ㅢ (Ui, 244) + b"\xE3\x85\xA3", // ㅣ (I, 245) // Ethiopic Alphabet b"\xE1\x8A\xA0", // አ (Glottal A, 246) b"\xE1\x8A\xA1", // ኡ (Glottal U, 247) @@ -290,13 +290,13 @@ pub static SYMBOLS: [&[u8]; 439] = [ b"\xE1\x8A\xB4", // ኴ (Ke, 265) b"\xE1\x8A\xB5", // ኵ (Kwe, 266) b"\xE1\x8A\xB6", // ኶ (Ko, 267) - b"\xE1\x8A\x90", // ጐ (Go, 268) - b"\xE1\x8A\x91", // ጑ (Gu, 269) - b"\xE1\x8A\x92", // ጒ (Gi, 270) - b"\xE1\x8A\x93", // መ (Gua, 271) - b"\xE1\x8A\x94", // ጔ (Ge, 272) - b"\xE1\x8A\x95", // ጕ (Gwe, 273) - b"\xE1\x8A\x96", // ጖ (Go, 274) + b"\xE1\x8C\x90", // ጐ (Go, 268) + b"\xE1\x8C\x91", // ጑ (Gu, 269) + b"\xE1\x8C\x92", // ጒ (Gi, 270) + b"\xE1\x88\x98", // መ (Gua, 271) + b"\xE1\x8C\x94", // ጔ (Ge, 272) + b"\xE1\x8C\x95", // ጕ (Gwe, 273) + b"\xE1\x8C\x96", // ጖ (Go, 274) // Devanagari Alphabet b"\xE0\xA4\x85", // अ (A, 275) b"\xE0\xA4\x86", // आ (Aa, 276) @@ -429,12 +429,12 @@ pub static SYMBOLS: [&[u8]; 439] = [ // Sinhala Alphabet b"\xE0\xB6\x85", // අ (A, 401) b"\xE0\xB6\x86", // ආ (Aa, 402) - b"\xE0\xB6\x87", // ඉ (I, 403) - b"\xE0\xB6\x88", // ඊ (Ii, 404) - b"\xE0\xB6\x89", // උ (U, 405) - b"\xE0\xB6\x8A", // ඌ (Uu, 406) - b"\xE0\xB6\x8B", // ඍ (R, 407) - b"\xE0\xB6\x8C", // ඎ (Rr, 408) + b"\xE0\xB6\x89", // ඉ (I, 403) + b"\xE0\xB6\x8A", // ඊ (Ii, 404) + b"\xE0\xB6\x8B", // උ (U, 405) + b"\xE0\xB6\x8C", // ඌ (Uu, 406) + b"\xE0\xB6\x8D", // ඍ (R, 407) + b"\xE0\xB6\x8E", // ඎ (Rr, 408) b"\xE0\xB6\x8F", // ඏ (L, 409) b"\xE0\xB6\x90", // ඐ (Ll, 410) b"\xE0\xB6\x91", // එ (E, 411) @@ -448,15 +448,15 @@ pub static SYMBOLS: [&[u8]; 439] = [ b"\xE0\xB6\x9C", // ග (Ga, 419) b"\xE0\xB6\x9D", // ඝ (Gha, 420) b"\xE0\xB6\x9E", // ඞ (Nga, 421) - b"\xE0\xB6\x9F", // ච (Cha, 422) - b"\xE0\xB6\xA0", // ඡ (Chha, 423) - b"\xE0\xB6\xA1", // ජ (Ja, 424) - b"\xE0\xB6\xA2", // ඣ (Jha, 425) - b"\xE0\xB6\xA3", // ඤ (Nya, 426) - b"\xE0\xB6\xA4", // ට (Ta, 427) + b"\xE0\xB6\xA0", // ච (Cha, 422) + b"\xE0\xB6\xA1", // ඡ (Chha, 423) + b"\xE0\xB6\xA2", // ජ (Ja, 424) + b"\xE0\xB6\xA3", // ඣ (Jha, 425) + b"\xE0\xB6\xA4", // ඤ (Nya, 426) + b"\xE0\xB6\xA7", // ට (Ta, 427) b"\xE0\xB6\xA5", // ඥ (Tha, 428) b"\xE0\xB6\xA6", // ඦ (Da, 429) - b"\xE0\xB6\xA7", // ට (Dha, 430) + b"\xE0\xB6\xA9", // ඩ (Dha, 430) b"\xE0\xB6\xA8", // ඨ (Na, 431) b"\xE0\xB6\xAA", // ඪ (Pa, 432) b"\xE0\xB6\xAB", // ණ (Pha, 433) diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index d6b776252e..b68fabfbd5 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -1,6 +1,8 @@ use super::*; use 
frame_support::storage::IterableStorageDoubleMap; -use sp_std::vec; +use sp_runtime::Percent; +use sp_std::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; +use sp_std::{cmp, vec}; use subtensor_runtime_common::NetUid; impl Pallet { @@ -16,15 +18,34 @@ impl Pallet { } } - /// Resets the trust, emission, consensus, incentive, dividends of the neuron to default + /// Resets the trust, emission, consensus, incentive, dividends, bonds, and weights of + /// the neuron to default pub fn clear_neuron(netuid: NetUid, neuron_uid: u16) { let neuron_index: usize = neuron_uid.into(); Emission::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0.into())); Trust::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); Consensus::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); - Incentive::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); + for mecid in 0..MechanismCountCurrent::::get(netuid).into() { + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid.into()); + Incentive::::mutate(netuid_index, |v| Self::set_element_at(v, neuron_index, 0)); + Bonds::::remove(netuid_index, neuron_uid); // Remove bonds for Validator. + + // Clear weights set BY the neuron_uid + Weights::::remove(netuid_index, neuron_uid); + + // Set weights FOR the neuron_uid to 0 + let all_uids: Vec = Weights::::iter_key_prefix(netuid_index).collect(); + for uid in all_uids { + Weights::::mutate(netuid_index, uid, |weight_vec: &mut Vec<(u16, u16)>| { + for (weight_uid, w) in weight_vec.iter_mut() { + if *weight_uid == neuron_uid { + *w = 0; + } + } + }); + } + } Dividends::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); - Bonds::::remove(netuid, neuron_uid); // Remove bonds for Validator. } /// Replace the neuron under this uid. @@ -93,9 +114,12 @@ impl Pallet { Active::::mutate(netuid, |v| v.push(true)); Emission::::mutate(netuid, |v| v.push(0.into())); Consensus::::mutate(netuid, |v| v.push(0)); - Incentive::::mutate(netuid, |v| v.push(0)); + for mecid in 0..MechanismCountCurrent::::get(netuid).into() { + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid.into()); + Incentive::::mutate(netuid_index, |v| v.push(0)); + Self::set_last_update_for_uid(netuid_index, next_uid, block_number); + } Dividends::::mutate(netuid, |v| v.push(0)); - LastUpdate::::mutate(netuid, |v| v.push(block_number)); PruningScores::::mutate(netuid, |v| v.push(0)); ValidatorTrust::::mutate(netuid, |v| v.push(0)); ValidatorPermit::::mutate(netuid, |v| v.push(false)); @@ -107,6 +131,234 @@ impl Pallet { IsNetworkMember::::insert(new_hotkey.clone(), netuid, true); // Fill network is member. 
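The hunks above key per-mechanism storage (`Incentive`, `LastUpdate`, `Weights`, `Bonds`) by a `NetUidStorageIndex` computed from `(netuid, mecid)` via `get_mechanism_storage_index`. The pallet defines the actual encoding elsewhere; the sketch below only illustrates the lossless round trip that `get_netuid_and_subid` relies on, using one plausible (assumed) bit layout:

```rust
// Assumed layout: mechanism id in the high bits, netuid in the low 16 bits.
fn storage_index(netuid: u16, mecid: u8) -> u32 {
    ((mecid as u32) << 16) | netuid as u32
}

fn netuid_and_subid(index: u32) -> (u16, u8) {
    ((index & 0xFFFF) as u16, (index >> 16) as u8)
}

fn main() {
    let idx = storage_index(12, 3);
    assert_eq!(netuid_and_subid(idx), (12, 3)); // lossless round trip
}
```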
} + pub fn trim_to_max_allowed_uids(netuid: NetUid, max_n: u16) -> DispatchResult { + // Reasonable limits + ensure!(Self::if_subnet_exist(netuid), Error::::SubnetNotExists); + ensure!( + max_n >= MinAllowedUids::::get(netuid), + Error::::InvalidValue + ); + ensure!( + max_n <= MaxAllowedUids::::get(netuid), + Error::::InvalidValue + ); + + MaxAllowedUids::::insert(netuid, max_n); + + let current_n = Self::get_subnetwork_n(netuid); + if current_n > max_n { + let owner = SubnetOwner::::get(netuid); + let owner_uids = BTreeSet::from_iter(Self::get_immune_owner_uids(netuid, &owner)); + + // Count the number of immune UIDs + let mut immune_count: u16 = 0; + for uid in 0..current_n { + if owner_uids.contains(&{ uid }) || Self::get_neuron_is_immune(netuid, uid) { + immune_count = immune_count.saturating_add(1); + } + } + + // Ensure the number of immune UIDs is less than 80% + let immune_percentage = Percent::from_rational(immune_count, max_n); + ensure!( + immune_percentage < T::MaxImmuneUidsPercentage::get(), + Error::::InvalidValue + ); + + // Get all emissions with their UIDs and sort by emission (descending) + // This ensures we keep the highest emitters and remove the lowest ones + let mut emissions = Emission::::get(netuid) + .into_iter() + .enumerate() + .collect::>(); + emissions.sort_by_key(|(_, emission)| cmp::Reverse(*emission)); + + let mut removed_uids = BTreeSet::new(); + let mut uids_left_to_process = current_n; + let mechanisms_count = MechanismCountCurrent::::get(netuid).into(); + + // Iterate from the end (lowest emitters) to the beginning + for i in (0..current_n).rev() { + if uids_left_to_process == max_n { + break; // We've reached the target number of UIDs + } + + if let Some((uid, _)) = emissions.get(i as usize).cloned() { + let neuron_uid = uid as u16; + + // Skip subnet owner's or temporally immune uids + if owner_uids.contains(&neuron_uid) + || Self::get_neuron_is_immune(netuid, neuron_uid) + { + continue; + } + + // Remove hotkey related storage items if hotkey exists + if let Ok(hotkey) = Keys::::try_get(netuid, neuron_uid) { + Uids::::remove(netuid, &hotkey); + IsNetworkMember::::remove(&hotkey, netuid); + LastHotkeyEmissionOnNetuid::::remove(&hotkey, netuid); + AlphaDividendsPerSubnet::::remove(netuid, &hotkey); + TaoDividendsPerSubnet::::remove(netuid, &hotkey); + Axons::::remove(netuid, &hotkey); + NeuronCertificates::::remove(netuid, &hotkey); + Prometheus::::remove(netuid, &hotkey); + } + + // Remove all storage items associated with this uid + #[allow(unknown_lints)] + Keys::::remove(netuid, neuron_uid); + BlockAtRegistration::::remove(netuid, neuron_uid); + for mecid in 0..mechanisms_count { + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid.into()); + Weights::::remove(netuid_index, neuron_uid); + Bonds::::remove(netuid_index, neuron_uid); + } + + // Remove from emissions array and track as removed + emissions.remove(i.into()); + removed_uids.insert(uid); + uids_left_to_process = uids_left_to_process.saturating_sub(1); + } + } + + // Sort remaining emissions by uid to compress uids to the left + // This ensures consecutive uid indices in the final arrays + emissions.sort_by_key(|(uid, _)| *uid); + + // Extract the final uids and emissions after trimming and sorting + let (trimmed_uids, trimmed_emissions): (Vec, Vec) = + emissions.into_iter().unzip(); + + // Get all current arrays from storage + let ranks = Rank::::get(netuid); + let trust = Trust::::get(netuid); + let active = Active::::get(netuid); + let consensus = 
Consensus::::get(netuid); + let dividends = Dividends::::get(netuid); + let pruning_scores = PruningScores::::get(netuid); + let vtrust = ValidatorTrust::::get(netuid); + let vpermit = ValidatorPermit::::get(netuid); + let stake_weight = StakeWeight::::get(netuid); + + // Create trimmed arrays by extracting values for kept uids only + // Pre-allocate vectors with exact capacity for efficiency + let len = trimmed_uids.len(); + let mut trimmed_ranks = Vec::with_capacity(len); + let mut trimmed_trust = Vec::with_capacity(len); + let mut trimmed_active = Vec::with_capacity(len); + let mut trimmed_consensus = Vec::with_capacity(len); + let mut trimmed_dividends = Vec::with_capacity(len); + let mut trimmed_pruning_scores = Vec::with_capacity(len); + let mut trimmed_vtrust = Vec::with_capacity(len); + let mut trimmed_vpermit = Vec::with_capacity(len); + let mut trimmed_stake_weight = Vec::with_capacity(len); + + // Single iteration to extract values for all kept uids + for &uid in &trimmed_uids { + trimmed_ranks.push(ranks.get(uid).cloned().unwrap_or_default()); + trimmed_trust.push(trust.get(uid).cloned().unwrap_or_default()); + trimmed_active.push(active.get(uid).cloned().unwrap_or_default()); + trimmed_consensus.push(consensus.get(uid).cloned().unwrap_or_default()); + trimmed_dividends.push(dividends.get(uid).cloned().unwrap_or_default()); + trimmed_pruning_scores.push(pruning_scores.get(uid).cloned().unwrap_or_default()); + trimmed_vtrust.push(vtrust.get(uid).cloned().unwrap_or_default()); + trimmed_vpermit.push(vpermit.get(uid).cloned().unwrap_or_default()); + trimmed_stake_weight.push(stake_weight.get(uid).cloned().unwrap_or_default()); + } + + // Update storage with trimmed arrays + Emission::::insert(netuid, trimmed_emissions); + Rank::::insert(netuid, trimmed_ranks); + Trust::::insert(netuid, trimmed_trust); + Active::::insert(netuid, trimmed_active); + Consensus::::insert(netuid, trimmed_consensus); + Dividends::::insert(netuid, trimmed_dividends); + PruningScores::::insert(netuid, trimmed_pruning_scores); + ValidatorTrust::::insert(netuid, trimmed_vtrust); + ValidatorPermit::::insert(netuid, trimmed_vpermit); + StakeWeight::::insert(netuid, trimmed_stake_weight); + + // Update incentives/lastupdates for mechanisms + for mecid in 0..mechanisms_count { + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid.into()); + let incentive = Incentive::::get(netuid_index); + let lastupdate = LastUpdate::::get(netuid_index); + let mut trimmed_incentive = Vec::with_capacity(trimmed_uids.len()); + let mut trimmed_lastupdate = Vec::with_capacity(trimmed_uids.len()); + + for uid in &trimmed_uids { + trimmed_incentive.push(incentive.get(*uid).cloned().unwrap_or_default()); + trimmed_lastupdate.push(lastupdate.get(*uid).cloned().unwrap_or_default()); + } + + Incentive::::insert(netuid_index, trimmed_incentive); + LastUpdate::::insert(netuid_index, trimmed_lastupdate); + } + + // Create mapping from old uid to new compressed uid + // This is needed to update connections (weights and bonds) with correct uid references + let old_to_new_uid: BTreeMap = trimmed_uids + .iter() + .enumerate() + .map(|(new_uid, &old_uid)| (old_uid, new_uid)) + .collect(); + + // Update connections (weights and bonds) for each kept uid + // This involves three operations per uid: + // 1. Swap the uid storage to the new compressed position + // 2. Update all connections to reference the new compressed uids + // 3. 
Clear the connections to the trimmed uids + for (old_uid, new_uid) in &old_to_new_uid { + let old_neuron_uid = *old_uid as u16; + let new_neuron_uid = *new_uid as u16; + + // Swap uid specific storage items to new compressed positions + Keys::::swap(netuid, old_neuron_uid, netuid, new_neuron_uid); + BlockAtRegistration::::swap(netuid, old_neuron_uid, netuid, new_neuron_uid); + + for mecid in 0..mechanisms_count { + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid.into()); + + // Swap to new position and remap all target uids + Weights::::swap(netuid_index, old_neuron_uid, netuid_index, new_neuron_uid); + Weights::::mutate(netuid_index, new_neuron_uid, |weights| { + weights.retain_mut(|(target_uid, _weight)| { + if let Some(new_target_uid) = + old_to_new_uid.get(&(*target_uid as usize)) + { + *target_uid = *new_target_uid as u16; + true + } else { + false + } + }) + }); + + // Swap to new position and remap all target uids + Bonds::::swap(netuid_index, old_neuron_uid, netuid_index, new_neuron_uid); + Bonds::::mutate(netuid_index, new_neuron_uid, |bonds| { + bonds.retain_mut(|(target_uid, _bond)| { + if let Some(new_target_uid) = + old_to_new_uid.get(&(*target_uid as usize)) + { + *target_uid = *new_target_uid as u16; + true + } else { + false + } + }) + }); + } + } + + // Update the subnet's uid count to reflect the new maximum + SubnetworkN::::insert(netuid, max_n); + } + + Ok(()) + } + /// Returns true if the uid is set on the network. /// pub fn is_uid_exist_on_network(netuid: NetUid, uid: u16) -> bool { diff --git a/pallets/subtensor/src/subnets/weights.rs b/pallets/subtensor/src/subnets/weights.rs index 737329ccb7..61b64df137 100644 --- a/pallets/subtensor/src/subnets/weights.rs +++ b/pallets/subtensor/src/subnets/weights.rs @@ -10,7 +10,8 @@ use sp_runtime::{ traits::{BlakeTwo256, Hash}, }; use sp_std::{collections::vec_deque::VecDeque, vec}; -use subtensor_runtime_common::NetUid; +use subtensor_runtime_common::{MechId, NetUid, NetUidStorageIndex}; + impl Pallet { /// ---- The implementation for committing weight hashes. /// @@ -45,6 +46,30 @@ impl Pallet { netuid: NetUid, commit_hash: H256, ) -> DispatchResult { + Self::internal_commit_weights(origin, netuid, MechId::MAIN, commit_hash) + } + + pub fn do_commit_mechanism_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + mecid: MechId, + commit_hash: H256, + ) -> DispatchResult { + Self::internal_commit_weights(origin, netuid, mecid, commit_hash) + } + + fn internal_commit_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + mecid: MechId, + commit_hash: H256, + ) -> DispatchResult { + // Ensure netuid and mecid exist + Self::ensure_mechanism_exists(netuid, mecid)?; + + // Calculate subnet storage index + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid); + // 1. Verify the caller's signature (hotkey). let who = ensure_signed(origin)?; @@ -66,7 +91,8 @@ impl Pallet { let commit_block = Self::get_current_block_as_u64(); let neuron_uid = Self::get_uid_for_net_and_hotkey(netuid, &who)?; ensure!( - Self::check_rate_limit(netuid, neuron_uid, commit_block), + // Rate limiting should happen per sub-subnet, so use netuid_index here + Self::check_rate_limit(netuid_index, neuron_uid, commit_block), Error::::CommittingWeightsTooFast ); @@ -74,7 +100,7 @@ impl Pallet { let (first_reveal_block, last_reveal_block) = Self::get_reveal_blocks(netuid, commit_block); // 6. Retrieve or initialize the VecDeque of commits for the hotkey. 
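The `WeightCommits` queue retrieved below is a per-hotkey `VecDeque` of `(hash, commit_block, first_reveal_block, last_reveal_block)` tuples: expired entries are pruned from the front and new commits are appended, with a bound on unrevealed commits (the cap of 10 mirrors the one visible in the timelocked path). A self-contained model, with expiry simplified to the last reveal block rather than the pallet's `is_commit_expired` check:

```rust
use std::collections::VecDeque;

// (hash, commit_block, first_reveal_block, last_reveal_block)
type Commit = ([u8; 32], u64, u64, u64);

fn push_commit(q: &mut VecDeque<Commit>, c: Commit, now: u64) -> Result<(), &'static str> {
    // Prune commits whose reveal window has fully passed.
    while matches!(q.front(), Some((_, _, _, last)) if *last < now) {
        q.pop_front();
    }
    // Bound the number of unrevealed commits per hotkey.
    if q.len() >= 10 {
        return Err("TooManyUnrevealedCommits");
    }
    q.push_back(c);
    Ok(())
}

fn main() {
    let mut q = VecDeque::new();
    assert!(push_commit(&mut q, ([0; 32], 100, 110, 120), 100).is_ok());
    // By block 130 the first commit's window has passed; it is pruned on push.
    assert!(push_commit(&mut q, ([1; 32], 130, 140, 150), 130).is_ok());
    assert_eq!(q.len(), 1);
}
```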
- WeightCommits::::try_mutate(netuid, &who, |maybe_commits| -> DispatchResult { + WeightCommits::::try_mutate(netuid_index, &who, |maybe_commits| -> DispatchResult { let mut commits: VecDeque<(H256, u64, u64, u64)> = maybe_commits.take().unwrap_or_default(); @@ -102,10 +128,14 @@ impl Pallet { *maybe_commits = Some(commits); // 11. Emit the WeightsCommitted event - Self::deposit_event(Event::WeightsCommitted(who.clone(), netuid, commit_hash)); + Self::deposit_event(Event::WeightsCommitted( + who.clone(), + netuid_index, + commit_hash, + )); // 12. Update the last commit block for the hotkey's UID. - Self::set_last_update_for_uid(netuid, neuron_uid, commit_block); + Self::set_last_update_for_uid(netuid_index, neuron_uid, commit_block); // 13. Return success. Ok(()) @@ -234,6 +264,48 @@ impl Pallet { reveal_round: u64, commit_reveal_version: u16, ) -> DispatchResult { + Self::internal_commit_timelocked_weights( + origin, + netuid, + MechId::MAIN, + commit, + reveal_round, + commit_reveal_version, + ) + } + + pub fn do_commit_timelocked_mechanism_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + mecid: MechId, + commit: BoundedVec>, + reveal_round: u64, + commit_reveal_version: u16, + ) -> DispatchResult { + Self::internal_commit_timelocked_weights( + origin, + netuid, + mecid, + commit, + reveal_round, + commit_reveal_version, + ) + } + + pub fn internal_commit_timelocked_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + mecid: MechId, + commit: BoundedVec>, + reveal_round: u64, + commit_reveal_version: u16, + ) -> DispatchResult { + // Ensure netuid and mecid exist + Self::ensure_mechanism_exists(netuid, mecid)?; + + // Calculate netuid storage index + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid); + // 1. Verify the caller's signature (hotkey). let who = ensure_signed(origin)?; @@ -261,7 +333,7 @@ impl Pallet { let commit_block = Self::get_current_block_as_u64(); let neuron_uid = Self::get_uid_for_net_and_hotkey(netuid, &who)?; ensure!( - Self::check_rate_limit(netuid, neuron_uid, commit_block), + Self::check_rate_limit(netuid_index, neuron_uid, commit_block), Error::::CommittingWeightsTooFast ); @@ -272,37 +344,41 @@ impl Pallet { false => Self::get_epoch_index(netuid, cur_block), }; - TimelockedWeightCommits::::try_mutate(netuid, cur_epoch, |commits| -> DispatchResult { - // 7. Verify that the number of unrevealed commits is within the allowed limit. + TimelockedWeightCommits::::try_mutate( + netuid_index, + cur_epoch, + |commits| -> DispatchResult { + // 7. Verify that the number of unrevealed commits is within the allowed limit. - let unrevealed_commits_for_who = commits - .iter() - .filter(|(account, _, _, _)| account == &who) - .count(); - ensure!( - unrevealed_commits_for_who < 10, - Error::::TooManyUnrevealedCommits - ); + let unrevealed_commits_for_who = commits + .iter() + .filter(|(account, _, _, _)| account == &who) + .count(); + ensure!( + unrevealed_commits_for_who < 10, + Error::::TooManyUnrevealedCommits + ); - // 8. Append the new commit with calculated reveal blocks. - // Hash the commit before it is moved, for the event - let commit_hash = BlakeTwo256::hash(&commit); - commits.push_back((who.clone(), cur_block, commit, reveal_round)); + // 8. Append the new commit with calculated reveal blocks. + // Hash the commit before it is moved, for the event + let commit_hash = BlakeTwo256::hash(&commit); + commits.push_back((who.clone(), cur_block, commit, reveal_round)); - // 9. 
Emit the WeightsCommitted event - Self::deposit_event(Event::TimelockedWeightsCommitted( - who.clone(), - netuid, - commit_hash, - reveal_round, - )); + // 9. Emit the WeightsCommitted event + Self::deposit_event(Event::TimelockedWeightsCommitted( + who.clone(), + netuid_index, + commit_hash, + reveal_round, + )); - // 10. Update the last commit block for the hotkey's UID. - Self::set_last_update_for_uid(netuid, neuron_uid, commit_block); + // 10. Update the last commit block for the hotkey's UID. + Self::set_last_update_for_uid(netuid_index, neuron_uid, commit_block); - // 11. Return success. - Ok(()) - }) + // 11. Return success. + Ok(()) + }, + ) } /// ---- The implementation for revealing committed weights. @@ -349,6 +425,41 @@ impl Pallet { salt: Vec, version_key: u64, ) -> DispatchResult { + Self::internal_reveal_weights( + origin, + netuid, + MechId::MAIN, + uids, + values, + salt, + version_key, + ) + } + + pub fn do_reveal_mechanism_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + mecid: MechId, + uids: Vec, + values: Vec, + salt: Vec, + version_key: u64, + ) -> DispatchResult { + Self::internal_reveal_weights(origin, netuid, mecid, uids, values, salt, version_key) + } + + fn internal_reveal_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + mecid: MechId, + uids: Vec, + values: Vec, + salt: Vec, + version_key: u64, + ) -> DispatchResult { + // Calculate netuid storage index + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid); + // --- 1. Check the caller's signature (hotkey). let who = ensure_signed(origin.clone())?; @@ -361,80 +472,95 @@ impl Pallet { ); // --- 3. Mutate the WeightCommits to retrieve existing commits for the user. - WeightCommits::::try_mutate_exists(netuid, &who, |maybe_commits| -> DispatchResult { - let commits = maybe_commits - .as_mut() - .ok_or(Error::::NoWeightsCommitFound)?; - - // --- 4. Remove any expired commits from the front of the queue, collecting their hashes. - let mut expired_hashes = Vec::new(); - while let Some((hash, commit_block, _, _)) = commits.front() { - if Self::is_commit_expired(netuid, *commit_block) { - // Collect the expired commit hash - expired_hashes.push(*hash); - commits.pop_front(); - } else { - break; - } - } - - // --- 5. Hash the provided data. - let provided_hash: H256 = - Self::get_commit_hash(&who, netuid, &uids, &values, &salt, version_key); - - // --- 6. After removing expired commits, check if any commits are left. - if commits.is_empty() { - // Check if provided_hash matches any expired commits - if expired_hashes.contains(&provided_hash) { - return Err(Error::::ExpiredWeightCommit.into()); - } else { - return Err(Error::::NoWeightsCommitFound.into()); - } - } - - // --- 7. Search for the provided_hash in the non-expired commits. - if let Some(position) = commits - .iter() - .position(|(hash, _, _, _)| *hash == provided_hash) - { - // --- 8. Get the commit block for the commit being revealed. - let (_, commit_block, _, _) = commits - .get(position) + WeightCommits::::try_mutate_exists( + netuid_index, + &who, + |maybe_commits| -> DispatchResult { + let commits = maybe_commits + .as_mut() .ok_or(Error::::NoWeightsCommitFound)?; - // --- 9. Ensure the commit is ready to be revealed in the current block range. - ensure!( - Self::is_reveal_block_range(netuid, *commit_block), - Error::::RevealTooEarly - ); - - // --- 10. Remove all commits up to and including the one being revealed. - for _ in 0..=position { - commits.pop_front(); + // --- 4. 
Remove any expired commits from the front of the queue, collecting their hashes. + let mut expired_hashes = Vec::new(); + while let Some((hash, commit_block, _, _)) = commits.front() { + if Self::is_commit_expired(netuid, *commit_block) { + // Collect the expired commit hash + expired_hashes.push(*hash); + commits.pop_front(); + } else { + break; + } } - // --- 11. If the queue is now empty, remove the storage entry for the user. + // --- 5. Hash the provided data. + let provided_hash: H256 = + Self::get_commit_hash(&who, netuid_index, &uids, &values, &salt, version_key); + + // --- 6. After removing expired commits, check if any commits are left. if commits.is_empty() { - *maybe_commits = None; + // Check if provided_hash matches any expired commits + if expired_hashes.contains(&provided_hash) { + return Err(Error::::ExpiredWeightCommit.into()); + } else { + return Err(Error::::NoWeightsCommitFound.into()); + } } - // --- 12. Proceed to set the revealed weights. - Self::do_set_weights(origin, netuid, uids.clone(), values.clone(), version_key)?; + // --- 7. Search for the provided_hash in the non-expired commits. + if let Some(position) = commits + .iter() + .position(|(hash, _, _, _)| *hash == provided_hash) + { + // --- 8. Get the commit block for the commit being revealed. + let (_, commit_block, _, _) = commits + .get(position) + .ok_or(Error::::NoWeightsCommitFound)?; + + // --- 9. Ensure the commit is ready to be revealed in the current block range. + ensure!( + Self::is_reveal_block_range(netuid, *commit_block), + Error::::RevealTooEarly + ); + + // --- 10. Remove all commits up to and including the one being revealed. + for _ in 0..=position { + commits.pop_front(); + } - // --- 13. Emit the WeightsRevealed event. - Self::deposit_event(Event::WeightsRevealed(who.clone(), netuid, provided_hash)); + // --- 11. If the queue is now empty, remove the storage entry for the user. + if commits.is_empty() { + *maybe_commits = None; + } - // --- 14. Return ok. - Ok(()) - } else { - // --- 15. The provided_hash does not match any non-expired commits. - if expired_hashes.contains(&provided_hash) { - Err(Error::::ExpiredWeightCommit.into()) + // --- 12. Proceed to set the revealed weights. + Self::do_set_mechanism_weights( + origin, + netuid, + mecid, + uids.clone(), + values.clone(), + version_key, + )?; + + // --- 13. Emit the WeightsRevealed event. + Self::deposit_event(Event::WeightsRevealed( + who.clone(), + netuid_index, + provided_hash, + )); + + // --- 14. Return ok. + Ok(()) } else { - Err(Error::::InvalidRevealCommitHashNotMatch.into()) + // --- 15. The provided_hash does not match any non-expired commits. + if expired_hashes.contains(&provided_hash) { + Err(Error::::ExpiredWeightCommit.into()) + } else { + Err(Error::::InvalidRevealCommitHashNotMatch.into()) + } } - } - }) + }, + ) } /// ---- The implementation for batch revealing committed weights. @@ -484,6 +610,9 @@ impl Pallet { salts_list: Vec>, version_keys: Vec, ) -> DispatchResult { + // Calculate netuid storage index + let netuid_index = Self::get_mechanism_storage_index(netuid, MechId::MAIN); + // --- 1. Check that the input lists are of the same length. let num_reveals = uids_list.len(); ensure!( @@ -505,176 +634,128 @@ impl Pallet { ); // --- 4. Mutate the WeightCommits to retrieve existing commits for the user. - WeightCommits::::try_mutate_exists(netuid, &who, |maybe_commits| -> DispatchResult { - let commits = maybe_commits - .as_mut() - .ok_or(Error::::NoWeightsCommitFound)?; - - // --- 5. 
Remove any expired commits from the front of the queue, collecting their hashes. - let mut expired_hashes = Vec::new(); - while let Some((hash, commit_block, _, _)) = commits.front() { - if Self::is_commit_expired(netuid, *commit_block) { - // Collect the expired commit hash - expired_hashes.push(*hash); - commits.pop_front(); - } else { - break; - } - } - - // --- 6. Prepare to collect all provided hashes and their corresponding reveals. - let mut provided_hashes = Vec::new(); - let mut reveals = Vec::new(); - let mut revealed_hashes: Vec = Vec::with_capacity(num_reveals); - - for ((uids, values), (salt, version_key)) in uids_list - .into_iter() - .zip(values_list) - .zip(salts_list.into_iter().zip(version_keys)) - { - // --- 6a. Hash the provided data. - let provided_hash: H256 = BlakeTwo256::hash_of(&( - who.clone(), - netuid, - uids.clone(), - values.clone(), - salt.clone(), - version_key, - )); - provided_hashes.push(provided_hash); - reveals.push((uids, values, version_key, provided_hash)); - } + WeightCommits::::try_mutate_exists( + netuid_index, + &who, + |maybe_commits| -> DispatchResult { + let commits = maybe_commits + .as_mut() + .ok_or(Error::::NoWeightsCommitFound)?; - // --- 7. Validate all reveals first to ensure atomicity. - for (_uids, _values, _version_key, provided_hash) in &reveals { - // --- 7a. Check if the provided_hash is in the non-expired commits. - if !commits - .iter() - .any(|(hash, _, _, _)| *hash == *provided_hash) - { - // --- 7b. If not found, check if it matches any expired commits. - if expired_hashes.contains(provided_hash) { - return Err(Error::::ExpiredWeightCommit.into()); + // --- 5. Remove any expired commits from the front of the queue, collecting their hashes. + let mut expired_hashes = Vec::new(); + while let Some((hash, commit_block, _, _)) = commits.front() { + if Self::is_commit_expired(netuid, *commit_block) { + // Collect the expired commit hash + expired_hashes.push(*hash); + commits.pop_front(); } else { - return Err(Error::::InvalidRevealCommitHashNotMatch.into()); + break; } } - // --- 7c. Find the commit corresponding to the provided_hash. - let commit = commits - .iter() - .find(|(hash, _, _, _)| *hash == *provided_hash) - .ok_or(Error::::NoWeightsCommitFound)?; - - // --- 7d. Check if the commit is within the reveal window. - ensure!( - Self::is_reveal_block_range(netuid, commit.1), - Error::::RevealTooEarly - ); - } + // --- 6. Prepare to collect all provided hashes and their corresponding reveals. + let mut provided_hashes = Vec::new(); + let mut reveals = Vec::new(); + let mut revealed_hashes: Vec = Vec::with_capacity(num_reveals); - // --- 8. All reveals are valid. Proceed to remove and process each reveal. - for (uids, values, version_key, provided_hash) in reveals { - // --- 8a. Find the position of the provided_hash. - if let Some(position) = commits - .iter() - .position(|(hash, _, _, _)| *hash == provided_hash) + for ((uids, values), (salt, version_key)) in uids_list + .into_iter() + .zip(values_list) + .zip(salts_list.into_iter().zip(version_keys)) { - // --- 8b. Remove the commit from the queue. - commits.remove(position); + // --- 6a. Hash the provided data. + let provided_hash: H256 = BlakeTwo256::hash_of(&( + who.clone(), + netuid, + uids.clone(), + values.clone(), + salt.clone(), + version_key, + )); + provided_hashes.push(provided_hash); + reveals.push((uids, values, version_key, provided_hash)); + } - // --- 8c. Proceed to set the revealed weights. 
- Self::do_set_weights(origin.clone(), netuid, uids, values, version_key)?; + // --- 7. Validate all reveals first to ensure atomicity. + for (_uids, _values, _version_key, provided_hash) in &reveals { + // --- 7a. Check if the provided_hash is in the non-expired commits. + if !commits + .iter() + .any(|(hash, _, _, _)| *hash == *provided_hash) + { + // --- 7b. If not found, check if it matches any expired commits. + if expired_hashes.contains(provided_hash) { + return Err(Error::::ExpiredWeightCommit.into()); + } else { + return Err(Error::::InvalidRevealCommitHashNotMatch.into()); + } + } - // --- 8d. Collect the revealed hash. - revealed_hashes.push(provided_hash); - } else if expired_hashes.contains(&provided_hash) { - return Err(Error::::ExpiredWeightCommit.into()); - } else { - return Err(Error::::InvalidRevealCommitHashNotMatch.into()); + // --- 7c. Find the commit corresponding to the provided_hash. + let commit = commits + .iter() + .find(|(hash, _, _, _)| *hash == *provided_hash) + .ok_or(Error::::NoWeightsCommitFound)?; + + // --- 7d. Check if the commit is within the reveal window. + ensure!( + Self::is_reveal_block_range(netuid, commit.1), + Error::::RevealTooEarly + ); } - } - // --- 9. If the queue is now empty, remove the storage entry for the user. - if commits.is_empty() { - *maybe_commits = None; - } + // --- 8. All reveals are valid. Proceed to remove and process each reveal. + for (uids, values, version_key, provided_hash) in reveals { + // --- 8a. Find the position of the provided_hash. + if let Some(position) = commits + .iter() + .position(|(hash, _, _, _)| *hash == provided_hash) + { + // --- 8b. Remove the commit from the queue. + commits.remove(position); + + // --- 8c. Proceed to set the revealed weights. + Self::do_set_weights(origin.clone(), netuid, uids, values, version_key)?; + + // --- 8d. Collect the revealed hash. + revealed_hashes.push(provided_hash); + } else if expired_hashes.contains(&provided_hash) { + return Err(Error::::ExpiredWeightCommit.into()); + } else { + return Err(Error::::InvalidRevealCommitHashNotMatch.into()); + } + } - // --- 10. Emit the WeightsBatchRevealed event with all revealed hashes. - Self::deposit_event(Event::WeightsBatchRevealed( - who.clone(), - netuid, - revealed_hashes, - )); + // --- 9. If the queue is now empty, remove the storage entry for the user. + if commits.is_empty() { + *maybe_commits = None; + } - // --- 11. Return ok. - Ok(()) - }) + // --- 10. Emit the WeightsBatchRevealed event with all revealed hashes. + Self::deposit_event(Event::WeightsBatchRevealed( + who.clone(), + netuid, + revealed_hashes, + )); + + // --- 11. Return ok. + Ok(()) + }, + ) } - /// ---- The implementation for the extrinsic set_weights. - /// - /// # Args: - /// * 'origin': (RuntimeOrigin): - /// - The signature of the calling hotkey. - /// - /// * 'netuid' (u16): - /// - The u16 network identifier. - /// - /// * 'uids' ( Vec ): - /// - The uids of the weights to be set on the chain. - /// - /// * 'values' ( Vec ): - /// - The values of the weights to set on the chain. - /// - /// * 'version_key' ( u64 ): - /// - The network version key. - /// - /// # Event: - /// * WeightsSet; - /// - On successfully setting the weights on chain. - /// - /// # Raises: - /// * 'SubNetworkDoesNotExist': - /// - Attempting to set weights on a non-existent network. - /// - /// * 'NotRegistered': - /// - Attempting to set weights from a non registered account. 
- /// - /// * 'IncorrectWeightVersionKey': - /// - Attempting to set weights without having an up-to-date version_key. - /// - /// * 'SettingWeightsTooFast': - /// - Attempting to set weights faster than the weights_set_rate_limit. - /// - /// * 'NeuronNoValidatorPermit': - /// - Attempting to set non-self weights without a validator permit. - /// - /// * 'WeightVecNotEqualSize': - /// - Attempting to set weights with uids not of same length. - /// - /// * 'DuplicateUids': - /// - Attempting to set weights with duplicate uids. - /// - /// * 'UidsLengthExceedUidsInSubNet': - /// - Attempting to set weights above the max allowed uids. - /// - /// * 'UidVecContainInvalidOne': - /// - Attempting to set weights with invalid uids. - /// - /// * 'WeightVecLengthIsLow': - /// - Attempting to set weights with fewer weights than min. - /// - /// * 'MaxWeightExceeded': - /// - Attempting to set weights with max value exceeding limit. - /// - pub fn do_set_weights( + fn internal_set_weights( origin: T::RuntimeOrigin, netuid: NetUid, + mecid: MechId, uids: Vec, values: Vec, version_key: u64, ) -> dispatch::DispatchResult { + // Calculate subnet storage index + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid); + // --- 1. Check the caller's signature. This is the hotkey of a registered account. let hotkey = ensure_signed(origin)?; log::debug!( @@ -690,11 +771,8 @@ impl Pallet { Error::::WeightVecNotEqualSize ); - // --- 3. Check to see if this is a valid network. - ensure!( - Self::if_subnet_exist(netuid), - Error::::SubNetworkDoesNotExist - ); + // --- 3. Check to see if this is a valid network and sub-subnet. + Self::ensure_mechanism_exists(netuid, mecid)?; // --- 4. Check to see if the number of uids is within the max allowed uids for this network. ensure!( @@ -725,7 +803,8 @@ impl Pallet { let current_block: u64 = Self::get_current_block_as_u64(); if !Self::get_commit_reveal_weights_enabled(netuid) { ensure!( - Self::check_rate_limit(netuid, neuron_uid, current_block), + // Rate limit should apply per sub-subnet, so use netuid_index here + Self::check_rate_limit(netuid_index, neuron_uid, current_block), Error::::SettingWeightsTooFast ); } @@ -765,22 +844,158 @@ impl Pallet { zipped_weights.push((*uid, *val)) } - // --- 17. Set weights under netuid, uid double map entry. - Weights::::insert(netuid, neuron_uid, zipped_weights); + // --- 17. Set weights under netuid_index (sub-subnet), uid double map entry. + Weights::::insert(netuid_index, neuron_uid, zipped_weights); // --- 18. Set the activity for the weights on this network. if !Self::get_commit_reveal_weights_enabled(netuid) { - Self::set_last_update_for_uid(netuid, neuron_uid, current_block); + Self::set_last_update_for_uid(netuid_index, neuron_uid, current_block); } // --- 19. Emit the tracking event. - log::debug!("WeightsSet( netuid:{netuid:?}, neuron_uid:{neuron_uid:?} )"); - Self::deposit_event(Event::WeightsSet(netuid, neuron_uid)); + log::debug!("WeightsSet( netuid:{netuid_index:?}, neuron_uid:{neuron_uid:?} )"); + Self::deposit_event(Event::WeightsSet(netuid_index, neuron_uid)); // --- 20. Return ok. Ok(()) } + /// ---- The implementation for the extrinsic set_weights. + /// + /// # Args: + /// * 'origin': (RuntimeOrigin): + /// - The signature of the calling hotkey. + /// + /// * 'netuid' (u16): + /// - The u16 network identifier. + /// + /// * 'uids' ( Vec ): + /// - The uids of the weights to be set on the chain. + /// + /// * 'values' ( Vec ): + /// - The values of the weights to set on the chain. 
+ /// + /// * 'version_key' ( u64 ): + /// - The network version key. + /// + /// # Event: + /// * WeightsSet; + /// - On successfully setting the weights on chain. + /// + /// # Raises: + /// * 'MechanismDoesNotExist': + /// - Attempting to set weights on a non-existent network or mechanism. + /// + /// * 'NotRegistered': + /// - Attempting to set weights from a non-registered account. + /// + /// * 'IncorrectWeightVersionKey': + /// - Attempting to set weights without having an up-to-date version_key. + /// + /// * 'SettingWeightsTooFast': + /// - Attempting to set weights faster than the weights_set_rate_limit. + /// + /// * 'NeuronNoValidatorPermit': + /// - Attempting to set non-self weights without a validator permit. + /// + /// * 'WeightVecNotEqualSize': + /// - Attempting to set weights with uids and values of different lengths. + /// + /// * 'DuplicateUids': + /// - Attempting to set weights with duplicate uids. + /// + /// * 'UidsLengthExceedUidsInSubNet': + /// - Attempting to set weights above the max allowed uids. + /// + /// * 'UidVecContainInvalidOne': + /// - Attempting to set weights with invalid uids. + /// + /// * 'WeightVecLengthIsLow': + /// - Attempting to set weights with fewer weights than min. + /// + /// * 'MaxWeightExceeded': + /// - Attempting to set weights with max value exceeding limit. + /// + pub fn do_set_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + uids: Vec, + values: Vec, + version_key: u64, + ) -> dispatch::DispatchResult { + Self::internal_set_weights(origin, netuid, MechId::MAIN, uids, values, version_key) + } + + /// ---- The implementation for the extrinsic set_mechanism_weights. + /// + /// # Args: + /// * 'origin': (RuntimeOrigin): + /// - The signature of the calling hotkey. + /// + /// * 'netuid' (u16): + /// - The u16 network identifier. + /// + /// * 'mecid' (u8): + /// - The u8 identifier of the sub-subnet. + /// + /// * 'uids' ( Vec ): + /// - The uids of the weights to be set on the chain. + /// + /// * 'values' ( Vec ): + /// - The values of the weights to set on the chain. + /// + /// * 'version_key' ( u64 ): + /// - The network version key. + /// + /// # Event: + /// * WeightsSet; + /// - On successfully setting the weights on chain. + /// + /// # Raises: + /// * 'MechanismDoesNotExist': + /// - Attempting to set weights on a non-existent network or mechanism. + /// + /// * 'NotRegistered': + /// - Attempting to set weights from a non-registered account. + /// + /// * 'IncorrectWeightVersionKey': + /// - Attempting to set weights without having an up-to-date version_key. + /// + /// * 'SettingWeightsTooFast': + /// - Attempting to set weights faster than the weights_set_rate_limit. + /// + /// * 'NeuronNoValidatorPermit': + /// - Attempting to set non-self weights without a validator permit. + /// + /// * 'WeightVecNotEqualSize': + /// - Attempting to set weights with uids and values of different lengths. + /// + /// * 'DuplicateUids': + /// - Attempting to set weights with duplicate uids. + /// + /// * 'UidsLengthExceedUidsInSubNet': + /// - Attempting to set weights above the max allowed uids. + /// + /// * 'UidVecContainInvalidOne': + /// - Attempting to set weights with invalid uids. + /// + /// * 'WeightVecLengthIsLow': + /// - Attempting to set weights with fewer weights than min. + /// + /// * 'MaxWeightExceeded': + /// - Attempting to set weights with max value exceeding limit. + /// + pub fn do_set_mechanism_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + mecid: MechId, + uids: Vec, + values: Vec, + version_key: u64, + ) -> dispatch::DispatchResult { + Self::internal_set_weights(origin, netuid, mecid, uids, values, version_key) + }
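`do_set_weights` and `do_set_mechanism_weights` above are thin wrappers over `internal_set_weights`: the legacy entry point pins the mechanism to `MechId::MAIN`, the new one threads an explicit `mecid` through. A sketch of the pattern, modeling `MechId` as a `u8` newtype and assuming `MAIN` denotes mechanism 0 (its actual value lives in subtensor-runtime-common):

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct MechId(u8);

impl MechId {
    const MAIN: MechId = MechId(0); // assumed default mechanism
}

fn internal_set_weights(netuid: u16, mecid: MechId, uids: &[u16]) -> (u16, MechId, usize) {
    // Stand-in for the shared body: report where the weights would be stored.
    (netuid, mecid, uids.len())
}

fn do_set_weights(netuid: u16, uids: &[u16]) -> (u16, MechId, usize) {
    // Legacy entry point: pins the mechanism to MAIN.
    internal_set_weights(netuid, MechId::MAIN, uids)
}

fn do_set_mechanism_weights(netuid: u16, mecid: MechId, uids: &[u16]) -> (u16, MechId, usize) {
    // Mechanism-aware entry point: threads the caller's mecid through.
    internal_set_weights(netuid, mecid, uids)
}

fn main() {
    assert_eq!(do_set_weights(3, &[0, 1]), (3, MechId::MAIN, 2));
    assert_eq!(do_set_mechanism_weights(3, MechId(2), &[0]), (3, MechId(2), 1));
}
```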
+    ///
+    pub fn do_set_mechanism_weights(
+        origin: T::RuntimeOrigin,
+        netuid: NetUid,
+        mecid: MechId,
+        uids: Vec<u16>,
+        values: Vec<u16>,
+        version_key: u64,
+    ) -> dispatch::DispatchResult {
+        Self::internal_set_weights(origin, netuid, mecid, uids, values, version_key)
+    }
+
     /// ---- The implementation for the extrinsic batch_set_weights.
     ///
     /// This call runs a batch of set weights calls, continuing on errors.
@@ -888,17 +1103,25 @@ impl<T: Config> Pallet<T> {
     /// Checks if the neuron has set weights within the weights_set_rate_limit.
     ///
-    pub fn check_rate_limit(netuid: NetUid, neuron_uid: u16, current_block: u64) -> bool {
-        if Self::is_uid_exist_on_network(netuid, neuron_uid) {
-            // --- 1. Ensure that the diff between current and last_set weights is greater than limit.
-            let last_set_weights: u64 = Self::get_last_update_for_uid(netuid, neuron_uid);
-            if last_set_weights == 0 {
-                return true;
-            } // (Storage default) Never set weights.
-            return current_block.saturating_sub(last_set_weights)
-                >= Self::get_weights_set_rate_limit(netuid);
+    pub fn check_rate_limit(
+        netuid_index: NetUidStorageIndex,
+        neuron_uid: u16,
+        current_block: u64,
+    ) -> bool {
+        let maybe_netuid_and_subid = Self::get_netuid_and_subid(netuid_index);
+        if let Ok((netuid, _)) = maybe_netuid_and_subid {
+            if Self::is_uid_exist_on_network(netuid, neuron_uid) {
+                // --- 1. Ensure that the diff between current and last_set weights is greater than limit.
+                let last_set_weights: u64 = Self::get_last_update_for_uid(netuid_index, neuron_uid);
+                if last_set_weights == 0 {
+                    return true;
+                } // (Storage default) Never set weights.
+                return current_block.saturating_sub(last_set_weights)
+                    >= Self::get_weights_set_rate_limit(netuid);
+            }
         }
-        // --- 3. Non registered peers cant pass.
+
+        // --- 3. Non-registered peers can't pass. Neither can a non-existing mecid.
         false
     }
@@ -1094,13 +1317,13 @@ impl<T: Config> Pallet<T> {
     pub fn get_commit_hash(
         who: &T::AccountId,
-        netuid: NetUid,
+        netuid_index: NetUidStorageIndex,
         uids: &[u16],
         values: &[u16],
         salt: &[u16],
         version_key: u64,
     ) -> H256 {
-        BlakeTwo256::hash_of(&(who.clone(), netuid, uids, values, salt, version_key))
+        BlakeTwo256::hash_of(&(who.clone(), netuid_index, uids, values, salt, version_key))
     }

     pub fn find_commit_block_via_hash(hash: H256) -> Option<u64> {
diff --git a/pallets/subtensor/src/swap/swap_coldkey.rs b/pallets/subtensor/src/swap/swap_coldkey.rs
index f7f9997183..8180650bf8 100644
--- a/pallets/subtensor/src/swap/swap_coldkey.rs
+++ b/pallets/subtensor/src/swap/swap_coldkey.rs
@@ -62,10 +62,10 @@ impl<T: Config> Pallet<T> {
             Error::<T>::NotEnoughBalanceToPaySwapColdKey
         );

-        // 7. Remove and burn the swap cost from the old coldkey's account
+        // 7. Remove and recycle the swap cost from the old coldkey's account
         let actual_burn_amount =
             Self::remove_balance_from_coldkey_account(old_coldkey, swap_cost.into())?;
-        Self::burn_tokens(actual_burn_amount);
+        Self::recycle_tao(actual_burn_amount);

         // 8. Update the weight for the balance operations
         weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1));
diff --git a/pallets/subtensor/src/swap/swap_hotkey.rs b/pallets/subtensor/src/swap/swap_hotkey.rs
index 0454b1dd16..82a16bc800 100644
--- a/pallets/subtensor/src/swap/swap_hotkey.rs
+++ b/pallets/subtensor/src/swap/swap_hotkey.rs
@@ -2,7 +2,7 @@ use super::*;
 use frame_support::weights::Weight;
 use sp_core::Get;
 use substrate_fixed::types::U64F64;
-use subtensor_runtime_common::{Currency, NetUid};
+use subtensor_runtime_common::{Currency, MechId, NetUid};

 impl<T: Config> Pallet<T> {
     /// Swaps the hotkey of a coldkey account.
@@ -97,11 +97,11 @@ impl<T: Config> Pallet<T> {
         weight.saturating_accrue(T::DbWeight::get().reads_writes(3, 0));

         // 14. Remove the swap cost from the coldkey's account
-        let actual_burn_amount =
+        let actual_recycle_amount =
             Self::remove_balance_from_coldkey_account(&coldkey, swap_cost.into())?;
-        // 18. Burn the tokens
-        Self::burn_tokens(actual_burn_amount);
+        // 18. Recycle the tokens
+        Self::recycle_tao(actual_recycle_amount);

         weight.saturating_accrue(T::DbWeight::get().reads_writes(0, 2));

         // 19. Perform the hotkey swap
@@ -296,11 +296,11 @@ impl<T: Config> Pallet<T> {
         );

         // 5. Remove the swap cost from the coldkey's account
-        let actual_burn_amount = Self::remove_balance_from_coldkey_account(coldkey, swap_cost)?;
+        let actual_recycle_amount = Self::remove_balance_from_coldkey_account(coldkey, swap_cost)?;
         weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 0));

-        // 6. Burn the tokens
-        Self::burn_tokens(actual_burn_amount);
+        // 6. Recycle the tokens
+        Self::recycle_tao(actual_recycle_amount);
         weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1));

         // 7. Swap owner.
@@ -411,10 +415,15 @@ impl<T: Config> Pallet<T> {
         // 3.5 Swap WeightCommits
         // WeightCommits( hotkey ) --> Vec -- the weight commits for the hotkey.
         if is_network_member {
-            if let Ok(old_weight_commits) = WeightCommits::<T>::try_get(netuid, old_hotkey) {
-                WeightCommits::<T>::remove(netuid, old_hotkey);
-                WeightCommits::<T>::insert(netuid, new_hotkey, old_weight_commits);
-                weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2));
+            for mecid in 0..MechanismCountCurrent::<T>::get(netuid).into() {
+                let netuid_index = Self::get_mechanism_storage_index(netuid, MechId::from(mecid));
+                if let Ok(old_weight_commits) =
+                    WeightCommits::<T>::try_get(netuid_index, old_hotkey)
+                {
+                    WeightCommits::<T>::remove(netuid_index, old_hotkey);
+                    WeightCommits::<T>::insert(netuid_index, new_hotkey, old_weight_commits);
+                    weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2));
+                }
             }
         }
diff --git a/pallets/subtensor/src/tests/children.rs b/pallets/subtensor/src/tests/children.rs
index a0505fa9f3..0fee0af2ca 100644
--- a/pallets/subtensor/src/tests/children.rs
+++ b/pallets/subtensor/src/tests/children.rs
@@ -6,7 +6,7 @@ use super::mock::*;
 use approx::assert_abs_diff_eq;
 use frame_support::{assert_err, assert_noop, assert_ok};
 use substrate_fixed::types::{I64F64, I96F32, U96F32};
-use subtensor_runtime_common::{AlphaCurrency, TaoCurrency};
+use subtensor_runtime_common::{AlphaCurrency, NetUidStorageIndex, TaoCurrency};
 use subtensor_swap_interface::SwapHandler;

 use crate::{utils::rate_limiting::TransactionType, *};
@@ -62,7 +62,7 @@ fn test_do_set_child_singular_network_does_not_exist() {
                 netuid,
                 vec![(proportion, child)]
             ),
-            Error::<Test>::SubNetworkDoesNotExist
+            Error::<Test>::SubnetNotExists
         );
     });
 }
@@ -328,7 +328,7 @@ fn test_add_singular_child() {
                 netuid,
                 vec![(u64::MAX, child)]
             ),
-            Err(Error::<Test>::SubNetworkDoesNotExist.into())
+            Err(Error::<Test>::SubnetNotExists.into())
         );
         add_network(netuid, 1, 0);
         step_rate_limit(&TransactionType::SetChildren, netuid);
@@ -472,7 +472,7 @@ fn test_do_set_empty_children_network_does_not_exist() {
                 netuid,
                 vec![]
             ),
-            Error::<Test>::SubNetworkDoesNotExist
+            Error::<Test>::SubnetNotExists
         );
     });
 }
@@ -601,7 +601,7 @@ fn test_do_schedule_children_multiple_network_does_not_exist() {
                 netuid,
                 vec![(proportion, child1)]
             ),
-            Error::<Test>::SubNetworkDoesNotExist
+            Error::<Test>::SubnetNotExists
         );
     });
 }
@@ -955,17 +955,15 @@ fn test_childkey_take_rate_limiting() {
     // Helper function to log rate limit information
     let log_rate_limit_info = || {
         let current_block = SubtensorModule::get_current_block_as_u64();
-        let last_block = SubtensorModule::get_last_transaction_block_on_subnet(
+        let last_block = TransactionType::SetChildkeyTake.last_block_on_subnet::<Test>(
             &hotkey,
             netuid,
-            &TransactionType::SetChildkeyTake,
         );
-        let passes = SubtensorModule::passes_rate_limit_on_subnet(
-            &TransactionType::SetChildkeyTake,
+        let passes = TransactionType::SetChildkeyTake.passes_rate_limit_on_subnet::<Test>(
             &hotkey,
             netuid,
         );
-        let limit = SubtensorModule::get_rate_limit_on_subnet(&TransactionType::SetChildkeyTake, netuid);
+        let limit = TransactionType::SetChildkeyTake.rate_limit_on_subnet::<Test>(netuid);
         log::info!(
             "Rate limit info: current_block: {}, last_block: {}, limit: {}, passes: {}, diff: {}",
             current_block,
@@ -1202,7 +1200,7 @@ fn test_do_revoke_children_multiple_network_does_not_exist() {
                 netuid,
                 vec![(u64::MAX / 2, child1), (u64::MAX / 2, child2)]
             ),
-            Error::<Test>::SubNetworkDoesNotExist
+            Error::<Test>::SubnetNotExists
         );
     });
 }
@@ -2489,12 +2487,7 @@ fn test_revoke_child_no_min_stake_check() {
         assert_eq!(children_after, vec![(proportion, child)]);

         // Bypass tx rate limit
-        SubtensorModule::set_last_transaction_block_on_subnet(
-            &parent,
-            netuid,
-            &TransactionType::SetChildren,
-            0,
-        );
+        TransactionType::SetChildren.set_last_block_on_subnet::<Test>(&parent, netuid, 0);

         // Schedule parent-child relationship revokation
         assert_ok!(SubtensorModule::do_schedule_children(
@@ -2609,18 +2602,13 @@ fn test_set_children_rate_limit_fail_then_succeed() {
         // Try again after rate limit period has passed
         // Check rate limit
-        let limit =
-            SubtensorModule::get_rate_limit_on_subnet(&TransactionType::SetChildren, netuid);
+        let limit = TransactionType::SetChildren.rate_limit_on_subnet::<Test>(netuid);

         // Step that many blocks
         step_block(limit as u16);

         // Verify rate limit passes
-        assert!(SubtensorModule::passes_rate_limit_on_subnet(
-            &TransactionType::SetChildren,
-            &hotkey,
-            netuid
-        ));
+        assert!(TransactionType::SetChildren.passes_rate_limit_on_subnet::<Test>(&hotkey, netuid));

         // Try again
         mock_set_children(&coldkey, &hotkey, netuid, &[(100, child2)]);
@@ -2841,6 +2829,7 @@ fn test_set_weights_no_parent() {

 /// Test that drain_pending_emission sends childkey take fully to the nominators if childkey
 /// doesn't have its own stake, independently of parent hotkey take.
+/// cargo test --package pallet-subtensor --lib -- tests::children::test_childkey_take_drain --exact --show-output #[allow(clippy::assertions_on_constants)] #[test] fn test_childkey_take_drain() { @@ -2917,12 +2906,12 @@ fn test_childkey_take_drain() { )); // Setup YUMA so that it creates emissions - Weights::::insert(netuid, 0, vec![(2, 0xFFFF)]); - Weights::::insert(netuid, 1, vec![(2, 0xFFFF)]); + Weights::::insert(NetUidStorageIndex::from(netuid), 0, vec![(2, 0xFFFF)]); + Weights::::insert(NetUidStorageIndex::from(netuid), 1, vec![(2, 0xFFFF)]); BlockAtRegistration::::set(netuid, 0, 1); BlockAtRegistration::::set(netuid, 1, 1); BlockAtRegistration::::set(netuid, 2, 1); - LastUpdate::::set(netuid, vec![2, 2, 2]); + LastUpdate::::set(NetUidStorageIndex::from(netuid), vec![2, 2, 2]); Kappa::::set(netuid, u16::MAX / 5); ActivityCutoff::::set(netuid, u16::MAX); // makes all stake active ValidatorPermit::::insert(netuid, vec![true, true, false]); diff --git a/pallets/subtensor/src/tests/coinbase.rs b/pallets/subtensor/src/tests/coinbase.rs index 30cef8556f..a4ea3988b9 100644 --- a/pallets/subtensor/src/tests/coinbase.rs +++ b/pallets/subtensor/src/tests/coinbase.rs @@ -9,7 +9,7 @@ use frame_support::assert_ok; use pallet_subtensor_swap::position::PositionId; use sp_core::U256; use substrate_fixed::types::{I64F64, I96F32, U96F32}; -use subtensor_runtime_common::AlphaCurrency; +use subtensor_runtime_common::{AlphaCurrency, NetUidStorageIndex}; use subtensor_swap_interface::SwapHandler; #[allow(clippy::arithmetic_side_effects)] @@ -1857,62 +1857,6 @@ fn test_incentive_to_subnet_owners_hotkey_is_burned() { }); } -// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::coinbase::test_incentive_to_subnet_owners_hotkey_is_burned_with_limit --exact --show-output --nocapture -#[test] -fn test_incentive_to_subnet_owners_hotkey_is_burned_with_limit() { - new_test_ext(1).execute_with(|| { - let subnet_owner_ck = U256::from(0); - let subnet_owner_hk = U256::from(1); - - // Other hk owned by owner - let other_hk = U256::from(3); - Owner::::insert(other_hk, subnet_owner_ck); - OwnedHotkeys::::insert(subnet_owner_ck, vec![subnet_owner_hk, other_hk]); - - let netuid = add_dynamic_network(&subnet_owner_hk, &subnet_owner_ck); - Uids::::insert(netuid, other_hk, 1); - - // Set the burn key limit to 1 - testing the limits - ImmuneOwnerUidsLimit::::insert(netuid, 1); - - let pending_tao: u64 = 1_000_000_000; - let pending_alpha = AlphaCurrency::ZERO; // None to valis - let owner_cut = AlphaCurrency::ZERO; - let mut incentives: BTreeMap = BTreeMap::new(); - - // Give incentive to other_hk - incentives.insert(other_hk, 10_000_000.into()); - - // Give incentives to subnet_owner_hk - incentives.insert(subnet_owner_hk, 10_000_000.into()); - - // Verify stake before - let subnet_owner_stake_before = - SubtensorModule::get_stake_for_hotkey_on_subnet(&subnet_owner_hk, netuid); - assert_eq!(subnet_owner_stake_before, 0.into()); - let other_stake_before = SubtensorModule::get_stake_for_hotkey_on_subnet(&other_hk, netuid); - assert_eq!(other_stake_before, 0.into()); - - // Distribute dividends and incentives - SubtensorModule::distribute_dividends_and_incentives( - netuid, - owner_cut, - incentives, - BTreeMap::new(), - BTreeMap::new(), - ); - - // Verify stake after - let subnet_owner_stake_after = - SubtensorModule::get_stake_for_hotkey_on_subnet(&subnet_owner_hk, netuid); - assert_eq!(subnet_owner_stake_after, 0.into()); - let other_stake_after = 
            SubtensorModule::get_stake_for_hotkey_on_subnet(&other_hk, netuid);
-
-        // Testing the limit - should be not burned
-        assert!(other_stake_after > 0.into());
-    });
-}
-
 // Test that if number of sn owner hotkeys is greater than ImmuneOwnerUidsLimit, then the ones with
 // higher BlockAtRegistration are used to burn
 // SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::coinbase::test_burn_key_sorting --exact --show-output --nocapture
@@ -1949,9 +1893,6 @@ fn test_burn_key_sorting() {
     Uids::<Test>::insert(netuid, other_hk_2, 3);
     Uids::<Test>::insert(netuid, other_hk_3, 2);

-    // Set the burn key limit to 3 because we also have sn owner
-    ImmuneOwnerUidsLimit::<Test>::insert(netuid, 3);
-
     let pending_tao: u64 = 1_000_000_000;
     let pending_alpha = AlphaCurrency::ZERO; // None to valis
     let owner_cut = AlphaCurrency::ZERO;
@@ -1979,7 +1920,7 @@ fn test_burn_key_sorting() {
         SubtensorModule::get_stake_for_hotkey_on_subnet(&subnet_owner_hk, netuid);
     assert_eq!(subnet_owner_stake_after, 0.into());

-    // Testing the limits - HK1 and HK3 should be burned, HK2 should be not burned
+    // No burn limits, all HKs should be burned
     let other_stake_after_1 =
         SubtensorModule::get_stake_for_hotkey_on_subnet(&other_hk_1, netuid);
     let other_stake_after_2 =
         SubtensorModule::get_stake_for_hotkey_on_subnet(&other_hk_2, netuid);
     let other_stake_after_3 =
         SubtensorModule::get_stake_for_hotkey_on_subnet(&other_hk_3, netuid);
     assert_eq!(other_stake_after_1, 0.into());
-    assert!(other_stake_after_2 > 0.into());
+    assert_eq!(other_stake_after_2, 0.into());
     assert_eq!(other_stake_after_3, 0.into());
 });
 }
@@ -2445,6 +2386,7 @@ fn test_drain_pending_emission_no_miners_all_drained() {
     });
 }

+// cargo test --package pallet-subtensor --lib -- tests::coinbase::test_drain_pending_emission_zero_emission --exact --show-output
 #[test]
 fn test_drain_pending_emission_zero_emission() {
     new_test_ext(1).execute_with(|| {
@@ -2493,7 +2435,7 @@ fn test_drain_pending_emission_zero_emission() {
         run_to_block_no_epoch(netuid, 50);

         // Clear incentive and dividends.
-        Incentive::<Test>::remove(netuid);
+        Incentive::<Test>::remove(NetUidStorageIndex::from(netuid));
         Dividends::<Test>::remove(netuid);

         // Set the emission to be ZERO.
@@ -2511,7 +2453,12 @@ fn test_drain_pending_emission_zero_emission() {
         assert_eq!(new_stake, init_stake.into());

         // Check that the incentive and dividends are set by epoch.
-        assert!(Incentive::<Test>::get(netuid).iter().sum::<u16>() > 0);
+        assert!(
+            Incentive::<Test>::get(NetUidStorageIndex::from(netuid))
+                .iter()
+                .sum::<u16>()
+                > 0
+        );
         assert!(Dividends::<Test>::get(netuid).iter().sum::<u16>() > 0);
     });
 }
@@ -2578,7 +2525,7 @@ fn test_run_coinbase_not_started() {
         ));

         // Clear incentive and dividends.
-        Incentive::<Test>::remove(netuid);
+        Incentive::<Test>::remove(NetUidStorageIndex::from(netuid));
         Dividends::<Test>::remove(netuid);

         // Step so tempo should run.
@@ -2600,7 +2547,12 @@ fn test_run_coinbase_not_started() {
         assert_eq!(new_stake, init_stake.into());

         // Check that the incentive and dividends are set.
-        assert!(Incentive::<Test>::get(netuid).iter().sum::<u16>() > 0);
+        assert!(
+            Incentive::<Test>::get(NetUidStorageIndex::from(netuid))
+                .iter()
+                .sum::<u16>()
+                > 0
+        );
         assert!(Dividends::<Test>::get(netuid).iter().sum::<u16>() > 0);
     });
 }
@@ -2664,7 +2616,7 @@ fn test_run_coinbase_not_started_start_after() {
         ));

         // Clear incentive and dividends.
-        Incentive::<Test>::remove(netuid);
+        Incentive::<Test>::remove(NetUidStorageIndex::from(netuid));
         Dividends::<Test>::remove(netuid);

         // Step so tempo should run.
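The NetUidStorageIndex::from(netuid) conversions that recur throughout these test diffs work because mechanism 0 of a subnet aliases the bare netuid in the flattened storage key space, so pre-existing single-mechanism tests keep addressing the same Weights/Incentive/LastUpdate rows. Below is a minimal standalone sketch of that index math, inferred from the assertions in tests/mechanism.rs further down in this patch; the free functions and the concrete 4096 subnet cap are illustrative assumptions (the tests only pin the product of the two caps to 0x10000), and the pallet's actual helpers are SubtensorModule::get_mechanism_storage_index and get_netuid_and_subid.

// Sketch only: standalone stand-ins for the pallet's index helpers.
// Assumed concrete values; tests/mechanism.rs only requires that the
// two caps multiply to 0x10000, tiling the u16 key space exactly once.
const GLOBAL_MAX_SUBNET_COUNT: u16 = 4096;
const MAX_MECHANISM_COUNT_PER_SUBNET: u8 = 16;

// Flatten (netuid, mecid) into the single u16 key used by Weights,
// Incentive, LastUpdate, Bonds and WeightCommits after this change.
fn storage_index(netuid: u16, mecid: u8) -> u16 {
    // mecid 0 maps back to the bare netuid, which is why
    // NetUidStorageIndex::from(netuid) works as a drop-in key above.
    (mecid as u32)
        .saturating_mul(GLOBAL_MAX_SUBNET_COUNT as u32)
        .saturating_add(netuid as u32) as u16
}

// Inverse mapping, used e.g. by check_rate_limit to recover the base
// netuid for registration checks.
fn netuid_and_mecid(index: u16) -> (u16, u8) {
    (
        index % GLOBAL_MAX_SUBNET_COUNT,
        (index / GLOBAL_MAX_SUBNET_COUNT) as u8,
    )
}

fn main() {
    assert_eq!(storage_index(42, 0), 42); // mechanism 0 == plain netuid
    assert_eq!(netuid_and_mecid(storage_index(42, 3)), (42, 3));
    assert_eq!(
        GLOBAL_MAX_SUBNET_COUNT as u32 * MAX_MECHANISM_COUNT_PER_SUBNET as u32,
        0x10000
    );
}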
diff --git a/pallets/subtensor/src/tests/consensus.rs b/pallets/subtensor/src/tests/consensus.rs index 6a7aa7d467..7eb65c3fc0 100644 --- a/pallets/subtensor/src/tests/consensus.rs +++ b/pallets/subtensor/src/tests/consensus.rs @@ -13,6 +13,7 @@ use sp_core::U256; use std::time::Instant; use substrate_fixed::transcendental::{PI, cos, ln, sqrt}; use substrate_fixed::types::{I32F32, I64F64}; +use subtensor_runtime_common::NetUidStorageIndex; pub fn fixed(val: f32) -> I32F32 { I32F32::from_num(val) @@ -134,7 +135,7 @@ fn uid_stats(netuid: NetUid, uid: u16) { ); log::info!( "incentive: {:?}", - SubtensorModule::get_incentive_for_uid(netuid, uid) + SubtensorModule::get_incentive_for_uid(NetUidStorageIndex::from(netuid), uid) ); log::info!( "dividend: {:?}", diff --git a/pallets/subtensor/src/tests/ensure.rs b/pallets/subtensor/src/tests/ensure.rs new file mode 100644 index 0000000000..298339defa --- /dev/null +++ b/pallets/subtensor/src/tests/ensure.rs @@ -0,0 +1,159 @@ +use frame_support::{assert_noop, assert_ok}; +use frame_system::Config; +use sp_core::U256; +use subtensor_runtime_common::NetUid; + +use super::mock::*; +use crate::utils::rate_limiting::{Hyperparameter, TransactionType}; +use crate::{OwnerHyperparamRateLimit, SubnetOwner, SubtokenEnabled}; + +#[test] +fn ensure_subnet_owner_returns_who_and_checks_ownership() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1); + add_network(netuid, 10, 0); + + let owner: U256 = U256::from(42); + SubnetOwner::::insert(netuid, owner); + + // Non-owner signed should fail + assert!( + crate::Pallet::::ensure_subnet_owner( + <::RuntimeOrigin>::signed(U256::from(7)), + netuid + ) + .is_err() + ); + + // Owner signed returns who + let who = crate::Pallet::::ensure_subnet_owner( + <::RuntimeOrigin>::signed(owner), + netuid, + ) + .expect("owner must pass"); + assert_eq!(who, owner); + }); +} + +#[test] +fn ensure_subnet_owner_or_root_distinguishes_root_and_owner() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(2); + add_network(netuid, 10, 0); + let owner: U256 = U256::from(9); + SubnetOwner::::insert(netuid, owner); + + // Root path returns None + let root = crate::Pallet::::ensure_subnet_owner_or_root( + <::RuntimeOrigin>::root(), + netuid, + ) + .expect("root allowed"); + assert!(root.is_none()); + + // Owner path returns Some(owner) + let maybe_owner = crate::Pallet::::ensure_subnet_owner_or_root( + <::RuntimeOrigin>::signed(owner), + netuid, + ) + .expect("owner allowed"); + assert_eq!(maybe_owner, Some(owner)); + }); +} + +#[test] +fn ensure_root_with_rate_limit_blocks_in_freeze_window() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1); + let tempo = 10; + add_network(netuid, 10, 0); + + // Set freeze window to 3 + let freeze_window = 3; + crate::Pallet::::set_admin_freeze_window(freeze_window); + + run_to_block((tempo - freeze_window + 1).into()); + + // Root is blocked in freeze window + assert!( + crate::Pallet::::ensure_root_with_rate_limit( + <::RuntimeOrigin>::root(), + netuid + ) + .is_err() + ); + }); +} + +#[test] +fn ensure_owner_or_root_with_limits_checks_rl_and_freeze() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1); + let tempo = 10; + add_network(netuid, 10, 0); + SubtokenEnabled::::insert(netuid, true); + let owner: U256 = U256::from(5); + SubnetOwner::::insert(netuid, owner); + // Set freeze window to 0 initially to avoid blocking when tempo is small + crate::Pallet::::set_admin_freeze_window(0); + + // Set tempo to 1 so owner hyperparam RL = 2 blocks + 
crate::Pallet::::set_tempo(netuid, 1); + + assert_eq!(OwnerHyperparamRateLimit::::get(), 2); + + // Outside freeze window initially; should pass and return Some(owner) + let res = crate::Pallet::::ensure_sn_owner_or_root_with_limits( + <::RuntimeOrigin>::signed(owner), + netuid, + &[Hyperparameter::Kappa.into()], + ) + .expect("should pass"); + assert_eq!(res, Some(owner)); + + // Simulate previous update at current block -> next call should fail due to rate limit + let now = crate::Pallet::::get_current_block_as_u64(); + TransactionType::from(Hyperparameter::Kappa) + .set_last_block_on_subnet::(&owner, netuid, now); + assert_noop!( + crate::Pallet::::ensure_sn_owner_or_root_with_limits( + <::RuntimeOrigin>::signed(owner), + netuid, + &[Hyperparameter::Kappa.into()], + ), + crate::Error::::TxRateLimitExceeded + ); + + // Advance beyond RL and ensure passes again + run_to_block(now + 3); + assert_ok!(crate::Pallet::::ensure_sn_owner_or_root_with_limits( + <::RuntimeOrigin>::signed(owner), + netuid, + &[Hyperparameter::Kappa.into()] + )); + + // Now advance into the freeze window; ensure blocks + // (using loop for clarity, because epoch calculation function uses netuid) + // Restore tempo and configure freeze window for this part + let freeze_window = 3; + crate::Pallet::::set_tempo(netuid, tempo); + crate::Pallet::::set_admin_freeze_window(freeze_window); + let freeze_window = freeze_window as u64; + loop { + let cur = crate::Pallet::::get_current_block_as_u64(); + let rem = crate::Pallet::::blocks_until_next_epoch(netuid, tempo, cur); + if rem < freeze_window { + break; + } + run_to_block(cur + 1); + } + assert_noop!( + crate::Pallet::::ensure_sn_owner_or_root_with_limits( + <::RuntimeOrigin>::signed(owner), + netuid, + &[Hyperparameter::Kappa.into()], + ), + crate::Error::::AdminActionProhibitedDuringWeightsWindow + ); + }); +} diff --git a/pallets/subtensor/src/tests/epoch.rs b/pallets/subtensor/src/tests/epoch.rs index 25b4c48781..7c23dc2b2c 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -11,7 +11,7 @@ use frame_support::{assert_err, assert_ok}; use rand::{Rng, SeedableRng, distributions::Uniform, rngs::StdRng, seq::SliceRandom, thread_rng}; use sp_core::{Get, U256}; use substrate_fixed::types::I32F32; -use subtensor_runtime_common::{AlphaCurrency, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, NetUidStorageIndex, TaoCurrency}; use subtensor_swap_interface::SwapHandler; use super::mock::*; @@ -128,7 +128,7 @@ fn uid_stats(netuid: NetUid, uid: u16) { ); log::info!( "incentive: {:?}", - SubtensorModule::get_incentive_for_uid(netuid, uid) + SubtensorModule::get_incentive_for_uid(NetUidStorageIndex::from(netuid), uid) ); log::info!( "dividend: {:?}", @@ -595,7 +595,10 @@ fn test_1_graph() { assert_eq!(SubtensorModule::get_rank_for_uid(netuid, uid), 0); assert_eq!(SubtensorModule::get_trust_for_uid(netuid, uid), 0); assert_eq!(SubtensorModule::get_consensus_for_uid(netuid, uid), 0); - assert_eq!(SubtensorModule::get_incentive_for_uid(netuid, uid), 0); + assert_eq!( + SubtensorModule::get_incentive_for_uid(netuid.into(), uid), + 0 + ); assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, uid), 0); }); } @@ -657,7 +660,10 @@ fn test_10_graph() { assert_eq!(SubtensorModule::get_rank_for_uid(netuid, i as u16), 0); assert_eq!(SubtensorModule::get_trust_for_uid(netuid, i as u16), 0); assert_eq!(SubtensorModule::get_consensus_for_uid(netuid, i as u16), 0); - assert_eq!(SubtensorModule::get_incentive_for_uid(netuid, i as u16), 0); 
+ assert_eq!( + SubtensorModule::get_incentive_for_uid(netuid.into(), i as u16), + 0 + ); assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, i as u16), 0); assert_eq!( SubtensorModule::get_emission_for_uid(netuid, i as u16), @@ -705,7 +711,7 @@ fn test_512_graph() { false, u16::MAX, ); - let bonds = SubtensorModule::get_bonds(netuid); + let bonds = SubtensorModule::get_bonds(netuid.into()); for uid in validators { assert_eq!( SubtensorModule::get_total_stake_for_hotkey(&(U256::from(uid))), @@ -714,7 +720,10 @@ fn test_512_graph() { assert_eq!(SubtensorModule::get_rank_for_uid(netuid, uid), 0); assert_eq!(SubtensorModule::get_trust_for_uid(netuid, uid), 0); assert_eq!(SubtensorModule::get_consensus_for_uid(netuid, uid), 0); - assert_eq!(SubtensorModule::get_incentive_for_uid(netuid, uid), 0); + assert_eq!( + SubtensorModule::get_incentive_for_uid(netuid.into(), uid), + 0 + ); assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, uid), 1023); // Note D = floor(1 / 64 * 65_535) = 1023 assert_eq!( SubtensorModule::get_emission_for_uid(netuid, uid), @@ -732,7 +741,10 @@ fn test_512_graph() { assert_eq!(SubtensorModule::get_rank_for_uid(netuid, uid), 146); // Note R = floor(1 / (512 - 64) * 65_535) = 146 assert_eq!(SubtensorModule::get_trust_for_uid(netuid, uid), 65535); assert_eq!(SubtensorModule::get_consensus_for_uid(netuid, uid), 146); // Note C = floor(1 / (512 - 64) * 65_535) = 146 - assert_eq!(SubtensorModule::get_incentive_for_uid(netuid, uid), 146); // Note I = floor(1 / (512 - 64) * 65_535) = 146 + assert_eq!( + SubtensorModule::get_incentive_for_uid(netuid.into(), uid), + 146 + ); // Note I = floor(1 / (512 - 64) * 65_535) = 146 assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, uid), 0); assert_eq!( SubtensorModule::get_emission_for_uid(netuid, uid), @@ -795,10 +807,10 @@ fn test_512_graph_random_weights() { bonds_penalty, ); - let bond = SubtensorModule::get_bonds(netuid); + let bond = SubtensorModule::get_bonds(netuid.into()); for uid in 0..network_n { rank.push(SubtensorModule::get_rank_for_uid(netuid, uid)); - incentive.push(SubtensorModule::get_incentive_for_uid(netuid, uid)); + incentive.push(SubtensorModule::get_incentive_for_uid(netuid.into(), uid)); dividend.push(SubtensorModule::get_dividends_for_uid(netuid, uid)); emission.push(SubtensorModule::get_emission_for_uid(netuid, uid)); bondv.push(bond[uid as usize][validator]); @@ -826,14 +838,14 @@ fn test_512_graph_random_weights() { bonds_penalty, ); // Assert that dense and sparse epoch results are equal - let bond = SubtensorModule::get_bonds(netuid); + let bond = SubtensorModule::get_bonds(netuid.into()); for uid in 0..network_n { assert_eq!( SubtensorModule::get_rank_for_uid(netuid, uid), rank[uid as usize] ); assert_eq!( - SubtensorModule::get_incentive_for_uid(netuid, uid), + SubtensorModule::get_incentive_for_uid(netuid.into(), uid), incentive[uid as usize] ); assert_eq!( @@ -1070,7 +1082,7 @@ fn test_bonds() { E: [49999998, 99999999, 150000000, 200000001, 49998779, 100000610, 149996337, 200004272] P: [0.0499999989, 0.0999999992, 0.1500000006, 0.2000000011, 0.049998779, 0.1000006103, 0.1499963375, 0.2000042726] emaB: [[(4, 0.2499999937), (5, 0.2499999953), (6, 0.2499999937), (7, 0.2499999937)], [(4, 0.4999999942), (5, 0.499999997), (6, 0.4999999942), (7, 0.4999999942)], [(4, 0.7499999937), (5, 0.7499999981), (6, 0.7499999995), (7, 0.7499999995)], [(4, 1), (5, 1), (6, 1), (7, 1)], [], [], [], []] */ - let bonds = SubtensorModule::get_bonds( netuid ); + let bonds = SubtensorModule::get_bonds( 
netuid.into() ); assert_eq!(bonds[0][4], 16383); assert_eq!(bonds[1][4], 32767); assert_eq!(bonds[2][4], 49151); @@ -1118,7 +1130,7 @@ fn test_bonds() { E: [44998351, 101110561, 151667215, 202223870, 49998779, 100000610, 149996337, 200004272] P: [0.0449983515, 0.1011105615, 0.1516672159, 0.2022238704, 0.049998779, 0.1000006103, 0.1499963377, 0.2000042726] emaB: [[(4, 0.2225175085), (5, 0.2225175085), (6, 0.2225175085), (7, 0.2225175085)], [(4, 0.499993208), (5, 0.4999932083), (6, 0.4999932083), (7, 0.4999932083)], [(4, 0.7499966028), (5, 0.7499966032), (6, 0.7499966032), (7, 0.7499966032)], [(4, 1), (5, 1), (6, 1), (7, 1)], [], [], [], []] */ - let bonds = SubtensorModule::get_bonds( netuid ); + let bonds = SubtensorModule::get_bonds( netuid.into() ); assert_eq!(bonds[0][4], 14582); assert_eq!(bonds[1][4], 32767); assert_eq!(bonds[2][4], 49151); @@ -1155,7 +1167,7 @@ fn test_bonds() { E: [40496805, 90999783, 157929636, 210573773, 49998779, 100000610, 149996337, 200004272] P: [0.040496806, 0.0909997837, 0.157929636, 0.2105737738, 0.049998779, 0.1000006103, 0.1499963377, 0.2000042726] emaB: [[(4, 0.192316476), (5, 0.192316476), (6, 0.192316476), (7, 0.192316476)], [(4, 0.4321515555), (5, 0.4321515558), (6, 0.4321515558), (7, 0.4321515558)], [(4, 0.7499967015), (5, 0.7499967027), (6, 0.7499967027), (7, 0.7499967027)], [(4, 1), (5, 1), (6, 1), (7, 1)], [], [], [], []] */ - let bonds = SubtensorModule::get_bonds( netuid ); + let bonds = SubtensorModule::get_bonds( netuid.into() ); assert_eq!(bonds[0][4], 12603); assert_eq!(bonds[1][4], 28321); assert_eq!(bonds[2][4], 49151); @@ -1192,7 +1204,7 @@ fn test_bonds() { E: [99999999, 199999999, 299999999, 399999999, 0, 0, 0, 0] P: [0.0999999999, 0.2, 0.2999999998, 0.4, 0, 0, 0, 0] emaB: [[(4, 0.1923094518), (5, 0.1923094518), (6, 0.1923094518), (7, 0.1923094518)], [(4, 0.4321507583), (5, 0.4321507583), (6, 0.4321507583), (7, 0.4321507583)], [(4, 0.7499961846), (5, 0.7499961846), (6, 0.7499961846), (7, 0.7499961846)], [(4, 1), (5, 1), (6, 1), (7, 1)], [], [], [], []] */ - let bonds = SubtensorModule::get_bonds( netuid ); + let bonds = SubtensorModule::get_bonds( netuid.into() ); assert_eq!(bonds[0][7], 12602); assert_eq!(bonds[1][7], 28320); assert_eq!(bonds[2][7], 49150); @@ -1228,7 +1240,7 @@ fn test_bonds() { E: [36443733, 81898628, 163565493, 218092144, 0, 0, 0, 500000000] P: [0.0364437331, 0.081898629, 0.1635654932, 0.2180921442, 0, 0, 0, 0.5] emaB: [[(4, 0.1922941932), (5, 0.1922941932), (6, 0.1922941932), (7, 0.1671024568)], [(4, 0.4321354993), (5, 0.4321354993), (6, 0.4321354993), (7, 0.3755230587)], [(4, 0.7499809256), (5, 0.7499809256), (6, 0.7499809256), (7, 0.749983425)], [(4, 1), (5, 1), (6, 1), (7, 1)], [], [], [], []] */ - let bonds = SubtensorModule::get_bonds( netuid ); + let bonds = SubtensorModule::get_bonds( netuid.into() ); assert_eq!(bonds[0][7], 10951); assert_eq!(bonds[1][7], 24609); assert_eq!(bonds[2][7], 49150); @@ -1250,7 +1262,7 @@ fn test_bonds() { E: [32799427, 73706612, 168638129, 224855830, 0, 0, 0, 500000000] P: [0.0327994274, 0.0737066122, 0.1686381293, 0.2248558307, 0, 0, 0, 0.5] emaB: [[(4, 0.1922789337), (5, 0.1922789337), (6, 0.1922789337), (7, 0.1458686984)], [(4, 0.4321202405), (5, 0.4321202405), (6, 0.4321202405), (7, 0.3277949789)], [(4, 0.749965667), (5, 0.749965667), (6, 0.749965667), (7, 0.74998335)], [(4, 1), (5, 1), (6, 1), (7, 1)], [], [], [], []] */ - let bonds = SubtensorModule::get_bonds( netuid ); + let bonds = SubtensorModule::get_bonds( netuid.into() ); assert_eq!(bonds[0][7], 9559); 
assert_eq!(bonds[1][7], 21482); assert_eq!(bonds[2][7], 49150); @@ -1272,7 +1284,7 @@ fn test_bonds() { E: [29518068, 66336137, 173203134, 230942659, 0, 0, 0, 500000000] P: [0.029518068, 0.0663361375, 0.1732031347, 0.2309426593, 0, 0, 0, 0.5] emaB: [[(4, 0.192263675), (5, 0.192263675), (6, 0.192263675), (7, 0.1278155716)], [(4, 0.4321049813), (5, 0.4321049813), (6, 0.4321049813), (7, 0.2872407278)], [(4, 0.7499504078), (5, 0.7499504078), (6, 0.7499504078), (7, 0.7499832863)], [(4, 1), (5, 1), (6, 1), (7, 1)], [], [], [], []] */ - let bonds = SubtensorModule::get_bonds( netuid ); + let bonds = SubtensorModule::get_bonds( netuid.into() ); assert_eq!(bonds[0][7], 8376); assert_eq!(bonds[1][7], 18824); assert_eq!(bonds[2][7], 49150); @@ -1408,7 +1420,7 @@ fn test_active_stake() { } else { SubtensorModule::epoch_dense(netuid, 1_000_000_000.into()); } - let bonds = SubtensorModule::get_bonds(netuid); + let bonds = SubtensorModule::get_bonds(netuid.into()); for uid in 0..n { // log::info!("\n{uid}" ); // uid_stats(netuid, uid); @@ -1473,7 +1485,7 @@ fn test_active_stake() { E: [274999999, 224999999, 250000000, 250000000] P: [0.275, 0.2249999999, 0.25, 0.25] P (u16): [65535, 53619, 59577, 59577] */ - let bonds = SubtensorModule::get_bonds(netuid); + let bonds = SubtensorModule::get_bonds(netuid.into()); assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, 0), 36044); // Note D = floor((0.5 * 0.9 + 0.1) * 65_535) assert_eq!( SubtensorModule::get_emission_for_uid(netuid, 0), @@ -1537,7 +1549,7 @@ fn test_active_stake() { E: [272501132, 227498866, 250000000, 250000000] P: [0.272501133, 0.2274988669, 0.25, 0.25] P (u16): [65535, 54711, 60123, 60123] */ - let bonds = SubtensorModule::get_bonds(netuid); + let bonds = SubtensorModule::get_bonds(netuid.into()); assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, 0), 35716); // Note D = floor((0.55 * 0.9 + 0.5 * 0.1) * 65_535) assert_eq!( SubtensorModule::get_emission_for_uid(netuid, 0), @@ -1558,6 +1570,7 @@ fn test_active_stake() { } // Test that epoch masks out outdated weights and bonds of validators on deregistered servers. 
+// #[test] fn test_outdated_weights() { new_test_ext(1).execute_with(|| { @@ -1736,7 +1749,7 @@ fn test_outdated_weights() { E: [250000000, 250000000, 500000000, 0] P: [0.25, 0.25, 0.5, 0] P (u16): [32767, 32767, 65535, 0] */ - let bonds = SubtensorModule::get_bonds(netuid); + let bonds = SubtensorModule::get_bonds(netuid.into()); assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, 0), 32767); // Note D = floor(0.5 * 65_535) assert_eq!( SubtensorModule::get_emission_for_uid(netuid, 0), @@ -2035,7 +2048,7 @@ fn test_deregistered_miner_bonds() { } // Check the bond values for the servers - let bonds = SubtensorModule::get_bonds(netuid); + let bonds = SubtensorModule::get_bonds(netuid.into()); let bond_0_2 = bonds[0][2]; let bond_0_3 = bonds[0][3]; @@ -2107,7 +2120,7 @@ fn test_deregistered_miner_bonds() { } // Check the bond values for the servers - let bonds = SubtensorModule::get_bonds(netuid); + let bonds = SubtensorModule::get_bonds(netuid.into()); let bond_0_2_new = bonds[0][2]; let bond_0_3_new = bonds[0][3]; @@ -2452,6 +2465,7 @@ fn test_blocks_since_last_step() { }); } +/// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::epoch::test_can_set_self_weight_as_subnet_owner --exact --show-output #[test] fn test_can_set_self_weight_as_subnet_owner() { new_test_ext(1).execute_with(|| { @@ -2483,19 +2497,27 @@ fn test_can_set_self_weight_as_subnet_owner() { // Set weight of 50% to each hotkey. // This includes a self-weight let fifty_percent: u16 = u16::MAX / 2; - Weights::::insert(netuid, 0, vec![(0, fifty_percent), (1, fifty_percent)]); + Weights::::insert( + NetUidStorageIndex::from(netuid), + 0, + vec![(0, fifty_percent), (1, fifty_percent)], + ); step_block(1); // Set updated so weights are valid - LastUpdate::::insert(netuid, vec![2, 0]); + LastUpdate::::insert(NetUidStorageIndex::from(netuid), vec![2, 0]); // Run epoch let hotkey_emission = SubtensorModule::epoch(netuid, to_emit.into()); // hotkey_emission is [(hotkey, incentive, dividend)] assert_eq!(hotkey_emission.len(), 2); - assert_eq!(hotkey_emission[0].0, subnet_owner_hotkey); - assert_eq!(hotkey_emission[1].0, other_hotkey); + assert!( + hotkey_emission + .iter() + .any(|(hk, _, _)| *hk == subnet_owner_hotkey) + ); + assert!(hotkey_emission.iter().any(|(hk, _, _)| *hk == other_hotkey)); log::debug!("hotkey_emission: {hotkey_emission:?}"); // Both should have received incentive emission @@ -2742,7 +2764,7 @@ fn run_epoch_and_check_bonds_dividends( target_dividends: &[f32], ) { run_epoch(netuid, sparse); - let bonds = SubtensorModule::get_bonds_fixed_proportion(netuid); + let bonds = SubtensorModule::get_bonds_fixed_proportion(netuid.into()); let dividends = SubtensorModule::get_dividends(netuid); let epsilon = I32F32::from_num(1e-3); @@ -3485,7 +3507,7 @@ fn test_yuma_3_bonds_reset() { if epoch == 20 { let hotkey = SubtensorModule::get_hotkey_for_net_and_uid(netuid, 3) .expect("Hotkey not found"); - let _ = SubtensorModule::do_reset_bonds(netuid, &hotkey); + let _ = SubtensorModule::do_reset_bonds(netuid.into(), &hotkey); } } 21 => { @@ -3650,7 +3672,10 @@ fn test_epoch_masks_incoming_to_sniped_uid_prevents_inheritance() { SubtensorModule::epoch(netuid, 1_000.into()); assert_eq!(SubtensorModule::get_rank_for_uid(netuid, new_uid), 0); - assert_eq!(SubtensorModule::get_incentive_for_uid(netuid, new_uid), 0); + assert_eq!( + SubtensorModule::get_incentive_for_uid(netuid.into(), new_uid), + 0 + ); }); } @@ -3693,7 +3718,7 @@ fn test_epoch_no_mask_when_commit_reveal_disabled() { for _ in 
0..3 { SubtensorModule::epoch(netuid, 1.into()); assert!( - !SubtensorModule::get_weights_sparse(netuid)[0].is_empty(), + !SubtensorModule::get_weights_sparse(netuid.into())[0].is_empty(), "row visible when CR disabled" ); run_to_block(System::block_number() + tempo as u64 + 1); @@ -3802,3 +3827,59 @@ fn test_epoch_does_not_mask_outside_window_but_masks_inside() { ); }); } + +// Test an epoch doesn't panic when LastUpdate size doesn't match to Weights size. +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::epoch::test_last_update_size_mismatch --exact --show-output --nocapture +#[test] +fn test_last_update_size_mismatch() { + new_test_ext(1).execute_with(|| { + log::info!("test_1_graph:"); + let netuid = NetUid::from(1); + let coldkey = U256::from(0); + let hotkey = U256::from(0); + let uid: u16 = 0; + let stake_amount: u64 = 1_000_000_000; + add_network_disable_commit_reveal(netuid, u16::MAX - 1, 0); + SubtensorModule::set_max_allowed_uids(netuid, 1); + SubtensorModule::add_balance_to_coldkey_account( + &coldkey, + stake_amount + ExistentialDeposit::get(), + ); + register_ok_neuron(netuid, hotkey, coldkey, 1); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + + assert_ok!(SubtensorModule::add_stake( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + stake_amount.into() + )); + + assert_eq!(SubtensorModule::get_subnetwork_n(netuid), 1); + run_to_block(1); // run to next block to ensure weights are set on nodes after their registration block + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(U256::from(uid)), + netuid, + vec![uid], + vec![u16::MAX], + 0 + )); + + // Set mismatching LastUpdate vector + LastUpdate::::insert(NetUidStorageIndex::from(netuid), vec![1, 1, 1]); + + SubtensorModule::epoch(netuid, 1_000_000_000.into()); + assert_eq!( + SubtensorModule::get_total_stake_for_hotkey(&hotkey), + stake_amount.into() + ); + assert_eq!(SubtensorModule::get_rank_for_uid(netuid, uid), 0); + assert_eq!(SubtensorModule::get_trust_for_uid(netuid, uid), 0); + assert_eq!(SubtensorModule::get_consensus_for_uid(netuid, uid), 0); + assert_eq!( + SubtensorModule::get_incentive_for_uid(netuid.into(), uid), + 0 + ); + assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, uid), 0); + }); +} diff --git a/pallets/subtensor/src/tests/evm.rs b/pallets/subtensor/src/tests/evm.rs index a65e69c207..95d0c4e6db 100644 --- a/pallets/subtensor/src/tests/evm.rs +++ b/pallets/subtensor/src/tests/evm.rs @@ -58,8 +58,9 @@ fn test_associate_evm_key_success() { let signature = sign_evm_message(&pair, message); assert_ok!(SubtensorModule::associate_evm_key( - RuntimeOrigin::signed(hotkey), + RuntimeOrigin::signed(coldkey), netuid, + hotkey, evm_key, block_number, signature, @@ -104,8 +105,9 @@ fn test_associate_evm_key_different_block_number_success() { let signature = sign_evm_message(&pair, message); assert_ok!(SubtensorModule::associate_evm_key( - RuntimeOrigin::signed(hotkey), + RuntimeOrigin::signed(coldkey), netuid, + hotkey, evm_key, block_number, signature, @@ -123,6 +125,43 @@ fn test_associate_evm_key_different_block_number_success() { }); } +#[test] +fn test_associate_evm_key_coldkey_does_not_own_hotkey() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1); + + let tempo: u16 = 2; + let modality: u16 = 2; + + add_network(netuid, tempo, modality); + + let coldkey = U256::from(1); + let hotkey = U256::from(2); + + let pair = ecdsa::Pair::generate().0; + let public = pair.public(); + let evm_key = 
public_to_evm_key(&public); + let block_number = frame_system::Pallet::::block_number(); + let hashed_block_number = keccak_256(block_number.encode().as_ref()); + let hotkey_bytes = hotkey.encode(); + + let message = [hotkey_bytes.as_ref(), hashed_block_number.as_ref()].concat(); + let signature = sign_evm_message(&pair, message); + + assert_err!( + SubtensorModule::associate_evm_key( + RuntimeOrigin::signed(coldkey), + netuid, + hotkey, + evm_key, + block_number, + signature, + ), + Error::::NonAssociatedColdKey + ); + }); +} + #[test] fn test_associate_evm_key_hotkey_not_registered_in_subnet() { new_test_ext(1).execute_with(|| { @@ -149,8 +188,9 @@ fn test_associate_evm_key_hotkey_not_registered_in_subnet() { assert_err!( SubtensorModule::associate_evm_key( - RuntimeOrigin::signed(hotkey), + RuntimeOrigin::signed(coldkey), netuid, + hotkey, evm_key, block_number, signature, @@ -189,8 +229,9 @@ fn test_associate_evm_key_using_wrong_hash_function() { assert_err!( SubtensorModule::associate_evm_key( - RuntimeOrigin::signed(hotkey), + RuntimeOrigin::signed(coldkey), netuid, + hotkey, evm_key, block_number, signature, diff --git a/pallets/subtensor/src/tests/math.rs b/pallets/subtensor/src/tests/math.rs index a200fa8b25..6c6636ca68 100644 --- a/pallets/subtensor/src/tests/math.rs +++ b/pallets/subtensor/src/tests/math.rs @@ -1341,39 +1341,6 @@ fn test_math_row_sum_sparse() { assert_vec_compare(&result, &target, I32F32::from_num(0)); } -#[test] -fn test_math_col_sum() { - let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let matrix = vec_to_mat_fixed(&matrix, 4, false); - let result = col_sum(&matrix); - let target: Vec = vec_to_fixed(&[22., 26., 30.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); -} - -#[test] -fn test_math_col_sum_sparse() { - let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = col_sum_sparse(&matrix, 3); - let target: Vec = vec_to_fixed(&[22., 26., 30.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); - let matrix: Vec = vec![0., 2., 3., 4., 0., 6., 7., 8., 0., 10., 11., 12.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = col_sum_sparse(&matrix, 3); - let target: Vec = vec_to_fixed(&[21., 21., 21.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); - let matrix: Vec = vec![1., 0., 3., 4., 0., 6., 7., 0., 9., 10., 0., 12.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = col_sum_sparse(&matrix, 3); - let target: Vec = vec_to_fixed(&[22., 0., 30.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); - let matrix: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = col_sum_sparse(&matrix, 3); - let target: Vec = vec_to_fixed(&[0., 0., 0.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); -} - #[test] fn test_math_matmul() { let vector: Vec = vec_to_fixed(&[1., 2., 3., 4.]); @@ -1468,51 +1435,6 @@ fn test_math_col_clip_sparse() { assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); } -#[test] -fn test_math_clip_sparse() { - let matrix: Vec = vec![0., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let target: Vec = vec![0., 1., 1., 1., 1., 1., 1., 100., 100., 100., 100., 100.]; - let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = clip_sparse( - &matrix, - 
I32F32::from_num(8), - I32F32::from_num(100), - I32F32::from_num(1), - ); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); -} - -#[test] -fn test_math_clip() { - let matrix: Vec = vec![0., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let matrix = vec_to_mat_fixed(&matrix, 4, false); - let target: Vec = vec![1., 1., 1., 1., 1., 1., 1., 100., 100., 100., 100., 100.]; - let target = vec_to_mat_fixed(&target, 4, false); - let result = clip( - &matrix, - I32F32::from_num(8), - I32F32::from_num(100), - I32F32::from_num(1), - ); - assert_mat_compare(&result, &target, I32F32::from_num(0)); -} - -#[test] -fn test_math_inplace_clip() { - let matrix: Vec = vec![0., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let mut matrix = vec_to_mat_fixed(&matrix, 4, false); - let target: Vec = vec![1., 1., 1., 1., 1., 1., 1., 100., 100., 100., 100., 100.]; - let target = vec_to_mat_fixed(&target, 4, false); - inplace_clip( - &mut matrix, - I32F32::from_num(8), - I32F32::from_num(100), - I32F32::from_num(1), - ); - assert_mat_compare(&matrix, &target, I32F32::from_num(0)); -} - #[test] fn test_math_weighted_median() { let mut rng = thread_rng(); @@ -2083,70 +2005,6 @@ fn test_math_interpolate_sparse() { assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); } -#[test] -fn test_math_hadamard() { - let mat2: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let mat1: Vec = vec![ - 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., - ]; - let target: Vec = vec![ - 10., 40., 90., 160., 250., 360., 490., 640., 810., 1000., 1210., 1440., - ]; - let mat2 = vec_to_mat_fixed(&mat2, 4, false); - let mat1 = vec_to_mat_fixed(&mat1, 4, false); - let target = vec_to_mat_fixed(&target, 4, false); - let result = hadamard(&mat1, &mat2); - assert_mat_compare(&result, &target, I32F32::from_num(0.000001)); - let mat2: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let mat1: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let mat2 = vec_to_mat_fixed(&mat2, 4, false); - let mat1 = vec_to_mat_fixed(&mat1, 4, false); - let target = vec_to_mat_fixed(&target, 4, false); - let result = hadamard(&mat1, &mat2); - assert_mat_compare(&result, &target, I32F32::from_num(0.000001)); - let mat2: Vec = vec![1., 0., 0., 0., 2., 0., 0., 0., 3., 0., 0., 0.]; - let mat1: Vec = vec![0., 0., 4., 0., 5., 0., 6., 0., 0., 0., 0., 0.]; - let target: Vec = vec![0., 0., 0., 0., 10., 0., 0., 0., 0., 0., 0., 0.]; - let mat2 = vec_to_mat_fixed(&mat2, 4, false); - let mat1 = vec_to_mat_fixed(&mat1, 4, false); - let target = vec_to_mat_fixed(&target, 4, false); - let result = hadamard(&mat1, &mat2); - assert_mat_compare(&result, &target, I32F32::from_num(0.000001)); -} - -#[test] -fn test_math_hadamard_sparse() { - let mat2: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let mat1: Vec = vec![ - 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., - ]; - let target: Vec = vec![ - 10., 40., 90., 160., 250., 360., 490., 640., 810., 1000., 1210., 1440., - ]; - let mat2 = vec_to_sparse_mat_fixed(&mat2, 4, false); - let mat1 = vec_to_sparse_mat_fixed(&mat1, 4, false); - let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = hadamard_sparse(&mat1, &mat2, 3); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); - let mat2: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let mat1: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 
        0., 0., 0.];
-    let target: Vec<f32> = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.];
-    let mat2 = vec_to_sparse_mat_fixed(&mat2, 4, false);
-    let mat1 = vec_to_sparse_mat_fixed(&mat1, 4, false);
-    let target = vec_to_sparse_mat_fixed(&target, 4, false);
-    let result = hadamard_sparse(&mat1, &mat2, 3);
-    assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001));
-    let mat2: Vec<f32> = vec![1., 0., 0., 0., 2., 0., 0., 0., 3., 0., 0., 0.];
-    let mat1: Vec<f32> = vec![0., 0., 4., 0., 5., 0., 6., 0., 0., 0., 0., 0.];
-    let target: Vec<f32> = vec![0., 0., 0., 0., 10., 0., 0., 0., 0., 0., 0., 0.];
-    let mat2 = vec_to_sparse_mat_fixed(&mat2, 4, false);
-    let mat1 = vec_to_sparse_mat_fixed(&mat1, 4, false);
-    let target = vec_to_sparse_mat_fixed(&target, 4, false);
-    let result = hadamard_sparse(&mat1, &mat2, 3);
-    assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001));
-}
-
 #[test]
 fn test_math_mat_ema_alpha() {
     let old: Vec<f32> = vec![
@@ -2726,9 +2584,7 @@ fn test_mat_ema_alpha_single_element() {
     assert_eq!(result, expected);
 }

-// TODO: (@sd): Should these be non panicking?
 #[test]
-#[should_panic(expected = "assertion failed")]
 fn test_mat_ema_alpha_mismatched_dimensions() {
     let new = mat_to_fixed(&[vec![1.0, 2.0], vec![3.0, 4.0]]);
     let old = mat_to_fixed(&[vec![1.0, 2.0, 3.0], vec![4.0, 5.0, 6.0]]);
     let alpha = vec![
         vec![
         ];
         2
     ];
-    let _result = mat_ema_alpha(&new, &old, &alpha);
-}
-
-#[test]
-fn test_quantile() {
-    // Test with a non-empty vector and valid quantile values
-    let data = vec![
-        I32F32::from_num(1.0),
-        I32F32::from_num(2.0),
-        I32F32::from_num(3.0),
-        I32F32::from_num(4.0),
-        I32F32::from_num(5.0),
-    ];
-
-    // Test 0th quantile (minimum)
-    let result = quantile(&data, 0.0);
-    assert_eq!(result, I32F32::from_num(1.0));
-
-    // Test 25th quantile
-    let result = quantile(&data, 0.25);
-    assert_eq!(result, I32F32::from_num(2.0));
-
-    // Test 50th quantile (median)
-    let result = quantile(&data, 0.5);
-    assert_eq!(result, I32F32::from_num(3.0));
-
-    // Test 66th quantile
-    let result = quantile(&data, 0.66);
-    assert_eq!(result, I32F32::from_num(3.64));
-
-    // Test 75th quantile
-    let result = quantile(&data, 0.75);
-    assert_eq!(result, I32F32::from_num(4.0));
-
-    // Test 100th quantile (maximum)
-    let result = quantile(&data, 1.0);
-    assert_eq!(result, I32F32::from_num(5.0));
+    let result = mat_ema_alpha(&new, &old, &alpha);
+    assert_eq!(result[0][0], old[0][0])
 }
diff --git a/pallets/subtensor/src/tests/mechanism.rs b/pallets/subtensor/src/tests/mechanism.rs
new file mode 100644
index 0000000000..8e5b1563d8
--- /dev/null
+++ b/pallets/subtensor/src/tests/mechanism.rs
@@ -0,0 +1,1601 @@
+#![allow(
+    clippy::arithmetic_side_effects,
+    clippy::indexing_slicing,
+    clippy::unwrap_used
+)]
+
+// Run all tests
+// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::mechanism --show-output
+
+// Test plan:
+// - [x] Netuid index math (with MechanismCountCurrent limiting)
+// - [x] Sub-subnet validity tests
+// - [x] do_set_desired tests
+// - [x] Emissions are split proportionally
+// - [x] Sum of split emissions is equal to rao_emission passed to epoch
+// - [x] Only subnet owner or root can set desired mechanism count (pallet admin test)
+// - [x] Weights can be set by mechanism
+// - [x] Weights can be committed/revealed by mechanism
+// - [x] Weights can be committed/revealed in crv3 by mechanism
+// - [x] Prevent weight setting/commitment/revealing above mechanism_limit_in_force
+// - [x] Prevent weight commitment/revealing above mechanism_limit_in_force
+// - [x] Prevent weight commitment/revealing in crv3 above mechanism_limit_in_force
+// - [x] When a miner is deregistered, their weights are cleaned across all mechanisms
+// - [x] Weight setting rate limiting is enforced by mechanism
+// - [x] Bonds are applied per mechanism
+// - [x] Incentives are per mechanism
+// - [x] Per-mechanism incentives are distributed proportionally to miner weights
+// - [x] Mechanism limit can be set up to 8 (with admin pallet)
+// - [x] When reduction of mechanism limit occurs, Weights, Incentive, LastUpdate, Bonds, and WeightCommits are cleared
+// - [x] Epoch terms of subnet are weighted sum (or logical OR) of all mechanism epoch terms
+// - [x] Subnet epoch terms persist in state
+// - [x] Mechanism epoch terms persist in state
+// - [x] "Yuma Emergency Mode" (consensus sum is 0 for a mechanism), emission distributed by stake
+// - [x] Miner with no weights on any mechanism receives no reward
+// - [x] MechanismEmissionSplit is reset on mechanism count increase
+// - [x] MechanismEmissionSplit is reset on mechanism count decrease
+
+use super::mock::*;
+use crate::coinbase::reveal_commits::WeightsTlockPayload;
+use crate::subnets::mechanism::{GLOBAL_MAX_SUBNET_COUNT, MAX_MECHANISM_COUNT_PER_SUBNET};
+use crate::*;
+use alloc::collections::BTreeMap;
+use approx::assert_abs_diff_eq;
+use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
+use codec::Encode;
+use frame_support::{assert_noop, assert_ok};
+use frame_system::RawOrigin;
+use pallet_drand::types::Pulse;
+use rand_chacha::{ChaCha20Rng, rand_core::SeedableRng};
+use sha2::Digest;
+use sp_core::{H256, U256};
+use sp_runtime::traits::{BlakeTwo256, Hash};
+use sp_std::collections::vec_deque::VecDeque;
+use substrate_fixed::types::{I32F32, U64F64};
+use subtensor_runtime_common::{MechId, NetUid, NetUidStorageIndex};
+use tle::{
+    curves::drand::TinyBLS381, ibe::fullident::Identity,
+    stream_ciphers::AESGCMStreamCipherProvider, tlock::tle,
+};
+use w3f_bls::EngineBLS;
+
+#[test]
+fn test_index_from_netuid_and_subnet() {
+    new_test_ext(1).execute_with(|| {
+        [
+            (0_u16, 0_u8),
+            (GLOBAL_MAX_SUBNET_COUNT / 2, 1),
+            (GLOBAL_MAX_SUBNET_COUNT / 2, 7),
+            (GLOBAL_MAX_SUBNET_COUNT / 2, 14),
+            (GLOBAL_MAX_SUBNET_COUNT / 2, 15),
+            (GLOBAL_MAX_SUBNET_COUNT - 1, 1),
+            (GLOBAL_MAX_SUBNET_COUNT - 1, 7),
+            (GLOBAL_MAX_SUBNET_COUNT - 1, 14),
+            (GLOBAL_MAX_SUBNET_COUNT - 1, 15),
+        ]
+        .iter()
+        .for_each(|(netuid, sub_id)| {
+            let idx = SubtensorModule::get_mechanism_storage_index(
+                NetUid::from(*netuid),
+                MechId::from(*sub_id),
+            );
+            let expected = *sub_id as u64 * GLOBAL_MAX_SUBNET_COUNT as u64 + *netuid as u64;
+            assert_eq!(idx, NetUidStorageIndex::from(expected as u16));
+        });
+    });
+}
+
+#[test]
+fn test_netuid_and_subnet_from_index() {
+    new_test_ext(1).execute_with(|| {
+        [
+            0_u16,
+            1,
+            14,
+            15,
+            16,
+            17,
+            GLOBAL_MAX_SUBNET_COUNT - 1,
+            GLOBAL_MAX_SUBNET_COUNT,
+            GLOBAL_MAX_SUBNET_COUNT + 1,
+            0xFFFE / 2,
+            0xFFFE,
+            0xFFFF,
+        ]
+        .iter()
+        .for_each(|netuid_index| {
+            let expected_netuid = (*netuid_index as u64 % GLOBAL_MAX_SUBNET_COUNT as u64) as u16;
+            let expected_subid = (*netuid_index as u64 / GLOBAL_MAX_SUBNET_COUNT as u64) as u8;
+
+            // Allow subnet ID
+            NetworksAdded::<Test>::insert(NetUid::from(expected_netuid), true);
+            MechanismCountCurrent::<Test>::insert(
+                NetUid::from(expected_netuid),
+                MechId::from(expected_subid + 1),
+            );
+
+            let (netuid, mecid) =
+                SubtensorModule::get_netuid_and_subid(NetUidStorageIndex::from(*netuid_index))
+                    .unwrap();
+            assert_eq!(netuid, NetUid::from(expected_netuid));
+            assert_eq!(mecid, MechId::from(expected_subid));
+        });
+    });
+}
+
+#[test]
+fn test_netuid_index_math_constants() {
+    assert_eq!(
+        GLOBAL_MAX_SUBNET_COUNT as u64 * MAX_MECHANISM_COUNT_PER_SUBNET as u64,
+        0x10000
+    );
+}
+
+#[test]
+fn ensure_mechanism_exists_ok() {
+    new_test_ext(1).execute_with(|| {
+        let netuid: NetUid = 3u16.into();
+        let sub_id = MechId::from(1u8);
+
+        // ensure base subnet exists
+        NetworksAdded::<Test>::insert(NetUid::from(netuid), true);
+
+        // Allow at least 2 sub-subnets (so sub_id = 1 is valid)
+        MechanismCountCurrent::<Test>::insert(netuid, MechId::from(2u8));
+        assert_ok!(SubtensorModule::ensure_mechanism_exists(netuid, sub_id));
+    });
+}
+
+#[test]
+fn ensure_mechanism_fails_when_base_subnet_missing() {
+    new_test_ext(1).execute_with(|| {
+        let netuid: NetUid = 7u16.into();
+        let sub_id = MechId::from(0u8);
+
+        // Intentionally DO NOT create the base subnet
+
+        assert_noop!(
+            SubtensorModule::ensure_mechanism_exists(netuid, sub_id),
+            Error::<Test>::MechanismDoesNotExist
+        );
+    });
+}
+
+#[test]
+fn ensure_mechanism_fails_when_subid_out_of_range() {
+    new_test_ext(1).execute_with(|| {
+        let netuid: NetUid = 9u16.into();
+        NetworksAdded::<Test>::insert(NetUid::from(netuid), true);
+
+        // Current allowed sub-subnet count is 2 => valid sub_ids: {0, 1}
+        MechanismCountCurrent::<Test>::insert(netuid, MechId::from(2u8));
+
+        // sub_id == 2 is out of range (must be < 2)
+        let sub_id_eq = MechId::from(2u8);
+        assert_noop!(
+            SubtensorModule::ensure_mechanism_exists(netuid, sub_id_eq),
+            Error::<Test>::MechanismDoesNotExist
+        );
+
+        // sub_id > 2 is also out of range
+        let sub_id_gt = MechId::from(3u8);
+        assert_noop!(
+            SubtensorModule::ensure_mechanism_exists(netuid, sub_id_gt),
+            Error::<Test>::MechanismDoesNotExist
+        );
+    });
+}
+
+#[test]
+fn do_set_mechanism_count_ok_minimal() {
+    new_test_ext(1).execute_with(|| {
+        let netuid = NetUid::from(3u16);
+        NetworksAdded::<Test>::insert(NetUid::from(3u16), true); // base subnet exists
+
+        assert_ok!(SubtensorModule::do_set_mechanism_count(
+            netuid,
+            MechId::from(1u8)
+        ));
+
+        assert_eq!(
+            MechanismCountCurrent::<Test>::get(netuid),
+            MechId::from(1u8)
+        );
+    });
+}
+
+#[test]
+fn do_set_mechanism_count_ok_at_effective_cap() {
+    new_test_ext(1).execute_with(|| {
+        let netuid = NetUid::from(4u16);
+        NetworksAdded::<Test>::insert(NetUid::from(4u16), true); // base subnet exists
+
+        // Effective bound is min(runtime cap, compile-time cap)
+        let runtime_cap = MaxMechanismCount::<Test>::get(); // e.g., MechId::from(8)
+        let compile_cap = MechId::from(MAX_MECHANISM_COUNT_PER_SUBNET);
+        let bound = if runtime_cap <= compile_cap {
+            runtime_cap
+        } else {
+            compile_cap
+        };
+
+        assert_ok!(SubtensorModule::do_set_mechanism_count(netuid, bound));
+        assert_eq!(MechanismCountCurrent::<Test>::get(netuid), bound);
+    });
+}
+
+#[test]
+fn do_set_fails_when_base_subnet_missing() {
+    new_test_ext(1).execute_with(|| {
+        let netuid = NetUid::from(7u16);
+        // No NetworksAdded insert => base subnet absent
+
+        assert_noop!(
+            SubtensorModule::do_set_mechanism_count(netuid, MechId::from(1u8)),
+            Error::<Test>::MechanismDoesNotExist
+        );
+    });
+}
+
+#[test]
+fn do_set_fails_for_zero() {
+    new_test_ext(1).execute_with(|| {
+        let netuid = NetUid::from(9u16);
+        NetworksAdded::<Test>::insert(NetUid::from(9u16), true); // base subnet exists
+
+        assert_noop!(
+            SubtensorModule::do_set_mechanism_count(netuid, MechId::from(0u8)),
+            Error::<Test>::InvalidValue
+        );
+    });
+}
+
+#[test]
+
+#[test]
+fn do_set_fails_when_over_runtime_cap() {
+    new_test_ext(1).execute_with(|| {
+        let netuid = NetUid::from(11u16);
+        NetworksAdded::<Test>::insert(NetUid::from(11u16), true); // base subnet exists
+
+        // Runtime cap is 8 (per function), so 9 must fail
+        assert_noop!(
+            SubtensorModule::do_set_mechanism_count(netuid, MechId::from(9u8)),
+            Error::<Test>::InvalidValue
+        );
+    });
+}
+
+#[test]
+fn do_set_fails_when_over_compile_time_cap() {
+    new_test_ext(1).execute_with(|| {
+        let netuid = NetUid::from(12u16);
+        NetworksAdded::<Test>::insert(NetUid::from(12u16), true); // base subnet exists
+
+        let too_big = MechId::from(MAX_MECHANISM_COUNT_PER_SUBNET + 1);
+        assert_noop!(
+            SubtensorModule::do_set_mechanism_count(netuid, too_big),
+            Error::<Test>::InvalidValue
+        );
+    });
+}
+
+#[test]
+fn update_mechanism_counts_decreases_and_cleans() {
+    new_test_ext(1).execute_with(|| {
+        let hotkey = U256::from(1);
+
+        // Base subnet exists
+        let netuid = NetUid::from(42u16);
+        NetworksAdded::<Test>::insert(NetUid::from(42u16), true);
+
+        // Choose counts so result is deterministic.
+        let old = MechId::from(3);
+        let desired = MechId::from(2u8);
+        MechanismCountCurrent::<Test>::insert(netuid, old);
+
+        // Set non-default subnet emission split
+        MechanismEmissionSplit::<Test>::insert(netuid, vec![123u16, 234u16, 345u16]);
+
+        // Seed data at a kept mecid (1) and a removed mecid (2)
+        let idx_keep = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(1u8));
+        let idx_rm2 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(2u8));
+
+        Weights::<Test>::insert(idx_keep, 0u16, vec![(1u16, 1u16)]);
+        Incentive::<Test>::insert(idx_keep, vec![1u16]);
+        LastUpdate::<Test>::insert(idx_keep, vec![123u64]);
+        Bonds::<Test>::insert(idx_keep, 0u16, vec![(1u16, 2u16)]);
+        WeightCommits::<Test>::insert(
+            idx_keep,
+            hotkey,
+            VecDeque::from([(sp_core::H256::zero(), 1u64, 2u64, 3u64)]),
+        );
+        TimelockedWeightCommits::<Test>::insert(
+            idx_keep,
+            1u64,
+            VecDeque::from([(hotkey, 1u64, Default::default(), Default::default())]),
+        );
+
+        Weights::<Test>::insert(idx_rm2, 0u16, vec![(9u16, 9u16)]);
+        Incentive::<Test>::insert(idx_rm2, vec![9u16]);
+        LastUpdate::<Test>::insert(idx_rm2, vec![999u64]);
+        Bonds::<Test>::insert(idx_rm2, 0u16, vec![(9u16, 9u16)]);
+        WeightCommits::<Test>::insert(
+            idx_rm2,
+            hotkey,
+            VecDeque::from([(sp_core::H256::zero(), 1u64, 2u64, 3u64)]),
+        );
+        TimelockedWeightCommits::<Test>::insert(
+            idx_rm2,
+            1u64,
+            VecDeque::from([(hotkey, 1u64, Default::default(), Default::default())]),
+        );
+
+        // Act
+        SubtensorModule::update_mechanism_counts_if_needed(netuid, desired);
+
+        // New count is as desired
+        assert_eq!(MechanismCountCurrent::<Test>::get(netuid), desired);
+
+        // Kept prefix intact
+        assert_eq!(Incentive::<Test>::get(idx_keep), vec![1u16]);
+        assert!(Weights::<Test>::iter_prefix(idx_keep).next().is_some());
+        assert!(LastUpdate::<Test>::contains_key(idx_keep));
+        assert!(Bonds::<Test>::iter_prefix(idx_keep).next().is_some());
+        assert!(WeightCommits::<Test>::contains_key(idx_keep, hotkey));
+        assert!(TimelockedWeightCommits::<Test>::contains_key(
+            idx_keep, 1u64
+        ));
+
+        // Removed prefix (mecid 2) cleared
+        assert!(Weights::<Test>::iter_prefix(idx_rm2).next().is_none());
+        assert_eq!(Incentive::<Test>::get(idx_rm2), Vec::<u16>::new());
+        assert!(!LastUpdate::<Test>::contains_key(idx_rm2));
+        assert!(Bonds::<Test>::iter_prefix(idx_rm2).next().is_none());
+        assert!(!WeightCommits::<Test>::contains_key(idx_rm2, hotkey));
+        assert!(!TimelockedWeightCommits::<Test>::contains_key(
+            idx_rm2, 1u64
+        ));
+
+        // MechanismEmissionSplit is reset
+        assert!(MechanismEmissionSplit::<Test>::get(netuid).is_none());
+    });
+}
+
+#[test]
+fn update_mechanism_counts_increases() {
+    new_test_ext(1).execute_with(|| {
+        // Base subnet exists
+        let netuid = NetUid::from(42u16);
+        NetworksAdded::<Test>::insert(NetUid::from(42u16), true);
+
+        // Choose counts
+        let old = MechId::from(1u8);
+        let desired = MechId::from(2u8);
+        MechanismCountCurrent::<Test>::insert(netuid, old);
+
+        // Set non-default subnet emission split
+        MechanismEmissionSplit::<Test>::insert(netuid, vec![123u16, 234u16, 345u16]);
+
+        // Act
+        SubtensorModule::update_mechanism_counts_if_needed(netuid, desired);
+
+        // New count is as desired
+        assert_eq!(MechanismCountCurrent::<Test>::get(netuid), desired);
+
+        // MechanismEmissionSplit is reset
+        assert!(MechanismEmissionSplit::<Test>::get(netuid).is_none());
+    });
+}
+
+#[test]
+fn split_emissions_even_division() {
+    new_test_ext(1).execute_with(|| {
+        let netuid = NetUid::from(5u16);
+        MechanismCountCurrent::<Test>::insert(netuid, MechId::from(5u8)); // 5 sub-subnets
+        let out = SubtensorModule::split_emissions(netuid, AlphaCurrency::from(25u64));
+        assert_eq!(out, vec![AlphaCurrency::from(5u64); 5]);
+    });
+}
+
+#[test]
+fn split_emissions_rounding_to_first() {
+    new_test_ext(1).execute_with(|| {
+        let netuid = NetUid::from(6u16);
+        MechanismCountCurrent::<Test>::insert(netuid, MechId::from(4u8)); // 4 sub-subnets
+        let out = SubtensorModule::split_emissions(netuid, AlphaCurrency::from(10u64)); // 10 / 4 = 2, rem=2
+        assert_eq!(
+            out,
+            vec![
+                AlphaCurrency::from(4u64), // 2 + remainder(2)
+                AlphaCurrency::from(2u64),
+                AlphaCurrency::from(2u64),
+                AlphaCurrency::from(2u64),
+            ]
+        );
+    });
+}
+
+#[test]
+fn split_emissions_fibonacci() {
+    new_test_ext(1).execute_with(|| {
+        let netuid = NetUid::from(5u16);
+        MechanismCountCurrent::<Test>::insert(netuid, MechId::from(5u8)); // 5 sub-subnets
+        MechanismEmissionSplit::<Test>::insert(netuid, vec![3450, 6899, 10348, 17247, 27594]);
+        let out = SubtensorModule::split_emissions(netuid, AlphaCurrency::from(19u64));
+        assert_eq!(
+            out,
+            vec![
+                AlphaCurrency::from(1u64),
+                AlphaCurrency::from(2u64),
+                AlphaCurrency::from(3u64),
+                AlphaCurrency::from(5u64),
+                AlphaCurrency::from(8u64),
+            ]
+        );
+    });
+}
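+
+// As the three split_emissions tests above suggest, emission is divided according to
+// MechanismEmissionSplit (u16-normalized shares), falling back to an even split when
+// the split is unset, with any integer remainder credited to the first mechanism.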
+
+/// Seeds a 2-neuron and 2-mechanism subnet so `epoch_mechanism` produces non-zero
+/// incentives & dividends.
+pub fn mock_epoch_state(netuid: NetUid, ck0: U256, hk0: U256, ck1: U256, hk1: U256) {
+    let idx0 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(0));
+    let idx1 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(1));
+
+    // Base subnet exists; 2 neurons.
+    NetworksAdded::<Test>::insert(NetUid::from(u16::from(netuid)), true);
+    MechanismCountCurrent::<Test>::insert(netuid, MechId::from(2u8));
+    SubnetworkN::<Test>::insert(netuid, 2);
+
+    // Register two neurons (UID 0,1) → keys drive `get_subnetwork_n`.
+    Keys::<Test>::insert(netuid, 0u16, hk0);
+    Keys::<Test>::insert(netuid, 1u16, hk1);
+
+    // Make both ACTIVE: recent updates & old registrations.
+    Tempo::<Test>::insert(netuid, 1u16);
+    ActivityCutoff::<Test>::insert(netuid, u16::MAX); // large cutoff keeps them active
+    LastUpdate::<Test>::insert(idx0, vec![2, 2]);
+    LastUpdate::<Test>::insert(idx1, vec![2, 2]);
+    BlockAtRegistration::<Test>::insert(netuid, 0, 1u64); // registered long ago
+    BlockAtRegistration::<Test>::insert(netuid, 1, 1u64);
+
+    // Add stake
+    let stake_amount = AlphaCurrency::from(1_000_000_000); // 1 Alpha
+    SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(
+        &hk0,
+        &ck0,
+        netuid,
+        stake_amount,
+    );
+    SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(
+        &hk1,
+        &ck1,
+        netuid,
+        stake_amount,
+    );
+
+    // Non-zero stake above threshold; permit both as validators.
+    StakeThreshold::<Test>::put(0u64);
+    ValidatorPermit::<Test>::insert(netuid, vec![true, true]);
+
+    // Simple weights: each neuron sets weights for the other on both mechanisms
+    Weights::<Test>::insert(idx0, 0, vec![(0u16, 0xFFFF), (1u16, 0xFFFF)]);
+    Weights::<Test>::insert(idx0, 1, vec![(0u16, 0xFFFF), (1u16, 0xFFFF)]);
+    Weights::<Test>::insert(idx1, 0, vec![(0u16, 0xFFFF), (1u16, 0xFFFF)]);
+    Weights::<Test>::insert(idx1, 1, vec![(0u16, 0xFFFF), (1u16, 0xFFFF)]);
+
+    // Keep weight masking off for simplicity.
+    CommitRevealWeightsEnabled::<Test>::insert(netuid, false);
+    Yuma3On::<Test>::insert(netuid, false);
+}
+
+pub fn mock_3_neurons(netuid: NetUid, hk: U256) {
+    let idx0 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(0));
+    let idx1 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(1));
+
+    SubnetworkN::<Test>::insert(netuid, 3);
+    Keys::<Test>::insert(netuid, 2u16, hk);
+    LastUpdate::<Test>::insert(idx0, vec![2, 2, 2]);
+    LastUpdate::<Test>::insert(idx1, vec![2, 2, 2]);
+    BlockAtRegistration::<Test>::insert(netuid, 2, 1u64);
+}
+
+#[test]
+fn epoch_with_mechanisms_produces_per_mechanism_incentive() {
+    new_test_ext(1).execute_with(|| {
+        let netuid = NetUid::from(1u16);
+        let idx0 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(0));
+        let idx1 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(1));
+        let ck0 = U256::from(1);
+        let hk0 = U256::from(2);
+        let ck1 = U256::from(3);
+        let hk1 = U256::from(4);
+        let emission = AlphaCurrency::from(1_000_000_000);
+
+        mock_epoch_state(netuid, ck0, hk0, ck1, hk1);
+        SubtensorModule::epoch_with_mechanisms(netuid, emission);
+
+        let actual_incentive_sub0 = Incentive::<Test>::get(idx0);
+        let actual_incentive_sub1 = Incentive::<Test>::get(idx1);
+        let expected_incentive = 0xFFFF / 2;
+        assert_eq!(actual_incentive_sub0[0], expected_incentive);
+        assert_eq!(actual_incentive_sub0[1], expected_incentive);
+        assert_eq!(actual_incentive_sub1[0], expected_incentive);
+        assert_eq!(actual_incentive_sub1[1], expected_incentive);
+    });
+}
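+
+// With the symmetric weights seeded by mock_epoch_state, each of the two neurons earns
+// half of the u16-normalized incentive (0xFFFF / 2) on every mechanism, as checked above.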
+
+#[test]
+fn epoch_with_mechanisms_updates_bonds() {
+    new_test_ext(1).execute_with(|| {
+        let netuid = NetUid::from(1u16);
+        let idx0 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(0));
+        let idx1 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(1));
+        let ck0 = U256::from(1);
+        let hk0 = U256::from(2);
+        let ck1 = U256::from(3);
+        let hk1 = U256::from(4);
+        let emission = AlphaCurrency::from(1_000_000_000);
+
+        mock_epoch_state(netuid, ck0, hk0, ck1, hk1);
+
+        // Cause bonds to be asymmetric on diff mechanisms
+        Weights::<Test>::insert(idx1, 0, vec![(0u16, 0xFFFF), (1u16, 0)]);
+        Weights::<Test>::insert(idx1, 1, vec![(0u16, 0xFFFF), (1u16, 0xFFFF)]);
+
+        SubtensorModule::epoch_with_mechanisms(netuid, emission);
+
+        let bonds_uid0_sub0 = Bonds::<Test>::get(idx0, 0);
+        let bonds_uid1_sub0 = Bonds::<Test>::get(idx0, 1);
+        let bonds_uid0_sub1 = Bonds::<Test>::get(idx1, 0);
+        let bonds_uid1_sub1 = Bonds::<Test>::get(idx1, 1);
+
+        // Mechanism 0: UID0 fully bonds to UID1, UID1 fully bonds to UID0
+        assert_eq!(bonds_uid0_sub0, vec![(1, 65535)]);
+        assert_eq!(bonds_uid1_sub0, vec![(0, 65535)]);
+
+        // Mechanism 1: UID0 no bond to UID1, UID1 fully bonds to UID0
+        assert_eq!(bonds_uid0_sub1, vec![]);
+        assert_eq!(bonds_uid1_sub1, vec![(0, 65535)]);
+    });
+}
+
+#[test]
+fn epoch_with_mechanisms_incentives_proportional_to_weights() {
+    new_test_ext(1).execute_with(|| {
+        let netuid = NetUid::from(1u16);
+        let idx0 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(0));
+        let idx1 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(1));
+        let ck0 = U256::from(1);
+        let hk0 = U256::from(2);
+        let ck1 = U256::from(3);
+        let hk1 = U256::from(4);
+        let hk2 = U256::from(6);
+        let emission = AlphaCurrency::from(1_000_000_000);
+
+        mock_epoch_state(netuid, ck0, hk0, ck1, hk1);
+        mock_3_neurons(netuid, hk2);
+
+        // Need 3 neurons for this: One validator that will be setting weights to 2 miners
+        ValidatorPermit::<Test>::insert(netuid, vec![true, false, false]);
+
+        // Set greater weight to uid1 on sub-subnet 0 and to uid2 on mechanism 1
+        Weights::<Test>::insert(idx0, 0, vec![(1u16, 0xFFFF / 5 * 4), (2u16, 0xFFFF / 5)]);
+        Weights::<Test>::insert(idx1, 0, vec![(1u16, 0xFFFF / 5), (2u16, 0xFFFF / 5 * 4)]);
+
+        SubtensorModule::epoch_with_mechanisms(netuid, emission);
+
+        let actual_incentive_sub0 = Incentive::<Test>::get(idx0);
+        let actual_incentive_sub1 = Incentive::<Test>::get(idx1);
+
+        let expected_incentive_high = 0xFFFF / 5 * 4;
+        let expected_incentive_low = 0xFFFF / 5;
+        assert_abs_diff_eq!(
+            actual_incentive_sub0[1],
+            expected_incentive_high,
+            epsilon = 1
+        );
+        assert_abs_diff_eq!(
+            actual_incentive_sub0[2],
+            expected_incentive_low,
+            epsilon = 1
+        );
+        assert_abs_diff_eq!(
+            actual_incentive_sub1[1],
+            expected_incentive_low,
+            epsilon = 1
+        );
+        assert_abs_diff_eq!(
+            actual_incentive_sub1[2],
+            expected_incentive_high,
+            epsilon = 1
+        );
+    });
+}
+
+#[test]
+fn epoch_with_mechanisms_persists_and_aggregates_all_terms() {
+    new_test_ext(1).execute_with(|| {
+        let netuid = NetUid::from(1u16);
+        let idx0 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(0));
+        let idx1 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(1));
+
+        // Three neurons: validator (uid=0) + two miners (uid=1,2)
+        let ck0 = U256::from(1);
+        let hk0 = U256::from(2);
+        let ck1 = U256::from(3);
+        let hk1 = U256::from(4);
+        let hk2 = U256::from(6);
+        let emission = AlphaCurrency::from(1_000_000_000u64);
+
+        // Healthy minimal state and 3rd neuron
+        mock_epoch_state(netuid, ck0, hk0, ck1, hk1);
+        mock_3_neurons(netuid, hk2);
+        let uid0 = 0_usize;
+        let uid1 = 1_usize;
+        let uid2 = 2_usize;
+
+        // Two sub-subnets with non-equal split (~25% / 75%)
+        MechanismCountCurrent::<Test>::insert(netuid, MechId::from(2u8));
+        let split0 = u16::MAX / 4;
+        let split1 = u16::MAX - split0;
+        MechanismEmissionSplit::<Test>::insert(netuid, vec![split0, split1]);
+
+        // One validator; skew weights differently per sub-subnet
+        ValidatorPermit::<Test>::insert(netuid, vec![true, false, false]);
+        // sub 0: uid1 heavy, uid2 light
+        Weights::<Test>::insert(
+            idx0,
+            0,
+            vec![(1u16, 0xFFFF / 5 * 3), (2u16, 0xFFFF / 5 * 2)],
+        );
+        // sub 1: uid1 light, uid2 heavy
+        Weights::<Test>::insert(idx1, 0, vec![(1u16, 0xFFFF / 5), (2u16, 0xFFFF / 5 * 4)]);
+
+        // Per-sub emissions (and weights used for aggregation)
+        let mechanism_emissions = SubtensorModule::split_emissions(netuid, emission);
+        let w0 = U64F64::from_num(u64::from(mechanism_emissions[0]))
+            / U64F64::from_num(u64::from(emission));
+        let w1 = U64F64::from_num(u64::from(mechanism_emissions[1]))
+            / U64F64::from_num(u64::from(emission));
+        assert_abs_diff_eq!(w0.to_num::<f64>(), 0.25, epsilon = 0.0001);
+        assert_abs_diff_eq!(w1.to_num::<f64>(), 0.75, epsilon = 0.0001);
+
+        // Get per-mechanism epoch outputs to build expectations
+        let out0 =
+            SubtensorModule::epoch_mechanism(netuid, MechId::from(0), mechanism_emissions[0]);
+        let out1 =
+            SubtensorModule::epoch_mechanism(netuid, MechId::from(1), mechanism_emissions[1]);
+
+        // Now run the real aggregated path (also persists terms)
+        let agg = SubtensorModule::epoch_with_mechanisms(netuid, emission);
+
+        // hotkey -> (server_emission_u64, validator_emission_u64)
+        let agg_map: BTreeMap<U256, (u64, u64)> = agg
+            .into_iter()
+            .map(|(hk, se, ve)| (hk, (u64::from(se), u64::from(ve))))
+            .collect();
+
+        // Helper to fetch per-sub terms by hotkey
+        let terms0 = |hk: &U256| out0.0.get(hk).unwrap();
+        let terms1 = |hk: &U256| out1.0.get(hk).unwrap();
+
+        // Returned aggregated emissions match plain sums of mechanism emissions
+        for hk in [&hk1, &hk2] {
+            let (got_se, got_ve) = agg_map.get(hk).cloned().expect("present");
+            let t0 = terms0(hk);
+            let t1 = terms1(hk);
+            let exp_se = (U64F64::saturating_from_num(u64::from(t0.server_emission))
+                + U64F64::saturating_from_num(u64::from(t1.server_emission)))
+            .saturating_to_num::<u64>();
+            let exp_ve = (U64F64::saturating_from_num(u64::from(t0.validator_emission))
+                + U64F64::saturating_from_num(u64::from(t1.validator_emission)))
+            .saturating_to_num::<u64>();
+            assert_abs_diff_eq!(u64::from(got_se), exp_se, epsilon = 1);
+            assert_abs_diff_eq!(u64::from(got_ve), exp_ve, epsilon = 1);
+        }
+
+        // Persisted per-mechanism Incentive vectors match per-sub terms
+        let inc0 = Incentive::<Test>::get(idx0);
+        let inc1 = Incentive::<Test>::get(idx1);
+        let exp_inc0 = {
+            let mut v = vec![0u16; 3];
+            v[terms0(&hk0).uid] = terms0(&hk0).incentive;
+            v[terms0(&hk1).uid] = terms0(&hk1).incentive;
+            v[terms0(&hk2).uid] = terms0(&hk2).incentive;
+            v
+        };
+        let exp_inc1 = {
+            let mut v = vec![0u16; 3];
+            v[terms1(&hk0).uid] = terms1(&hk0).incentive;
+            v[terms1(&hk1).uid] = terms1(&hk1).incentive;
+            v[terms1(&hk2).uid] = terms1(&hk2).incentive;
+            v
+        };
+        for (a, e) in inc0.iter().zip(exp_inc0.iter()) {
+            assert_abs_diff_eq!(*a, *e, epsilon = 1);
+        }
+        for (a, e) in inc1.iter().zip(exp_inc1.iter()) {
+            assert_abs_diff_eq!(*a, *e, epsilon = 1);
+        }
+
+        // Persisted Bonds for validator (uid0) exist and mirror per-sub terms
+        let b0 = Bonds::<Test>::get(idx0, 0u16);
+        let b1 = Bonds::<Test>::get(idx1, 0u16);
+        let exp_b0 = &terms0(&hk0).bond;
+        let exp_b1 = &terms1(&hk0).bond;
+
+        assert!(!b0.is_empty(), "bonds sub0 empty");
+        assert!(!b1.is_empty(), "bonds sub1 empty");
+        assert_eq!(b0.len(), exp_b0.len());
+        assert_eq!(b1.len(), exp_b1.len());
+        for ((u_a, w_a), (u_e, w_e)) in b0.iter().zip(exp_b0.iter()) {
+            assert_eq!(u_a, u_e);
+            assert_abs_diff_eq!(*w_a, *w_e, epsilon = 1);
+        }
+        for ((u_a, w_a), (u_e, w_e)) in b1.iter().zip(exp_b1.iter()) {
+            assert_eq!(u_a, u_e);
+            assert_abs_diff_eq!(*w_a, *w_e, epsilon = 1);
+        }
+
+        // Persisted subnet-level terms are weighted/OR aggregates of sub-subnets
+        // Fetch persisted vectors
+        let active = Active::<Test>::get(netuid);
+        let emission_v = Emission::<Test>::get(netuid);
+        let rank_v = Rank::<Test>::get(netuid);
+        let trust_v = Trust::<Test>::get(netuid);
+        let cons_v = Consensus::<Test>::get(netuid);
+        let div_v = Dividends::<Test>::get(netuid);
+        let prun_v = PruningScores::<Test>::get(netuid);
+        let vtrust_v = ValidatorTrust::<Test>::get(netuid);
+        let vperm_v = ValidatorPermit::<Test>::get(netuid);
+
+        // Helpers for weighted u16 / u64
+        let wu16 = |a: u16, b: u16| -> u16 {
+            (U64F64::saturating_from_num(a) * w0 + U64F64::saturating_from_num(b) * w1)
+                .saturating_to_num::<u16>()
+        };
+        let wu64 = |a: u64, b: u64| -> u64 {
+            (U64F64::saturating_from_num(a) * w0 + U64F64::saturating_from_num(b) * w1)
+                .saturating_to_num::<u64>()
+        };
+
+        // For each UID, compute expected aggregate from out0/out1 terms
+        let check_uid = |uid: usize, hk: &U256| {
+            let t0 = terms0(hk);
+            let t1 = terms1(hk);
+
+            // Active & ValidatorPermit are OR-aggregated
+            assert_eq!(active[uid], t0.active || t1.active);
+            assert_eq!(
+                vperm_v[uid],
+                t0.new_validator_permit || t1.new_validator_permit
+            );
+
+            // Emission (u64)
+            let exp_em = wu64(u64::from(t0.emission), u64::from(t1.emission));
+            assert_abs_diff_eq!(u64::from(emission_v[uid]), exp_em, epsilon = 1);
+
+            // u16 terms
+            assert_abs_diff_eq!(rank_v[uid], wu16(t0.rank, t1.rank), epsilon = 1);
+            assert_abs_diff_eq!(trust_v[uid], wu16(t0.trust, t1.trust), epsilon = 1);
+            assert_abs_diff_eq!(cons_v[uid], wu16(t0.consensus, t1.consensus), epsilon = 1);
+            assert_abs_diff_eq!(div_v[uid], wu16(t0.dividend, t1.dividend), epsilon = 1);
+            assert_abs_diff_eq!(
+                prun_v[uid],
+                wu16(t0.pruning_score, t1.pruning_score),
+                epsilon = 1
+            );
+            assert_abs_diff_eq!(
+                vtrust_v[uid],
+                wu16(t0.validator_trust, t1.validator_trust),
+                epsilon = 1
+            );
+        };
+
+        check_uid(uid0, &hk0);
+        check_uid(uid1, &hk1);
+        check_uid(uid2, &hk2);
+    });
+}
+
+#[test]
+fn epoch_with_mechanisms_no_weight_no_incentive() {
+    new_test_ext(1).execute_with(|| {
+        let netuid = NetUid::from(1u16);
+        let idx0 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(0));
+        let idx1 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(1));
+        let ck0 = U256::from(1);
+        let hk0 = U256::from(2);
+        let ck1 = U256::from(3);
+        let hk1 = U256::from(4);
+        let hk2 = U256::from(5); // No weight miner
+        let emission = AlphaCurrency::from(1_000_000_000);
+
+        mock_epoch_state(netuid, ck0, hk0, ck1, hk1);
+        mock_3_neurons(netuid, hk2);
+
+        // Need 3 neurons for this: One validator that will be setting weights to 2 miners
+        ValidatorPermit::<Test>::insert(netuid, vec![true, false, false]);
+
+        // Set no weight to uid2 on sub-subnet 0 and 1
+        Weights::<Test>::insert(idx0, 0, vec![(1u16, 1), (2u16, 0)]);
+        Weights::<Test>::insert(idx1, 0, vec![(1u16, 1), (2u16, 0)]);
+
+        SubtensorModule::epoch_with_mechanisms(netuid, emission);
+
+        let actual_incentive_sub0 = Incentive::<Test>::get(idx0);
+        let actual_incentive_sub1 = Incentive::<Test>::get(idx1);
+        let expected_incentive = 0xFFFF;
+        assert_eq!(actual_incentive_sub0[0], 0);
+        assert_eq!(actual_incentive_sub0[1], expected_incentive);
+        assert_eq!(actual_incentive_sub0[2], 0);
+        assert_eq!(actual_incentive_sub1[0], 0);
+        assert_eq!(actual_incentive_sub1[1], expected_incentive);
+        assert_eq!(actual_incentive_sub1[2], 0);
+        assert_eq!(actual_incentive_sub0.len(), 3);
+        assert_eq!(actual_incentive_sub1.len(), 3);
+    });
+}
+
+#[test]
+fn neuron_dereg_cleans_weights_across_subids() {
+    new_test_ext(1).execute_with(|| {
+        let netuid = NetUid::from(77u16);
+        let neuron_uid: u16 = 1; // we'll deregister UID=1
+        // two sub-subnets
+        MechanismCountCurrent::<Test>::insert(netuid, MechId::from(2u8));
+
+        // Setup initial map values
+        Emission::<Test>::insert(
+            netuid,
+            vec![
+                AlphaCurrency::from(1u64),
+                AlphaCurrency::from(9u64),
+                AlphaCurrency::from(3u64),
+            ],
+        );
+        Trust::<Test>::insert(netuid, vec![11u16, 99u16, 33u16]);
+        Consensus::<Test>::insert(netuid, vec![21u16, 88u16, 44u16]);
+        Dividends::<Test>::insert(netuid, vec![7u16, 77u16, 17u16]);
+
+        // Clearing per-mecid maps
+        for sub in [0u8, 1u8] {
+            let idx = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(sub));
+
+            // Incentive vector: position 1 should become 0
+            Incentive::<Test>::insert(idx, vec![10u16, 20u16, 30u16]);
+
+            // Row set BY neuron_uid (to be removed)
+            Weights::<Test>::insert(idx, neuron_uid, vec![(0u16, 5u16)]);
+            Bonds::<Test>::insert(idx, neuron_uid, vec![(0u16, 6u16)]);
+
+            // Rows FOR neuron_uid inside other validators' vecs => value should be set to 0 (not removed)
+            Weights::<Test>::insert(idx, 0u16, vec![(neuron_uid, 7u16), (42u16, 3u16)]);
+            Bonds::<Test>::insert(idx, 0u16, vec![(neuron_uid, 8u16), (42u16, 4u16)]);
+        }
+
+        // Act
+        SubtensorModule::clear_neuron(netuid, neuron_uid);
+
+        // Top-level zeroed at index 1, others intact
+        let e = Emission::<Test>::get(netuid);
+        assert_eq!(e[0], 1u64.into());
+        assert_eq!(e[1], 0u64.into());
+        assert_eq!(e[2], 3u64.into());
+
+        let t = Trust::<Test>::get(netuid);
+        assert_eq!(t, vec![11, 0, 33]);
+
+        let c = Consensus::<Test>::get(netuid);
+        assert_eq!(c, vec![21, 0, 44]);
+
+        let d = Dividends::<Test>::get(netuid);
+        assert_eq!(d, vec![7, 0, 17]);
+
+        // Per-mecid cleanup
+        for sub in [0u8, 1u8] {
+            let idx = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(sub));
+
+            // Incentive element at index 1 set to 0
+            let inc = Incentive::<Test>::get(idx);
+            assert_eq!(inc, vec![10, 0, 30]);
+
+            // Rows BY neuron_uid removed
+            assert!(!Weights::<Test>::contains_key(idx, neuron_uid));
+            assert!(!Bonds::<Test>::contains_key(idx, neuron_uid));
+
+            // In other rows, entries FOR neuron_uid are zeroed, others unchanged
+            let w0 = Weights::<Test>::get(idx, 0u16);
+            assert!(w0.iter().any(|&(u, w)| u == neuron_uid && w == 0));
+            assert!(w0.iter().any(|&(u, w)| u == 42 && w == 3));
+        }
+    });
+}
+
+#[test]
+fn clear_neuron_handles_absent_rows_gracefully() {
+    new_test_ext(1).execute_with(|| {
+        let netuid = NetUid::from(55u16);
+        MechanismCountCurrent::<Test>::insert(netuid, MechId::from(1u8)); // single sub-subnet
+
+        // Minimal vectors with non-zero at index 0 (we will clear UID=0)
+        Emission::<Test>::insert(netuid, vec![AlphaCurrency::from(5u64)]);
+        Trust::<Test>::insert(netuid, vec![5u16]);
+        Consensus::<Test>::insert(netuid, vec![6u16]);
+        Dividends::<Test>::insert(netuid, vec![7u16]);
+
+        // No Weights/Bonds rows at all → function should not panic
+        let neuron_uid: u16 = 0;
+        SubtensorModule::clear_neuron(netuid, neuron_uid);
+
+        // All zeroed at index 0
+        assert_eq!(
+            Emission::<Test>::get(netuid),
+            vec![AlphaCurrency::from(0u64)]
+        );
+        assert_eq!(Trust::<Test>::get(netuid), vec![0u16]);
+        assert_eq!(Consensus::<Test>::get(netuid), vec![0u16]);
+        assert_eq!(Dividends::<Test>::get(netuid), vec![0u16]);
+    });
+}
+
+#[test]
+fn test_set_mechanism_weights_happy_path_sets_row_under_subid() {
+    new_test_ext(0).execute_with(|| {
+        let netuid = NetUid::from(1);
+        let tempo: u16 = 13;
+        add_network_disable_commit_reveal(netuid, tempo, 0);
+
+        // Register validator (caller) and a destination neuron
+        let hk1 = U256::from(55);
+        let ck1 = U256::from(66);
+        let hk2 = U256::from(77);
+        let ck2 = U256::from(88);
+        let hk3 = U256::from(99);
+        let ck3 = U256::from(111);
+        register_ok_neuron(netuid, hk1, ck1, 0);
+        register_ok_neuron(netuid, hk2, ck2, 0);
+        register_ok_neuron(netuid, hk3, ck3, 0);
+
+        let uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk1).expect("caller uid");
+        let uid2 =
+            SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk2).expect("dest uid 1");
+        let uid3 =
+            SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk3).expect("dest uid 2");
+
+        // Make caller a permitted validator with stake
+        SubtensorModule::set_stake_threshold(0);
+        SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true);
+        SubtensorModule::add_balance_to_coldkey_account(&ck1, 1);
+        SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(
+            &hk1,
+            &ck1,
+            netuid,
+            1.into(),
+        );
+
+        // Have at least two sub-subnets; write under mecid = 1
+        MechanismCountCurrent::<Test>::insert(netuid, MechId::from(2u8));
+        let mecid = MechId::from(1u8);
+
+        // Call extrinsic
+        let dests = vec![uid2, uid3];
+        let weights = vec![88u16, 0xFFFF];
+        assert_ok!(SubtensorModule::set_mechanism_weights(
+            RawOrigin::Signed(hk1).into(),
+            netuid,
+            mecid,
+            dests.clone(),
+            weights.clone(),
+            0, // version_key
+        ));
+
+        // Verify row exists under the chosen mecid and not under a different mecid
+        let idx1 = SubtensorModule::get_mechanism_storage_index(netuid, mecid);
+        assert_eq!(
+            Weights::<Test>::get(idx1, uid1),
+            vec![(uid2, 88u16), (uid3, 0xFFFF)]
+        );
+
+        let idx0 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(0u8));
+        assert!(Weights::<Test>::get(idx0, uid1).is_empty());
+    });
+}
+
+#[test]
+fn test_set_mechanism_weights_above_mechanism_count_fails() {
+    new_test_ext(0).execute_with(|| {
+        let netuid = NetUid::from(1);
+        let tempo: u16 = 13;
+        add_network_disable_commit_reveal(netuid, tempo, 0);
+
+        // Register validator (caller) and a destination neuron
+        let hk1 = U256::from(55);
+        let ck1 = U256::from(66);
+        let hk2 = U256::from(77);
+        let ck2 = U256::from(88);
+        register_ok_neuron(netuid, hk1, ck1, 0);
+        register_ok_neuron(netuid, hk2, ck2, 0);
+
+        let uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk1).expect("caller uid");
+        let uid2 =
+            SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk2).expect("dest uid 1");
+
+        // Make caller a permitted validator with stake
+        SubtensorModule::set_stake_threshold(0);
+        SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true);
+        SubtensorModule::add_balance_to_coldkey_account(&ck1, 1);
+        SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(
+            &hk1,
+            &ck1,
+            netuid,
+            1.into(),
+        );
+
+        // Have exactly two sub-subnets; mecid = 2 is therefore out of range
+        MechanismCountCurrent::<Test>::insert(netuid, MechId::from(2u8));
+        let subid_above = MechId::from(2u8);
+
+        // Call extrinsic
+        let dests = vec![uid2];
+        let weights = vec![88u16];
+        assert_noop!(
+            SubtensorModule::set_mechanism_weights(
+                RawOrigin::Signed(hk1).into(),
+                netuid,
+                subid_above,
+                dests.clone(),
+                weights.clone(),
+                0, // version_key
+            ),
+            Error::<Test>::MechanismDoesNotExist
+        );
+    });
+}
+
+#[test]
+fn test_commit_reveal_mechanism_weights_ok() {
+    new_test_ext(1).execute_with(|| {
+        System::set_block_number(0);
+
+        let netuid = NetUid::from(1);
+        let tempo: u16 = 13;
+        add_network(netuid, tempo, 0);
+
+        // Three neurons: validator (caller) + two destinations
+        let hk1 = U256::from(55);
+        let ck1 = U256::from(66);
+        let hk2 = U256::from(77);
+        let ck2 = U256::from(88);
+        let hk3 = U256::from(99);
+        let ck3 = U256::from(111);
+        register_ok_neuron(netuid, hk1, ck1, 0);
+        register_ok_neuron(netuid, hk2, ck2, 0);
+        register_ok_neuron(netuid, hk3, ck3, 0);
+
+        let uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk1).unwrap(); // caller
+        let uid2 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk2).unwrap();
+        let uid3 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk3).unwrap();
+
+        // Enable commit-reveal path and make caller a validator with stake
+        SubtensorModule::set_stake_threshold(0);
+        SubtensorModule::set_weights_set_rate_limit(netuid, 5);
+        SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true);
+        SubtensorModule::set_commit_reveal_weights_enabled(netuid, true);
+        SubtensorModule::add_balance_to_coldkey_account(&ck1, 1);
+        SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(
+            &hk1,
+            &ck1,
+            netuid,
+            1.into(),
+        );
+
+        // Ensure sub-subnet exists; write under mecid = 1
+        MechanismCountCurrent::<Test>::insert(netuid, MechId::from(2u8));
+        let mecid = MechId::from(1u8);
+        let idx0 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(0u8));
+        let idx1 = SubtensorModule::get_mechanism_storage_index(netuid, mecid);
+
+        // Prepare payload and commit hash (include mecid!)
+        let dests = vec![uid2, uid3];
+        let weights = vec![88u16, 0xFFFFu16];
+        let salt: Vec<u16> = vec![1, 2, 3, 4, 5, 6, 7, 8];
+        let version_key: u64 = 0;
+        let commit_hash: H256 = BlakeTwo256::hash_of(&(
+            hk1,
+            idx1,
+            dests.clone(),
+            weights.clone(),
+            salt.clone(),
+            version_key,
+        ));
+
+        // Commit in epoch 0
+        assert_ok!(SubtensorModule::commit_mechanism_weights(
+            RuntimeOrigin::signed(hk1),
+            netuid,
+            mecid,
+            commit_hash
+        ));
+
+        // Advance one epoch, then reveal
+        step_epochs(1, netuid);
+        assert_ok!(SubtensorModule::reveal_mechanism_weights(
+            RuntimeOrigin::signed(hk1),
+            netuid,
+            mecid,
+            dests.clone(),
+            weights.clone(),
+            salt,
+            version_key
+        ));
+
+        // Verify weights stored under the chosen mecid (normalized keeps max=0xFFFF here)
+        assert_eq!(
+            Weights::<Test>::get(idx1, uid1),
+            vec![(uid2, 88u16), (uid3, 0xFFFFu16)]
+        );
+
+        // And not under a different mecid
+        assert!(Weights::<Test>::get(idx0, uid1).is_empty());
+    });
+}
+
+#[test]
+fn test_commit_reveal_above_mechanism_count_fails() {
+    new_test_ext(1).execute_with(|| {
+        System::set_block_number(0);
+
+        let netuid = NetUid::from(1);
+        let tempo: u16 = 13;
+        add_network(netuid, tempo, 0);
+
+        // Two neurons: validator (caller) + miner
+        let hk1 = U256::from(55);
+        let ck1 = U256::from(66);
+        let hk2 = U256::from(77);
+        let ck2 = U256::from(88);
+        register_ok_neuron(netuid, hk1, ck1, 0);
+        register_ok_neuron(netuid, hk2, ck2, 0);
+
+        let uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk1).unwrap(); // caller
+        let uid2 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk2).unwrap();
+
+        // Enable commit-reveal path and make caller a validator with stake
+        SubtensorModule::set_stake_threshold(0);
+        SubtensorModule::set_weights_set_rate_limit(netuid, 5);
+        SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true);
+        SubtensorModule::set_commit_reveal_weights_enabled(netuid, true);
+        SubtensorModule::add_balance_to_coldkey_account(&ck1, 1);
+        SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(
+            &hk1,
+            &ck1,
+            netuid,
+            1.into(),
+        );
+
+        // Ensure there are two mechanisms: 0 and 1
+        MechanismCountCurrent::<Test>::insert(netuid, MechId::from(2u8));
+        let subid_above = MechId::from(2u8); // non-existing sub-subnet
+        let idx2 = SubtensorModule::get_mechanism_storage_index(netuid, subid_above);
+
+        // Prepare payload and commit hash
+        let dests = vec![uid2];
+        let weights = vec![88u16];
+        let salt: Vec<u16> = vec![1, 2, 3, 4, 5, 6, 7, 8];
+        let version_key: u64 = 0;
+        let commit_hash: H256 = BlakeTwo256::hash_of(&(
+            hk1,
+            idx2,
+            dests.clone(),
+            weights.clone(),
+            salt.clone(),
+            version_key,
+        ));
+
+        // Commit in epoch 0
+        assert_noop!(
+            SubtensorModule::commit_mechanism_weights(
+                RuntimeOrigin::signed(hk1),
+                netuid,
+                subid_above,
+                commit_hash
+            ),
+            Error::<Test>::MechanismDoesNotExist
+        );
+
+        // Advance one epoch, then attempt to reveal
+        step_epochs(1, netuid);
+        assert_noop!(
+            SubtensorModule::reveal_mechanism_weights(
+                RuntimeOrigin::signed(hk1),
+                netuid,
+                subid_above,
+                dests.clone(),
+                weights.clone(),
+                salt,
+                version_key
+            ),
+            Error::<Test>::NoWeightsCommitFound
+        );
+
+        // Verify that weights didn't update
+        assert!(Weights::<Test>::get(idx2, uid1).is_empty());
+        assert!(Weights::<Test>::get(idx2, uid2).is_empty());
+    });
+}
+
+#[test]
+fn test_reveal_crv3_commits_sub_success() {
+    new_test_ext(100).execute_with(|| {
+        System::set_block_number(0);
+
+        let netuid = NetUid::from(1);
+        let mecid = MechId::from(1u8); // write under sub-subnet #1
+        let hotkey1: AccountId = U256::from(1);
+        let hotkey2: AccountId = U256::from(2);
+        let reveal_round: u64 = 1000;
+
+        add_network(netuid, 5, 0);
+        // ensure we actually have mecid=1 available
+        MechanismCountCurrent::<Test>::insert(netuid, MechId::from(2u8));
+
+        // Register neurons and set up configs
+        register_ok_neuron(netuid, hotkey1, U256::from(3), 100_000);
+        register_ok_neuron(netuid, hotkey2, U256::from(4), 100_000);
+        SubtensorModule::set_stake_threshold(0);
+        SubtensorModule::set_weights_set_rate_limit(netuid, 0);
+        SubtensorModule::set_commit_reveal_weights_enabled(netuid, true);
+        assert_ok!(SubtensorModule::set_reveal_period(netuid, 3));
+
+        let uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey1).expect("uid1");
+        let uid2 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey2).expect("uid2");
+
+        SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true);
+        SubtensorModule::set_validator_permit_for_uid(netuid, uid2, true);
+        SubtensorModule::add_balance_to_coldkey_account(&U256::from(3), 1);
+        SubtensorModule::add_balance_to_coldkey_account(&U256::from(4), 1);
+        SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(
+            &hotkey1,
+            &U256::from(3),
+            netuid,
+            1.into(),
+        );
+        SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(
+            &hotkey2,
+            &U256::from(4),
+            netuid,
+            1.into(),
+        );
+
+        let version_key = SubtensorModule::get_weights_version_key(netuid);
+
+        // Payload (same as legacy; mecid is provided to the extrinsic)
+        let payload = WeightsTlockPayload {
+            hotkey: hotkey1.encode(),
+            values: vec![10, 20],
+            uids: vec![uid1, uid2],
+            version_key,
+        };
+        let serialized_payload = payload.encode();
+
+        // Public key + encrypt
+        let esk = [2; 32];
+        let rng = ChaCha20Rng::seed_from_u64(0);
+        let pk_bytes = hex::decode("83cf0f2896adee7eb8b5f01fcad3912212c437e0073e911fb90022d3e760183c8c4b450b6a0a6c3ac6a5776a2d1064510d1fec758c921cc22b0e17e63aaf4bcb5ed66304de9cf809bd274ca73bab4af5a6e9c76a4bc09e76eae8991ef5ece45a").unwrap();
+        let pub_key =
+            <TinyBLS381 as EngineBLS>::PublicKeyGroup::deserialize_compressed(&*pk_bytes)
+                .unwrap();
+
+        let message = {
+            let mut hasher = sha2::Sha256::new();
+            hasher.update(reveal_round.to_be_bytes());
+            hasher.finalize().to_vec()
+        };
+        let identity = Identity::new(b"", vec![message]);
+
+        let ct = tle::<TinyBLS381, AESGCMStreamCipherProvider, ChaCha20Rng>(
+            pub_key,
+            esk,
+            &serialized_payload,
+            identity,
+            rng,
+        )
+        .expect("encrypt");
+        let mut commit_bytes = Vec::new();
+        ct.serialize_compressed(&mut commit_bytes).expect("serialize");
+
+        // Commit (sub variant)
+        assert_ok!(SubtensorModule::commit_timelocked_mechanism_weights(
+            RuntimeOrigin::signed(hotkey1),
+            netuid,
+            mecid,
+            commit_bytes.clone().try_into().expect("bounded"),
+            reveal_round,
+            SubtensorModule::get_commit_reveal_weights_version()
+        ));
+
+        // Inject drand pulse for the reveal round
+        let sig_bytes = hex::decode("b44679b9a59af2ec876b1a6b1ad52ea9b1615fc3982b19576350f93447cb1125e342b73a8dd2bacbe47e4b6b63ed5e39").unwrap();
+        pallet_drand::Pulses::<Test>::insert(
+            reveal_round,
+            Pulse {
+                round: reveal_round,
+                randomness: vec![0; 32].try_into().unwrap(),
+                signature: sig_bytes.try_into().unwrap(),
+            },
+        );
+
+        // Run epochs so the commit is processed
+        step_epochs(3, netuid);
+
+        // Verify weights applied under the selected mecid index
+        let idx = SubtensorModule::get_mechanism_storage_index(netuid, mecid);
+        let weights_sparse = SubtensorModule::get_weights_sparse(idx);
+        let row = weights_sparse.get(uid1 as usize).cloned().unwrap_or_default();
+        assert!(!row.is_empty(), "expected weights set for validator uid1 under mecid");
+
+        // Compare rounded normalized weights to expected proportions (like legacy test)
+        let expected: Vec<(u16, I32F32)> = payload
+            .uids
+            .iter()
+            .zip(payload.values.iter())
+            .map(|(&u, &v)| (u, I32F32::from_num(v)))
+            .collect();
+        let total: I32F32 = row.iter().map(|(_, w)| *w).sum();
+        let normalized: Vec<(u16, I32F32)> = row
+            .iter()
+            .map(|&(u, w)| (u, w * I32F32::from_num(30) / total))
+            .collect();
+
+        for ((ua, wa), (ub, wb)) in normalized.iter().zip(expected.iter()) {
+            assert_eq!(ua, ub);
+            let actual = wa.to_num::<f64>().round() as i64;
+            let expect = wb.to_num::<i64>();
+            assert_ne!(actual, 0, "actual weight for uid {ua} is zero");
+            assert_eq!(actual, expect, "weight mismatch for uid {ua}");
+        }
+    });
+}
+
+#[test]
+fn test_crv3_above_mechanism_count_fails() {
+    new_test_ext(100).execute_with(|| {
+        System::set_block_number(0);
+
+        let netuid = NetUid::from(1);
+        let subid_above = MechId::from(2u8); // non-existing sub-subnet
+        let hotkey1: AccountId = U256::from(1);
+        let hotkey2: AccountId = U256::from(2);
+        let reveal_round: u64 = 1000;
+
+        add_network(netuid, 5, 0);
+        // ensure we actually have mecid=1 available
+        MechanismCountCurrent::<Test>::insert(netuid, MechId::from(2u8));
+
+        // Register neurons and set up configs
+        register_ok_neuron(netuid, hotkey1, U256::from(3), 100_000);
+        register_ok_neuron(netuid, hotkey2, U256::from(4), 100_000);
+        SubtensorModule::set_stake_threshold(0);
+        SubtensorModule::set_weights_set_rate_limit(netuid, 0);
+        SubtensorModule::set_commit_reveal_weights_enabled(netuid, true);
+        assert_ok!(SubtensorModule::set_reveal_period(netuid, 3));
+
+        let uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey1).expect("uid1");
+        let uid2 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey2).expect("uid2");
+
+        SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true);
+        SubtensorModule::add_balance_to_coldkey_account(&U256::from(3), 1);
+        SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(
+            &hotkey1,
+            &U256::from(3),
+            netuid,
+            1.into(),
+        );
+
+        let version_key = SubtensorModule::get_weights_version_key(netuid);
+
+        // Payload (same as legacy; mecid is provided to the extrinsic)
+        let payload = WeightsTlockPayload {
+            hotkey: hotkey1.encode(),
+            values: vec![10, 20],
+            uids: vec![uid1, uid2],
+            version_key,
+        };
+        let serialized_payload = payload.encode();
+
+        // Public key + encrypt
+        let esk = [2; 32];
+        let rng = ChaCha20Rng::seed_from_u64(0);
+        let pk_bytes = hex::decode("83cf0f2896adee7eb8b5f01fcad3912212c437e0073e911fb90022d3e760183c8c4b450b6a0a6c3ac6a5776a2d1064510d1fec758c921cc22b0e17e63aaf4bcb5ed66304de9cf809bd274ca73bab4af5a6e9c76a4bc09e76eae8991ef5ece45a").unwrap();
+        let pub_key =
+            <TinyBLS381 as EngineBLS>::PublicKeyGroup::deserialize_compressed(&*pk_bytes)
+                .unwrap();
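+
+        // As in the success test above, the tlock identity for a drand round is the
+        // sha256 digest of the round number's big-endian bytes.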
+        let message = {
+            let mut hasher = sha2::Sha256::new();
+            hasher.update(reveal_round.to_be_bytes());
+            hasher.finalize().to_vec()
+        };
+        let identity = Identity::new(b"", vec![message]);
+
+        let ct = tle::<TinyBLS381, AESGCMStreamCipherProvider, ChaCha20Rng>(
+            pub_key,
+            esk,
+            &serialized_payload,
+            identity,
+            rng,
+        )
+        .expect("encrypt");
+        let mut commit_bytes = Vec::new();
+        ct.serialize_compressed(&mut commit_bytes).expect("serialize");
+
+        // Commit (sub variant)
+        assert_noop!(
+            SubtensorModule::commit_timelocked_mechanism_weights(
+                RuntimeOrigin::signed(hotkey1),
+                netuid,
+                subid_above,
+                commit_bytes.clone().try_into().expect("bounded"),
+                reveal_round,
+                SubtensorModule::get_commit_reveal_weights_version()
+            ),
+            Error::<Test>::MechanismDoesNotExist
+        );
+    });
+}
+
+#[test]
+fn test_do_commit_crv3_mechanism_weights_committing_too_fast() {
+    new_test_ext(1).execute_with(|| {
+        let netuid = NetUid::from(1);
+        let mecid = MechId::from(1u8);
+        let hotkey: AccountId = U256::from(1);
+        let commit_data_1: Vec<u8> = vec![1, 2, 3];
+        let commit_data_2: Vec<u8> = vec![4, 5, 6];
+        let reveal_round: u64 = 1000;
+
+        add_network(netuid, 5, 0);
+        MechanismCountCurrent::<Test>::insert(netuid, MechId::from(2u8)); // allow subids {0,1}
+
+        register_ok_neuron(netuid, hotkey, U256::from(2), 100_000);
+        SubtensorModule::set_weights_set_rate_limit(netuid, 5);
+        SubtensorModule::set_commit_reveal_weights_enabled(netuid, true);
+
+        let uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey).expect("uid");
+        let idx1 = SubtensorModule::get_mechanism_storage_index(netuid, mecid);
+        SubtensorModule::set_last_update_for_uid(idx1, uid, 0);
+
+        // make validator with stake
+        SubtensorModule::set_stake_threshold(0);
+        SubtensorModule::set_validator_permit_for_uid(netuid, uid, true);
+        SubtensorModule::add_balance_to_coldkey_account(&U256::from(2), 1);
+        SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(
+            &hotkey,
+            &U256::from(2),
+            netuid,
+            1.into(),
+        );
+
+        // first commit OK on mecid=1
+        assert_ok!(SubtensorModule::commit_timelocked_mechanism_weights(
+            RuntimeOrigin::signed(hotkey),
+            netuid,
+            mecid,
+            commit_data_1.clone().try_into().expect("bounded"),
+            reveal_round,
+            SubtensorModule::get_commit_reveal_weights_version()
+        ));
+
+        // immediate second commit on SAME mecid blocked
+        assert_noop!(
+            SubtensorModule::commit_timelocked_mechanism_weights(
+                RuntimeOrigin::signed(hotkey),
+                netuid,
+                mecid,
+                commit_data_2.clone().try_into().expect("bounded"),
+                reveal_round,
+                SubtensorModule::get_commit_reveal_weights_version()
+            ),
+            Error::<Test>::CommittingWeightsTooFast
+        );
+
+        // BUT committing too soon on a DIFFERENT mecid is allowed
+        let other_subid = MechId::from(0u8);
+        let idx0 = SubtensorModule::get_mechanism_storage_index(netuid, other_subid);
+        SubtensorModule::set_last_update_for_uid(idx0, uid, 0); // baseline like above
+        assert_ok!(SubtensorModule::commit_timelocked_mechanism_weights(
+            RuntimeOrigin::signed(hotkey),
+            netuid,
+            other_subid,
+            commit_data_2.clone().try_into().expect("bounded"),
+            reveal_round,
+            SubtensorModule::get_commit_reveal_weights_version()
+        ));
+
+        // still too fast on original mecid after 2 blocks
+        step_block(2);
+        assert_noop!(
+            SubtensorModule::commit_timelocked_mechanism_weights(
+                RuntimeOrigin::signed(hotkey),
+                netuid,
+                mecid,
+                commit_data_2.clone().try_into().expect("bounded"),
+                reveal_round,
+                SubtensorModule::get_commit_reveal_weights_version()
+            ),
+            Error::<Test>::CommittingWeightsTooFast
+        );
+
+        // after enough blocks, OK again on original mecid
+        step_block(3);
+        assert_ok!(SubtensorModule::commit_timelocked_mechanism_weights(
+            RuntimeOrigin::signed(hotkey),
+            netuid,
+            mecid,
+            commit_data_2.try_into().expect("bounded"),
+            reveal_round,
+            SubtensorModule::get_commit_reveal_weights_version()
+        ));
+    });
+}
+
+#[test]
+fn epoch_mechanism_emergency_mode_distributes_by_stake() {
+    new_test_ext(1).execute_with(|| {
+        // setup a single sub-subnet where consensus sum becomes 0
+        let netuid = NetUid::from(1u16);
+        let mecid = MechId::from(1u8);
+        let idx = SubtensorModule::get_mechanism_storage_index(netuid, mecid);
+        let tempo: u16 = 5;
+        add_network(netuid, tempo, 0);
+        MechanismCountCurrent::<Test>::insert(netuid, MechId::from(2u8)); // allow subids {0,1}
+        SubtensorModule::set_max_registrations_per_block(netuid, 4);
+        SubtensorModule::set_target_registrations_per_interval(netuid, 4);
+
+        // three neurons: make ALL permitted validators so active_stake is non-zero
+        let hk0 = U256::from(10);
+        let ck0 = U256::from(11);
+        let hk1 = U256::from(20);
+        let ck1 = U256::from(21);
+        let hk2 = U256::from(30);
+        let ck2 = U256::from(31);
+        let hk3 = U256::from(40); // miner
+        let ck3 = U256::from(41);
+        register_ok_neuron(netuid, hk0, ck0, 0);
+        register_ok_neuron(netuid, hk1, ck1, 0);
+        register_ok_neuron(netuid, hk2, ck2, 0);
+        register_ok_neuron(netuid, hk3, ck3, 0);
+
+        // active + recent updates so they're all active
+        let now = SubtensorModule::get_current_block_as_u64();
+        ActivityCutoff::<Test>::insert(netuid, 1_000u16);
+        LastUpdate::<Test>::insert(idx, vec![now, now, now, now]);
+
+        // All staking validators permitted => active_stake = stake
+        ValidatorPermit::<Test>::insert(netuid, vec![true, true, true, false]);
+        SubtensorModule::set_stake_threshold(0);
+
+        // force ZERO consensus/incentive path: no weights/bonds
+        // (leave Weights/Bonds empty for all rows on this sub-subnet)
+
+        // stake proportions: uid0:uid1:uid2 = 10:30:60
+        SubtensorModule::add_balance_to_coldkey_account(&ck0, 10);
+        SubtensorModule::add_balance_to_coldkey_account(&ck1, 30);
+        SubtensorModule::add_balance_to_coldkey_account(&ck2, 60);
+        SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(
+            &hk0,
+            &ck0,
+            netuid,
+            AlphaCurrency::from(10),
+        );
+        SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(
+            &hk1,
+            &ck1,
+            netuid,
+            AlphaCurrency::from(30),
+        );
+        SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(
+            &hk2,
+            &ck2,
+            netuid,
+            AlphaCurrency::from(60),
+        );
+
+        let emission = AlphaCurrency::from(1_000_000u64);
+
+        // --- act: run epoch on this sub-subnet only ---
+        let out = SubtensorModule::epoch_mechanism(netuid, mecid, emission);
+
+        // collect validator emissions per hotkey
+        let t0 = out.0.get(&hk0).unwrap();
+        let t1 = out.0.get(&hk1).unwrap();
+        let t2 = out.0.get(&hk2).unwrap();
+        let t3 = out.0.get(&hk3).unwrap();
+
+        // In emergency mode (consensus sum == 0):
+        // - validator_emission is distributed by (active) stake proportions
+        // - server_emission remains zero (incentive path is zero)
+        assert_eq!(u64::from(t0.server_emission), 0);
+        assert_eq!(u64::from(t1.server_emission), 0);
+        assert_eq!(u64::from(t2.server_emission), 0);
+        assert_eq!(u64::from(t3.server_emission), 0);
+
+        // expected splits by stake: 10%, 30%, 60% of total emission
+        let e = u64::from(emission);
+        let exp0 = e / 10; // 10%
+        let exp1 = e * 3 / 10; // 30%
+        let exp2 = e * 6 / 10; // 60%
+
+        // allow tiny rounding drift from fixed-point conversions
+        assert_abs_diff_eq!(u64::from(t0.validator_emission), exp0, epsilon = 2);
+        assert_abs_diff_eq!(u64::from(t1.validator_emission), exp1, epsilon = 2);
+        assert_abs_diff_eq!(u64::from(t2.validator_emission), exp2, epsilon = 2);
+        assert_eq!(u64::from(t3.validator_emission), 0);
+
+        // all emission goes to validators
+        assert_abs_diff_eq!(
+            u64::from(t0.validator_emission)
+                + u64::from(t1.validator_emission)
+                + u64::from(t2.validator_emission),
+            e,
+            epsilon = 2
+        );
+    });
+}
diff --git a/pallets/subtensor/src/tests/migration.rs b/pallets/subtensor/src/tests/migration.rs
index d67b86e42d..816c87837e 100644
--- a/pallets/subtensor/src/tests/migration.rs
+++ b/pallets/subtensor/src/tests/migration.rs
@@ -22,7 +22,7 @@ use sp_io::hashing::twox_128;
 use sp_runtime::traits::Zero;
 use substrate_fixed::types::I96F32;
 use substrate_fixed::types::extra::U2;
-use subtensor_runtime_common::TaoCurrency;
+use subtensor_runtime_common::{NetUidStorageIndex, TaoCurrency};
 
 #[allow(clippy::arithmetic_side_effects)]
 fn close(value: u64, target: u64, eps: u64) {
@@ -1263,10 +1263,17 @@ fn test_migrate_crv3_commits_add_block() {
         let old_queue: VecDeque<_> = VecDeque::from(vec![(who, ciphertext.clone(), round)]);
 
-        CRV3WeightCommits::<Test>::insert(netuid, epoch, old_queue.clone());
+        CRV3WeightCommits::<Test>::insert(
+            NetUidStorageIndex::from(netuid),
+            epoch,
+            old_queue.clone(),
+        );
 
         // Sanity: entry decodes under old alias
-        assert_eq!(CRV3WeightCommits::<Test>::get(netuid, epoch), old_queue);
+        assert_eq!(
+            CRV3WeightCommits::<Test>::get(NetUidStorageIndex::from(netuid), epoch),
+            old_queue
+        );
 
         assert!(
             !HasMigrationRun::<Test>::get(MIG_NAME.to_vec()),
@@ -1291,11 +1298,11 @@ fn test_migrate_crv3_commits_add_block() {
         // Old storage must be empty (drained)
         assert!(
-            CRV3WeightCommits::<Test>::get(netuid, epoch).is_empty(),
+            CRV3WeightCommits::<Test>::get(NetUidStorageIndex::from(netuid), epoch).is_empty(),
             "old queue should have been drained"
         );
 
-        let new_q = CRV3WeightCommitsV2::<Test>::get(netuid, epoch);
+        let new_q = CRV3WeightCommitsV2::<Test>::get(NetUidStorageIndex::from(netuid), epoch);
         assert_eq!(new_q.len(), 1, "exactly one migrated element expected");
 
         let (who2, commit_block, cipher2, round2) = new_q.front().cloned().unwrap();
@@ -1518,18 +1525,23 @@ fn test_migrate_crv3_v2_to_timelocked() {
             VecDeque::from(vec![(who, commit_block, ciphertext.clone(), round)]);
 
         // Insert under the deprecated alias
-        CRV3WeightCommitsV2::<Test>::insert(netuid, epoch, old_queue.clone());
+        CRV3WeightCommitsV2::<Test>::insert(
+            NetUidStorageIndex::from(netuid),
+            epoch,
+            old_queue.clone(),
+        );
 
         // Sanity: entry decodes under old alias
         assert_eq!(
-            CRV3WeightCommitsV2::<Test>::get(netuid, epoch),
+            CRV3WeightCommitsV2::<Test>::get(NetUidStorageIndex::from(netuid), epoch),
             old_queue,
             "pre-migration: old queue should be present"
         );
 
         // Destination should be empty pre-migration
         assert!(
-            TimelockedWeightCommits::<Test>::get(netuid, epoch).is_empty(),
+            TimelockedWeightCommits::<Test>::get(NetUidStorageIndex::from(netuid), epoch)
+                .is_empty(),
             "pre-migration: destination should be empty"
         );
@@ -1556,12 +1568,12 @@ fn test_migrate_crv3_v2_to_timelocked() {
         // Old storage must be empty (drained)
         assert!(
-            CRV3WeightCommitsV2::<Test>::get(netuid, epoch).is_empty(),
+            CRV3WeightCommitsV2::<Test>::get(NetUidStorageIndex::from(netuid), epoch).is_empty(),
             "old queue should have been drained"
         );
 
         // New storage must match exactly
-        let new_q = TimelockedWeightCommits::<Test>::get(netuid, epoch);
+        let new_q = TimelockedWeightCommits::<Test>::get(NetUidStorageIndex::from(netuid), epoch);
         assert_eq!(
             new_q, old_queue,
             "migrated queue must exactly match the old queue"
@@ -1692,3 +1704,239 @@ fn test_migrate_remove_network_modality_already_run() {
         ));
     });
 }
+#[test]
+fn test_migrate_subnet_limit_to_default() {
+    new_test_ext(1).execute_with(|| {
+        // ------------------------------
+        // 0. Constants / helpers
+        // ------------------------------
+        const MIG_NAME: &[u8] = b"subnet_limit_to_default";
+
+        // Compute a non-default value safely
+        let default: u16 = DefaultSubnetLimit::<Test>::get();
+        let not_default: u16 = default.wrapping_add(1);
+
+        // ------------------------------
+        // 1. Pre-state: ensure a non-default value is stored
+        // ------------------------------
+        SubnetLimit::<Test>::put(not_default);
+        assert_eq!(
+            SubnetLimit::<Test>::get(),
+            not_default,
+            "precondition failed: SubnetLimit should be non-default before migration"
+        );
+
+        assert!(
+            !HasMigrationRun::<Test>::get(MIG_NAME.to_vec()),
+            "migration flag should be false before run"
+        );
+
+        // ------------------------------
+        // 2. Run migration
+        // ------------------------------
+        let w = crate::migrations::migrate_subnet_limit_to_default::migrate_subnet_limit_to_default::<Test>();
+        assert!(!w.is_zero(), "weight must be non-zero");
+
+        // ------------------------------
+        // 3. Verify results
+        // ------------------------------
+        assert!(
+            HasMigrationRun::<Test>::get(MIG_NAME.to_vec()),
+            "migration flag not set"
+        );
+
+        assert_eq!(
+            SubnetLimit::<Test>::get(),
+            default,
+            "SubnetLimit should be reset to the configured default"
+        );
+    });
+}
+
+#[test]
+fn test_migrate_network_lock_reduction_interval_and_decay() {
+    new_test_ext(0).execute_with(|| {
+        const FOUR_DAYS: u64 = 28_800;
+        const EIGHT_DAYS: u64 = 57_600;
+        const ONE_WEEK_BLOCKS: u64 = 50_400;
+
+        // ── pre ──────────────────────────────────────────────────────────────
+        assert!(
+            !HasMigrationRun::<Test>::get(b"migrate_network_lock_reduction_interval".to_vec()),
+            "HasMigrationRun should be false before migration"
+        );
+
+        // ensure current_block > 0
+        step_block(1);
+        let current_block_before = Pallet::<Test>::get_current_block_as_u64();
+
+        // ── run migration ────────────────────────────────────────────────────
+        let weight = crate::migrations::migrate_network_lock_reduction_interval::migrate_network_lock_reduction_interval::<Test>();
+        assert!(!weight.is_zero(), "migration weight should be > 0");
+
+        // ── params & flags ───────────────────────────────────────────────────
+        assert_eq!(NetworkLockReductionInterval::<Test>::get(), EIGHT_DAYS);
+        assert_eq!(NetworkRateLimit::<Test>::get(), FOUR_DAYS);
+        assert_eq!(
+            Pallet::<Test>::get_network_last_lock(),
+            1_000_000_000_000u64.into(), // 1000 TAO in rao
+            "last_lock should be 1_000_000_000_000 rao"
+        );
+
+        // last_lock_block should be set one week in the future
+        let last_lock_block = Pallet::<Test>::get_network_last_lock_block();
+        let expected_block = current_block_before.saturating_add(ONE_WEEK_BLOCKS);
+        assert_eq!(
+            last_lock_block,
+            expected_block,
+            "last_lock_block should be current + ONE_WEEK_BLOCKS"
+        );
+
+        // registration start block should match the same future block
+        assert_eq!(
+            NetworkRegistrationStartBlock::<Test>::get(),
+            expected_block,
+            "NetworkRegistrationStartBlock should equal last_lock_block"
+        );
+
+        // lock cost should be 2000 TAO immediately after migration
+        let lock_cost_now = Pallet::<Test>::get_network_lock_cost();
+        assert_eq!(
+            lock_cost_now,
+            2_000_000_000_000u64.into(),
+            "lock cost should be 2000 TAO right after migration"
+        );
+
+        assert!(
+            HasMigrationRun::<Test>::get(b"migrate_network_lock_reduction_interval".to_vec()),
+            "HasMigrationRun should be true after migration"
+        );
+    });
+}
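+
+// (The 2000 TAO expectation above reflects the apparent doubling rule: immediately
+// after a lock event, lock cost starts at twice the last locked amount and then
+// decays over NetworkLockReductionInterval.)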
+
+#[test]
+fn test_migrate_restore_subnet_locked_65_128() {
+    use sp_runtime::traits::SaturatedConversion;
+
+    new_test_ext(0).execute_with(|| {
+        let name = b"migrate_restore_subnet_locked".to_vec();
+        assert!(
+            !HasMigrationRun::<Test>::get(name.clone()),
+            "HasMigrationRun should be false before migration"
+        );
+
+        // Expected snapshot for netuids 65..=128.
+        const EXPECTED: &[(u16, u64)] = &[
+            (65, 37_274_536_408),
+            (66, 65_230_444_016),
+            (67, 114_153_284_032),
+            (68, 199_768_252_064),
+            (69, 349_594_445_728),
+            (70, 349_412_366_216),
+            (71, 213_408_488_702),
+            (72, 191_341_473_067),
+            (73, 246_711_333_592),
+            (74, 291_874_466_228),
+            (75, 247_485_227_056),
+            (76, 291_241_991_316),
+            (77, 303_154_601_714),
+            (78, 287_407_417_932),
+            (79, 254_935_051_664),
+            (80, 255_413_055_349),
+            (81, 249_790_431_509),
+            (82, 261_343_249_180),
+            (83, 261_361_408_796),
+            (84, 201_938_003_214),
+            (85, 264_805_234_604),
+            (86, 223_171_973_880),
+            (87, 180_397_358_280),
+            (88, 270_596_039_760),
+            (89, 286_399_608_951),
+            (90, 267_684_201_301),
+            (91, 284_637_542_762),
+            (92, 288_373_410_868),
+            (93, 290_836_604_849),
+            (94, 270_861_792_144),
+            (95, 210_595_055_304),
+            (96, 315_263_727_200),
+            (97, 158_244_884_792),
+            (98, 168_102_223_900),
+            (99, 252_153_339_800),
+            (100, 378_230_014_000),
+            (101, 205_977_765_866),
+            (102, 149_434_017_849),
+            (103, 135_476_471_008),
+            (104, 147_970_415_680),
+            (105, 122_003_668_139),
+            (106, 133_585_556_570),
+            (107, 200_137_144_216),
+            (108, 106_767_623_816),
+            (109, 124_280_483_748),
+            (110, 186_420_726_696),
+            (111, 249_855_564_892),
+            (112, 196_761_272_984),
+            (113, 147_120_048_727),
+            (114, 84_021_895_534),
+            (115, 98_002_215_656),
+            (116, 89_944_262_256),
+            (117, 107_183_582_952),
+            (118, 110_644_724_664),
+            (119, 99_380_483_902),
+            (120, 138_829_019_156),
+            (121, 111_988_743_976),
+            (122, 130_264_686_152),
+            (123, 118_034_291_488),
+            (124, 79_312_501_676),
+            (125, 43_214_310_704),
+            (126, 64_755_449_962),
+            (127, 97_101_698_382),
+            (128, 145_645_807_991),
+        ];
+
+        // Run migration
+        let weight =
+            crate::migrations::migrate_subnet_locked::migrate_restore_subnet_locked::<Test>();
+        assert!(!weight.is_zero(), "migration weight should be > 0");
+
+        // Read back storage as (u16 -> u64)
+        let actual: BTreeMap<u16, u64> = SubnetLocked::<Test>::iter()
+            .map(|(k, v)| (k.saturated_into::<u16>(), u64::from(v)))
+            .collect();
+
+        let expected: BTreeMap<u16, u64> = EXPECTED.iter().copied().collect();
+
+        // 1) exact content
+        assert_eq!(
+            actual, expected,
+            "SubnetLocked map mismatch for 65..=128 snapshot"
+        );
+
+        // 2) count and total
+        let expected_len = expected.len();
+        let expected_sum: u128 = expected.values().map(|v| *v as u128).sum();
+
+        let count_after = actual.len();
+        let sum_after: u128 = actual.values().map(|v| *v as u128).sum();
+
+        assert_eq!(count_after, expected_len, "entry count mismatch");
+        assert_eq!(sum_after, expected_sum, "total RAO sum mismatch");
+
+        // 3) migration flag set
+        assert!(
+            HasMigrationRun::<Test>::get(name.clone()),
+            "HasMigrationRun should be true after migration"
+        );
+
+        // 4) idempotence
+        let before = actual.clone();
+        let _again =
+            crate::migrations::migrate_subnet_locked::migrate_restore_subnet_locked::<Test>();
+        let after: BTreeMap<u16, u64> = SubnetLocked::<Test>::iter()
+            .map(|(k, v)| (k.saturated_into::<u16>(), u64::from(v)))
+            .collect();
+        assert_eq!(
+            before, after,
+            "re-running the migration should not change storage"
+        );
+    });
+}
diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs
index 28da96687a..1aab19e859 100644
--- a/pallets/subtensor/src/tests/mock.rs
+++ b/pallets/subtensor/src/tests/mock.rs
@@ -19,7 +19,7 @@ use pallet_collective::MemberCount;
 use sp_core::{ConstU64, Get, H256, U256, offchain::KeyTypeId};
 use sp_runtime::Perbill;
 use sp_runtime::{
-    BuildStorage,
+    BuildStorage, Percent,
     traits::{BlakeTwo256, IdentityLookup},
 };
 use sp_std::{cell::RefCell, cmp::Ordering};
@@ -164,7 +164,8 @@ parameter_types! {
     pub const InitialTempo: u16 = 360;
     pub const SelfOwnership: u64 = 2;
     pub const InitialImmunityPeriod: u16 = 2;
-    pub const InitialMaxAllowedUids: u16 = 2;
+    pub const InitialMinAllowedUids: u16 = 2;
+    pub const InitialMaxAllowedUids: u16 = 4;
     pub const InitialBondsMovingAverage: u64 = 900_000;
     pub const InitialBondsPenalty:u16 = u16::MAX;
     pub const InitialBondsResetOn: bool = false;
@@ -201,8 +202,7 @@ parameter_types! {
     pub const InitialMaxDifficulty: u64 = u64::MAX;
     pub const InitialRAORecycledForRegistration: u64 = 0;
     pub const InitialSenateRequiredStakePercentage: u64 = 2; // 2 percent of total stake
-    pub const InitialNetworkImmunityPeriod: u64 = 7200 * 7;
-    pub const InitialNetworkMinAllowedUids: u16 = 128;
+    pub const InitialNetworkImmunityPeriod: u64 = 1_296_000;
     pub const InitialNetworkMinLockCost: u64 = 100_000_000_000;
     pub const InitialSubnetOwnerCut: u16 = 0; // 0%. 100% of rewards go to validators + miners.
     pub const InitialNetworkLockReductionInterval: u64 = 2; // 2 blocks.
@@ -223,6 +223,7 @@ parameter_types! {
     pub const HotkeySwapOnSubnetInterval: u64 = 15; // 15 block, should be bigger than subnet number, then trigger clean up for all subnets
     pub const MaxContributorsPerLeaseToRemove: u32 = 3;
     pub const LeaseDividendsDistributionInterval: u32 = 100;
+    pub const MaxImmuneUidsPercentage: Percent = Percent::from_percent(80);
 }
 
 // Configure collective pallet for council
@@ -405,6 +406,7 @@ impl crate::Config for Test {
     type InitialRho = InitialRho;
     type InitialAlphaSigmoidSteepness = InitialAlphaSigmoidSteepness;
     type InitialKappa = InitialKappa;
+    type InitialMinAllowedUids = InitialMinAllowedUids;
     type InitialMaxAllowedUids = InitialMaxAllowedUids;
     type InitialValidatorPruneLen = InitialValidatorPruneLen;
     type InitialScalingLawPower = InitialScalingLawPower;
@@ -436,7 +438,6 @@ impl crate::Config for Test {
     type InitialRAORecycledForRegistration = InitialRAORecycledForRegistration;
     type InitialSenateRequiredStakePercentage = InitialSenateRequiredStakePercentage;
     type InitialNetworkImmunityPeriod = InitialNetworkImmunityPeriod;
-    type InitialNetworkMinAllowedUids = InitialNetworkMinAllowedUids;
     type InitialNetworkMinLockCost = InitialNetworkMinLockCost;
     type InitialSubnetOwnerCut = InitialSubnetOwnerCut;
     type InitialNetworkLockReductionInterval = InitialNetworkLockReductionInterval;
@@ -459,6 +460,8 @@ impl crate::Config for Test {
     type ProxyInterface = FakeProxier;
     type LeaseDividendsDistributionInterval = LeaseDividendsDistributionInterval;
     type GetCommitments = ();
+    type MaxImmuneUidsPercentage = MaxImmuneUidsPercentage;
+    type CommitmentsInterface = CommitmentsI;
 }
 
 // Swap-related parameter types
@@ -490,6 +493,11 @@ impl PrivilegeCmp<OriginCaller> for OriginPrivilegeCmp {
     }
 }
 
+pub struct CommitmentsI;
+impl CommitmentsInterface for CommitmentsI {
+    fn purge_netuid(_netuid: NetUid) {}
+}
+
 parameter_types! {
{
     pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * BlockWeights::get().max_block;
@@ -948,7 +956,7 @@ pub fn mock_set_children_no_epochs(netuid: NetUid, parent: &U256, child_vec: &[(
 #[allow(dead_code)]
 pub fn step_rate_limit(transaction_type: &TransactionType, netuid: NetUid) {
     // Check rate limit
-    let limit = SubtensorModule::get_rate_limit_on_subnet(transaction_type, netuid);
+    let limit = transaction_type.rate_limit_on_subnet::<Test>(netuid);
 
     // Step that many blocks
     step_block(limit as u16);
diff --git a/pallets/subtensor/src/tests/mod.rs b/pallets/subtensor/src/tests/mod.rs
index b743d7c1ff..b9f4ff5366 100644
--- a/pallets/subtensor/src/tests/mod.rs
+++ b/pallets/subtensor/src/tests/mod.rs
@@ -5,10 +5,12 @@ mod consensus;
 mod delegate_info;
 mod difficulty;
 mod emission;
+mod ensure;
 mod epoch;
 mod evm;
 mod leasing;
 mod math;
+mod mechanism;
 mod migration;
 mod mock;
 mod move_stake;
diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs
index 4f0634c64c..42de84f54f 100644
--- a/pallets/subtensor/src/tests/networks.rs
+++ b/pallets/subtensor/src/tests/networks.rs
@@ -1,9 +1,13 @@
 use super::mock::*;
+use crate::migrations::migrate_network_immunity_period;
 use crate::*;
-use frame_support::assert_ok;
+use frame_support::{assert_err, assert_ok};
 use frame_system::Config;
 use sp_core::U256;
-use subtensor_runtime_common::TaoCurrency;
+use sp_std::collections::btree_map::BTreeMap;
+use substrate_fixed::types::{I96F32, U64F64, U96F32};
+use subtensor_runtime_common::{NetUidStorageIndex, TaoCurrency};
+use subtensor_swap_interface::{OrderType, SwapHandler};
 
 #[test]
 fn test_registration_ok() {
@@ -33,15 +37,1257 @@ fn test_registration_ok() {
             coldkey_account_id
         ));
 
-        assert_ok!(SubtensorModule::user_remove_network(
-            coldkey_account_id,
-            netuid
-        ));
+        assert_ok!(SubtensorModule::do_dissolve_network(netuid));
 
         assert!(!SubtensorModule::if_subnet_exist(netuid))
     })
 }
 
+#[test]
+fn dissolve_no_stakers_no_alpha_no_emission() {
+    new_test_ext(0).execute_with(|| {
+        let cold = U256::from(1);
+        let hot = U256::from(2);
+        let net = add_dynamic_network(&hot, &cold);
+
+        SubtensorModule::set_subnet_locked_balance(net, TaoCurrency::from(0));
+        SubnetTAO::<Test>::insert(net, TaoCurrency::from(0));
+        Emission::<Test>::insert(net, Vec::<AlphaCurrency>::new());
+
+        let before = SubtensorModule::get_coldkey_balance(&cold);
+        assert_ok!(SubtensorModule::do_dissolve_network(net));
+        let after = SubtensorModule::get_coldkey_balance(&cold);
+
+        // Balance should be unchanged (whatever the network-lock bookkeeping left there)
+        assert_eq!(after, before);
+        assert!(!SubtensorModule::if_subnet_exist(net));
+    });
+}
+
+#[test]
+fn dissolve_refunds_full_lock_cost_when_no_emission() {
+    new_test_ext(0).execute_with(|| {
+        let cold = U256::from(3);
+        let hot = U256::from(4);
+        let net = add_dynamic_network(&hot, &cold);
+
+        let lock: TaoCurrency = TaoCurrency::from(1_000_000);
+        SubtensorModule::set_subnet_locked_balance(net, lock);
+        SubnetTAO::<Test>::insert(net, TaoCurrency::from(0));
+        Emission::<Test>::insert(net, Vec::<AlphaCurrency>::new());
+
+        let before = SubtensorModule::get_coldkey_balance(&cold);
+        assert_ok!(SubtensorModule::do_dissolve_network(net));
+        let after = SubtensorModule::get_coldkey_balance(&cold);
+
+        assert_eq!(TaoCurrency::from(after), TaoCurrency::from(before) + lock);
+    });
+}
+
+#[test]
+fn dissolve_single_alpha_out_staker_gets_all_tao() {
+    new_test_ext(0).execute_with(|| {
+        // 1.
Owner & subnet + let owner_cold = U256::from(10); + let owner_hot = U256::from(20); + let net = add_dynamic_network(&owner_hot, &owner_cold); + + // 2. Single α-out staker + let (s_hot, s_cold) = (U256::from(100), U256::from(200)); + Alpha::::insert((s_hot, s_cold, net), U64F64::from_num(5_000u128)); + + // Entire TAO pot should be paid to staker's cold-key + let pot: u64 = 99_999; + SubnetTAO::::insert(net, TaoCurrency::from(pot)); + SubtensorModule::set_subnet_locked_balance(net, 0.into()); + + // Cold-key balance before + let before = SubtensorModule::get_coldkey_balance(&s_cold); + + // Dissolve + assert_ok!(SubtensorModule::do_dissolve_network(net)); + + // Cold-key received full pot + let after = SubtensorModule::get_coldkey_balance(&s_cold); + assert_eq!(after, before + pot); + + // No α entries left for dissolved subnet + assert!(Alpha::::iter().all(|((_h, _c, n), _)| n != net)); + assert!(!SubnetTAO::::contains_key(net)); + }); +} + +#[allow(clippy::indexing_slicing)] +#[test] +fn dissolve_two_stakers_pro_rata_distribution() { + new_test_ext(0).execute_with(|| { + // Subnet + two stakers + let oc = U256::from(50); + let oh = U256::from(51); + let net = add_dynamic_network(&oh, &oc); + + let (s1_hot, s1_cold, a1) = (U256::from(201), U256::from(301), 300u128); + let (s2_hot, s2_cold, a2) = (U256::from(202), U256::from(302), 700u128); + + Alpha::::insert((s1_hot, s1_cold, net), U64F64::from_num(a1)); + Alpha::::insert((s2_hot, s2_cold, net), U64F64::from_num(a2)); + + let pot: u64 = 10_000; + SubnetTAO::::insert(net, TaoCurrency::from(pot)); + SubtensorModule::set_subnet_locked_balance(net, 5_000.into()); // owner refund path present but emission = 0 + + // Cold-key balances before + let s1_before = SubtensorModule::get_coldkey_balance(&s1_cold); + let s2_before = SubtensorModule::get_coldkey_balance(&s2_cold); + let owner_before = SubtensorModule::get_coldkey_balance(&oc); + + // Expected τ shares with largest remainder + let total = a1 + a2; + let prod1 = a1 * (pot as u128); + let prod2 = a2 * (pot as u128); + let share1 = (prod1 / total) as u64; + let share2 = (prod2 / total) as u64; + let mut distributed = share1 + share2; + let mut rem = [(s1_cold, prod1 % total), (s2_cold, prod2 % total)]; + if distributed < pot { + rem.sort_by_key(|&(_c, r)| core::cmp::Reverse(r)); + let leftover = pot - distributed; + for _ in 0..leftover as usize { + distributed += 1; + } + } + // Recompute exact expected shares using the same logic + let mut expected1 = share1; + let mut expected2 = share2; + if share1 + share2 < pot { + rem.sort_by_key(|&(_c, r)| core::cmp::Reverse(r)); + if rem[0].0 == s1_cold { + expected1 += 1; + } else { + expected2 += 1; + } + } + + // Dissolve + assert_ok!(SubtensorModule::do_dissolve_network(net)); + + // Cold-keys received their τ shares + assert_eq!( + SubtensorModule::get_coldkey_balance(&s1_cold), + s1_before + expected1 + ); + assert_eq!( + SubtensorModule::get_coldkey_balance(&s2_cold), + s2_before + expected2 + ); + + // Owner refunded lock (no emission) + assert_eq!( + SubtensorModule::get_coldkey_balance(&oc), + owner_before + 5_000 + ); + + // α entries for dissolved subnet gone + assert!(Alpha::::iter().all(|((_h, _c, n), _)| n != net)); + }); +} + +#[test] +fn dissolve_owner_cut_refund_logic() { + new_test_ext(0).execute_with(|| { + let oc = U256::from(70); + let oh = U256::from(71); + let net = add_dynamic_network(&oh, &oc); + + // One staker and a TAO pot (not relevant to refund amount). 
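// (Editor's aside — not part of this diff: the expected-share arithmetic in
// `dissolve_two_stakers_pro_rata_distribution` above is largest-remainder
// (Hamilton) apportionment. A minimal self-contained sketch of the same rule,
// assuming `u128` is wide enough for `weight * pot`:
fn largest_remainder_shares(pot: u64, weights: &[u128]) -> Vec<u64> {
    let total: u128 = weights.iter().sum();
    if total == 0 {
        return vec![0; weights.len()];
    }
    // Floor quotas first…
    let mut shares: Vec<u64> = weights
        .iter()
        .map(|w| (w * pot as u128 / total) as u64)
        .collect();
    // …then hand leftover units to the largest fractional remainders.
    let mut by_rem: Vec<usize> = (0..weights.len()).collect();
    by_rem.sort_by_key(|&i| core::cmp::Reverse(weights[i] * pot as u128 % total));
    let leftover = (pot - shares.iter().sum::<u64>()) as usize;
    for &i in by_rem.iter().take(leftover) {
        shares[i] += 1;
    }
    shares
}
// e.g. largest_remainder_shares(1, &[3, 2]) == vec![1, 0], matching
// `dissolve_rounding_remainder_distribution` below.)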
+ let sh = U256::from(77); + let sc = U256::from(88); + Alpha::::insert((sh, sc, net), U64F64::from_num(100u128)); + SubnetTAO::::insert(net, TaoCurrency::from(1_000)); + + // Lock & emissions: total emitted α = 800. + let lock: TaoCurrency = TaoCurrency::from(2_000); + SubtensorModule::set_subnet_locked_balance(net, lock); + Emission::::insert( + net, + vec![AlphaCurrency::from(200), AlphaCurrency::from(600)], + ); + + // Owner cut = 11796 / 65535 (about 18%). + SubnetOwnerCut::::put(11_796u16); + + // Compute expected refund with the SAME math as the pallet. + let frac: U96F32 = SubtensorModule::get_float_subnet_owner_cut(); + let total_emitted_alpha: u64 = 800; + let owner_alpha_u64: u64 = U96F32::from_num(total_emitted_alpha) + .saturating_mul(frac) + .floor() + .saturating_to_num::(); + + // Current α→τ price for this subnet. + let price: U96F32 = + ::SwapInterface::current_alpha_price(net.into()); + let owner_emission_tao_u64: u64 = U96F32::from_num(owner_alpha_u64) + .saturating_mul(price) + .floor() + .saturating_to_num::(); + + let expected_refund: TaoCurrency = + lock.saturating_sub(TaoCurrency::from(owner_emission_tao_u64)); + + let before = SubtensorModule::get_coldkey_balance(&oc); + assert_ok!(SubtensorModule::do_dissolve_network(net)); + let after = SubtensorModule::get_coldkey_balance(&oc); + + assert_eq!( + TaoCurrency::from(after), + TaoCurrency::from(before) + expected_refund + ); + }); +} + +#[test] +fn dissolve_zero_refund_when_emission_exceeds_lock() { + new_test_ext(0).execute_with(|| { + let oc = U256::from(1_000); + let oh = U256::from(2_000); + let net = add_dynamic_network(&oh, &oc); + + SubtensorModule::set_subnet_locked_balance(net, TaoCurrency::from(1_000)); + SubnetOwnerCut::::put(u16::MAX); // 100 % + Emission::::insert(net, vec![AlphaCurrency::from(2_000)]); + + let before = SubtensorModule::get_coldkey_balance(&oc); + assert_ok!(SubtensorModule::do_dissolve_network(net)); + let after = SubtensorModule::get_coldkey_balance(&oc); + + assert_eq!(after, before); // no refund + }); +} + +#[test] +fn dissolve_nonexistent_subnet_fails() { + new_test_ext(0).execute_with(|| { + assert_err!( + SubtensorModule::do_dissolve_network(9_999.into()), + Error::::SubnetNotExists + ); + }); +} + +#[test] +fn dissolve_clears_all_per_subnet_storages() { + new_test_ext(0).execute_with(|| { + let owner_cold = U256::from(123); + let owner_hot = U256::from(456); + let net = add_dynamic_network(&owner_hot, &owner_cold); + + // ------------------------------------------------------------------ + // Populate each storage item with a minimal value of the CORRECT type + // ------------------------------------------------------------------ + // Core ownership / bookkeeping + SubnetOwner::::insert(net, owner_cold); + SubnetOwnerHotkey::::insert(net, owner_hot); + SubnetworkN::::insert(net, 0u16); + NetworksAdded::::insert(net, true); + NetworkRegisteredAt::::insert(net, 0u64); + + // Consensus vectors + Rank::::insert(net, vec![1u16]); + Trust::::insert(net, vec![1u16]); + Active::::insert(net, vec![true]); + Emission::::insert(net, vec![AlphaCurrency::from(1)]); + Incentive::::insert(NetUidStorageIndex::from(net), vec![1u16]); + Consensus::::insert(net, vec![1u16]); + Dividends::::insert(net, vec![1u16]); + PruningScores::::insert(net, vec![1u16]); + LastUpdate::::insert(NetUidStorageIndex::from(net), vec![0u64]); + ValidatorPermit::::insert(net, vec![true]); + ValidatorTrust::::insert(net, vec![1u16]); + + // Per‑net params + Tempo::::insert(net, 1u16); + Kappa::::insert(net, 1u16); + 
Difficulty::::insert(net, 1u64); + + MaxAllowedUids::::insert(net, 1u16); + ImmunityPeriod::::insert(net, 1u16); + ActivityCutoff::::insert(net, 1u16); + MaxWeightsLimit::::insert(net, 1u16); + MinAllowedWeights::::insert(net, 1u16); + + RegistrationsThisInterval::::insert(net, 1u16); + POWRegistrationsThisInterval::::insert(net, 1u16); + BurnRegistrationsThisInterval::::insert(net, 1u16); + + // Pool / AMM counters + SubnetTAO::::insert(net, TaoCurrency::from(1)); + SubnetAlphaInEmission::::insert(net, AlphaCurrency::from(1)); + SubnetAlphaOutEmission::::insert(net, AlphaCurrency::from(1)); + SubnetTaoInEmission::::insert(net, TaoCurrency::from(1)); + SubnetVolume::::insert(net, 1u128); + + // Items now REMOVED (not zeroed) by dissolution + SubnetAlphaIn::::insert(net, AlphaCurrency::from(2)); + SubnetAlphaOut::::insert(net, AlphaCurrency::from(3)); + + // Prefix / double-map collections + Keys::::insert(net, 0u16, owner_hot); + Bonds::::insert(NetUidStorageIndex::from(net), 0u16, vec![(0u16, 1u16)]); + Weights::::insert(NetUidStorageIndex::from(net), 0u16, vec![(1u16, 1u16)]); + + // Membership entry for the SAME hotkey as Keys + IsNetworkMember::::insert(owner_hot, net, true); + + // Token / price / provided reserves + TokenSymbol::::insert(net, b"XX".to_vec()); + SubnetMovingPrice::::insert(net, substrate_fixed::types::I96F32::from_num(1)); + SubnetTaoProvided::::insert(net, TaoCurrency::from(1)); + SubnetAlphaInProvided::::insert(net, AlphaCurrency::from(1)); + + // Subnet locks + TransferToggle::::insert(net, true); + SubnetLocked::::insert(net, TaoCurrency::from(1)); + LargestLocked::::insert(net, 1u64); + + // Subnet parameters & pending counters + FirstEmissionBlockNumber::::insert(net, 1u64); + SubnetMechanism::::insert(net, 1u16); + NetworkRegistrationAllowed::::insert(net, true); + NetworkPowRegistrationAllowed::::insert(net, true); + PendingEmission::::insert(net, AlphaCurrency::from(1)); + PendingRootDivs::::insert(net, TaoCurrency::from(1)); + PendingAlphaSwapped::::insert(net, AlphaCurrency::from(1)); + PendingOwnerCut::::insert(net, AlphaCurrency::from(1)); + BlocksSinceLastStep::::insert(net, 1u64); + LastMechansimStepBlock::::insert(net, 1u64); + ServingRateLimit::::insert(net, 1u64); + Rho::::insert(net, 1u16); + AlphaSigmoidSteepness::::insert(net, 1i16); + + // Weights/versioning/targets/limits + WeightsVersionKey::::insert(net, 1u64); + MaxAllowedValidators::::insert(net, 1u16); + AdjustmentInterval::::insert(net, 2u16); + BondsMovingAverage::::insert(net, 1u64); + BondsPenalty::::insert(net, 1u16); + BondsResetOn::::insert(net, true); + WeightsSetRateLimit::::insert(net, 1u64); + ValidatorPruneLen::::insert(net, 1u64); + ScalingLawPower::::insert(net, 1u16); + TargetRegistrationsPerInterval::::insert(net, 1u16); + AdjustmentAlpha::::insert(net, 1u64); + CommitRevealWeightsEnabled::::insert(net, true); + + // Burn/difficulty/adjustment + Burn::::insert(net, TaoCurrency::from(1)); + MinBurn::::insert(net, TaoCurrency::from(1)); + MaxBurn::::insert(net, TaoCurrency::from(2)); + MinDifficulty::::insert(net, 1u64); + MaxDifficulty::::insert(net, 2u64); + RegistrationsThisBlock::::insert(net, 1u16); + EMAPriceHalvingBlocks::::insert(net, 1u64); + RAORecycledForRegistration::::insert(net, TaoCurrency::from(1)); + + // Feature toggles + LiquidAlphaOn::::insert(net, true); + Yuma3On::::insert(net, true); + AlphaValues::::insert(net, (1u16, 2u16)); + SubtokenEnabled::::insert(net, true); + ImmuneOwnerUidsLimit::::insert(net, 1u16); + + // Per‑subnet vectors / indexes + 
StakeWeight::::insert(net, vec![1u16]); + + // Uid/registration + Uids::::insert(net, owner_hot, 0u16); + BlockAtRegistration::::insert(net, 0u16, 1u64); + + // Per‑subnet dividends + AlphaDividendsPerSubnet::::insert(net, owner_hot, AlphaCurrency::from(1)); + TaoDividendsPerSubnet::::insert(net, owner_hot, TaoCurrency::from(1)); + + // Parent/child topology + takes + ChildkeyTake::::insert(owner_hot, net, 1u16); + PendingChildKeys::::insert(net, owner_cold, (vec![(1u64, owner_hot)], 1u64)); + ChildKeys::::insert(owner_cold, net, vec![(1u64, owner_hot)]); + ParentKeys::::insert(owner_hot, net, vec![(1u64, owner_cold)]); + + // Hotkey swap timestamp for subnet + LastHotkeySwapOnNetuid::::insert(net, owner_cold, 1u64); + + // Axon/prometheus tx key timing (NMap) — ***correct key-tuple insertion*** + TransactionKeyLastBlock::::insert((owner_hot, net, 1u16), 1u64); + + // EVM association indexed by (netuid, uid) + AssociatedEvmAddress::::insert(net, 0u16, (sp_core::H160::zero(), 1u64)); + + // (Optional) subnet -> lease link + SubnetUidToLeaseId::::insert(net, 42u32); + + // ------------------------------------------------------------------ + // Dissolve + // ------------------------------------------------------------------ + assert_ok!(SubtensorModule::do_dissolve_network(net)); + + // ------------------------------------------------------------------ + // Items that must be COMPLETELY REMOVED + // ------------------------------------------------------------------ + assert!(!SubnetOwner::::contains_key(net)); + assert!(!SubnetOwnerHotkey::::contains_key(net)); + assert!(!SubnetworkN::::contains_key(net)); + assert!(!NetworksAdded::::contains_key(net)); + assert!(!NetworkRegisteredAt::::contains_key(net)); + + // Consensus vectors removed + assert!(!Rank::::contains_key(net)); + assert!(!Trust::::contains_key(net)); + assert!(!Active::::contains_key(net)); + assert!(!Emission::::contains_key(net)); + assert!(!Incentive::::contains_key(NetUidStorageIndex::from( + net + ))); + assert!(!Consensus::::contains_key(net)); + assert!(!Dividends::::contains_key(net)); + assert!(!PruningScores::::contains_key(net)); + assert!(!LastUpdate::::contains_key(NetUidStorageIndex::from( + net + ))); + + assert!(!ValidatorPermit::::contains_key(net)); + assert!(!ValidatorTrust::::contains_key(net)); + + // Per‑net params removed + assert!(!Tempo::::contains_key(net)); + assert!(!Kappa::::contains_key(net)); + assert!(!Difficulty::::contains_key(net)); + + assert!(!MaxAllowedUids::::contains_key(net)); + assert!(!ImmunityPeriod::::contains_key(net)); + assert!(!ActivityCutoff::::contains_key(net)); + assert!(!MaxWeightsLimit::::contains_key(net)); + assert!(!MinAllowedWeights::::contains_key(net)); + + assert!(!RegistrationsThisInterval::::contains_key(net)); + assert!(!POWRegistrationsThisInterval::::contains_key(net)); + assert!(!BurnRegistrationsThisInterval::::contains_key(net)); + + // Pool / AMM counters removed + assert!(!SubnetTAO::::contains_key(net)); + assert!(!SubnetAlphaInEmission::::contains_key(net)); + assert!(!SubnetAlphaOutEmission::::contains_key(net)); + assert!(!SubnetTaoInEmission::::contains_key(net)); + assert!(!SubnetVolume::::contains_key(net)); + + // These are now REMOVED + assert!(!SubnetAlphaIn::::contains_key(net)); + assert!(!SubnetAlphaOut::::contains_key(net)); + + // Collections fully cleared + assert!(Keys::::iter_prefix(net).next().is_none()); + assert!( + Bonds::::iter_prefix(NetUidStorageIndex::from(net)) + .next() + .is_none() + ); + assert!( + 
Weights::::iter_prefix(NetUidStorageIndex::from(net)) + .next() + .is_none() + ); + assert!(!IsNetworkMember::::contains_key(owner_hot, net)); + + // Token / price / provided reserves + assert!(!TokenSymbol::::contains_key(net)); + assert!(!SubnetMovingPrice::::contains_key(net)); + assert!(!SubnetTaoProvided::::contains_key(net)); + assert!(!SubnetAlphaInProvided::::contains_key(net)); + + // Subnet locks + assert!(!TransferToggle::::contains_key(net)); + assert!(!SubnetLocked::::contains_key(net)); + assert!(!LargestLocked::::contains_key(net)); + + // Subnet parameters & pending counters + assert!(!FirstEmissionBlockNumber::::contains_key(net)); + assert!(!SubnetMechanism::::contains_key(net)); + assert!(!NetworkRegistrationAllowed::::contains_key(net)); + assert!(!NetworkPowRegistrationAllowed::::contains_key(net)); + assert!(!PendingEmission::::contains_key(net)); + assert!(!PendingRootDivs::::contains_key(net)); + assert!(!PendingAlphaSwapped::::contains_key(net)); + assert!(!PendingOwnerCut::::contains_key(net)); + assert!(!BlocksSinceLastStep::::contains_key(net)); + assert!(!LastMechansimStepBlock::::contains_key(net)); + assert!(!ServingRateLimit::::contains_key(net)); + assert!(!Rho::::contains_key(net)); + assert!(!AlphaSigmoidSteepness::::contains_key(net)); + + // Weights/versioning/targets/limits + assert!(!WeightsVersionKey::::contains_key(net)); + assert!(!MaxAllowedValidators::::contains_key(net)); + assert!(!AdjustmentInterval::::contains_key(net)); + assert!(!BondsMovingAverage::::contains_key(net)); + assert!(!BondsPenalty::::contains_key(net)); + assert!(!BondsResetOn::::contains_key(net)); + assert!(!WeightsSetRateLimit::::contains_key(net)); + assert!(!ValidatorPruneLen::::contains_key(net)); + assert!(!ScalingLawPower::::contains_key(net)); + assert!(!TargetRegistrationsPerInterval::::contains_key(net)); + assert!(!AdjustmentAlpha::::contains_key(net)); + assert!(!CommitRevealWeightsEnabled::::contains_key(net)); + + // Burn/difficulty/adjustment + assert!(!Burn::::contains_key(net)); + assert!(!MinBurn::::contains_key(net)); + assert!(!MaxBurn::::contains_key(net)); + assert!(!MinDifficulty::::contains_key(net)); + assert!(!MaxDifficulty::::contains_key(net)); + assert!(!RegistrationsThisBlock::::contains_key(net)); + assert!(!EMAPriceHalvingBlocks::::contains_key(net)); + assert!(!RAORecycledForRegistration::::contains_key(net)); + + // Feature toggles + assert!(!LiquidAlphaOn::::contains_key(net)); + assert!(!Yuma3On::::contains_key(net)); + assert!(!AlphaValues::::contains_key(net)); + assert!(!SubtokenEnabled::::contains_key(net)); + assert!(!ImmuneOwnerUidsLimit::::contains_key(net)); + + // Per‑subnet vectors / indexes + assert!(!StakeWeight::::contains_key(net)); + + // Uid/registration + assert!(Uids::::get(net, owner_hot).is_none()); + assert!(!BlockAtRegistration::::contains_key(net, 0u16)); + + // Per‑subnet dividends + assert!(!AlphaDividendsPerSubnet::::contains_key( + net, owner_hot + )); + assert!(!TaoDividendsPerSubnet::::contains_key(net, owner_hot)); + + // Parent/child topology + takes + assert!(!ChildkeyTake::::contains_key(owner_hot, net)); + assert!(!PendingChildKeys::::contains_key(net, owner_cold)); + assert!(!ChildKeys::::contains_key(owner_cold, net)); + assert!(!ParentKeys::::contains_key(owner_hot, net)); + + // Hotkey swap timestamp for subnet + assert!(!LastHotkeySwapOnNetuid::::contains_key( + net, owner_cold + )); + + // Axon/prometheus tx key timing (NMap) — ValueQuery (defaults to 0) + assert_eq!( + 
TransactionKeyLastBlock::::get((owner_hot, net, 1u16)), + 0u64 + ); + + // EVM association + assert!(AssociatedEvmAddress::::get(net, 0u16).is_none()); + + // Subnet -> lease link + assert!(!SubnetUidToLeaseId::::contains_key(net)); + + // ------------------------------------------------------------------ + // Final subnet removal confirmation + // ------------------------------------------------------------------ + assert!(!SubtensorModule::if_subnet_exist(net)); + }); +} + +#[test] +fn dissolve_alpha_out_but_zero_tao_no_rewards() { + new_test_ext(0).execute_with(|| { + let oc = U256::from(21); + let oh = U256::from(22); + let net = add_dynamic_network(&oh, &oc); + + let sh = U256::from(23); + let sc = U256::from(24); + + Alpha::::insert((sh, sc, net), U64F64::from_num(1_000u64)); + SubnetTAO::::insert(net, TaoCurrency::from(0)); // zero TAO + SubtensorModule::set_subnet_locked_balance(net, TaoCurrency::from(0)); + Emission::::insert(net, Vec::::new()); + + let before = SubtensorModule::get_coldkey_balance(&sc); + assert_ok!(SubtensorModule::do_dissolve_network(net)); + let after = SubtensorModule::get_coldkey_balance(&sc); + + // No reward distributed, α-out cleared. + assert_eq!(after, before); + assert!(Alpha::::iter().next().is_none()); + }); +} + +#[test] +fn dissolve_decrements_total_networks() { + new_test_ext(0).execute_with(|| { + let total_before = TotalNetworks::::get(); + + let cold = U256::from(41); + let hot = U256::from(42); + let net = add_dynamic_network(&hot, &cold); + + // Sanity: adding network increments the counter. + assert_eq!(TotalNetworks::::get(), total_before + 1); + + assert_ok!(SubtensorModule::do_dissolve_network(net)); + assert_eq!(TotalNetworks::::get(), total_before); + }); +} + +#[test] +fn dissolve_rounding_remainder_distribution() { + new_test_ext(0).execute_with(|| { + // 1. Build subnet with two α-out stakers (3 & 2 α) + let oc = U256::from(61); + let oh = U256::from(62); + let net = add_dynamic_network(&oh, &oc); + + let (s1h, s1c) = (U256::from(63), U256::from(64)); + let (s2h, s2c) = (U256::from(65), U256::from(66)); + + Alpha::::insert((s1h, s1c, net), U64F64::from_num(3u128)); + Alpha::::insert((s2h, s2c, net), U64F64::from_num(2u128)); + + SubnetTAO::::insert(net, TaoCurrency::from(1)); // TAO pot = 1 + SubtensorModule::set_subnet_locked_balance(net, TaoCurrency::from(0)); + + // Cold-key balances before + let c1_before = SubtensorModule::get_coldkey_balance(&s1c); + let c2_before = SubtensorModule::get_coldkey_balance(&s2c); + + // 3. Run full dissolve flow + assert_ok!(SubtensorModule::do_dissolve_network(net)); + + // 4. s1 (larger remainder) should get +1 τ on cold-key + let c1_after = SubtensorModule::get_coldkey_balance(&s1c); + let c2_after = SubtensorModule::get_coldkey_balance(&s2c); + + assert_eq!(c1_after, c1_before + 1); + assert_eq!(c2_after, c2_before); + + // α records for subnet gone; TAO key gone + assert!(Alpha::::iter().all(|((_h, _c, n), _)| n != net)); + assert!(!SubnetTAO::::contains_key(net)); + }); +} +#[test] +fn destroy_alpha_out_multiple_stakers_pro_rata() { + new_test_ext(0).execute_with(|| { + // 1. Owner & subnet + let owner_cold = U256::from(10); + let owner_hot = U256::from(20); + let netuid = add_dynamic_network(&owner_hot, &owner_cold); + + // 2. Two stakers on that subnet + let (c1, h1) = (U256::from(111), U256::from(211)); + let (c2, h2) = (U256::from(222), U256::from(333)); + register_ok_neuron(netuid, h1, c1, 0); + register_ok_neuron(netuid, h2, c2, 0); + + // 3. 
Stake 30 : 70 (s1 : s2) in TAO + let min_total = DefaultMinStake::::get(); + let min_total_u64: u64 = min_total.into(); + let s1: u64 = 3u64 * min_total_u64; + let s2: u64 = 7u64 * min_total_u64; + + SubtensorModule::add_balance_to_coldkey_account(&c1, s1 + 50_000); + SubtensorModule::add_balance_to_coldkey_account(&c2, s2 + 50_000); + + assert_ok!(SubtensorModule::do_add_stake( + RuntimeOrigin::signed(c1), + h1, + netuid, + s1.into() + )); + assert_ok!(SubtensorModule::do_add_stake( + RuntimeOrigin::signed(c2), + h2, + netuid, + s2.into() + )); + + // 4. α-out snapshot + let a1: u128 = Alpha::::get((h1, c1, netuid)).saturating_to_num(); + let a2: u128 = Alpha::::get((h2, c2, netuid)).saturating_to_num(); + let atotal = a1 + a2; + + // 5. TAO pot & lock + let tao_pot: u64 = 10_000; + SubnetTAO::::insert(netuid, TaoCurrency::from(tao_pot)); + SubtensorModule::set_subnet_locked_balance(netuid, TaoCurrency::from(5_000)); + + // 6. Balances before + let c1_before = SubtensorModule::get_coldkey_balance(&c1); + let c2_before = SubtensorModule::get_coldkey_balance(&c2); + let owner_before = SubtensorModule::get_coldkey_balance(&owner_cold); + + // 7. Run the (now credit-to-coldkey) logic + assert_ok!(SubtensorModule::destroy_alpha_in_out_stakes(netuid)); + + // 8. Expected τ shares via largest remainder + let prod1 = (tao_pot as u128) * a1; + let prod2 = (tao_pot as u128) * a2; + let mut s1_share = (prod1 / atotal) as u64; + let mut s2_share = (prod2 / atotal) as u64; + let distributed = s1_share + s2_share; + if distributed < tao_pot { + // Assign leftover to larger remainder + let r1 = prod1 % atotal; + let r2 = prod2 % atotal; + if r1 >= r2 { + s1_share += 1; + } else { + s2_share += 1; + } + } + + // 9. Cold-key balances must have increased accordingly + assert_eq!( + SubtensorModule::get_coldkey_balance(&c1), + c1_before + s1_share + ); + assert_eq!( + SubtensorModule::get_coldkey_balance(&c2), + c2_before + s2_share + ); + + // 10. Owner refund (5 000 τ) to cold-key (no emission) + assert_eq!( + SubtensorModule::get_coldkey_balance(&owner_cold), + owner_before + 5_000 + ); + + // 11. 
α entries cleared for the subnet + assert!(!Alpha::::contains_key((h1, c1, netuid))); + assert!(!Alpha::::contains_key((h2, c2, netuid))); + }); +} + +#[allow(clippy::indexing_slicing)] +#[test] +fn destroy_alpha_out_many_stakers_complex_distribution() { + new_test_ext(0).execute_with(|| { + // ── 1) create subnet with 20 stakers ──────────────────────────────── + let owner_cold = U256::from(1_000); + let owner_hot = U256::from(2_000); + let netuid = add_dynamic_network(&owner_hot, &owner_cold); + SubtensorModule::set_max_registrations_per_block(netuid, 1_000u16); + SubtensorModule::set_target_registrations_per_interval(netuid, 1_000u16); + + // Runtime-exact min amount = min_stake + fee + let min_amount = { + let min_stake = DefaultMinStake::::get(); + let fee = ::SwapInterface::approx_fee_amount( + netuid.into(), + min_stake.into(), + ); + min_stake.saturating_add(fee.into()) + }; + + const N: usize = 20; + let mut cold = [U256::zero(); N]; + let mut hot = [U256::zero(); N]; + let mut stake = [0u64; N]; + + let min_amount_u64: u64 = min_amount.into(); + for i in 0..N { + cold[i] = U256::from(10_000 + 2 * i as u32); + hot[i] = U256::from(10_001 + 2 * i as u32); + stake[i] = (i as u64 + 1u64) * min_amount_u64; // multiples of min_amount + + register_ok_neuron(netuid, hot[i], cold[i], 0); + SubtensorModule::add_balance_to_coldkey_account(&cold[i], stake[i] + 100_000); + + assert_ok!(SubtensorModule::do_add_stake( + RuntimeOrigin::signed(cold[i]), + hot[i], + netuid, + stake[i].into() + )); + } + + // ── 2) α-out snapshot ─────────────────────────────────────────────── + let mut alpha = [0u128; N]; + let mut alpha_sum: u128 = 0; + for i in 0..N { + alpha[i] = Alpha::::get((hot[i], cold[i], netuid)).saturating_to_num(); + alpha_sum += alpha[i]; + } + + // ── 3) TAO pot & subnet lock ──────────────────────────────────────── + let tao_pot: u64 = 123_456; + let lock: u64 = 30_000; + SubnetTAO::::insert(netuid, TaoCurrency::from(tao_pot)); + SubtensorModule::set_subnet_locked_balance(netuid, TaoCurrency::from(lock)); + + // Owner already earned some emission; owner-cut = 50 % + Emission::::insert( + netuid, + vec![ + AlphaCurrency::from(1_000), + AlphaCurrency::from(2_000), + AlphaCurrency::from(1_500), + ], + ); + SubnetOwnerCut::::put(32_768u16); // ~ 0.5 in fixed-point + + // ── 4) balances before ────────────────────────────────────────────── + let mut bal_before = [0u64; N]; + for i in 0..N { + bal_before[i] = SubtensorModule::get_coldkey_balance(&cold[i]); + } + let owner_before = SubtensorModule::get_coldkey_balance(&owner_cold); + + // ── 5) expected τ share per pallet algorithm (incl. 
remainder) ───── + let mut share = [0u64; N]; + let mut rem = [0u128; N]; + let mut paid: u128 = 0; + + for i in 0..N { + let prod = tao_pot as u128 * alpha[i]; + share[i] = (prod / alpha_sum) as u64; + rem[i] = prod % alpha_sum; + paid += share[i] as u128; + } + let leftover = tao_pot as u128 - paid; + let mut idx: Vec<_> = (0..N).collect(); + idx.sort_by_key(|i| core::cmp::Reverse(rem[*i])); + for i in 0..leftover as usize { + share[idx[i]] += 1; + } + + // ── 5b) expected owner refund with price-aware emission deduction ─── + let frac: U96F32 = SubtensorModule::get_float_subnet_owner_cut(); + let total_emitted_alpha: u64 = 1_000 + 2_000 + 1_500; // 4500 α + let owner_alpha_u64: u64 = U96F32::from_num(total_emitted_alpha) + .saturating_mul(frac) + .floor() + .saturating_to_num::(); + + let owner_emission_tao_u64: u64 = ::SwapInterface::sim_swap( + netuid.into(), + OrderType::Sell, + owner_alpha_u64, + ) + .map(|res| res.amount_paid_out) + .unwrap_or_else(|_| { + // Fallback matches the pallet's fallback + let price: U96F32 = + ::SwapInterface::current_alpha_price(netuid.into()); + U96F32::from_num(owner_alpha_u64) + .saturating_mul(price) + .floor() + .saturating_to_num::() + }); + + let expected_refund: u64 = lock.saturating_sub(owner_emission_tao_u64); + + // ── 6) run distribution (credits τ to coldkeys, wipes α state) ───── + assert_ok!(SubtensorModule::destroy_alpha_in_out_stakes(netuid)); + + // ── 7) post checks ────────────────────────────────────────────────── + for i in 0..N { + // cold-key balances increased by expected τ share + assert_eq!( + SubtensorModule::get_coldkey_balance(&cold[i]), + bal_before[i] + share[i], + "staker {i} cold-key balance changed unexpectedly" + ); + } + + // owner refund + assert_eq!( + SubtensorModule::get_coldkey_balance(&owner_cold), + owner_before + expected_refund + ); + + // α cleared for dissolved subnet & related counters reset + assert!(Alpha::::iter().all(|((_h, _c, n), _)| n != netuid)); + assert_eq!(SubnetAlphaIn::::get(netuid), 0.into()); + assert_eq!(SubnetAlphaOut::::get(netuid), 0.into()); + assert_eq!(SubtensorModule::get_subnet_locked_balance(netuid), 0.into()); + }); +} + +#[test] +fn prune_none_with_no_networks() { + new_test_ext(0).execute_with(|| { + assert_eq!(SubtensorModule::get_network_to_prune(), None); + }); +} + +#[test] +fn prune_none_when_all_networks_immune() { + new_test_ext(0).execute_with(|| { + // two fresh networks → still inside immunity window + let n1 = add_dynamic_network(&U256::from(2), &U256::from(1)); + let _n2 = add_dynamic_network(&U256::from(4), &U256::from(3)); + + // emissions don’t matter while immune + Emission::::insert(n1, vec![AlphaCurrency::from(10)]); + + assert_eq!(SubtensorModule::get_network_to_prune(), None); + }); +} + +#[test] +fn prune_selects_network_with_lowest_price() { + new_test_ext(0).execute_with(|| { + let n1 = add_dynamic_network(&U256::from(20), &U256::from(10)); + let n2 = add_dynamic_network(&U256::from(40), &U256::from(30)); + + // make both networks eligible (past immunity) + let imm = SubtensorModule::get_network_immunity_period(); + System::set_block_number(imm + 10); + + // n1 has lower price → should be pruned + SubnetMovingPrice::::insert(n1, I96F32::from_num(1)); + SubnetMovingPrice::::insert(n2, I96F32::from_num(10)); + + assert_eq!(SubtensorModule::get_network_to_prune(), Some(n1)); + }); +} + +#[test] +fn prune_ignores_immune_network_even_if_lower_price() { + new_test_ext(0).execute_with(|| { + // create mature network n1 first + let n1 = 
add_dynamic_network(&U256::from(22), &U256::from(11)); + + let imm = SubtensorModule::get_network_immunity_period(); + System::set_block_number(imm + 5); // advance → n1 now mature + + // create second network n2 *inside* immunity + let n2 = add_dynamic_network(&U256::from(44), &U256::from(33)); + + // prices: n2 lower but immune; n1 must be selected + SubnetMovingPrice::::insert(n1, I96F32::from_num(5)); + SubnetMovingPrice::::insert(n2, I96F32::from_num(1)); + + System::set_block_number(imm + 10); // still immune for n2 + assert_eq!(SubtensorModule::get_network_to_prune(), Some(n1)); + }); +} + +#[test] +fn prune_tie_on_price_earlier_registration_wins() { + new_test_ext(0).execute_with(|| { + // n1 registered first + let n1 = add_dynamic_network(&U256::from(66), &U256::from(55)); + + // advance 1 block, then register n2 (later timestamp) + System::set_block_number(1); + let n2 = add_dynamic_network(&U256::from(88), &U256::from(77)); + + // push past immunity for both + let imm = SubtensorModule::get_network_immunity_period(); + System::set_block_number(imm + 20); + + // identical prices → tie; earlier (n1) must be chosen + SubnetMovingPrice::::insert(n1, I96F32::from_num(7)); + SubnetMovingPrice::::insert(n2, I96F32::from_num(7)); + + assert_eq!(SubtensorModule::get_network_to_prune(), Some(n1)); + }); +} + +#[test] +fn prune_selection_complex_state_exhaustive() { + new_test_ext(0).execute_with(|| { + let imm = SubtensorModule::get_network_immunity_period(); + + // --------------------------------------------------------------------- + // Build a rich topology of networks with controlled registration times. + // --------------------------------------------------------------------- + // n1 + n2 in the same block (equal timestamp) to test "tie + same time". + System::set_block_number(0); + let n1 = add_dynamic_network(&U256::from(101), &U256::from(201)); + let n2 = add_dynamic_network(&U256::from(102), &U256::from(202)); // same registered_at as n1 + + // Later registrations (strictly greater timestamp than n1/n2) + System::set_block_number(1); + let n3 = add_dynamic_network(&U256::from(103), &U256::from(203)); + + System::set_block_number(2); + let n4 = add_dynamic_network(&U256::from(104), &U256::from(204)); + + // Create *immune* networks that will remain ineligible initially, + // even if their price is the lowest. + System::set_block_number(imm + 5); + let n5 = add_dynamic_network(&U256::from(105), &U256::from(205)); // immune at first + + System::set_block_number(imm + 6); + let n6 = add_dynamic_network(&U256::from(106), &U256::from(206)); // immune at first + + // (Root is ignored by the selector.) + let root = NetUid::ROOT; + + // --------------------------------------------------------------------- + // Drive pruning via the EMA/moving price used by `get_network_to_prune()`. + // We set the moving prices directly to create deterministic selections. 
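    // (Editor's aside — a self-contained model of the rule these phases
    // exercise, not pallet code; the tuple fields and tie-break ordering are
    // assumptions drawn from the assertions below:
    //
    //     // nets: (netuid, moving_price, registered_at); `added = false` nets excluded upstream.
    //     fn pick_prunable(nets: &[(u16, u64, u64)], now: u64, imm: u64) -> Option<u16> {
    //         nets.iter()
    //             .filter(|(_, _, reg)| now >= reg + imm) // skip immune nets
    //             .min_by_key(|(uid, price, reg)| (*price, *reg, *uid))
    //             .map(|(uid, _, _)| *uid)
    //     }
    // )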
+ // + // Intended prices: + // n1: 25, n2: 25, n3: 100, n4: 1, n5: 0 (immune initially), n6: 0 (immune initially) + // --------------------------------------------------------------------- + SubnetMovingPrice::::insert(n1, I96F32::from_num(25)); + SubnetMovingPrice::::insert(n2, I96F32::from_num(25)); + SubnetMovingPrice::::insert(n3, I96F32::from_num(100)); + SubnetMovingPrice::::insert(n4, I96F32::from_num(1)); + SubnetMovingPrice::::insert(n5, I96F32::from_num(0)); + SubnetMovingPrice::::insert(n6, I96F32::from_num(0)); + + // --------------------------------------------------------------------- + // Phase A: Only n1..n4 are mature → lowest price (n4=1) should win. + // --------------------------------------------------------------------- + System::set_block_number(imm + 10); + assert_eq!( + SubtensorModule::get_network_to_prune(), + Some(n4), + "Among mature nets (n1..n4), n4 has price=1 (lowest) and should be chosen." + ); + + // --------------------------------------------------------------------- + // Phase B: Tie on price with *same registration time* (n1 vs n2). + // Raise n4's price to 25 so {n1=25, n2=25, n3=100, n4=25}. + // n1 and n2 share the *same registered_at*. The tie should keep the + // first encountered (stable iteration by key order) → n1. + // --------------------------------------------------------------------- + SubnetMovingPrice::::insert(n4, I96F32::from_num(25)); // n4 now 25 + assert_eq!( + SubtensorModule::get_network_to_prune(), + Some(n1), + "Tie on price with equal timestamps (n1,n2) → first encountered (n1) should persist." + ); + + // --------------------------------------------------------------------- + // Phase C: Tie on price with *different registration times*. + // Make n3 price=25 as well. Now n1,n2,n3,n4 all have price=25. + // Earliest registration among them is n1 (block 0). + // --------------------------------------------------------------------- + SubnetMovingPrice::::insert(n3, I96F32::from_num(25)); + assert_eq!( + SubtensorModule::get_network_to_prune(), + Some(n1), + "Tie on price across multiple nets → earliest registration (n1) wins." + ); + + // --------------------------------------------------------------------- + // Phase D: Immune networks ignored even if strictly cheaper (0). + // n5 and n6 price=0 but still immune at (imm + 10). Ensure they are + // ignored and selection remains n1. + // --------------------------------------------------------------------- + let now = System::block_number(); + assert!( + now < NetworkRegisteredAt::::get(n5) + imm, + "n5 is immune at current block" + ); + assert!( + now < NetworkRegisteredAt::::get(n6) + imm, + "n6 is immune at current block" + ); + assert_eq!( + SubtensorModule::get_network_to_prune(), + Some(n1), + "Immune nets (n5,n6) must be ignored despite lower price." + ); + + // --------------------------------------------------------------------- + // Phase E: If *all* networks are immune → return None. + // Move clock back before any network's immunity expires. + // --------------------------------------------------------------------- + System::set_block_number(0); + assert_eq!( + SubtensorModule::get_network_to_prune(), + None, + "With all networks immune, there is no prunable candidate." + ); + + // --------------------------------------------------------------------- + // Phase F: Advance beyond immunity for n5 & n6. + // Both n5 and n6 now eligible with price=0 (lowest). + // Tie on price; earlier registration between n5 and n6 is n5. 
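    // (Maturity arithmetic, for the reader: a net becomes prunable once
    //  block_number >= registered_at + imm. n5 was registered at imm + 5 and
    //  n6 at imm + 6, so both mature by 2*imm + 10, the block set just below.)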
+ // --------------------------------------------------------------------- + System::set_block_number(2 * imm + 10); + assert!( + System::block_number() >= NetworkRegisteredAt::::get(n5) + imm, + "n5 has matured" + ); + assert!( + System::block_number() >= NetworkRegisteredAt::::get(n6) + imm, + "n6 has matured" + ); + assert_eq!( + SubtensorModule::get_network_to_prune(), + Some(n5), + "After immunity, n5 (price=0) should win; tie with n6 broken by earlier registration." + ); + + // --------------------------------------------------------------------- + // Phase G: Create *sparse* netuids and ensure selection is stable. + // Remove n5; now n6 (price=0) should be selected. + // This validates robustness to holes / non-contiguous netuids. + // --------------------------------------------------------------------- + SubtensorModule::do_dissolve_network(n5).expect("Expected not to panic"); + assert_eq!( + SubtensorModule::get_network_to_prune(), + Some(n6), + "After removing n5, next-lowest (n6=0) should be chosen even with sparse netuids." + ); + + // --------------------------------------------------------------------- + // Phase H: Dynamic price changes. + // Make n6 expensive (price 100); make n3 cheapest (price 1). + // --------------------------------------------------------------------- + SubnetMovingPrice::::insert(n6, I96F32::from_num(100)); + SubnetMovingPrice::::insert(n3, I96F32::from_num(1)); + assert_eq!( + SubtensorModule::get_network_to_prune(), + Some(n3), + "Dynamic changes: n3 set to price=1 (lowest among eligibles) → should be pruned." + ); + + // --------------------------------------------------------------------- + // Phase I: Tie again (n2 vs n3) but earlier registration must win. + // Give n2 the same price as n3; n2 registered at block 0, n3 at block 1. + // n2 should be chosen. + // --------------------------------------------------------------------- + SubnetMovingPrice::::insert(n2, I96F32::from_num(1)); + assert_eq!( + SubtensorModule::get_network_to_prune(), + Some(n2), + "Tie on price across n2 (earlier reg) and n3 → n2 wins by timestamp." + ); + + // --------------------------------------------------------------------- + // (Extra) Mark n2 as 'not added' to assert we honor the `added` flag, + // then restore it to avoid side-effects on subsequent tests. + // --------------------------------------------------------------------- + NetworksAdded::::insert(n2, false); + assert_ne!( + SubtensorModule::get_network_to_prune(), + Some(n2), + "`added=false` must exclude n2 from consideration." + ); + NetworksAdded::::insert(n2, true); + + // Root is always ignored even if cheapest (get_moving_alpha_price returns 1 for ROOT). + assert_ne!( + SubtensorModule::get_network_to_prune(), + Some(root), + "ROOT must never be selected for pruning." 
+ ); + }); +} + +#[test] +fn register_network_prunes_and_recycles_netuid() { + new_test_ext(0).execute_with(|| { + SubnetLimit::::put(2u16); + + let n1_cold = U256::from(21); + let n1_hot = U256::from(22); + let n1 = add_dynamic_network(&n1_hot, &n1_cold); + + let n2_cold = U256::from(23); + let n2_hot = U256::from(24); + let n2 = add_dynamic_network(&n2_hot, &n2_cold); + + let imm = SubtensorModule::get_network_immunity_period(); + System::set_block_number(imm + 100); + + Emission::::insert(n1, vec![AlphaCurrency::from(1)]); + Emission::::insert(n2, vec![AlphaCurrency::from(1_000)]); + + let new_cold = U256::from(30); + let new_hot = U256::from(31); + let needed: u64 = SubtensorModule::get_network_lock_cost().into(); + SubtensorModule::add_balance_to_coldkey_account(&new_cold, needed.saturating_mul(10)); + + assert_ok!(SubtensorModule::do_register_network( + RuntimeOrigin::signed(new_cold), + &new_hot, + 1, + None, + )); + + assert_eq!(TotalNetworks::::get(), 2); + assert_eq!(SubnetOwner::::get(n1), new_cold); + assert_eq!(SubnetOwnerHotkey::::get(n1), new_hot); + assert_eq!(SubnetOwner::::get(n2), n2_cold); + }); +} + +#[test] +fn register_network_fails_before_prune_keeps_existing() { + new_test_ext(0).execute_with(|| { + SubnetLimit::::put(1u16); + + let n_cold = U256::from(41); + let n_hot = U256::from(42); + let net = add_dynamic_network(&n_hot, &n_cold); + + let imm = SubtensorModule::get_network_immunity_period(); + System::set_block_number(imm + 50); + Emission::::insert(net, vec![AlphaCurrency::from(10)]); + + let caller_cold = U256::from(50); + let caller_hot = U256::from(51); + + assert_err!( + SubtensorModule::do_register_network( + RuntimeOrigin::signed(caller_cold), + &caller_hot, + 1, + None, + ), + Error::::CannotAffordLockCost + ); + + assert!(SubtensorModule::if_subnet_exist(net)); + assert_eq!(TotalNetworks::::get(), 1); + }); +} + +#[test] +fn test_migrate_network_immunity_period() { + new_test_ext(0).execute_with(|| { + // -------------------------------------------------------------------- + // ‼️ PRE-CONDITIONS + // -------------------------------------------------------------------- + assert_ne!(NetworkImmunityPeriod::::get(), 864_000); + assert!( + !HasMigrationRun::::get(b"migrate_network_immunity_period".to_vec()), + "HasMigrationRun should be false before migration" + ); + + // -------------------------------------------------------------------- + // ▶️ RUN MIGRATION + // -------------------------------------------------------------------- + let weight = migrate_network_immunity_period::migrate_network_immunity_period::(); + + // -------------------------------------------------------------------- + // ✅ POST-CONDITIONS + // -------------------------------------------------------------------- + assert_eq!( + NetworkImmunityPeriod::::get(), + 864_000, + "NetworkImmunityPeriod should now be 864_000" + ); + + assert!( + HasMigrationRun::::get(b"migrate_network_immunity_period".to_vec()), + "HasMigrationRun should be true after migration" + ); + + assert!(weight != Weight::zero(), "migration weight should be > 0"); + }); +} + // #[test] // fn test_schedule_dissolve_network_execution() { // new_test_ext(1).execute_with(|| { @@ -349,3 +1595,454 @@ fn test_tempo_greater_than_weight_set_rate_limit() { assert!(tempo as u64 >= weights_set_rate_limit); }) } + +#[allow(clippy::indexing_slicing)] +#[test] +fn massive_dissolve_refund_and_reregistration_flow_is_lossless_and_cleans_state() { + new_test_ext(0).execute_with(|| { + // 
──────────────────────────────────────────────────────────────────── + // 0) Constants and helpers (distinct hotkeys & coldkeys) + // ──────────────────────────────────────────────────────────────────── + const NUM_NETS: usize = 4; + + // Six LP coldkeys + let cold_lps: [U256; 6] = [ + U256::from(3001), + U256::from(3002), + U256::from(3003), + U256::from(3004), + U256::from(3005), + U256::from(3006), + ]; + + // For each coldkey, define two DISTINCT hotkeys it owns. + let mut cold_to_hots: BTreeMap = BTreeMap::new(); + for &c in cold_lps.iter() { + let h1 = U256::from(c.low_u64().saturating_add(100_000)); + let h2 = U256::from(c.low_u64().saturating_add(200_000)); + cold_to_hots.insert(c, [h1, h2]); + } + + // Distinct τ pot sizes per net. + let pots: [u64; NUM_NETS] = [12_345, 23_456, 34_567, 45_678]; + + let lp_sets_per_net: [&[U256]; NUM_NETS] = [ + &cold_lps[0..4], // net0: A,B,C,D + &cold_lps[2..6], // net1: C,D,E,F + &cold_lps[0..6], // net2: A..F + &cold_lps[1..5], // net3: B,C,D,E + ]; + + // Multiple bands/sizes → many positions per cold across nets, using mixed hotkeys. + let bands: [i32; 3] = [5, 13, 30]; + let liqs: [u64; 3] = [400_000, 700_000, 1_100_000]; + + // Helper: add a V3 position via a (hot, cold) pair. + let add_pos = |net: NetUid, hot: U256, cold: U256, band: i32, liq: u64| { + let ct = pallet_subtensor_swap::CurrentTick::::get(net); + let lo = ct.saturating_sub(band); + let hi = ct.saturating_add(band); + assert_ok!(pallet_subtensor_swap::Pallet::::add_liquidity( + RuntimeOrigin::signed(cold), + hot, + net, + lo, + hi, + liq + )); + }; + + // ──────────────────────────────────────────────────────────────────── + // 1) Create many subnets, enable V3, fix price at tick=0 (sqrt≈1) + // ──────────────────────────────────────────────────────────────────── + let mut nets: Vec = Vec::new(); + for i in 0..NUM_NETS { + let owner_hot = U256::from(10_000 + (i as u64)); + let owner_cold = U256::from(20_000 + (i as u64)); + let net = add_dynamic_network(&owner_hot, &owner_cold); + SubtensorModule::set_max_registrations_per_block(net, 1_000u16); + SubtensorModule::set_target_registrations_per_interval(net, 1_000u16); + Emission::::insert(net, Vec::::new()); + SubtensorModule::set_subnet_locked_balance(net, TaoCurrency::from(0)); + + assert_ok!( + pallet_subtensor_swap::Pallet::::toggle_user_liquidity( + RuntimeOrigin::root(), + net, + true + ) + ); + + // Price/tick pinned so LP math stays stable (sqrt(1)). + let ct0 = pallet_subtensor_swap::tick::TickIndex::new_unchecked(0); + let sqrt1 = ct0.try_to_sqrt_price().expect("sqrt(1) price"); + pallet_subtensor_swap::CurrentTick::::set(net, ct0); + pallet_subtensor_swap::AlphaSqrtPrice::::set(net, sqrt1); + + nets.push(net); + } + + // Map net → index for quick lookups. 
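        // (Editor's aside on the tick pinning above: in Uniswap-v3-style pools
        //  the convention is sqrt_price(tick) = 1.0001^(tick / 2), so tick 0
        //  gives a spot price of exactly 1 and symmetric bands
        //  [ct - band, ct + band] around it — assuming the swap pallet follows
        //  that standard tick convention, which the `try_to_sqrt_price` call
        //  suggests but this diff does not state.)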
+ let mut net_index: BTreeMap = BTreeMap::new(); + for (i, &n) in nets.iter().enumerate() { + net_index.insert(n, i); + } + + // ──────────────────────────────────────────────────────────────────── + // 2) Pre-create a handful of small (hot, cold) pairs so accounts exist + // ──────────────────────────────────────────────────────────────────── + for id in 0u64..10 { + let cold_acc = U256::from(1_000_000 + id); + let hot_acc = U256::from(2_000_000 + id); + for &net in nets.iter() { + register_ok_neuron(net, hot_acc, cold_acc, 100_000 + id); + } + } + + // ──────────────────────────────────────────────────────────────────── + // 3) LPs per net: register each (hot, cold), massive τ prefund, and stake + // ──────────────────────────────────────────────────────────────────── + for &cold in cold_lps.iter() { + SubtensorModule::add_balance_to_coldkey_account(&cold, u64::MAX); + } + + // τ balances before LP adds (after staking): + let mut tao_before: BTreeMap = BTreeMap::new(); + + // Ordered α snapshot per net at **pair granularity** (pre‑LP): + let mut alpha_pairs_per_net: BTreeMap> = BTreeMap::new(); + + // Register both hotkeys for each participating cold on each net and stake τ→α. + for (ni, &net) in nets.iter().enumerate() { + let participants = lp_sets_per_net[ni]; + for &cold in participants.iter() { + let [hot1, hot2] = cold_to_hots[&cold]; + + // Ensure (hot, cold) neurons exist on this net. + register_ok_neuron( + net, + hot1, + cold, + (ni as u64) * 10_000 + (hot1.low_u64() % 10_000), + ); + register_ok_neuron( + net, + hot2, + cold, + (ni as u64) * 10_000 + (hot2.low_u64() % 10_000) + 1, + ); + + // Stake τ (split across the two hotkeys). + let base: u64 = + 5_000_000 + ((ni as u64) * 1_000_000) + ((cold.low_u64() % 10) * 250_000); + let stake1: u64 = base.saturating_mul(3) / 5; // 60% + let stake2: u64 = base.saturating_sub(stake1); // 40% + + assert_ok!(SubtensorModule::do_add_stake( + RuntimeOrigin::signed(cold), + hot1, + net, + stake1.into() + )); + assert_ok!(SubtensorModule::do_add_stake( + RuntimeOrigin::signed(cold), + hot2, + net, + stake2.into() + )); + } + } + + // Record τ balances now (post‑stake, pre‑LP). + for &cold in cold_lps.iter() { + tao_before.insert(cold, SubtensorModule::get_coldkey_balance(&cold)); + } + + // Capture **pair‑level** α snapshot per net (pre‑LP). + for ((hot, cold, net), amt) in Alpha::::iter() { + if let Some(&ni) = net_index.get(&net) { + if lp_sets_per_net[ni].contains(&cold) { + let a: u128 = amt.saturating_to_num(); + if a > 0 { + alpha_pairs_per_net + .entry(net) + .or_default() + .push(((hot, cold), a)); + } + } + } + } + + // ──────────────────────────────────────────────────────────────────── + // 4) Add many V3 positions per cold across nets, alternating hotkeys + // ──────────────────────────────────────────────────────────────────── + for (ni, &net) in nets.iter().enumerate() { + let participants = lp_sets_per_net[ni]; + for (pi, &cold) in participants.iter().enumerate() { + let [hot1, hot2] = cold_to_hots[&cold]; + let hots = [hot1, hot2]; + for k in 0..3 { + let band = bands[(pi + k) % bands.len()]; + let liq = liqs[(ni + k) % liqs.len()]; + let hot = hots[k % hots.len()]; + add_pos(net, hot, cold, band, liq); + } + } + } + + // Snapshot τ balances AFTER LP adds (to measure actual principal debit). 
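        // (Balance algebra used by the assertions in step 7: with `before` =
        //  pre-LP balance and `mid` = post-LP balance, the LP principal is
        //  before - mid. Dissolution should pay back principal + pot_share,
        //  so after - mid = (before - mid) + pot_share, i.e.
        //  after - before = pot_share — the quantity checked in (a)–(d).)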
+ let mut tao_after_adds: BTreeMap = BTreeMap::new(); + for &cold in cold_lps.iter() { + tao_after_adds.insert(cold, SubtensorModule::get_coldkey_balance(&cold)); + } + + // ──────────────────────────────────────────────────────────────────── + // 5) Compute Hamilton-apportionment BASE shares per cold and total leftover + // from the **pair-level** pre‑LP α snapshot; also count pairs per cold. + // ──────────────────────────────────────────────────────────────────── + let mut base_share_cold: BTreeMap = + cold_lps.iter().copied().map(|c| (c, 0_u64)).collect(); + let mut pair_count_cold: BTreeMap = + cold_lps.iter().copied().map(|c| (c, 0_u32)).collect(); + + let mut leftover_total: u64 = 0; + + for (ni, &net) in nets.iter().enumerate() { + let pot = pots[ni]; + let pairs = alpha_pairs_per_net.get(&net).cloned().unwrap_or_default(); + if pot == 0 || pairs.is_empty() { + continue; + } + let total_alpha: u128 = pairs.iter().map(|(_, a)| *a).sum(); + if total_alpha == 0 { + continue; + } + + let mut base_sum_net: u64 = 0; + for ((_, cold), a) in pairs.iter().copied() { + // quota = a * pot / total_alpha + let prod: u128 = a.saturating_mul(pot as u128); + let base: u64 = (prod / total_alpha) as u64; + base_sum_net = base_sum_net.saturating_add(base); + *base_share_cold.entry(cold).or_default() = + base_share_cold[&cold].saturating_add(base); + *pair_count_cold.entry(cold).or_default() += 1; + } + let leftover_net = pot.saturating_sub(base_sum_net); + leftover_total = leftover_total.saturating_add(leftover_net); + } + + // ──────────────────────────────────────────────────────────────────── + // 6) Seed τ pots and dissolve *all* networks (liquidates LPs + refunds) + // ──────────────────────────────────────────────────────────────────── + for (ni, &net) in nets.iter().enumerate() { + SubnetTAO::::insert(net, TaoCurrency::from(pots[ni])); + } + for &net in nets.iter() { + assert_ok!(SubtensorModule::do_dissolve_network(net)); + } + + // ──────────────────────────────────────────────────────────────────── + // 7) Assertions: τ balances, α gone, nets removed, swap state clean + // (Hamilton invariants enforced at cold-level without relying on tie-break) + // ──────────────────────────────────────────────────────────────────── + // Collect actual pot credits per cold (principal cancels out against adds when comparing before→after). + let mut actual_pot_cold: BTreeMap = + cold_lps.iter().copied().map(|c| (c, 0_u64)).collect(); + for &cold in cold_lps.iter() { + let before = tao_before[&cold]; + let after = SubtensorModule::get_coldkey_balance(&cold); + actual_pot_cold.insert(cold, after.saturating_sub(before)); + } + + // (a) Sum of actual pot credits equals total pots. + let total_actual: u64 = actual_pot_cold.values().copied().sum(); + let total_pots: u64 = pots.iter().copied().sum(); + assert_eq!( + total_actual, total_pots, + "total τ pot credited across colds must equal sum of pots" + ); + + // (b) Each cold’s pot is within Hamilton bounds: base ≤ actual ≤ base + #pairs. 
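        // (Worked instance of the bound: pot = 10 over pair weights [1, 1, 1]
        //  gives bases [3, 3, 3] and one leftover unit, so each pair lands in
        //  [base, base + 1]; a cold holding k pairs can absorb at most k
        //  leftover units, hence base ≤ actual ≤ base + #pairs.)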
+ let mut extra_accum: u64 = 0; + for &cold in cold_lps.iter() { + let base = *base_share_cold.get(&cold).unwrap_or(&0); + let pairs = *pair_count_cold.get(&cold).unwrap_or(&0) as u64; + let actual = *actual_pot_cold.get(&cold).unwrap_or(&0); + + assert!( + actual >= base, + "cold {cold:?} actual pot {actual} is below base {base}" + ); + assert!( + actual <= base.saturating_add(pairs), + "cold {cold:?} actual pot {actual} exceeds base + pairs ({base} + {pairs})" + ); + + extra_accum = extra_accum.saturating_add(actual.saturating_sub(base)); + } + + // (c) The total “extra beyond base” equals the computed leftover_total across nets. + assert_eq!( + extra_accum, leftover_total, + "sum of extras beyond base must equal total leftover" + ); + + // (d) τ principal was fully refunded (compare after_adds → after). + for &cold in cold_lps.iter() { + let before = tao_before[&cold]; + let mid = tao_after_adds[&cold]; + let after = SubtensorModule::get_coldkey_balance(&cold); + let principal_actual = before.saturating_sub(mid); + let actual_pot = after.saturating_sub(before); + assert_eq!( + after.saturating_sub(mid), + principal_actual.saturating_add(actual_pot), + "cold {cold:?} τ balance incorrect vs 'after_adds'" + ); + } + + // For each dissolved net, check α ledgers gone, network removed, and swap state clean. + for &net in nets.iter() { + assert!( + Alpha::::iter().all(|((_h, _c, n), _)| n != net), + "alpha ledger not fully cleared for net {net:?}" + ); + assert!( + !SubtensorModule::if_subnet_exist(net), + "subnet {net:?} still exists" + ); + assert!( + pallet_subtensor_swap::Ticks::::iter_prefix(net) + .next() + .is_none(), + "ticks not cleared for net {net:?}" + ); + assert!( + !pallet_subtensor_swap::Positions::::iter() + .any(|((n, _owner, _pid), _)| n == net), + "swap positions not fully cleared for net {net:?}" + ); + assert_eq!( + pallet_subtensor_swap::FeeGlobalTao::::get(net).saturating_to_num::(), + 0, + "FeeGlobalTao nonzero for net {net:?}" + ); + assert_eq!( + pallet_subtensor_swap::FeeGlobalAlpha::::get(net).saturating_to_num::(), + 0, + "FeeGlobalAlpha nonzero for net {net:?}" + ); + assert_eq!( + pallet_subtensor_swap::CurrentLiquidity::::get(net), + 0, + "CurrentLiquidity not zero for net {net:?}" + ); + assert!( + !pallet_subtensor_swap::SwapV3Initialized::::get(net), + "SwapV3Initialized still set" + ); + assert!( + !pallet_subtensor_swap::EnabledUserLiquidity::::get(net), + "EnabledUserLiquidity still set" + ); + assert!( + pallet_subtensor_swap::TickIndexBitmapWords::::iter_prefix((net,)) + .next() + .is_none(), + "TickIndexBitmapWords not cleared for net {net:?}" + ); + } + + // ──────────────────────────────────────────────────────────────────── + // 8) Re-register a fresh subnet and re‑stake using the pallet’s min rule + // Assert αΔ equals the sim-swap result for the exact τ staked. 
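        // (In one line, the rule being exercised:
        //      min_amount_required = DefaultMinStake + fee(DefaultMinStake),
        //  with the fee taken from a Buy-side sim_swap and the pallet's
        //  approx_fee_amount as the fallback when the simulation fails.)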
+ // ──────────────────────────────────────────────────────────────────── + let new_owner_hot = U256::from(99_000); + let new_owner_cold = U256::from(99_001); + let net_new = add_dynamic_network(&new_owner_hot, &new_owner_cold); + SubtensorModule::set_max_registrations_per_block(net_new, 1_000u16); + SubtensorModule::set_target_registrations_per_interval(net_new, 1_000u16); + Emission::::insert(net_new, Vec::::new()); + SubtensorModule::set_subnet_locked_balance(net_new, TaoCurrency::from(0)); + + assert_ok!( + pallet_subtensor_swap::Pallet::::toggle_user_liquidity( + RuntimeOrigin::root(), + net_new, + true + ) + ); + let ct0 = pallet_subtensor_swap::tick::TickIndex::new_unchecked(0); + let sqrt1 = ct0.try_to_sqrt_price().expect("sqrt(1)"); + pallet_subtensor_swap::CurrentTick::::set(net_new, ct0); + pallet_subtensor_swap::AlphaSqrtPrice::::set(net_new, sqrt1); + + // Compute the exact min stake per the pallet rule: DefaultMinStake + fee(DefaultMinStake). + let min_stake_u64: u64 = DefaultMinStake::::get().into(); + let fee_for_min: u64 = pallet_subtensor_swap::Pallet::::sim_swap( + net_new, + subtensor_swap_interface::OrderType::Buy, + min_stake_u64, + ) + .map(|r| r.fee_paid) + .unwrap_or_else(|_e| { + as subtensor_swap_interface::SwapHandler< + ::AccountId, + >>::approx_fee_amount(net_new, min_stake_u64) + }); + let min_amount_required: u64 = min_stake_u64.saturating_add(fee_for_min); + + // Re‑stake from three coldkeys; choose a specific DISTINCT hotkey per cold. + for &cold in &cold_lps[0..3] { + let [hot1, _hot2] = cold_to_hots[&cold]; + register_ok_neuron(net_new, hot1, cold, 7777); + + let before_tao = SubtensorModule::get_coldkey_balance(&cold); + let a_prev: u64 = Alpha::::get((hot1, cold, net_new)).saturating_to_num(); + + // Expected α for this exact τ, using the same sim path as the pallet. + let expected_alpha_out: u64 = pallet_subtensor_swap::Pallet::::sim_swap( + net_new, + subtensor_swap_interface::OrderType::Buy, + min_amount_required, + ) + .map(|r| r.amount_paid_out) + .expect("sim_swap must succeed for fresh net and min amount"); + + assert_ok!(SubtensorModule::do_add_stake( + RuntimeOrigin::signed(cold), + hot1, + net_new, + min_amount_required.into() + )); + + let after_tao = SubtensorModule::get_coldkey_balance(&cold); + let a_new: u64 = Alpha::::get((hot1, cold, net_new)).saturating_to_num(); + let a_delta = a_new.saturating_sub(a_prev); + + // τ decreased by exactly the amount we sent. + assert_eq!( + after_tao, + before_tao.saturating_sub(min_amount_required), + "τ did not decrease by the min required restake amount for cold {cold:?}" + ); + + // α minted equals the simulated swap’s net out for that same τ. 
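// Editorial sketch (not part of the patch): the minimum-stake rule exercised
// above, in isolation, before the assertion that follows. The caller must send
// DefaultMinStake plus the swap fee charged on that amount, so the net amount
// after fees still clears the minimum. `min_stake` and `fee_on` are
// illustrative stand-ins for DefaultMinStake::<Test>::get() and the sim_swap
// fee lookup.
fn min_amount_required(min_stake: u64, fee_on: impl Fn(u64) -> u64) -> u64 {
    min_stake.saturating_add(fee_on(min_stake))
}
// e.g. with min_stake = 500_000 and a flat 0.3% fee:
//   min_amount_required(500_000, |a| a * 3 / 1000) == 501_500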
+            assert_eq!(
+                a_delta, expected_alpha_out,
+                "α minted mismatch for cold {cold:?} (hot {hot1:?}) on new net (αΔ {a_delta}, expected {expected_alpha_out})"
+            );
+        }
+
+        // Ensure V3 still functional on new net: add a small position for the first cold using its hot1
+        let who_cold = cold_lps[0];
+        let [who_hot, _] = cold_to_hots[&who_cold];
+        add_pos(net_new, who_hot, who_cold, 8, 123_456);
+        assert!(
+            pallet_subtensor_swap::Positions::<Test>::iter()
+                .any(|((n, owner, _pid), _)| n == net_new && owner == who_cold),
+            "new position not recorded on the re-registered net"
+        );
+    });
+}
diff --git a/pallets/subtensor/src/tests/recycle_alpha.rs b/pallets/subtensor/src/tests/recycle_alpha.rs
index e61a7aee26..173a03aea1 100644
--- a/pallets/subtensor/src/tests/recycle_alpha.rs
+++ b/pallets/subtensor/src/tests/recycle_alpha.rs
@@ -430,7 +430,7 @@ fn test_recycle_errors() {
                 100_000.into(),
                 99.into() // non-existent subnet
             ),
-            Error::<Test>::SubNetworkDoesNotExist
+            Error::<Test>::SubnetNotExists
         );
 
         assert_noop!(
@@ -502,7 +502,7 @@ fn test_burn_errors() {
                 100_000.into(),
                 99.into() // non-existent subnet
             ),
-            Error::<Test>::SubNetworkDoesNotExist
+            Error::<Test>::SubnetNotExists
         );
 
         assert_noop!(
diff --git a/pallets/subtensor/src/tests/registration.rs b/pallets/subtensor/src/tests/registration.rs
index 7ccb591620..48e887d606 100644
--- a/pallets/subtensor/src/tests/registration.rs
+++ b/pallets/subtensor/src/tests/registration.rs
@@ -1,5 +1,6 @@
 #![allow(clippy::unwrap_used)]
 
+use crate::*;
 use approx::assert_abs_diff_eq;
 use frame_support::dispatch::DispatchInfo;
 use frame_support::sp_runtime::{DispatchError, transaction_validity::TransactionSource};
@@ -8,7 +9,7 @@ use frame_support::{assert_err, assert_noop, assert_ok};
 use frame_system::{Config, RawOrigin};
 use sp_core::U256;
 use sp_runtime::traits::{DispatchInfoOf, TransactionExtension, TxBaseImplication};
-use subtensor_runtime_common::{AlphaCurrency, Currency as CurrencyT, NetUid};
+use subtensor_runtime_common::{AlphaCurrency, Currency as CurrencyT, NetUid, NetUidStorageIndex};
 
 use super::mock;
 use super::mock::*;
@@ -1335,6 +1336,92 @@ fn test_registration_get_uid_to_prune_none_in_immunity_period() {
     });
 }
 
+// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::registration::test_registration_get_uid_to_prune_owner_immortality --exact --show-output --nocapture
+#[test]
+fn test_registration_get_uid_to_prune_owner_immortality() {
+    new_test_ext(1).execute_with(|| {
+        [
+            // Burn key limit to 1 - testing the limits
+            // Other owner's hotkey is pruned because there's only 1 immune key and
+            // pruning score of owner key is lower
+            (1, 1),
+            // Burn key limit to 2 - both owner keys are immune
+            (2, 2),
+        ]
+        .iter()
+        .for_each(|(limit, uid_to_prune)| {
+            let subnet_owner_ck = U256::from(0);
+            let subnet_owner_hk = U256::from(1);
+
+            // Other hk owned by owner
+            let other_owner_hk = U256::from(2);
+            Owner::<Test>::insert(other_owner_hk, subnet_owner_ck);
+            OwnedHotkeys::<Test>::insert(subnet_owner_ck, vec![subnet_owner_hk, other_owner_hk]);
+
+            // Another hk not owned by owner
+            let non_owner_hk = U256::from(3);
+
+            let netuid = add_dynamic_network(&subnet_owner_hk, &subnet_owner_ck);
+            BlockAtRegistration::<Test>::insert(netuid, 1, 1);
+            BlockAtRegistration::<Test>::insert(netuid, 2, 2);
+            Uids::<Test>::insert(netuid, other_owner_hk, 1);
+            Uids::<Test>::insert(netuid, non_owner_hk, 2);
+            Keys::<Test>::insert(netuid, 1, other_owner_hk);
+            Keys::<Test>::insert(netuid, 2, non_owner_hk);
+            ImmunityPeriod::<Test>::insert(netuid, 1);
+            SubnetworkN::<Test>::insert(netuid, 3);
+
+            step_block(10);
+
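// Editorial sketch (not part of the patch): the selection rule these
// owner-immortality tests pin down, as a standalone function. Up to
// ImmuneOwnerUidsLimit of the owner's own uids are treated as immune, and the
// pruned uid is the lowest-score uid among the remaining candidates (first
// wins on ties). Names and the immunity flags are illustrative; real pruning
// also honors ImmunityPeriod, which these tests have already stepped past.
fn pick_prune_candidate(scores: &[u16], immune: &[bool]) -> Option<usize> {
    scores
        .iter()
        .enumerate()
        .filter(|(i, _)| !immune[*i])
        .min_by_key(|(_, &s)| s)
        .map(|(i, _)| i)
}
// limit = 1: only uid 0 immune → uids {1, 2} compete, uid 1 wins on score 0.
// limit = 2: uids {0, 1} immune → uid 2 is pruned despite its higher score.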
+            ImmuneOwnerUidsLimit::<Test>::insert(netuid, *limit);
+
+            // Set lower pruning score to sn owner keys
+            PruningScores::<Test>::insert(netuid, vec![0, 0, 1]);
+
+            assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), *uid_to_prune);
+        });
+    });
+}
+
+// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::registration::test_registration_get_uid_to_prune_owner_immortality_all_immune --exact --show-output --nocapture
+#[test]
+fn test_registration_get_uid_to_prune_owner_immortality_all_immune() {
+    new_test_ext(1).execute_with(|| {
+        let limit = 2;
+        let uid_to_prune = 2;
+        let subnet_owner_ck = U256::from(0);
+        let subnet_owner_hk = U256::from(1);
+
+        // Other hk owned by owner
+        let other_owner_hk = U256::from(2);
+        Owner::<Test>::insert(other_owner_hk, subnet_owner_ck);
+        OwnedHotkeys::<Test>::insert(subnet_owner_ck, vec![subnet_owner_hk, other_owner_hk]);
+
+        // Another hk not owned by owner
+        let non_owner_hk = U256::from(3);
+
+        let netuid = add_dynamic_network(&subnet_owner_hk, &subnet_owner_ck);
+        BlockAtRegistration::<Test>::insert(netuid, 0, 12);
+        BlockAtRegistration::<Test>::insert(netuid, 1, 11);
+        BlockAtRegistration::<Test>::insert(netuid, 2, 10);
+        Uids::<Test>::insert(netuid, other_owner_hk, 1);
+        Uids::<Test>::insert(netuid, non_owner_hk, 2);
+        Keys::<Test>::insert(netuid, 1, other_owner_hk);
+        Keys::<Test>::insert(netuid, 2, non_owner_hk);
+        ImmunityPeriod::<Test>::insert(netuid, 100);
+        SubnetworkN::<Test>::insert(netuid, 3);
+
+        step_block(20);
+
+        ImmuneOwnerUidsLimit::<Test>::insert(netuid, limit);
+
+        // Set lower pruning score to sn owner keys
+        PruningScores::<Test>::insert(netuid, vec![0, 0, 1]);
+
+        assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), uid_to_prune);
+    });
+}
+
 #[test]
 fn test_registration_pruning() {
     new_test_ext(1).execute_with(|| {
@@ -2062,6 +2149,45 @@ fn test_registration_disabled() {
     });
 }
 
+#[test]
+fn test_last_update_correctness() {
+    new_test_ext(1).execute_with(|| {
+        let netuid = NetUid::from(1);
+        let tempo: u16 = 13;
+        let hotkey_account_id = U256::from(1);
+        let burn_cost = 1000;
+        let coldkey_account_id = U256::from(667); // Neighbour of the beast, har har
+        // add network
+        SubtensorModule::set_burn(netuid, burn_cost.into());
+        add_network(netuid, tempo, 0);
+
+        let reserve = 1_000_000_000_000;
+        mock::setup_reserves(netuid, reserve.into(), reserve.into());
+
+        // Simulate existing neurons
+        let existing_neurons = 3;
+        SubnetworkN::<Test>::insert(netuid, existing_neurons);
+
+        // Simulate no LastUpdate so far (can happen on mechanisms)
+        LastUpdate::<Test>::remove(NetUidStorageIndex::from(netuid));
+
+        // Give some $$$ to coldkey
+        SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000);
+        // Subscribe and check extrinsic output
+        assert_ok!(SubtensorModule::burned_register(
+            <<Test as Config>::RuntimeOrigin>::signed(coldkey_account_id),
+            netuid,
+            hotkey_account_id
+        ));
+
+        // Check that LastUpdate has existing_neurons + 1 elements now
+        assert_eq!(
+            LastUpdate::<Test>::get(NetUidStorageIndex::from(netuid)).len(),
+            (existing_neurons + 1) as usize
+        );
+    });
+}
+
 // #[ignore]
 // #[test]
 // fn test_hotkey_swap_ok() {
diff --git a/pallets/subtensor/src/tests/staking.rs b/pallets/subtensor/src/tests/staking.rs
index 38895348cc..b36788fa31 100644
--- a/pallets/subtensor/src/tests/staking.rs
+++ b/pallets/subtensor/src/tests/staking.rs
@@ -13,7 +13,9 @@ use sp_core::{Get, H256, U256};
 use sp_runtime::traits::Dispatchable;
 use substrate_fixed::traits::FromFixed;
 use substrate_fixed::types::{I96F32, I110F18, U64F64, U96F32};
-use subtensor_runtime_common::{AlphaCurrency, Currency as CurrencyT,
NetUid, TaoCurrency}; +use subtensor_runtime_common::{ + AlphaCurrency, Currency as CurrencyT, NetUid, NetUidStorageIndex, TaoCurrency, +}; use subtensor_swap_interface::{OrderType, SwapHandler}; use super::mock; @@ -2440,12 +2442,12 @@ fn test_mining_emission_distribution_validator_valiminer_miner() { )); // Setup YUMA so that it creates emissions - Weights::::insert(netuid, 0, vec![(1, 0xFFFF)]); - Weights::::insert(netuid, 1, vec![(2, 0xFFFF)]); + Weights::::insert(NetUidStorageIndex::from(netuid), 0, vec![(1, 0xFFFF)]); + Weights::::insert(NetUidStorageIndex::from(netuid), 1, vec![(2, 0xFFFF)]); BlockAtRegistration::::set(netuid, 0, 1); BlockAtRegistration::::set(netuid, 1, 1); BlockAtRegistration::::set(netuid, 2, 1); - LastUpdate::::set(netuid, vec![2, 2, 2]); + LastUpdate::::set(NetUidStorageIndex::from(netuid), vec![2, 2, 2]); Kappa::::set(netuid, u16::MAX / 5); ActivityCutoff::::set(netuid, u16::MAX); // makes all stake active ValidatorPermit::::insert(netuid, vec![true, true, false]); diff --git a/pallets/subtensor/src/tests/subnet.rs b/pallets/subtensor/src/tests/subnet.rs index b60f3ffa41..a11eae759e 100644 --- a/pallets/subtensor/src/tests/subnet.rs +++ b/pallets/subtensor/src/tests/subnet.rs @@ -52,7 +52,7 @@ fn test_do_start_call_fail_with_not_existed_subnet() { <::RuntimeOrigin>::signed(coldkey_account_id), netuid ), - Error::::SubNetworkDoesNotExist + Error::::SubnetNotExists ); }); } @@ -231,6 +231,7 @@ fn test_register_network_min_burn_at_default() { #[test] fn test_register_network_use_symbol_for_subnet_if_available() { new_test_ext(1).execute_with(|| { + SubtensorModule::set_max_subnets(SYMBOLS.len() as u16); for i in 0..(SYMBOLS.len() - 1) { let coldkey = U256::from(1_000_000 + i); let hotkey = U256::from(2_000_000 + i); @@ -317,6 +318,7 @@ fn test_register_network_use_next_available_symbol_if_symbol_for_subnet_is_taken fn test_register_network_use_default_symbol_if_all_symbols_are_taken() { new_test_ext(1).execute_with(|| { // Register networks until we have exhausted all symbols + SubtensorModule::set_max_subnets(SYMBOLS.len() as u16); for i in 0..(SYMBOLS.len() - 1) { let coldkey = U256::from(1_000_000 + i); let hotkey = U256::from(2_000_000 + i); @@ -725,11 +727,6 @@ fn test_user_liquidity_access_control() { // add network let netuid = add_dynamic_network(&owner_hotkey, &owner_coldkey); - // Initially should be disabled - assert!(!pallet_subtensor_swap::EnabledUserLiquidity::::get( - NetUid::from(netuid) - )); - // Not owner, not root: should fail assert_noop!( Swap::toggle_user_liquidity(RuntimeOrigin::signed(not_owner), netuid, true), diff --git a/pallets/subtensor/src/tests/swap_hotkey.rs b/pallets/subtensor/src/tests/swap_hotkey.rs index dae5a3f176..5991baf07a 100644 --- a/pallets/subtensor/src/tests/swap_hotkey.rs +++ b/pallets/subtensor/src/tests/swap_hotkey.rs @@ -8,7 +8,7 @@ use frame_system::{Config, RawOrigin}; use sp_core::{Get, H160, H256, U256}; use sp_runtime::SaturatedConversion; use substrate_fixed::types::U64F64; -use subtensor_runtime_common::{AlphaCurrency, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, NetUidStorageIndex, TaoCurrency}; use subtensor_swap_interface::SwapHandler; use super::mock; @@ -326,7 +326,11 @@ fn test_swap_weight_commits() { add_network(netuid, 1, 1); IsNetworkMember::::insert(old_hotkey, netuid, true); - WeightCommits::::insert(netuid, old_hotkey, weight_commits.clone()); + WeightCommits::::insert( + NetUidStorageIndex::from(netuid), + old_hotkey, + weight_commits.clone(), + ); 
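// Editorial sketch (not part of the patch): throughout this PR, per-epoch maps
// such as Weights, LastUpdate, Incentive, Bonds and WeightCommits are re-keyed
// from the bare NetUid to NetUidStorageIndex, a subnet/mechanism-aware index,
// and a plain subnet id converts via From. A toy stand-in purely for
// illustration, not the runtime-common definition:
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct NetUidStorageIndex(u16);

impl From<u16> for NetUidStorageIndex {
    // A bare netuid maps to its base (mechanism-0) storage slot.
    fn from(netuid: u16) -> Self {
        Self(netuid)
    }
}
// Call sites then read:
//   WeightCommits::<Test>::get(NetUidStorageIndex::from(netuid), hotkey)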
assert_ok!(SubtensorModule::perform_hotkey_swap_on_all_subnets( &old_hotkey, @@ -335,9 +339,12 @@ fn test_swap_weight_commits() { &mut weight )); - assert!(!WeightCommits::::contains_key(netuid, old_hotkey)); + assert!(!WeightCommits::::contains_key( + NetUidStorageIndex::from(netuid), + old_hotkey + )); assert_eq!( - WeightCommits::::get(netuid, new_hotkey), + WeightCommits::::get(NetUidStorageIndex::from(netuid), new_hotkey), Some(weight_commits) ); }); diff --git a/pallets/subtensor/src/tests/swap_hotkey_with_subnet.rs b/pallets/subtensor/src/tests/swap_hotkey_with_subnet.rs index 349c28903a..c7baa55387 100644 --- a/pallets/subtensor/src/tests/swap_hotkey_with_subnet.rs +++ b/pallets/subtensor/src/tests/swap_hotkey_with_subnet.rs @@ -5,7 +5,7 @@ use codec::Encode; use frame_support::weights::Weight; use frame_support::{assert_err, assert_noop, assert_ok}; use frame_system::{Config, RawOrigin}; -use subtensor_runtime_common::{AlphaCurrency, Currency, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, Currency, NetUidStorageIndex, TaoCurrency}; use super::mock::*; use crate::*; @@ -343,7 +343,11 @@ fn test_swap_weight_commits() { SubtensorModule::add_balance_to_coldkey_account(&coldkey, u64::MAX); IsNetworkMember::::insert(old_hotkey, netuid, true); - WeightCommits::::insert(netuid, old_hotkey, weight_commits.clone()); + WeightCommits::::insert( + NetUidStorageIndex::from(netuid), + old_hotkey, + weight_commits.clone(), + ); System::set_block_number(System::block_number() + HotkeySwapOnSubnetInterval::get()); assert_ok!(SubtensorModule::do_swap_hotkey( @@ -353,9 +357,12 @@ fn test_swap_weight_commits() { Some(netuid) )); - assert!(!WeightCommits::::contains_key(netuid, old_hotkey)); + assert!(!WeightCommits::::contains_key( + NetUidStorageIndex::from(netuid), + old_hotkey + )); assert_eq!( - WeightCommits::::get(netuid, new_hotkey), + WeightCommits::::get(NetUidStorageIndex::from(netuid), new_hotkey), Some(weight_commits) ); }); diff --git a/pallets/subtensor/src/tests/uids.rs b/pallets/subtensor/src/tests/uids.rs index bca6945b44..4317337ffd 100644 --- a/pallets/subtensor/src/tests/uids.rs +++ b/pallets/subtensor/src/tests/uids.rs @@ -5,7 +5,7 @@ use crate::*; use frame_support::{assert_err, assert_ok}; use frame_system::Config; use sp_core::{H160, U256}; -use subtensor_runtime_common::AlphaCurrency; +use subtensor_runtime_common::{AlphaCurrency, NetUidStorageIndex}; /******************************************** tests for uids.rs file @@ -63,13 +63,13 @@ fn test_replace_neuron() { Consensus::::mutate(netuid, |v| { SubtensorModule::set_element_at(v, neuron_uid as usize, 5u16) }); - Incentive::::mutate(netuid, |v| { + Incentive::::mutate(NetUidStorageIndex::from(netuid), |v| { SubtensorModule::set_element_at(v, neuron_uid as usize, 5u16) }); Dividends::::mutate(netuid, |v| { SubtensorModule::set_element_at(v, neuron_uid as usize, 5u16) }); - Bonds::::insert(netuid, neuron_uid, vec![(0, 1)]); + Bonds::::insert(NetUidStorageIndex::from(netuid), neuron_uid, vec![(0, 1)]); // serve axon mock address let ip: u128 = 1676056785; @@ -130,7 +130,7 @@ fn test_replace_neuron() { 0 ); assert_eq!( - SubtensorModule::get_incentive_for_uid(netuid, neuron_uid), + SubtensorModule::get_incentive_for_uid(netuid.into(), neuron_uid), 0 ); assert_eq!( @@ -145,7 +145,10 @@ fn test_replace_neuron() { assert_eq!(axon_info.ip_type, 0); // Check bonds are cleared. 
- assert_eq!(Bonds::::get(netuid, neuron_uid), vec![]); + assert_eq!( + Bonds::::get(NetUidStorageIndex::from(netuid), neuron_uid), + vec![] + ); assert_eq!(AssociatedEvmAddress::::get(netuid, neuron_uid), None); }); } @@ -189,7 +192,7 @@ fn test_bonds_cleared_on_replace() { let neuron_uid = neuron_uid.unwrap(); AssociatedEvmAddress::::insert(netuid, neuron_uid, (evm_address, 1)); // set non-default bonds - Bonds::::insert(netuid, neuron_uid, vec![(0, 1)]); + Bonds::::insert(NetUidStorageIndex::from(netuid), neuron_uid, vec![(0, 1)]); // Replace the neuron. SubtensorModule::replace_neuron(netuid, neuron_uid, &new_hotkey_account_id, block_number); @@ -214,7 +217,10 @@ fn test_bonds_cleared_on_replace() { assert_eq!(curr_hotkey.unwrap(), new_hotkey_account_id); // Check bonds are cleared. - assert_eq!(Bonds::::get(netuid, neuron_uid), vec![]); + assert_eq!( + Bonds::::get(NetUidStorageIndex::from(netuid), neuron_uid), + vec![] + ); assert_eq!(AssociatedEvmAddress::::get(netuid, neuron_uid), None); }); } diff --git a/pallets/subtensor/src/tests/weights.rs b/pallets/subtensor/src/tests/weights.rs index d25a3eb34f..bc9af5cf07 100644 --- a/pallets/subtensor/src/tests/weights.rs +++ b/pallets/subtensor/src/tests/weights.rs @@ -21,7 +21,7 @@ use sp_runtime::{ }; use sp_std::collections::vec_deque::VecDeque; use substrate_fixed::types::I32F32; -use subtensor_runtime_common::TaoCurrency; +use subtensor_runtime_common::{NetUidStorageIndex, TaoCurrency}; use subtensor_swap_interface::SwapHandler; use tle::{ curves::drand::TinyBLS381, @@ -340,8 +340,14 @@ fn test_reveal_weights_validate() { version_key, }); - let commit_hash: H256 = - SubtensorModule::get_commit_hash(&who, netuid, &dests, &weights, &salt, version_key); + let commit_hash: H256 = SubtensorModule::get_commit_hash( + &who, + NetUidStorageIndex::from(netuid), + &dests, + &weights, + &salt, + version_key, + ); let commit_block = SubtensorModule::get_current_block_as_u64(); let (first_reveal_block, last_reveal_block) = SubtensorModule::get_reveal_blocks(netuid, commit_block); @@ -412,7 +418,7 @@ fn test_reveal_weights_validate() { ); // Add the commit to the hotkey - WeightCommits::::mutate(netuid, hotkey, |maybe_commits| { + WeightCommits::::mutate(NetUidStorageIndex::from(netuid), hotkey, |maybe_commits| { let mut commits: VecDeque<(H256, u64, u64, u64)> = maybe_commits.take().unwrap_or_default(); commits.push_back(( @@ -1377,7 +1383,7 @@ fn test_set_weights_sum_larger_than_u16_max() { assert_ok!(result); // Get max-upscaled unnormalized weights. - let all_weights: Vec> = SubtensorModule::get_weights(netuid); + let all_weights: Vec> = SubtensorModule::get_weights(netuid.into()); let weights_set: &[I32F32] = &all_weights[neuron_uid as usize]; assert_eq!(weights_set[0], I32F32::from_num(u16::MAX)); assert_eq!(weights_set[1], I32F32::from_num(u16::MAX)); @@ -2535,8 +2541,9 @@ fn test_commit_reveal_multiple_commits() { )); // Check that commits before the revealed one are removed - let remaining_commits = crate::WeightCommits::::get(netuid, hotkey) - .expect("expected 8 remaining commits"); + let remaining_commits = + crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey) + .expect("expected 8 remaining commits"); assert_eq!(remaining_commits.len(), 8); // 10 commits - 2 removed (index 0 and 1) // 4. 
Reveal the last commit next @@ -2551,7 +2558,8 @@ fn test_commit_reveal_multiple_commits() { )); // Remaining commits should have removed up to index 9 - let remaining_commits = crate::WeightCommits::::get(netuid, hotkey); + let remaining_commits = + crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey); assert!(remaining_commits.is_none()); // All commits removed // After revealing all commits, attempt to commit again should now succeed @@ -2796,7 +2804,8 @@ fn test_commit_reveal_multiple_commits() { )); // Check that the first commit has been removed - let remaining_commits = crate::WeightCommits::::get(netuid, hotkey); + let remaining_commits = + crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey); assert!(remaining_commits.is_none()); // Attempting to reveal the first commit should fail as it was removed @@ -2956,7 +2965,8 @@ fn test_expired_commits_handling_in_commit_and_reveal() { // 6. Verify that the number of unrevealed, non-expired commits is now 6 let commits: VecDeque<(H256, u64, u64, u64)> = - crate::WeightCommits::::get(netuid, hotkey).expect("Expected a commit"); + crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey) + .expect("Expected a commit"); assert_eq!(commits.len(), 6); // 5 non-expired commits from epoch 1 + new commit // 7. Attempt to reveal an expired commit (from epoch 0) @@ -3002,7 +3012,7 @@ fn test_expired_commits_handling_in_commit_and_reveal() { )); // 10. Verify that all commits have been revealed and the queue is empty - let commits = crate::WeightCommits::::get(netuid, hotkey); + let commits = crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey); assert!(commits.is_none()); // 11. Attempt to reveal again, should fail with NoWeightsCommitFound @@ -3193,7 +3203,7 @@ fn test_reveal_at_exact_epoch() { Error::::ExpiredWeightCommit ); - crate::WeightCommits::::remove(netuid, hotkey); + crate::WeightCommits::::remove(NetUidStorageIndex::from(netuid), hotkey); } }); } @@ -3471,7 +3481,8 @@ fn test_commit_reveal_order_enforcement() { // Check that commits A and B are removed let remaining_commits = - crate::WeightCommits::::get(netuid, hotkey).expect("expected 1 remaining commit"); + crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey) + .expect("expected 1 remaining commit"); assert_eq!(remaining_commits.len(), 1); // Only commit C should remain // Attempt to reveal C (index 2), should succeed @@ -3652,7 +3663,7 @@ fn test_reveal_at_exact_block() { ); // Clean up for next iteration - crate::WeightCommits::::remove(netuid, hotkey); + crate::WeightCommits::::remove(NetUidStorageIndex::from(netuid), hotkey); } }); } @@ -3730,7 +3741,7 @@ fn test_successful_batch_reveal() { )); // 4. Ensure all commits are removed - let commits = crate::WeightCommits::::get(netuid, hotkey); + let commits = crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey); assert!(commits.is_none()); }); } @@ -3831,8 +3842,8 @@ fn test_batch_reveal_with_expired_commits() { assert_err!(result, Error::::ExpiredWeightCommit); // 5. Expired commit is not removed until a successful call - let commits = - crate::WeightCommits::::get(netuid, hotkey).expect("Expected remaining commits"); + let commits = crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey) + .expect("Expected remaining commits"); assert_eq!(commits.len(), 3); // 6. Try revealing the remaining commits @@ -3851,7 +3862,7 @@ fn test_batch_reveal_with_expired_commits() { )); // 7. 
Ensure all commits are removed - let commits = crate::WeightCommits::::get(netuid, hotkey); + let commits = crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey); assert!(commits.is_none()); }); } @@ -4258,7 +4269,7 @@ fn test_batch_reveal_with_out_of_order_commits() { )); // 6. Ensure all commits are removed - let commits = crate::WeightCommits::::get(netuid, hotkey); + let commits = crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey); assert!(commits.is_none()); }); } @@ -4322,7 +4333,7 @@ fn test_highly_concurrent_commits_and_reveals_with_multiple_hotkeys() { for i in 0..commits_per_hotkey { for hotkey in &hotkeys { - let current_commits = crate::WeightCommits::::get(netuid, hotkey) + let current_commits = crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey) .unwrap_or_default(); if current_commits.len() >= max_unrevealed_commits { continue; @@ -4671,7 +4682,7 @@ fn test_get_reveal_blocks() { assert_err!(result, Error::::NoWeightsCommitFound); // **15. Verify that All Commits Have Been Removed from Storage** - let commits = crate::WeightCommits::::get(netuid, hotkey); + let commits = crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey); assert!( commits.is_none(), "Commits should be cleared after successful reveal" @@ -4727,7 +4738,7 @@ fn test_commit_weights_rate_limit() { let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey).expect("expected uid"); - SubtensorModule::set_last_update_for_uid(netuid, neuron_uid, 0); + SubtensorModule::set_last_update_for_uid(NetUidStorageIndex::from(netuid), neuron_uid, 0); assert_ok!(SubtensorModule::commit_weights( RuntimeOrigin::signed(hotkey), @@ -4984,7 +4995,7 @@ fn test_reveal_crv3_commits_success() { // Step epochs to run the epoch via the blockstep step_epochs(3, netuid); - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse.get(neuron_uid1 as usize).cloned().unwrap_or_default(); assert!( @@ -5106,7 +5117,7 @@ fn test_reveal_crv3_commits_cannot_reveal_after_reveal_epoch() { step_epochs(3, netuid); // Verify that weights are not set - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse .get(neuron_uid1 as usize) .cloned() @@ -5141,7 +5152,7 @@ fn test_reveal_crv3_commits_cannot_reveal_after_reveal_epoch() { assert_ok!(SubtensorModule::reveal_crv3_commits(netuid)); // Verify that the weights for the neuron have not been set - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse .get(neuron_uid1 as usize) .cloned() @@ -5181,7 +5192,8 @@ fn test_do_commit_crv3_weights_success() { let cur_epoch = SubtensorModule::get_epoch_index(netuid, SubtensorModule::get_current_block_as_u64()); - let commits = TimelockedWeightCommits::::get(netuid, cur_epoch); + let commits = + TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), cur_epoch); assert_eq!(commits.len(), 1); assert_eq!(commits[0].0, hotkey); assert_eq!(commits[0].2, commit_data); @@ -5264,7 +5276,7 @@ fn test_do_commit_crv3_weights_committing_too_fast() { SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey).expect("Expected uid"); - 
SubtensorModule::set_last_update_for_uid(netuid, neuron_uid, 0); + SubtensorModule::set_last_update_for_uid(NetUidStorageIndex::from(netuid), neuron_uid, 0); assert_ok!(SubtensorModule::do_commit_timelocked_weights( RuntimeOrigin::signed(hotkey), @@ -5478,7 +5490,7 @@ fn test_reveal_crv3_commits_decryption_failure() { let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey) .expect("Failed to get neuron UID for hotkey") as usize; - let weights_matrix = SubtensorModule::get_weights(netuid); + let weights_matrix = SubtensorModule::get_weights(netuid.into()); let weights = weights_matrix.get(neuron_uid).cloned().unwrap_or_default(); assert!(weights.iter().all(|&w| w == I32F32::from_num(0))); }); @@ -5591,7 +5603,7 @@ fn test_reveal_crv3_commits_multiple_commits_some_fail_some_succeed() { // Verify that weights are set for hotkey1 let neuron_uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey1) .expect("Failed to get neuron UID for hotkey1") as usize; - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights1 = weights_sparse.get(neuron_uid1).cloned().unwrap_or_default(); assert!( !weights1.is_empty(), @@ -5686,7 +5698,7 @@ fn test_reveal_crv3_commits_do_set_weights_failure() { // Verify that weights are not set due to `do_set_weights` failure let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey) .expect("Failed to get neuron UID for hotkey") as usize; - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse.get(neuron_uid).cloned().unwrap_or_default(); assert!( weights.is_empty(), @@ -5764,7 +5776,7 @@ fn test_reveal_crv3_commits_payload_decoding_failure() { // Verify that weights are not set let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey) .expect("Failed to get neuron UID for hotkey") as usize; - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse.get(neuron_uid).cloned().unwrap_or_default(); assert!( weights.is_empty(), @@ -5846,7 +5858,7 @@ fn test_reveal_crv3_commits_signature_deserialization_failure() { // Verify that weights are not set let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey) .expect("Failed to get neuron UID for hotkey") as usize; - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse.get(neuron_uid).cloned().unwrap_or_default(); assert!( weights.is_empty(), @@ -5911,7 +5923,7 @@ fn test_reveal_crv3_commits_with_empty_commit_queue() { step_epochs(2, netuid); - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); assert!( weights_sparse.is_empty(), "Weights should be empty as there were no commits to reveal" @@ -5998,7 +6010,7 @@ fn test_reveal_crv3_commits_with_incorrect_identity_message() { // Verify that weights are not set due to decryption failure let neuron_uid = neuron_uid as usize; - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse.get(neuron_uid).cloned().unwrap_or_default(); assert!( weights.is_empty(), @@ 
-6036,7 +6048,8 @@ fn test_multiple_commits_by_same_hotkey_within_limit() { let cur_epoch = SubtensorModule::get_epoch_index(netuid, SubtensorModule::get_current_block_as_u64()); - let commits = TimelockedWeightCommits::::get(netuid, cur_epoch); + let commits = + TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), cur_epoch); assert_eq!( commits.len(), 10, @@ -6071,7 +6084,7 @@ fn test_reveal_crv3_commits_removes_past_epoch_commits() { let bounded_commit = vec![epoch as u8; 5].try_into().expect("bounded vec"); assert_ok!(TimelockedWeightCommits::::try_mutate( - netuid, + NetUidStorageIndex::from(netuid), epoch, |q| -> DispatchResult { q.push_back((hotkey, cur_block, bounded_commit, reveal_round)); @@ -6081,8 +6094,14 @@ fn test_reveal_crv3_commits_removes_past_epoch_commits() { } // Sanity – both epochs presently hold a commit. - assert!(!TimelockedWeightCommits::::get(netuid, past_epoch).is_empty()); - assert!(!TimelockedWeightCommits::::get(netuid, reveal_epoch).is_empty()); + assert!( + !TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), past_epoch) + .is_empty() + ); + assert!( + !TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), reveal_epoch) + .is_empty() + ); // --------------------------------------------------------------------- // Run the reveal pass WITHOUT a pulse – only expiry housekeeping runs. @@ -6091,13 +6110,15 @@ fn test_reveal_crv3_commits_removes_past_epoch_commits() { // past_epoch (< reveal_epoch) must be gone assert!( - TimelockedWeightCommits::::get(netuid, past_epoch).is_empty(), + TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), past_epoch) + .is_empty(), "expired epoch {past_epoch} should be cleared" ); // reveal_epoch queue is *kept* because its commit could still be revealed later. 
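// Editorial sketch (not part of the patch): the expiry-housekeeping rule the
// surrounding assertions check. A timelocked queue stored at epoch E becomes
// revealable during epoch E + reveal_period; once the chain has moved past
// that epoch it can never be revealed and is dropped, while the still-
// revealable queue is retained until a drand pulse arrives. Illustrative only.
fn is_expired(queue_epoch: u64, cur_epoch: u64, reveal_period: u64) -> bool {
    // past_epoch queues satisfy this; the reveal-epoch queue does not.
    cur_epoch > queue_epoch.saturating_add(reveal_period)
}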
assert!( - !TimelockedWeightCommits::::get(netuid, reveal_epoch).is_empty(), + !TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), reveal_epoch) + .is_empty(), "reveal-epoch {reveal_epoch} must be retained until commit can be revealed" ); }); @@ -6208,7 +6229,7 @@ fn test_reveal_crv3_commits_multiple_valid_commits_all_processed() { step_epochs(2, netuid); // ───── assertions ─────────────────────────────────────────────────── - let w_sparse = SubtensorModule::get_weights_sparse(netuid); + let w_sparse = SubtensorModule::get_weights_sparse(netuid.into()); for hk in hotkeys { let uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk).unwrap() as usize; assert!( @@ -6323,7 +6344,7 @@ fn test_reveal_crv3_commits_max_neurons() { step_epochs(2, netuid); // ───── verify weights ─────────────────────────────────────────────── - let w_sparse = SubtensorModule::get_weights_sparse(netuid); + let w_sparse = SubtensorModule::get_weights_sparse(netuid.into()); for hk in &committing_hotkeys { let uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, hk).unwrap() as usize; assert!( @@ -6553,7 +6574,7 @@ fn test_reveal_crv3_commits_hotkey_check() { // Step epochs to run the epoch via the blockstep step_epochs(3, netuid); - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse.get(neuron_uid1 as usize).cloned().unwrap_or_default(); assert!( @@ -6670,7 +6691,7 @@ fn test_reveal_crv3_commits_hotkey_check() { // Step epochs to run the epoch via the blockstep step_epochs(3, netuid); - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse.get(neuron_uid1 as usize).cloned().unwrap_or_default(); assert!( @@ -6773,10 +6794,11 @@ fn test_reveal_crv3_commits_retry_on_missing_pulse() { )); // epoch in which commit was stored - let stored_epoch = TimelockedWeightCommits::::iter_prefix(netuid) - .next() - .map(|(e, _)| e) - .expect("commit stored"); + let stored_epoch = + TimelockedWeightCommits::::iter_prefix(NetUidStorageIndex::from(netuid)) + .next() + .map(|(e, _)| e) + .expect("commit stored"); // first block of reveal epoch (commit_epoch + RP) let first_reveal_epoch = stored_epoch + SubtensorModule::get_reveal_period(netuid); @@ -6787,7 +6809,8 @@ fn test_reveal_crv3_commits_retry_on_missing_pulse() { // run *one* block inside reveal epoch without pulse → commit should stay queued step_block(1); assert!( - !TimelockedWeightCommits::::get(netuid, stored_epoch).is_empty(), + !TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), stored_epoch) + .is_empty(), "commit must remain queued when pulse is missing" ); @@ -6808,14 +6831,15 @@ fn test_reveal_crv3_commits_retry_on_missing_pulse() { step_block(1); // automatic reveal runs here - let weights = SubtensorModule::get_weights_sparse(netuid) + let weights = SubtensorModule::get_weights_sparse(netuid.into()) .get(uid as usize) .cloned() .unwrap_or_default(); assert!(!weights.is_empty(), "weights must be set after pulse"); assert!( - TimelockedWeightCommits::::get(netuid, stored_epoch).is_empty(), + TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), stored_epoch) + .is_empty(), "queue should be empty after successful reveal" ); }); @@ -6943,7 +6967,7 @@ fn test_reveal_crv3_commits_legacy_payload_success() { // ───────────────────────────────────── // 5 ▸ assertions // 
─────────────────────────────────────
-        let weights_sparse = SubtensorModule::get_weights_sparse(netuid);
+        let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into());
         let w1 = weights_sparse
             .get(uid1 as usize)
             .cloned()
@@ -6958,7 +6982,8 @@
 
         // commit should be gone
         assert!(
-            TimelockedWeightCommits::<Test>::get(netuid, commit_epoch).is_empty(),
+            TimelockedWeightCommits::<Test>::get(NetUidStorageIndex::from(netuid), commit_epoch)
+                .is_empty(),
             "commit storage should be cleaned after reveal"
         );
     });
diff --git a/pallets/subtensor/src/transaction_extension.rs b/pallets/subtensor/src/transaction_extension.rs
index 064a242f91..42e45b5fd4 100644
--- a/pallets/subtensor/src/transaction_extension.rs
+++ b/pallets/subtensor/src/transaction_extension.rs
@@ -17,7 +17,7 @@ use sp_runtime::transaction_validity::{
 use sp_std::marker::PhantomData;
 use sp_std::vec::Vec;
 use subtensor_macros::freeze_struct;
-use subtensor_runtime_common::NetUid;
+use subtensor_runtime_common::{NetUid, NetUidStorageIndex};
 
 #[freeze_struct("2e02eb32e5cb25d3")]
 #[derive(Default, Encode, Decode, DecodeWithMemTracking, Clone, Eq, PartialEq, TypeInfo)]
@@ -53,7 +53,7 @@ where
     if let Err(err) = result {
         Err(match err {
             Error::<T>::AmountTooLow => CustomTransactionError::StakeAmountTooLow.into(),
-            Error::<T>::SubnetNotExists => CustomTransactionError::SubnetDoesntExist.into(),
+            Error::<T>::SubnetNotExists => CustomTransactionError::SubnetNotExists.into(),
             Error::<T>::NotEnoughBalanceToStake => CustomTransactionError::BalanceTooLow.into(),
             Error::<T>::HotKeyAccountNotExists => {
                 CustomTransactionError::HotkeyAccountDoesntExist.into()
@@ -149,7 +149,7 @@ where
                 if Self::check_weights_min_stake(who, *netuid) {
                     let provided_hash = Pallet::<T>::get_commit_hash(
                         who,
-                        *netuid,
+                        NetUidStorageIndex::from(*netuid),
                         uids,
                         values,
                         salt,
@@ -186,7 +186,7 @@ where
                     .map(|i| {
                         Pallet::<T>::get_commit_hash(
                             who,
-                            *netuid,
+                            NetUidStorageIndex::from(*netuid),
                             uids_list.get(i).unwrap_or(&Vec::new()),
                             values_list.get(i).unwrap_or(&Vec::new()),
                             salts_list.get(i).unwrap_or(&Vec::new()),
@@ -279,7 +279,7 @@ where
                     .map(|validity| (validity, Some(who.clone()), origin.clone()))
             }
             Some(Call::register_network { .. }) => {
-                if !Pallet::<T>::passes_rate_limit(&TransactionType::RegisterNetwork, who) {
+                if !TransactionType::RegisterNetwork.passes_rate_limit::<T>(who) {
                     return Err(CustomTransactionError::RateLimitExceeded.into());
                 }
diff --git a/pallets/subtensor/src/utils/evm.rs b/pallets/subtensor/src/utils/evm.rs
index 652fb8ea27..757c2ef91e 100644
--- a/pallets/subtensor/src/utils/evm.rs
+++ b/pallets/subtensor/src/utils/evm.rs
@@ -24,11 +24,12 @@ impl<T: Config> Pallet<T> {
 
     /// Associate an EVM key with a hotkey.
     ///
-    /// This function accepts a Signature, which is a signed message containing the hotkey concatenated with
-    /// the hashed block number. It will then attempt to recover the EVM key from the signature and compare it
-    /// with the `evm_key` parameter, and ensures that they match.
+    /// This function accepts a Signature, which is a signed message containing the hotkey
+    /// concatenated with the hashed block number. It will then attempt to recover the EVM key from
+    /// the signature and compare it with the `evm_key` parameter, and ensures that they match.
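// Editorial sketch (not part of the patch): how a client might build the
// message described by this doc comment, assuming sp_io::hashing::keccak_256
// and a little-endian encoding of the block number purely for illustration.
// The exact byte encodings of `hotkey` and `block_number` are the pallet's,
// not this sketch's.
fn association_message(hotkey_bytes: &[u8], block_number: u64) -> [u8; 32] {
    use sp_io::hashing::keccak_256;
    // keccak_256(hotkey ++ keccak_256(block_number))
    let block_hash = keccak_256(&block_number.to_le_bytes());
    let mut msg = Vec::with_capacity(hotkey_bytes.len() + 32);
    msg.extend_from_slice(hotkey_bytes);
    msg.extend_from_slice(&block_hash);
    keccak_256(&msg)
}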
     ///
-    /// The EVM key is expected to sign the message according to this formula to produce the signature:
+    /// The EVM key is expected to sign the message according to this formula to produce the
+    /// signature:
     /// ```text
     /// keccak_256(hotkey ++ keccak_256(block_number))
     /// ```
@@ -40,15 +41,22 @@ impl<T: Config> Pallet<T> {
     /// * `hotkey` - The hotkey associated with the `origin` coldkey.
     /// * `evm_key` - The EVM address to associate with the `hotkey`.
     /// * `block_number` - The block number used in the `signature`.
-    /// * `signature` - A signed message by the `evm_key` containing the `hotkey` and the hashed `block_number`.
+    /// * `signature` - A signed message by the `evm_key` containing the `hotkey` and the hashed
+    ///   `block_number`.
     pub fn do_associate_evm_key(
         origin: T::RuntimeOrigin,
         netuid: NetUid,
+        hotkey: T::AccountId,
         evm_key: H160,
         block_number: u64,
         mut signature: Signature,
     ) -> dispatch::DispatchResult {
-        let hotkey = ensure_signed(origin)?;
+        let coldkey = ensure_signed(origin)?;
+
+        ensure!(
+            Self::get_owning_coldkey_for_hotkey(&hotkey) == coldkey,
+            Error::<T>::NonAssociatedColdKey
+        );
 
         // Normalize the v value to 0 or 1
         if signature.0[64] >= 27 {
diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs
index f64962f094..a4d4755e5d 100644
--- a/pallets/subtensor/src/utils/misc.rs
+++ b/pallets/subtensor/src/utils/misc.rs
@@ -1,38 +1,140 @@
 use super::*;
-use crate::{
-    Error,
-    system::{ensure_root, ensure_signed, ensure_signed_or_root, pallet_prelude::BlockNumberFor},
+use crate::Error;
+use crate::system::{
+    ensure_root, ensure_signed, ensure_signed_or_root, pallet_prelude::BlockNumberFor,
 };
 use safe_math::*;
 use sp_core::Get;
 use sp_core::U256;
 use sp_runtime::Saturating;
 use substrate_fixed::types::{I32F32, U96F32};
-use subtensor_runtime_common::{AlphaCurrency, NetUid, TaoCurrency};
+use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, TaoCurrency};
 
 impl<T: Config> Pallet<T> {
     pub fn ensure_subnet_owner_or_root(
         o: T::RuntimeOrigin,
         netuid: NetUid,
-    ) -> Result<(), DispatchError> {
+    ) -> Result<Option<T::AccountId>, DispatchError> {
         let coldkey = ensure_signed_or_root(o);
         match coldkey {
-            Ok(Some(who)) if SubnetOwner::<T>::get(netuid) == who => Ok(()),
+            Ok(Some(who)) if SubnetOwner::<T>::get(netuid) == who => Ok(Some(who)),
             Ok(Some(_)) => Err(DispatchError::BadOrigin),
-            Ok(None) => Ok(()),
+            Ok(None) => Ok(None),
             Err(x) => Err(x.into()),
         }
     }
 
-    pub fn ensure_subnet_owner(o: T::RuntimeOrigin, netuid: NetUid) -> Result<(), DispatchError> {
+    pub fn ensure_subnet_owner(
+        o: T::RuntimeOrigin,
+        netuid: NetUid,
+    ) -> Result<T::AccountId, DispatchError> {
         let coldkey = ensure_signed(o);
         match coldkey {
-            Ok(who) if SubnetOwner::<T>::get(netuid) == who => Ok(()),
+            Ok(who) if SubnetOwner::<T>::get(netuid) == who => Ok(who),
             Ok(_) => Err(DispatchError::BadOrigin),
             Err(x) => Err(x.into()),
         }
     }
 
+    /// Like `ensure_root` but also prohibits calls during the last N blocks of the tempo.
+    pub fn ensure_root_with_rate_limit(
+        o: T::RuntimeOrigin,
+        netuid: NetUid,
+    ) -> Result<(), DispatchError> {
+        ensure_root(o)?;
+        let now = Self::get_current_block_as_u64();
+        Self::ensure_not_in_admin_freeze_window(netuid, now)?;
+        Ok(())
+    }
+
+    /// Ensure owner-or-root with a set of TransactionType rate checks (owner only).
+    /// - Root: only freeze window is enforced; no TransactionType checks.
+    /// - Owner (Signed): freeze window plus all rate checks in `limits` using signer extracted from
+    ///   origin.
+    pub fn ensure_sn_owner_or_root_with_limits(
+        o: T::RuntimeOrigin,
+        netuid: NetUid,
+        limits: &[crate::utils::rate_limiting::TransactionType],
+    ) -> Result<Option<T::AccountId>, DispatchError> {
+        let maybe_who = Self::ensure_subnet_owner_or_root(o, netuid)?;
+        let now = Self::get_current_block_as_u64();
+        Self::ensure_not_in_admin_freeze_window(netuid, now)?;
+        if let Some(who) = maybe_who.as_ref() {
+            for tx in limits.iter() {
+                ensure!(
+                    tx.passes_rate_limit_on_subnet::<T>(who, netuid),
+                    Error::<T>::TxRateLimitExceeded
+                );
+            }
+        }
+        Ok(maybe_who)
+    }
+
+    /// Ensure the caller is the subnet owner and passes all provided rate limits.
+    /// This does NOT allow root; it is strictly owner-only.
+    /// Returns the signer (owner) on success so callers may record last-blocks.
+    pub fn ensure_sn_owner_with_limits(
+        o: T::RuntimeOrigin,
+        netuid: NetUid,
+        limits: &[crate::utils::rate_limiting::TransactionType],
+    ) -> Result<T::AccountId, DispatchError> {
+        let who = Self::ensure_subnet_owner(o, netuid)?;
+        let now = Self::get_current_block_as_u64();
+        Self::ensure_not_in_admin_freeze_window(netuid, now)?;
+        for tx in limits.iter() {
+            ensure!(
+                tx.passes_rate_limit_on_subnet::<T>(&who, netuid),
+                Error::<T>::TxRateLimitExceeded
+            );
+        }
+        Ok(who)
+    }
+
+    /// Returns true if the current block is within the terminal freeze window of the tempo for the
+    /// given subnet. During this window, admin ops are prohibited to avoid interference with
+    /// validator weight submissions.
+    pub fn is_in_admin_freeze_window(netuid: NetUid, current_block: u64) -> bool {
+        let tempo = Self::get_tempo(netuid);
+        if tempo == 0 {
+            return false;
+        }
+        let remaining = Self::blocks_until_next_epoch(netuid, tempo, current_block);
+        let window = AdminFreezeWindow::<T>::get() as u64;
+        remaining < window
+    }
+
+    fn ensure_not_in_admin_freeze_window(netuid: NetUid, now: u64) -> Result<(), DispatchError> {
+        ensure!(
+            !Self::is_in_admin_freeze_window(netuid, now),
+            Error::<T>::AdminActionProhibitedDuringWeightsWindow
+        );
+        Ok(())
+    }
+
+    pub fn set_admin_freeze_window(window: u16) {
+        AdminFreezeWindow::<T>::set(window);
+        Self::deposit_event(Event::AdminFreezeWindowSet(window));
+    }
+
+    pub fn set_owner_hyperparam_rate_limit(epochs: u16) {
+        OwnerHyperparamRateLimit::<T>::set(epochs);
+        Self::deposit_event(Event::OwnerHyperparamRateLimitSet(epochs));
+    }
+
+    /// If owner is `Some`, record last-blocks for the provided `TransactionType`s.
+    pub fn record_owner_rl(
+        maybe_owner: Option<<T as frame_system::Config>::AccountId>,
+        netuid: NetUid,
+        txs: &[TransactionType],
+    ) {
+        if let Some(who) = maybe_owner {
+            let now = Self::get_current_block_as_u64();
+            for tx in txs {
+                tx.set_last_block_on_subnet::<T>(&who, netuid, now);
+            }
+        }
+    }
+
     // ========================
     // ==== Global Setters ====
     // ========================
@@ -98,14 +200,22 @@ impl<T: Config> Pallet<T> {
     pub fn get_consensus(netuid: NetUid) -> Vec<u16> {
         Consensus::<T>::get(netuid)
     }
-    pub fn get_incentive(netuid: NetUid) -> Vec<u16> {
+    pub fn get_incentive(netuid: NetUidStorageIndex) -> Vec<u16> {
         Incentive::<T>::get(netuid)
     }
     pub fn get_dividends(netuid: NetUid) -> Vec<u16> {
         Dividends::<T>::get(netuid)
     }
-    pub fn get_last_update(netuid: NetUid) -> Vec<u64> {
-        LastUpdate::<T>::get(netuid)
+    /// Fetch LastUpdate for `netuid` and ensure its length is at least `get_subnetwork_n(netuid)`,
+    /// padding with zeros if needed. Returns the (possibly padded) vector.
+    pub fn get_last_update(netuid_index: NetUidStorageIndex) -> Vec<u64> {
+        let netuid = Self::get_netuid(netuid_index);
+        let target_len = Self::get_subnetwork_n(netuid) as usize;
+        let mut v = LastUpdate::<T>::get(netuid_index);
+        if v.len() < target_len {
+            v.resize(target_len, 0);
+        }
+        v
     }
     pub fn get_pruning_score(netuid: NetUid) -> Vec<u16> {
         PruningScores::<T>::get(netuid)
@@ -120,7 +230,7 @@ impl<T: Config> Pallet<T> {
     // ==================================
     // ==== YumaConsensus UID params ====
     // ==================================
-    pub fn set_last_update_for_uid(netuid: NetUid, uid: u16, last_update: u64) {
+    pub fn set_last_update_for_uid(netuid: NetUidStorageIndex, uid: u16, last_update: u64) {
         let mut updated_last_update_vec = Self::get_last_update(netuid);
         let Some(updated_last_update) = updated_last_update_vec.get_mut(uid as usize) else {
             return;
@@ -143,12 +253,19 @@ impl<T: Config> Pallet<T> {
             SubnetworkN::<T>::get(netuid)
         );
         log::debug!("uid = {uid:?}");
-        assert!(uid < SubnetworkN::<T>::get(netuid));
-        PruningScores::<T>::mutate(netuid, |v| {
-            if let Some(s) = v.get_mut(uid as usize) {
-                *s = pruning_score;
-            }
-        });
+        if uid < SubnetworkN::<T>::get(netuid) {
+            PruningScores::<T>::mutate(netuid, |v| {
+                if let Some(s) = v.get_mut(uid as usize) {
+                    *s = pruning_score;
+                }
+            });
+        } else {
+            log::error!(
+                "set_pruning_score_for_uid: uid >= SubnetworkN::<T>::get(netuid): {:?} >= {:?}",
+                uid,
+                SubnetworkN::<T>::get(netuid)
+            );
+        }
     }
     pub fn set_validator_permit_for_uid(netuid: NetUid, uid: u16, validator_permit: bool) {
         let mut updated_validator_permits = Self::get_validator_permit(netuid);
@@ -183,7 +300,7 @@ impl<T: Config> Pallet<T> {
         let vec = Consensus::<T>::get(netuid);
         vec.get(uid as usize).copied().unwrap_or(0)
     }
-    pub fn get_incentive_for_uid(netuid: NetUid, uid: u16) -> u16 {
+    pub fn get_incentive_for_uid(netuid: NetUidStorageIndex, uid: u16) -> u16 {
         let vec = Incentive::<T>::get(netuid);
         vec.get(uid as usize).copied().unwrap_or(0)
     }
@@ -191,7 +308,7 @@ impl<T: Config> Pallet<T> {
         let vec = Dividends::<T>::get(netuid);
         vec.get(uid as usize).copied().unwrap_or(0)
     }
-    pub fn get_last_update_for_uid(netuid: NetUid, uid: u16) -> u64 {
+    pub fn get_last_update_for_uid(netuid: NetUidStorageIndex, uid: u16) -> u64 {
         let vec = LastUpdate::<T>::get(netuid);
         vec.get(uid as usize).copied().unwrap_or(0)
     }
@@ -270,7 +387,7 @@ impl<T: Config> Pallet<T> {
     // ========================
     // === Token Management ===
     // ========================
-    pub fn burn_tokens(amount: TaoCurrency) {
+    pub fn recycle_tao(amount: TaoCurrency) {
         TotalIssuance::<T>::put(TotalIssuance::<T>::get().saturating_sub(amount));
     }
     pub fn increase_issuance(amount: TaoCurrency) {
@@ -291,6 +408,10 @@ impl<T: Config> Pallet<T> {
         total_subnet_locked.into()
     }
 
+    pub fn set_recycle_or_burn(netuid: NetUid, recycle_or_burn: RecycleOrBurnEnum) {
+        RecycleOrBurn::<T>::insert(netuid, recycle_or_burn);
+    }
+
     // ========================
     // ========= Sudo =========
     // ========================
@@ -459,6 +580,14 @@ impl<T: Config> Pallet<T> {
         Self::deposit_event(Event::MinAllowedWeightSet(netuid, min_allowed_weights));
     }
 
+    pub fn get_min_allowed_uids(netuid: NetUid) -> u16 {
+        MinAllowedUids::<T>::get(netuid)
+    }
+    pub fn set_min_allowed_uids(netuid: NetUid, min_allowed: u16) {
+        MinAllowedUids::<T>::insert(netuid, min_allowed);
+        Self::deposit_event(Event::MinAllowedUidsSet(netuid, min_allowed));
+    }
+
     pub fn get_max_allowed_uids(netuid: NetUid) -> u16 {
         MaxAllowedUids::<T>::get(netuid)
     }
@@ -829,4 +958,19 @@ impl<T: Config> Pallet<T> {
         ImmuneOwnerUidsLimit::<T>::insert(netuid, limit);
         Ok(())
     }
+
+    /// Fetches the max number of subnets
+    ///
+    /// # Returns:
+    /// * 'u16': The max number of subnets
/// + pub fn get_max_subnets() -> u16 { + SubnetLimit::::get() + } + + /// Sets the max number of subnet + pub fn set_max_subnets(limit: u16) { + SubnetLimit::::put(limit); + Self::deposit_event(Event::SubnetLimitSet(limit)); + } } diff --git a/pallets/subtensor/src/utils/rate_limiting.rs b/pallets/subtensor/src/utils/rate_limiting.rs index de75086ea1..e7028bc4e3 100644 --- a/pallets/subtensor/src/utils/rate_limiting.rs +++ b/pallets/subtensor/src/utils/rate_limiting.rs @@ -4,6 +4,7 @@ use super::*; /// Enum representing different types of transactions #[derive(Copy, Clone)] +#[non_exhaustive] pub enum TransactionType { SetChildren, SetChildkeyTake, @@ -11,132 +12,197 @@ pub enum TransactionType { RegisterNetwork, SetWeightsVersionKey, SetSNOwnerHotkey, + OwnerHyperparamUpdate(Hyperparameter), + MechanismCountUpdate, + MechanismEmission, + MaxUidsTrimming, } -/// Implement conversion from TransactionType to u16 -impl From for u16 { - fn from(tx_type: TransactionType) -> Self { - match tx_type { - TransactionType::SetChildren => 0, - TransactionType::SetChildkeyTake => 1, - TransactionType::Unknown => 2, - TransactionType::RegisterNetwork => 3, - TransactionType::SetWeightsVersionKey => 4, - TransactionType::SetSNOwnerHotkey => 5, - } - } -} - -/// Implement conversion from u16 to TransactionType -impl From for TransactionType { - fn from(value: u16) -> Self { - match value { - 0 => TransactionType::SetChildren, - 1 => TransactionType::SetChildkeyTake, - 3 => TransactionType::RegisterNetwork, - 4 => TransactionType::SetWeightsVersionKey, - 5 => TransactionType::SetSNOwnerHotkey, - _ => TransactionType::Unknown, - } - } -} -impl Pallet { - // ======================== - // ==== Rate Limiting ===== - // ======================== +impl TransactionType { /// Get the rate limit for a specific transaction type - pub fn get_rate_limit(tx_type: &TransactionType) -> u64 { - match tx_type { - TransactionType::SetChildren => 150, // 30 minutes - TransactionType::SetChildkeyTake => TxChildkeyTakeRateLimit::::get(), - TransactionType::RegisterNetwork => NetworkRateLimit::::get(), - - TransactionType::Unknown => 0, // Default to no limit for unknown types (no limit) + pub fn rate_limit(&self) -> u64 { + match self { + Self::SetChildren => 150, // 30 minutes + Self::SetChildkeyTake => TxChildkeyTakeRateLimit::::get(), + Self::RegisterNetwork => NetworkRateLimit::::get(), + Self::MechanismCountUpdate => MechanismCountSetRateLimit::::get(), + Self::MechanismEmission => MechanismEmissionRateLimit::::get(), + Self::MaxUidsTrimming => MaxUidsTrimmingRateLimit::::get(), + Self::Unknown => 0, // Default to no limit for unknown types (no limit) _ => 0, } } - pub fn get_rate_limit_on_subnet(tx_type: &TransactionType, netuid: NetUid) -> u64 { + pub fn rate_limit_on_subnet(&self, netuid: NetUid) -> u64 { #[allow(clippy::match_single_binding)] - match tx_type { - TransactionType::SetWeightsVersionKey => (Tempo::::get(netuid) as u64) + match self { + Self::SetWeightsVersionKey => (Tempo::::get(netuid) as u64) .saturating_mul(WeightsVersionKeyRateLimit::::get()), - TransactionType::SetSNOwnerHotkey => DefaultSetSNOwnerHotkeyRateLimit::::get(), + // Owner hyperparameter updates are rate-limited by N tempos on the subnet (sudo configurable) + Self::OwnerHyperparamUpdate(_) => { + let epochs = OwnerHyperparamRateLimit::::get() as u64; + (Tempo::::get(netuid) as u64).saturating_mul(epochs) + } + Self::SetSNOwnerHotkey => DefaultSetSNOwnerHotkeyRateLimit::::get(), - _ => Self::get_rate_limit(tx_type), + _ => 
self.rate_limit::(), } } + pub fn passes_rate_limit(&self, key: &T::AccountId) -> bool { + let block = Pallet::::get_current_block_as_u64(); + let limit = self.rate_limit::(); + let last_block = self.last_block::(key); + + Self::check_passes_rate_limit(limit, block, last_block) + } + pub fn check_passes_rate_limit(limit: u64, block: u64, last_block: u64) -> bool { // Allow the first transaction (when last_block is 0) or if the rate limit has passed last_block == 0 || block.saturating_sub(last_block) >= limit } - pub fn passes_rate_limit(tx_type: &TransactionType, key: &T::AccountId) -> bool { - let block: u64 = Self::get_current_block_as_u64(); - let limit: u64 = Self::get_rate_limit(tx_type); - let last_block: u64 = Self::get_last_transaction_block(key, tx_type); - - Self::check_passes_rate_limit(limit, block, last_block) - } - /// Check if a transaction should be rate limited on a specific subnet - pub fn passes_rate_limit_on_subnet( - tx_type: &TransactionType, + pub fn passes_rate_limit_on_subnet( + &self, hotkey: &T::AccountId, netuid: NetUid, ) -> bool { - let block: u64 = Self::get_current_block_as_u64(); - let limit: u64 = Self::get_rate_limit_on_subnet(tx_type, netuid); - let last_block: u64 = Self::get_last_transaction_block_on_subnet(hotkey, netuid, tx_type); + let block = Pallet::::get_current_block_as_u64(); + let limit = self.rate_limit_on_subnet::(netuid); + let last_block = self.last_block_on_subnet::(hotkey, netuid); Self::check_passes_rate_limit(limit, block, last_block) } /// Get the block number of the last transaction for a specific key, and transaction type - pub fn get_last_transaction_block(key: &T::AccountId, tx_type: &TransactionType) -> u64 { - match tx_type { - TransactionType::RegisterNetwork => Self::get_network_last_lock_block(), - _ => Self::get_last_transaction_block_on_subnet(key, NetUid::ROOT, tx_type), + pub fn last_block(&self, key: &T::AccountId) -> u64 { + match self { + Self::RegisterNetwork => Pallet::::get_network_last_lock_block(), + _ => self.last_block_on_subnet::(key, NetUid::ROOT), } } - /// Get the block number of the last transaction for a specific hotkey, network, and transaction type - pub fn get_last_transaction_block_on_subnet( - hotkey: &T::AccountId, - netuid: NetUid, - tx_type: &TransactionType, - ) -> u64 { - match tx_type { - TransactionType::RegisterNetwork => Self::get_network_last_lock_block(), - TransactionType::SetSNOwnerHotkey => { - Self::get_rate_limited_last_block(&RateLimitKey::SetSNOwnerHotkey(netuid)) + /// Get the block number of the last transaction for a specific hotkey, network, and transaction + /// type + pub fn last_block_on_subnet(&self, hotkey: &T::AccountId, netuid: NetUid) -> u64 { + match self { + Self::RegisterNetwork => Pallet::::get_network_last_lock_block(), + Self::SetSNOwnerHotkey => { + Pallet::::get_rate_limited_last_block(&RateLimitKey::SetSNOwnerHotkey(netuid)) } + Self::OwnerHyperparamUpdate(hparam) => Pallet::::get_rate_limited_last_block( + &RateLimitKey::OwnerHyperparamUpdate(netuid, *hparam), + ), _ => { - let tx_as_u16: u16 = (*tx_type).into(); - TransactionKeyLastBlock::::get((hotkey, netuid, tx_as_u16)) + let tx_type: u16 = (*self).into(); + TransactionKeyLastBlock::::get((hotkey, netuid, tx_type)) } } } - /// Set the block number of the last transaction for a specific hotkey, network, and transaction type - pub fn set_last_transaction_block_on_subnet( + /// Set the block number of the last transaction for a specific hotkey, network, and transaction + /// type + pub fn 
set_last_block_on_subnet( + &self, key: &T::AccountId, netuid: NetUid, - tx_type: &TransactionType, block: u64, ) { - match tx_type { - TransactionType::RegisterNetwork => Self::set_network_last_lock_block(block), - TransactionType::SetSNOwnerHotkey => { - Self::set_rate_limited_last_block(&RateLimitKey::SetSNOwnerHotkey(netuid), block) - } + match self { + Self::RegisterNetwork => Pallet::::set_network_last_lock_block(block), + Self::SetSNOwnerHotkey => Pallet::::set_rate_limited_last_block( + &RateLimitKey::SetSNOwnerHotkey(netuid), + block, + ), + Self::OwnerHyperparamUpdate(hparam) => Pallet::::set_rate_limited_last_block( + &RateLimitKey::OwnerHyperparamUpdate(netuid, *hparam), + block, + ), _ => { - let tx_as_u16: u16 = (*tx_type).into(); - TransactionKeyLastBlock::::insert((key, netuid, tx_as_u16), block); + let tx_type: u16 = (*self).into(); + TransactionKeyLastBlock::::insert((key, netuid, tx_type), block); } } } +} + +/// Implement conversion from TransactionType to u16 +impl From for u16 { + fn from(tx_type: TransactionType) -> Self { + match tx_type { + TransactionType::SetChildren => 0, + TransactionType::SetChildkeyTake => 1, + TransactionType::Unknown => 2, + TransactionType::RegisterNetwork => 3, + TransactionType::SetWeightsVersionKey => 4, + TransactionType::SetSNOwnerHotkey => 5, + TransactionType::OwnerHyperparamUpdate(_) => 6, + TransactionType::MechanismCountUpdate => 7, + TransactionType::MechanismEmission => 8, + TransactionType::MaxUidsTrimming => 9, + } + } +} + +/// Implement conversion from u16 to TransactionType +impl From for TransactionType { + fn from(value: u16) -> Self { + match value { + 0 => TransactionType::SetChildren, + 1 => TransactionType::SetChildkeyTake, + 3 => TransactionType::RegisterNetwork, + 4 => TransactionType::SetWeightsVersionKey, + 5 => TransactionType::SetSNOwnerHotkey, + 6 => TransactionType::OwnerHyperparamUpdate(Hyperparameter::Unknown), + 7 => TransactionType::MechanismCountUpdate, + 8 => TransactionType::MechanismEmission, + 9 => TransactionType::MaxUidsTrimming, + _ => TransactionType::Unknown, + } + } +} + +impl From for TransactionType { + fn from(param: Hyperparameter) -> Self { + Self::OwnerHyperparamUpdate(param) + } +} + +#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, Debug, TypeInfo)] +#[non_exhaustive] +pub enum Hyperparameter { + Unknown = 0, + ServingRateLimit = 1, + MaxDifficulty = 2, + AdjustmentAlpha = 3, + MaxWeightLimit = 4, + ImmunityPeriod = 5, + MinAllowedWeights = 6, + Kappa = 7, + Rho = 8, + ActivityCutoff = 9, + PowRegistrationAllowed = 10, + MinBurn = 11, + MaxBurn = 12, + BondsMovingAverage = 13, + BondsPenalty = 14, + CommitRevealEnabled = 15, + LiquidAlphaEnabled = 16, + AlphaValues = 17, + WeightCommitInterval = 18, + TransferEnabled = 19, + AlphaSigmoidSteepness = 20, + Yuma3Enabled = 21, + BondsResetEnabled = 22, + ImmuneNeuronLimit = 23, + RecycleOrBurn = 24, +} + +impl Pallet { + // ======================== + // ==== Rate Limiting ===== + // ======================== pub fn remove_last_tx_block(key: &T::AccountId) { Self::remove_rate_limited_last_block(&RateLimitKey::LastTxBlock(key.clone())) diff --git a/pallets/swap-interface/src/lib.rs b/pallets/swap-interface/src/lib.rs index a0b39e151f..4998bbe379 100644 --- a/pallets/swap-interface/src/lib.rs +++ b/pallets/swap-interface/src/lib.rs @@ -34,6 +34,9 @@ pub trait SwapHandler { alpha_delta: AlphaCurrency, ); fn is_user_liquidity_enabled(netuid: NetUid) -> bool; + fn dissolve_all_liquidity_providers(netuid: NetUid) -> DispatchResult; + fn 
+
+impl<T: Config> Pallet<T> {
+    // ========================
+    // ==== Rate Limiting =====
+    // ========================

     pub fn remove_last_tx_block(key: &T::AccountId) {
         Self::remove_rate_limited_last_block(&RateLimitKey::LastTxBlock(key.clone()))
diff --git a/pallets/swap-interface/src/lib.rs b/pallets/swap-interface/src/lib.rs
index a0b39e151f..4998bbe379 100644
--- a/pallets/swap-interface/src/lib.rs
+++ b/pallets/swap-interface/src/lib.rs
@@ -34,6 +34,9 @@ pub trait SwapHandler<AccountId> {
         alpha_delta: AlphaCurrency,
     );
     fn is_user_liquidity_enabled(netuid: NetUid) -> bool;
+    fn dissolve_all_liquidity_providers(netuid: NetUid) -> DispatchResult;
+    fn toggle_user_liquidity(netuid: NetUid, enabled: bool);
+    fn clear_protocol_liquidity(netuid: NetUid) -> DispatchResult;
 }

 #[derive(Debug, PartialEq)]
diff --git a/pallets/swap/src/mock.rs b/pallets/swap/src/mock.rs
index 7a07cc7007..c79cb95d32 100644
--- a/pallets/swap/src/mock.rs
+++ b/pallets/swap/src/mock.rs
@@ -11,7 +11,7 @@ use frame_support::{
 use frame_system::{self as system};
 use sp_core::H256;
 use sp_runtime::{
-    BuildStorage,
+    BuildStorage, Vec,
     traits::{BlakeTwo256, IdentityLookup},
 };
 use subtensor_runtime_common::{AlphaCurrency, BalanceOps, NetUid, SubnetInfo, TaoCurrency};
@@ -120,6 +120,26 @@ impl SubnetInfo<AccountId> for MockLiquidityProvider {
     fn is_subtoken_enabled(netuid: NetUid) -> bool {
         netuid.inner() != SUBTOKEN_DISABLED_NETUID
     }
+
+    fn get_validator_trust(netuid: NetUid) -> Vec<u16> {
+        match netuid.into() {
+            123u16 => vec![4000, 3000, 2000, 1000],
+            WRAPPING_FEES_NETUID => vec![8000, 7000, 6000, 5000],
+            _ => vec![1000, 800, 600, 400],
+        }
+    }
+
+    fn get_validator_permit(netuid: NetUid) -> Vec<bool> {
+        match netuid.into() {
+            123u16 => vec![true, true, false, true],
+            WRAPPING_FEES_NETUID => vec![true, true, true, true],
+            _ => vec![true, true, true, true],
+        }
+    }
+
+    fn hotkey_of_uid(_netuid: NetUid, uid: u16) -> Option<AccountId> {
+        Some(uid as AccountId)
+    }
 }

 pub struct MockBalanceOps;
diff --git a/pallets/swap/src/pallet/impls.rs b/pallets/swap/src/pallet/impls.rs
index 69bf3eacbb..9a41283426 100644
--- a/pallets/swap/src/pallet/impls.rs
+++ b/pallets/swap/src/pallet/impls.rs
@@ -5,7 +5,7 @@ use frame_support::storage::{TransactionOutcome, transactional};
 use frame_support::{ensure, pallet_prelude::DispatchError, traits::Get};
 use safe_math::*;
 use sp_arithmetic::helpers_128bit;
-use sp_runtime::traits::AccountIdConversion;
+use sp_runtime::{DispatchResult, Vec, traits::AccountIdConversion};
 use substrate_fixed::types::{I64F64, U64F64, U96F32};
 use subtensor_runtime_common::{
     AlphaCurrency, BalanceOps, Currency, NetUid, SubnetInfo, TaoCurrency,
@@ -1212,6 +1212,199 @@ impl<T: Config> Pallet<T> {
     pub fn protocol_account_id() -> T::AccountId {
         T::ProtocolId::get().into_account_truncating()
     }
+
+    /// Dissolve all LPs and clean state.
+    pub fn do_dissolve_all_liquidity_providers(netuid: NetUid) -> DispatchResult {
+        if SwapV3Initialized::<T>::get(netuid) {
+            // 1) Snapshot only *non-protocol* positions: (owner, position_id).
+            struct CloseItem<A> {
+                owner: A,
+                pos_id: PositionId,
+            }
+            let protocol_account = Self::protocol_account_id();
+
+            let mut to_close: sp_std::vec::Vec<CloseItem<T::AccountId>> = sp_std::vec::Vec::new();
+            for ((owner, pos_id), _pos) in Positions::<T>::iter_prefix((netuid,)) {
+                if owner != protocol_account {
+                    to_close.push(CloseItem { owner, pos_id });
+                }
+            }
+
+            if to_close.is_empty() {
+                log::debug!(
+                    "dissolve_all_lp: no user positions; netuid={netuid:?}, protocol liquidity untouched"
+                );
+                return Ok(());
+            }
+
+            let mut user_refunded_tao = TaoCurrency::ZERO;
+            let mut user_staked_alpha = AlphaCurrency::ZERO;
+
+            let trust: Vec<u16> = T::SubnetInfo::get_validator_trust(netuid.into());
+            let permit: Vec<bool> = T::SubnetInfo::get_validator_permit(netuid.into());
+
+            // Helper: pick target validator uid, only among permitted validators, by highest trust.
+            let pick_target_uid = |trust: &Vec<u16>, permit: &Vec<bool>| -> Option<u16> {
+                let mut best_uid: Option<usize> = None;
+                let mut best_trust: u16 = 0;
+                for (i, (&t, &p)) in trust.iter().zip(permit.iter()).enumerate() {
+                    if p && (best_uid.is_none() || t > best_trust) {
+                        best_uid = Some(i);
+                        best_trust = t;
+                    }
+                }
+                best_uid.map(|i| i as u16)
+            };
+
+            for CloseItem { owner, pos_id } in to_close.into_iter() {
+                match Self::do_remove_liquidity(netuid, &owner, pos_id) {
+                    Ok(rm) => {
+                        // α withdrawn from the pool = principal + accrued fees
+                        let alpha_total_from_pool: AlphaCurrency =
+                            rm.alpha.saturating_add(rm.fee_alpha);
+
+                        // ---------------- USER: refund τ and convert α → stake ----------------
+
+                        // 1) Refund τ principal directly.
+                        if rm.tao > TaoCurrency::ZERO {
+                            T::BalanceOps::increase_balance(&owner, rm.tao);
+                            user_refunded_tao = user_refunded_tao.saturating_add(rm.tao);
+                            T::BalanceOps::decrease_provided_tao_reserve(netuid, rm.tao);
+                        }
+
+                        // 2) Stake ALL withdrawn α (principal + fees) to the best permitted validator.
+                        if alpha_total_from_pool > AlphaCurrency::ZERO {
+                            if let Some(target_uid) = pick_target_uid(&trust, &permit) {
+                                let validator_hotkey: T::AccountId =
+                                    T::SubnetInfo::hotkey_of_uid(netuid.into(), target_uid).ok_or(
+                                        sp_runtime::DispatchError::Other(
+                                            "validator_hotkey_missing",
+                                        ),
+                                    )?;
+
+                                // Stake α from LP owner (coldkey) to chosen validator (hotkey).
+                                T::BalanceOps::increase_stake(
+                                    &owner,
+                                    &validator_hotkey,
+                                    netuid,
+                                    alpha_total_from_pool,
+                                )?;
+
+                                user_staked_alpha =
+                                    user_staked_alpha.saturating_add(alpha_total_from_pool);
+
+                                log::debug!(
+                                    "dissolve_all_lp: user dissolved & staked α: netuid={netuid:?}, owner={owner:?}, pos_id={pos_id:?}, α_staked={alpha_total_from_pool:?}, target_uid={target_uid}"
+                                );
+                            } else {
+                                // No permitted validators; burn to avoid balance drift.
+                                log::debug!(
+                                    "dissolve_all_lp: no permitted validators; α burned: netuid={netuid:?}, owner={owner:?}, pos_id={pos_id:?}, α_total={alpha_total_from_pool:?}"
+                                );
+                            }
+
+                            T::BalanceOps::decrease_provided_alpha_reserve(
+                                netuid,
+                                alpha_total_from_pool,
+                            );
+                        }
+                    }
+                    Err(e) => {
+                        log::debug!(
+                            "dissolve_all_lp: force-close failed: netuid={netuid:?}, owner={owner:?}, pos_id={pos_id:?}, err={e:?}"
+                        );
+                        continue;
+                    }
+                }
+            }
+
+            log::debug!(
+                "dissolve_all_liquidity_providers (users-only): netuid={netuid:?}, users_refunded_total_τ={user_refunded_tao:?}, users_staked_total_α={user_staked_alpha:?}; protocol liquidity untouched"
+            );
+
+            return Ok(());
+        }
+
+        log::debug!(
+            "dissolve_all_liquidity_providers: netuid={netuid:?}, mode=V2-or-nonV3, leaving all liquidity/state intact"
+        );
+
+        Ok(())
+    }
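// NOTE (illustrative, not part of the diff): a standalone restatement of the validator
// selection above, plus what it yields for the mock's netuid-123 data (hypothetical free
// function mirroring the closure):

fn pick_target_uid(trust: &[u16], permit: &[bool]) -> Option<u16> {
    let mut best: Option<(usize, u16)> = None;
    for (uid, (&t, &p)) in trust.iter().zip(permit.iter()).enumerate() {
        // Only permitted validators are eligible; strict `>` keeps the earliest uid on ties.
        if p && best.map_or(true, |(_, bt)| t > bt) {
            best = Some((uid, t));
        }
    }
    best.map(|(uid, _)| uid as u16)
}

// Mock netuid 123: trust [4000, 3000, 2000, 1000], permit [true, true, false, true]
// → Some(0); uid 2 is skipped despite non-zero trust because it has no permit.
assert_eq!(
    pick_target_uid(&[4000, 3000, 2000, 1000], &[true, true, false, true]),
    Some(0)
);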
+
+    /// Clear **protocol-owned** liquidity and wipe all swap state for `netuid`.
+    pub fn do_clear_protocol_liquidity(netuid: NetUid) -> DispatchResult {
+        let protocol_account = Self::protocol_account_id();
+
+        // 1) Force-close only protocol positions, burning proceeds.
+        let mut burned_tao = TaoCurrency::ZERO;
+        let mut burned_alpha = AlphaCurrency::ZERO;
+
+        // Collect protocol position IDs first to avoid mutating while iterating.
+        let protocol_pos_ids: sp_std::vec::Vec<PositionId> = Positions::<T>::iter_prefix((netuid,))
+            .filter_map(|((owner, pos_id), _)| {
+                if owner == protocol_account {
+                    Some(pos_id)
+                } else {
+                    None
+                }
+            })
+            .collect();
+
+        for pos_id in protocol_pos_ids {
+            match Self::do_remove_liquidity(netuid, &protocol_account, pos_id) {
+                Ok(rm) => {
+                    let alpha_total_from_pool: AlphaCurrency =
+                        rm.alpha.saturating_add(rm.fee_alpha);
+                    let tao = rm.tao;
+
+                    if tao > TaoCurrency::ZERO {
+                        burned_tao = burned_tao.saturating_add(tao);
+                    }
+                    if alpha_total_from_pool > AlphaCurrency::ZERO {
+                        burned_alpha = burned_alpha.saturating_add(alpha_total_from_pool);
+                    }
+
+                    log::debug!(
+                        "clear_protocol_liquidity: burned protocol pos: netuid={netuid:?}, pos_id={pos_id:?}, τ={tao:?}, α_total={alpha_total_from_pool:?}"
+                    );
+                }
+                Err(e) => {
+                    log::debug!(
+                        "clear_protocol_liquidity: force-close failed: netuid={netuid:?}, pos_id={pos_id:?}, err={e:?}"
+                    );
+                    continue;
+                }
+            }
+        }
+
+        // 2) Clear active tick index entries, then all swap state (idempotent even if empty/non-V3).
+        let active_ticks: sp_std::vec::Vec<TickIndex> =
+            Ticks::<T>::iter_prefix(netuid).map(|(ti, _)| ti).collect();
+        for ti in active_ticks {
+            ActiveTickIndexManager::<T>::remove(netuid, ti);
+        }
+
+        let _ = Positions::<T>::clear_prefix((netuid,), u32::MAX, None);
+        let _ = Ticks::<T>::clear_prefix(netuid, u32::MAX, None);
+
+        FeeGlobalTao::<T>::remove(netuid);
+        FeeGlobalAlpha::<T>::remove(netuid);
+        CurrentLiquidity::<T>::remove(netuid);
+        CurrentTick::<T>::remove(netuid);
+        AlphaSqrtPrice::<T>::remove(netuid);
+        SwapV3Initialized::<T>::remove(netuid);
+
+        let _ = TickIndexBitmapWords::<T>::clear_prefix((netuid,), u32::MAX, None);
+        FeeRate::<T>::remove(netuid);
+        EnabledUserLiquidity::<T>::remove(netuid);
+
+        log::debug!(
+            "clear_protocol_liquidity: netuid={netuid:?}, protocol_burned: τ={burned_tao:?}, α={burned_alpha:?}; state cleared"
+        );
+
+        Ok(())
+    }
 }

 impl<T: Config> SwapHandler<T::AccountId> for Pallet<T> {
@@ -1304,6 +1497,15 @@
     fn is_user_liquidity_enabled(netuid: NetUid) -> bool {
         EnabledUserLiquidity::<T>::get(netuid)
     }
+    fn dissolve_all_liquidity_providers(netuid: NetUid) -> DispatchResult {
+        Self::do_dissolve_all_liquidity_providers(netuid)
+    }
+    fn toggle_user_liquidity(netuid: NetUid, enabled: bool) {
+        EnabledUserLiquidity::<T>::insert(netuid, enabled)
+    }
+    fn clear_protocol_liquidity(netuid: NetUid) -> DispatchResult {
+        Self::do_clear_protocol_liquidity(netuid)
+    }
 }

 #[derive(Debug, PartialEq)]
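// NOTE (illustrative, not part of the diff): the two routines are meant to run in this
// order at teardown — dissolving user LPs must come first, because clearing protocol
// liquidity wipes the whole `Positions` prefix and would otherwise drop user refunds.
// A minimal sketch of a caller (hypothetical helper):

fn teardown_swap_state<T: Config>(netuid: NetUid) -> DispatchResult {
    // 1) Refund τ / stake α for every non-protocol position.
    Pallet::<T>::do_dissolve_all_liquidity_providers(netuid)?;
    // 2) Burn protocol positions and remove all V3 storage for the subnet.
    Pallet::<T>::do_clear_protocol_liquidity(netuid)
}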
diff --git a/pallets/swap/src/pallet/mod.rs b/pallets/swap/src/pallet/mod.rs
index 442c4852aa..554c7aeead 100644
--- a/pallets/swap/src/pallet/mod.rs
+++ b/pallets/swap/src/pallet/mod.rs
@@ -265,7 +265,7 @@ mod pallet {
         ReservesTooLow,

         /// The subnet does not exist.
-        SubNetworkDoesNotExist,
+        MechanismDoesNotExist,

         /// User liquidity operations are disabled for this subnet
         UserLiquidityDisabled,
@@ -294,7 +294,7 @@ mod pallet {
         // Ensure that the subnet exists.
         ensure!(
             T::SubnetInfo::exists(netuid.into()),
-            Error::<T>::SubNetworkDoesNotExist
+            Error::<T>::MechanismDoesNotExist
         );

         ensure!(rate <= T::MaxFeeRate::get(), Error::<T>::FeeRateTooHigh);
@@ -331,7 +331,7 @@
         ensure!(
             T::SubnetInfo::exists(netuid.into()),
-            Error::<T>::SubNetworkDoesNotExist
+            Error::<T>::MechanismDoesNotExist
         );

         EnabledUserLiquidity::<T>::insert(netuid, enable);
@@ -366,7 +366,7 @@
         // Ensure that the subnet exists.
         ensure!(
             T::SubnetInfo::exists(netuid.into()),
-            Error::<T>::SubNetworkDoesNotExist
+            Error::<T>::MechanismDoesNotExist
         );

         ensure!(
@@ -434,7 +434,7 @@
         // Ensure that the subnet exists.
         ensure!(
             T::SubnetInfo::exists(netuid.into()),
-            Error::<T>::SubNetworkDoesNotExist
+            Error::<T>::MechanismDoesNotExist
         );

         // Remove liquidity
@@ -494,7 +494,7 @@
         // Ensure that the subnet exists.
         ensure!(
             T::SubnetInfo::exists(netuid.into()),
-            Error::<T>::SubNetworkDoesNotExist
+            Error::<T>::MechanismDoesNotExist
         );

         ensure!(
diff --git a/pallets/swap/src/pallet/tests.rs b/pallets/swap/src/pallet/tests.rs
index 396bd656be..72c33d698f 100644
--- a/pallets/swap/src/pallet/tests.rs
+++ b/pallets/swap/src/pallet/tests.rs
@@ -141,7 +141,7 @@ mod dispatchables {
                 NON_EXISTENT_NETUID.into(),
                 true
             ),
-            Error::<Test>::SubNetworkDoesNotExist
+            Error::<Test>::MechanismDoesNotExist
         );
     });
 }
@@ -1981,3 +1981,811 @@ fn test_swap_subtoken_disabled() {
         );
     });
 }
+
+#[test]
+fn test_liquidate_v3_removes_positions_ticks_and_state() {
+    new_test_ext().execute_with(|| {
+        let netuid = NetUid::from(1);
+
+        // Initialize V3 (creates protocol position, ticks, price, liquidity)
+        assert_ok!(Pallet::<Test>::maybe_initialize_v3(netuid));
+        assert!(SwapV3Initialized::<Test>::get(netuid));
+
+        // Enable user LP
+        assert_ok!(Swap::toggle_user_liquidity(
+            RuntimeOrigin::root(),
+            netuid.into(),
+            true
+        ));
+
+        // Add a user position across the full range to ensure ticks/bitmap are populated.
+        let min_price = tick_to_price(TickIndex::MIN);
+        let max_price = tick_to_price(TickIndex::MAX);
+        let tick_low = price_to_tick(min_price);
+        let tick_high = price_to_tick(max_price);
+        let liquidity = 2_000_000_000_u64;
+
+        let (_pos_id, _tao, _alpha) = Pallet::<Test>::do_add_liquidity(
+            netuid,
+            &OK_COLDKEY_ACCOUNT_ID,
+            &OK_HOTKEY_ACCOUNT_ID,
+            tick_low,
+            tick_high,
+            liquidity,
+        )
+        .expect("add liquidity");
+
+        // Accrue some global fees so we can verify fee storage is cleared later.
+        let sqrt_limit_price = SqrtPrice::from_num(1_000_000.0);
+        assert_ok!(Pallet::<Test>::do_swap(
+            netuid,
+            OrderType::Buy,
+            1_000_000,
+            sqrt_limit_price,
+            false,
+            false
+        ));
+
+        // Sanity: protocol & user positions exist, ticks exist, liquidity > 0
+        let protocol_id = Pallet::<Test>::protocol_account_id();
+        let prot_positions =
+            Positions::<Test>::iter_prefix_values((netuid, protocol_id)).collect::<Vec<_>>();
+        assert!(!prot_positions.is_empty());
+
+        let user_positions = Positions::<Test>::iter_prefix_values((netuid, OK_COLDKEY_ACCOUNT_ID))
+            .collect::<Vec<_>>();
+        assert_eq!(user_positions.len(), 1);
+
+        assert!(Ticks::<Test>::get(netuid, TickIndex::MIN).is_some());
+        assert!(Ticks::<Test>::get(netuid, TickIndex::MAX).is_some());
+        assert!(CurrentLiquidity::<Test>::get(netuid) > 0);
+
+        let had_bitmap_words = TickIndexBitmapWords::<Test>::iter_prefix((netuid,))
+            .next()
+            .is_some();
+        assert!(had_bitmap_words);
+
+        // ACT: users-only liquidation then protocol clear
+        assert_ok!(Pallet::<Test>::do_dissolve_all_liquidity_providers(netuid));
+        assert_ok!(Pallet::<Test>::do_clear_protocol_liquidity(netuid));
+
+        // ASSERT: positions cleared (both user and protocol)
+        assert_eq!(
+            Pallet::<Test>::count_positions(netuid, &OK_COLDKEY_ACCOUNT_ID),
+            0
+        );
+        let prot_positions_after =
+            Positions::<Test>::iter_prefix_values((netuid, protocol_id)).collect::<Vec<_>>();
+        assert!(prot_positions_after.is_empty());
+        let user_positions_after =
+            Positions::<Test>::iter_prefix_values((netuid, OK_COLDKEY_ACCOUNT_ID))
+                .collect::<Vec<_>>();
+        assert!(user_positions_after.is_empty());
+
+        // ASSERT: ticks cleared
+        assert!(Ticks::<Test>::iter_prefix(netuid).next().is_none());
+        assert!(Ticks::<Test>::get(netuid, TickIndex::MIN).is_none());
+        assert!(Ticks::<Test>::get(netuid, TickIndex::MAX).is_none());
+
+        // ASSERT: fee globals cleared
+        assert!(!FeeGlobalTao::<Test>::contains_key(netuid));
+        assert!(!FeeGlobalAlpha::<Test>::contains_key(netuid));
+
+        // ASSERT: price/tick/liquidity flags cleared
+        assert!(!AlphaSqrtPrice::<Test>::contains_key(netuid));
+        assert!(!CurrentTick::<Test>::contains_key(netuid));
+        assert!(!CurrentLiquidity::<Test>::contains_key(netuid));
+        assert!(!SwapV3Initialized::<Test>::contains_key(netuid));
+
+        // ASSERT: active tick bitmap cleared
+        assert!(
+            TickIndexBitmapWords::<Test>::iter_prefix((netuid,))
+                .next()
+                .is_none()
+        );
+
+        // ASSERT: knobs removed on dereg
+        assert!(!FeeRate::<Test>::contains_key(netuid));
+        assert!(!EnabledUserLiquidity::<Test>::contains_key(netuid));
+    });
+}
+
+/// V3 path with user liquidity disabled at teardown:
+/// must still remove positions and clear state (after protocol clear).
+#[test]
+fn test_liquidate_v3_with_user_liquidity_disabled() {
+    new_test_ext().execute_with(|| {
+        let netuid = NetUid::from(101);
+
+        assert_ok!(Pallet::<Test>::maybe_initialize_v3(netuid));
+        assert!(SwapV3Initialized::<Test>::get(netuid));
+
+        // Enable temporarily to add a user position
+        assert_ok!(Swap::toggle_user_liquidity(
+            RuntimeOrigin::root(),
+            netuid.into(),
+            true
+        ));
+
+        let min_price = tick_to_price(TickIndex::MIN);
+        let max_price = tick_to_price(TickIndex::MAX);
+        let tick_low = price_to_tick(min_price);
+        let tick_high = price_to_tick(max_price);
+        let liquidity = 1_000_000_000_u64;
+
+        let (_pos_id, _tao, _alpha) = Pallet::<Test>::do_add_liquidity(
+            netuid,
+            &OK_COLDKEY_ACCOUNT_ID,
+            &OK_HOTKEY_ACCOUNT_ID,
+            tick_low,
+            tick_high,
+            liquidity,
+        )
+        .expect("add liquidity");
+
+        // Disable user LP *before* liquidation; removal must ignore this flag.
+        assert_ok!(Swap::toggle_user_liquidity(
+            RuntimeOrigin::root(),
+            netuid.into(),
+            false
+        ));
+
+        // Users-only dissolve, then clear protocol liquidity/state.
+        assert_ok!(Pallet::<Test>::do_dissolve_all_liquidity_providers(netuid));
+        assert_ok!(Pallet::<Test>::do_clear_protocol_liquidity(netuid));
+
+        // ASSERT: positions & ticks gone, state reset
+        assert_eq!(
+            Pallet::<Test>::count_positions(netuid, &OK_COLDKEY_ACCOUNT_ID),
+            0
+        );
+        assert!(
+            Positions::<Test>::iter_prefix_values((netuid, OK_COLDKEY_ACCOUNT_ID))
+                .next()
+                .is_none()
+        );
+        assert!(Ticks::<Test>::iter_prefix(netuid).next().is_none());
+        assert!(
+            TickIndexBitmapWords::<Test>::iter_prefix((netuid,))
+                .next()
+                .is_none()
+        );
+        assert!(!SwapV3Initialized::<Test>::contains_key(netuid));
+        assert!(!AlphaSqrtPrice::<Test>::contains_key(netuid));
+        assert!(!CurrentTick::<Test>::contains_key(netuid));
+        assert!(!CurrentLiquidity::<Test>::contains_key(netuid));
+        assert!(!FeeGlobalTao::<Test>::contains_key(netuid));
+        assert!(!FeeGlobalAlpha::<Test>::contains_key(netuid));
+
+        // `EnabledUserLiquidity` is removed by protocol clear stage.
+        assert!(!EnabledUserLiquidity::<Test>::contains_key(netuid));
+    });
+}
+
+/// Non-V3 path: V3 not initialized (no positions); function must still clear any residual storages and succeed.
+#[test]
+fn test_liquidate_non_v3_uninitialized_ok_and_clears() {
+    new_test_ext().execute_with(|| {
+        let netuid = NetUid::from(202);
+
+        // Sanity: V3 is not initialized
+        assert!(!SwapV3Initialized::<Test>::get(netuid));
+        assert!(
+            Positions::<Test>::iter_prefix_values((netuid, OK_COLDKEY_ACCOUNT_ID))
+                .next()
+                .is_none()
+        );
+
+        // ACT
+        assert_ok!(Pallet::<Test>::do_dissolve_all_liquidity_providers(netuid));
+
+        // ASSERT: Defensive clears leave no residues and do not panic
+        assert!(
+            Positions::<Test>::iter_prefix_values((netuid, OK_COLDKEY_ACCOUNT_ID))
+                .next()
+                .is_none()
+        );
+        assert!(Ticks::<Test>::iter_prefix(netuid).next().is_none());
+        assert!(
+            TickIndexBitmapWords::<Test>::iter_prefix((netuid,))
+                .next()
+                .is_none()
+        );
+
+        // All single-key maps should not have the key after liquidation
+        assert!(!FeeGlobalTao::<Test>::contains_key(netuid));
+        assert!(!FeeGlobalAlpha::<Test>::contains_key(netuid));
+        assert!(!CurrentLiquidity::<Test>::contains_key(netuid));
+        assert!(!CurrentTick::<Test>::contains_key(netuid));
+        assert!(!AlphaSqrtPrice::<Test>::contains_key(netuid));
+        assert!(!SwapV3Initialized::<Test>::contains_key(netuid));
+        assert!(!FeeRate::<Test>::contains_key(netuid));
+        assert!(!EnabledUserLiquidity::<Test>::contains_key(netuid));
+    });
+}
+
+#[test]
+fn test_liquidate_idempotent() {
+    // V3 flavor
+    new_test_ext().execute_with(|| {
+        let netuid = NetUid::from(7);
+        assert_ok!(Pallet::<Test>::maybe_initialize_v3(netuid));
+
+        // Add a small user position
+        assert_ok!(Swap::toggle_user_liquidity(
+            RuntimeOrigin::root(),
+            netuid.into(),
+            true
+        ));
+        let tick_low = price_to_tick(0.2);
+        let tick_high = price_to_tick(0.3);
+        assert_ok!(Pallet::<Test>::do_add_liquidity(
+            netuid,
+            &OK_COLDKEY_ACCOUNT_ID,
+            &OK_HOTKEY_ACCOUNT_ID,
+            tick_low,
+            tick_high,
+            123_456_789
+        ));
+
+        // Users-only liquidations are idempotent.
+        assert_ok!(Pallet::<Test>::do_dissolve_all_liquidity_providers(netuid));
+        assert_ok!(Pallet::<Test>::do_dissolve_all_liquidity_providers(netuid));
+
+        // Now clear protocol liquidity/state—also idempotent.
+        assert_ok!(Pallet::<Test>::do_clear_protocol_liquidity(netuid));
+        assert_ok!(Pallet::<Test>::do_clear_protocol_liquidity(netuid));
+
+        // State remains empty
+        assert!(
+            Positions::<Test>::iter_prefix_values((netuid, OK_COLDKEY_ACCOUNT_ID))
+                .next()
+                .is_none()
+        );
+        assert!(Ticks::<Test>::iter_prefix(netuid).next().is_none());
+        assert!(
+            TickIndexBitmapWords::<Test>::iter_prefix((netuid,))
+                .next()
+                .is_none()
+        );
+        assert!(!SwapV3Initialized::<Test>::contains_key(netuid));
+    });
+
+    // Non-V3 flavor
+    new_test_ext().execute_with(|| {
+        let netuid = NetUid::from(8);
+
+        // Never initialize V3; both calls no-op and succeed.
+        assert_ok!(Pallet::<Test>::do_dissolve_all_liquidity_providers(netuid));
+        assert_ok!(Pallet::<Test>::do_dissolve_all_liquidity_providers(netuid));
+
+        assert!(
+            Positions::<Test>::iter_prefix_values((netuid, OK_COLDKEY_ACCOUNT_ID))
+                .next()
+                .is_none()
+        );
+        assert!(Ticks::<Test>::iter_prefix(netuid).next().is_none());
+        assert!(
+            TickIndexBitmapWords::<Test>::iter_prefix((netuid,))
+                .next()
+                .is_none()
+        );
+        assert!(!SwapV3Initialized::<Test>::contains_key(netuid));
+    });
+}
+
+#[test]
+fn liquidate_v3_refunds_user_funds_and_clears_state() {
+    new_test_ext().execute_with(|| {
+        let netuid = NetUid::from(1);
+
+        // Enable V3 path & initialize price/ticks (also creates a protocol position).
+        assert_ok!(Pallet::<Test>::toggle_user_liquidity(
+            RuntimeOrigin::root(),
+            netuid,
+            true
+        ));
+        assert_ok!(Pallet::<Test>::maybe_initialize_v3(netuid));
+
+        // Use distinct cold/hot to demonstrate alpha refund/stake accounting.
+        let cold = OK_COLDKEY_ACCOUNT_ID;
+        let hot = OK_HOTKEY_ACCOUNT_ID;
+
+        // Tight in-range band around current tick.
+        let ct = CurrentTick::<Test>::get(netuid);
+        let tick_low = ct.saturating_sub(10);
+        let tick_high = ct.saturating_add(10);
+        let liquidity: u64 = 1_000_000;
+
+        // Snapshot balances BEFORE.
+        let tao_before = <Test as Config>::BalanceOps::tao_balance(&cold);
+        let alpha_before_hot =
+            <Test as Config>::BalanceOps::alpha_balance(netuid.into(), &cold, &hot);
+        let alpha_before_owner =
+            <Test as Config>::BalanceOps::alpha_balance(netuid.into(), &cold, &cold);
+        let alpha_before_total = alpha_before_hot + alpha_before_owner;
+
+        // Create the user position (storage & v3 state only; no balances moved yet).
+        let (_pos_id, need_tao, need_alpha) =
+            Pallet::<Test>::do_add_liquidity(netuid, &cold, &hot, tick_low, tick_high, liquidity)
+                .expect("add liquidity");
+
+        // Mirror extrinsic bookkeeping: withdraw funds & bump provided-reserve counters.
+        let tao_taken = <Test as Config>::BalanceOps::decrease_balance(&cold, need_tao.into())
+            .expect("decrease TAO");
+        let alpha_taken = <Test as Config>::BalanceOps::decrease_stake(
+            &cold,
+            &hot,
+            netuid.into(),
+            need_alpha.into(),
+        )
+        .expect("decrease ALPHA");
+        <Test as Config>::BalanceOps::increase_provided_tao_reserve(netuid.into(), tao_taken);
+        <Test as Config>::BalanceOps::increase_provided_alpha_reserve(netuid.into(), alpha_taken);
+
+        // Users-only liquidation.
+        assert_ok!(Pallet::<Test>::do_dissolve_all_liquidity_providers(netuid));
+
+        // Expect balances restored to BEFORE snapshots (no swaps ran -> zero fees).
+        let tao_after = <Test as Config>::BalanceOps::tao_balance(&cold);
+        assert_eq!(tao_after, tao_before, "TAO principal must be refunded");
+
+        // ALPHA totals conserved to owner (distribution may differ).
+        let alpha_after_hot =
+            <Test as Config>::BalanceOps::alpha_balance(netuid.into(), &cold, &hot);
+        let alpha_after_owner =
+            <Test as Config>::BalanceOps::alpha_balance(netuid.into(), &cold, &cold);
+        let alpha_after_total = alpha_after_hot + alpha_after_owner;
+        assert_eq!(
+            alpha_after_total, alpha_before_total,
+            "ALPHA principal must be refunded/staked for the account (check totals)"
+        );
+
+        // Clear protocol liquidity and V3 state now.
+        assert_ok!(Pallet::<Test>::do_clear_protocol_liquidity(netuid));
+
+        // User position(s) are gone and all V3 state cleared.
+        assert_eq!(Pallet::<Test>::count_positions(netuid, &cold), 0);
+        assert!(Ticks::<Test>::iter_prefix(netuid).next().is_none());
+        assert!(!SwapV3Initialized::<Test>::contains_key(netuid));
+    });
+}
+
+#[test]
+fn refund_alpha_single_provider_exact() {
+    new_test_ext().execute_with(|| {
+        let netuid = NetUid::from(11);
+        let cold = OK_COLDKEY_ACCOUNT_ID;
+        let hot = OK_HOTKEY_ACCOUNT_ID;
+
+        assert_ok!(Pallet::<Test>::maybe_initialize_v3(netuid));
+
+        // --- Create an alpha-only position (range entirely above current tick → TAO = 0, ALPHA > 0).
+        let ct = CurrentTick::<Test>::get(netuid);
+        let tick_low = ct.next().expect("current tick should not be MAX in tests");
+        let tick_high = TickIndex::MAX;
+
+        let liquidity = 1_000_000_u64;
+        let (_pos_id, tao_needed, alpha_needed) =
+            Pallet::<Test>::do_add_liquidity(netuid, &cold, &hot, tick_low, tick_high, liquidity)
+                .expect("add alpha-only liquidity");
+        assert_eq!(tao_needed, 0, "alpha-only position must not require TAO");
+        assert!(alpha_needed > 0, "alpha-only position must require ALPHA");
+
+        // --- Snapshot BEFORE we withdraw funds (baseline for conservation).
+        let alpha_before_hot =
+            <Test as Config>::BalanceOps::alpha_balance(netuid.into(), &cold, &hot);
+        let alpha_before_owner =
+            <Test as Config>::BalanceOps::alpha_balance(netuid.into(), &cold, &cold);
+        let alpha_before_total = alpha_before_hot + alpha_before_owner;
+
+        // --- Mimic extrinsic bookkeeping: withdraw α and record provided reserve.
+        let alpha_taken = <Test as Config>::BalanceOps::decrease_stake(
+            &cold,
+            &hot,
+            netuid.into(),
+            alpha_needed.into(),
+        )
+        .expect("decrease ALPHA");
+        <Test as Config>::BalanceOps::increase_provided_alpha_reserve(netuid.into(), alpha_taken);
+
+        // --- Act: users-only dissolve.
+        assert_ok!(Pallet::<Test>::do_dissolve_all_liquidity_providers(netuid));
+
+        // --- Assert: total α conserved to owner (may be staked to validator).
+        let alpha_after_hot =
+            <Test as Config>::BalanceOps::alpha_balance(netuid.into(), &cold, &hot);
+        let alpha_after_owner =
+            <Test as Config>::BalanceOps::alpha_balance(netuid.into(), &cold, &cold);
+        let alpha_after_total = alpha_after_hot + alpha_after_owner;
+        assert_eq!(
+            alpha_after_total, alpha_before_total,
+            "ALPHA principal must be conserved to the account"
+        );
+
+        // Clear protocol liquidity and V3 state now.
+        assert_ok!(Pallet::<Test>::do_clear_protocol_liquidity(netuid));
+
+        // --- State is cleared.
+        assert!(Ticks::<Test>::iter_prefix(netuid).next().is_none());
+        assert_eq!(Pallet::<Test>::count_positions(netuid, &cold), 0);
+        assert!(!SwapV3Initialized::<Test>::contains_key(netuid));
+    });
+}
+
+#[test]
+fn refund_alpha_multiple_providers_proportional_to_principal() {
+    new_test_ext().execute_with(|| {
+        let netuid = NetUid::from(12);
+        let c1 = OK_COLDKEY_ACCOUNT_ID;
+        let h1 = OK_HOTKEY_ACCOUNT_ID;
+        let c2 = OK_COLDKEY_ACCOUNT_ID_2;
+        let h2 = OK_HOTKEY_ACCOUNT_ID_2;
+
+        assert_ok!(Pallet::<Test>::maybe_initialize_v3(netuid));
+
+        // Use the same "above current tick" trick for alpha-only positions.
+        let ct = CurrentTick::<Test>::get(netuid);
+        let tick_low = ct.next().expect("current tick should not be MAX in tests");
+        let tick_high = TickIndex::MAX;
+
+        // Provider #1 (smaller α)
+        let liq1 = 700_000_u64;
+        let (_p1, t1, a1) =
+            Pallet::<Test>::do_add_liquidity(netuid, &c1, &h1, tick_low, tick_high, liq1)
+                .expect("add alpha-only liquidity #1");
+        assert_eq!(t1, 0);
+        assert!(a1 > 0);
+
+        // Provider #2 (larger α)
+        let liq2 = 2_100_000_u64;
+        let (_p2, t2, a2) =
+            Pallet::<Test>::do_add_liquidity(netuid, &c2, &h2, tick_low, tick_high, liq2)
+                .expect("add alpha-only liquidity #2");
+        assert_eq!(t2, 0);
+        assert!(a2 > 0);
+
+        // Baselines BEFORE withdrawing
+        let a1_before_hot = <Test as Config>::BalanceOps::alpha_balance(netuid.into(), &c1, &h1);
+        let a1_before_owner = <Test as Config>::BalanceOps::alpha_balance(netuid.into(), &c1, &c1);
+        let a1_before = a1_before_hot + a1_before_owner;
+
+        let a2_before_hot = <Test as Config>::BalanceOps::alpha_balance(netuid.into(), &c2, &h2);
+        let a2_before_owner = <Test as Config>::BalanceOps::alpha_balance(netuid.into(), &c2, &c2);
+        let a2_before = a2_before_hot + a2_before_owner;
+
+        // Withdraw α and account reserves for each provider.
+        let a1_taken =
+            <Test as Config>::BalanceOps::decrease_stake(&c1, &h1, netuid.into(), a1.into())
+                .expect("decrease α #1");
+        <Test as Config>::BalanceOps::increase_provided_alpha_reserve(netuid.into(), a1_taken);
+
+        let a2_taken =
+            <Test as Config>::BalanceOps::decrease_stake(&c2, &h2, netuid.into(), a2.into())
+                .expect("decrease α #2");
+        <Test as Config>::BalanceOps::increase_provided_alpha_reserve(netuid.into(), a2_taken);
+
+        // Act
+        assert_ok!(Pallet::<Test>::do_dissolve_all_liquidity_providers(netuid));
+
+        // Each owner is restored to their exact baseline.
+        let a1_after_hot = <Test as Config>::BalanceOps::alpha_balance(netuid.into(), &c1, &h1);
+        let a1_after_owner = <Test as Config>::BalanceOps::alpha_balance(netuid.into(), &c1, &c1);
+        let a1_after = a1_after_hot + a1_after_owner;
+        assert_eq!(
+            a1_after, a1_before,
+            "owner #1 must receive their α principal back"
+        );
+
+        let a2_after_hot = <Test as Config>::BalanceOps::alpha_balance(netuid.into(), &c2, &h2);
+        let a2_after_owner = <Test as Config>::BalanceOps::alpha_balance(netuid.into(), &c2, &c2);
+        let a2_after = a2_after_hot + a2_after_owner;
+        assert_eq!(
+            a2_after, a2_before,
+            "owner #2 must receive their α principal back"
+        );
+    });
+}
+
+#[test]
+fn refund_alpha_same_cold_multiple_hotkeys_conserved_to_owner() {
+    new_test_ext().execute_with(|| {
+        let netuid = NetUid::from(13);
+        let cold = OK_COLDKEY_ACCOUNT_ID;
+        let hot1 = OK_HOTKEY_ACCOUNT_ID;
+        let hot2 = OK_HOTKEY_ACCOUNT_ID_2;
+
+        assert_ok!(Pallet::<Test>::maybe_initialize_v3(netuid));
+
+        // Two alpha-only positions on different hotkeys of the same owner.
+        let ct = CurrentTick::<Test>::get(netuid);
+        let tick_low = ct.next().expect("current tick should not be MAX in tests");
+        let tick_high = TickIndex::MAX;
+
+        let (_p1, _t1, a1) =
+            Pallet::<Test>::do_add_liquidity(netuid, &cold, &hot1, tick_low, tick_high, 900_000)
+                .expect("add alpha-only pos (hot1)");
+        let (_p2, _t2, a2) =
+            Pallet::<Test>::do_add_liquidity(netuid, &cold, &hot2, tick_low, tick_high, 1_500_000)
+                .expect("add alpha-only pos (hot2)");
+        assert!(a1 > 0 && a2 > 0);
+
+        // Baseline BEFORE: sum over (cold,hot1) + (cold,hot2) + (cold,cold).
+        let before_hot1 = <Test as Config>::BalanceOps::alpha_balance(netuid.into(), &cold, &hot1);
+        let before_hot2 = <Test as Config>::BalanceOps::alpha_balance(netuid.into(), &cold, &hot2);
+        let before_owner = <Test as Config>::BalanceOps::alpha_balance(netuid.into(), &cold, &cold);
+        let before_total = before_hot1 + before_hot2 + before_owner;
+
+        // Withdraw α from both hotkeys; track provided-reserve.
+        let t1 =
+            <Test as Config>::BalanceOps::decrease_stake(&cold, &hot1, netuid.into(), a1.into())
+                .expect("decr α #hot1");
+        <Test as Config>::BalanceOps::increase_provided_alpha_reserve(netuid.into(), t1);
+
+        let t2 =
+            <Test as Config>::BalanceOps::decrease_stake(&cold, &hot2, netuid.into(), a2.into())
+                .expect("decr α #hot2");
+        <Test as Config>::BalanceOps::increase_provided_alpha_reserve(netuid.into(), t2);
+
+        // Act
+        assert_ok!(Pallet::<Test>::do_dissolve_all_liquidity_providers(netuid));
+
+        // The total α "owned" by the coldkey is conserved (credit may land on (cold,cold)).
+        let after_hot1 = <Test as Config>::BalanceOps::alpha_balance(netuid.into(), &cold, &hot1);
+        let after_hot2 = <Test as Config>::BalanceOps::alpha_balance(netuid.into(), &cold, &hot2);
+        let after_owner = <Test as Config>::BalanceOps::alpha_balance(netuid.into(), &cold, &cold);
+        let after_total = after_hot1 + after_hot2 + after_owner;
+
+        assert_eq!(
+            after_total, before_total,
+            "owner’s α must be conserved across hot ledgers + (owner,owner)"
+        );
+    });
+}
+
+#[test]
+fn test_dissolve_v3_green_path_refund_tao_stake_alpha_and_clear_state() {
+    new_test_ext().execute_with(|| {
+        // --- Setup ---
+        let netuid = NetUid::from(42);
+        let cold = OK_COLDKEY_ACCOUNT_ID;
+        let hot = OK_HOTKEY_ACCOUNT_ID;
+
+        assert_ok!(Swap::toggle_user_liquidity(
+            RuntimeOrigin::root(),
+            netuid.into(),
+            true
+        ));
+        assert_ok!(Pallet::<Test>::maybe_initialize_v3(netuid));
+        assert!(SwapV3Initialized::<Test>::get(netuid));
+
+        // Tight in-range band so BOTH τ and α are required.
+        let ct = CurrentTick::<Test>::get(netuid);
+        let tick_low = ct.saturating_sub(10);
+        let tick_high = ct.saturating_add(10);
+        let liquidity: u64 = 1_250_000;
+
+        // Add liquidity and capture required τ/α.
+        let (_pos_id, tao_needed, alpha_needed) =
+            Pallet::<Test>::do_add_liquidity(netuid, &cold, &hot, tick_low, tick_high, liquidity)
+                .expect("add in-range liquidity");
+        assert!(tao_needed > 0, "in-range pos must require TAO");
+        assert!(alpha_needed > 0, "in-range pos must require ALPHA");
+
+        // Determine the permitted validator with the highest trust (green path).
+        let trust = <Test as Config>::SubnetInfo::get_validator_trust(netuid.into());
+        let permit = <Test as Config>::SubnetInfo::get_validator_permit(netuid.into());
+        assert_eq!(trust.len(), permit.len(), "trust/permit must align");
+        let target_uid: u16 = trust
+            .iter()
+            .zip(permit.iter())
+            .enumerate()
+            .filter(|(_, (_t, p))| **p)
+            .max_by_key(|(_, (t, _))| *t)
+            .map(|(i, _)| i as u16)
+            .expect("at least one permitted validator");
+        let validator_hotkey: <Test as frame_system::Config>::AccountId =
+            <Test as Config>::SubnetInfo::hotkey_of_uid(netuid.into(), target_uid)
+                .expect("uid -> hotkey mapping must exist");
+
+        // --- Snapshot BEFORE we withdraw τ/α to fund the position ---
+        let tao_before = <Test as Config>::BalanceOps::tao_balance(&cold);
+
+        let alpha_before_hot =
+            <Test as Config>::BalanceOps::alpha_balance(netuid.into(), &cold, &hot);
+        let alpha_before_owner =
+            <Test as Config>::BalanceOps::alpha_balance(netuid.into(), &cold, &cold);
+        let alpha_before_val =
+            <Test as Config>::BalanceOps::alpha_balance(netuid.into(), &cold, &validator_hotkey);
+
+        let alpha_before_total = if validator_hotkey == hot {
+            alpha_before_hot + alpha_before_owner
+        } else {
+            alpha_before_hot + alpha_before_owner + alpha_before_val
+        };
+
+        // --- Mirror extrinsic bookkeeping: withdraw τ & α; bump provided reserves ---
+        let tao_taken = <Test as Config>::BalanceOps::decrease_balance(&cold, tao_needed.into())
+            .expect("decrease TAO");
+        let alpha_taken = <Test as Config>::BalanceOps::decrease_stake(
+            &cold,
+            &hot,
+            netuid.into(),
+            alpha_needed.into(),
+        )
+        .expect("decrease ALPHA");
+
+        <Test as Config>::BalanceOps::increase_provided_tao_reserve(netuid.into(), tao_taken);
+        <Test as Config>::BalanceOps::increase_provided_alpha_reserve(netuid.into(), alpha_taken);
+
+        // --- Act: dissolve (GREEN PATH: permitted validators exist) ---
+        assert_ok!(Pallet::<Test>::do_dissolve_all_liquidity_providers(netuid));
+
+        // --- Assert: τ principal refunded to user ---
+        let tao_after = <Test as Config>::BalanceOps::tao_balance(&cold);
+        assert_eq!(tao_after, tao_before, "TAO principal must be refunded");
+
+        // --- α ledger assertions ---
+        let alpha_after_hot =
+            <Test as Config>::BalanceOps::alpha_balance(netuid.into(), &cold, &hot);
+        let alpha_after_owner =
+            <Test as Config>::BalanceOps::alpha_balance(netuid.into(), &cold, &cold);
+        let alpha_after_val =
+            <Test as Config>::BalanceOps::alpha_balance(netuid.into(), &cold, &validator_hotkey);
+
+        // Owner ledger must be unchanged in the green path.
+        assert_eq!(
+            alpha_after_owner, alpha_before_owner,
+            "Owner α ledger must be unchanged (staked to validator, not refunded)"
+        );
+
+        if validator_hotkey == hot {
+            assert_eq!(
+                alpha_after_hot, alpha_before_hot,
+                "When validator == hotkey, user's hot ledger must net back to its original balance"
+            );
+            let alpha_after_total = alpha_after_hot + alpha_after_owner;
+            assert_eq!(
+                alpha_after_total, alpha_before_total,
+                "Total α for the coldkey must be conserved (validator==hotkey)"
+            );
+        } else {
+            assert!(
+                alpha_before_hot >= alpha_after_hot,
+                "hot ledger should not increase"
+            );
+            assert!(
+                alpha_after_val >= alpha_before_val,
+                "validator ledger should not decrease"
+            );
+
+            let hot_loss = alpha_before_hot - alpha_after_hot;
+            let val_gain = alpha_after_val - alpha_before_val;
+            assert_eq!(
+                val_gain, hot_loss,
+                "α that left the user's hot ledger must equal α credited to the validator ledger"
+            );
+
+            let alpha_after_total = alpha_after_hot + alpha_after_owner + alpha_after_val;
+            assert_eq!(
+                alpha_after_total, alpha_before_total,
+                "Total α for the coldkey must be conserved"
+            );
+        }
+
+        // Now clear protocol liquidity & state and assert full reset.
+        assert_ok!(Pallet::<Test>::do_clear_protocol_liquidity(netuid));
+
+        let protocol_id = Pallet::<Test>::protocol_account_id();
+        assert_eq!(Pallet::<Test>::count_positions(netuid, &cold), 0);
+        let prot_positions_after =
+            Positions::<Test>::iter_prefix_values((netuid, protocol_id)).collect::<Vec<_>>();
+        assert!(
+            prot_positions_after.is_empty(),
+            "protocol positions must be removed"
+        );
+
+        assert!(Ticks::<Test>::iter_prefix(netuid).next().is_none());
+        assert!(Ticks::<Test>::get(netuid, TickIndex::MIN).is_none());
+        assert!(Ticks::<Test>::get(netuid, TickIndex::MAX).is_none());
+        assert!(!CurrentLiquidity::<Test>::contains_key(netuid));
+        assert!(!CurrentTick::<Test>::contains_key(netuid));
+        assert!(!AlphaSqrtPrice::<Test>::contains_key(netuid));
+        assert!(!SwapV3Initialized::<Test>::contains_key(netuid));
+
+        assert!(!FeeGlobalTao::<Test>::contains_key(netuid));
+        assert!(!FeeGlobalAlpha::<Test>::contains_key(netuid));
+
+        assert!(
+            TickIndexBitmapWords::<Test>::iter_prefix((netuid,))
+                .next()
+                .is_none(),
+            "active tick bitmap words must be cleared"
+        );
+
+        assert!(!FeeRate::<Test>::contains_key(netuid));
+        assert!(!EnabledUserLiquidity::<Test>::contains_key(netuid));
+    });
+}
+
+#[test]
+fn test_clear_protocol_liquidity_green_path() {
+    new_test_ext().execute_with(|| {
+        // --- Arrange ---
+        let netuid = NetUid::from(55);
+
+        // Ensure the "user liquidity enabled" flag exists so we can verify it's removed later.
+        assert_ok!(Pallet::<Test>::toggle_user_liquidity(
+            RuntimeOrigin::root(),
+            netuid,
+            true
+        ));
+
+        // Initialize V3 state; this should set price/tick flags and create a protocol position.
+        assert_ok!(Pallet::<Test>::maybe_initialize_v3(netuid));
+        assert!(
+            SwapV3Initialized::<Test>::get(netuid),
+            "V3 must be initialized"
+        );
+
+        // Sanity: protocol positions exist before clearing.
+        let protocol_id = Pallet::<Test>::protocol_account_id();
+        let prot_positions_before =
+            Positions::<Test>::iter_prefix_values((netuid, protocol_id)).collect::<Vec<_>>();
+        assert!(
+            !prot_positions_before.is_empty(),
+            "protocol positions should exist after V3 init"
+        );
+
+        // --- Act ---
+        // Green path: just clear protocol liquidity and wipe all V3 state.
+        assert_ok!(Pallet::<Test>::do_clear_protocol_liquidity(netuid));
+
+        // --- Assert: all protocol positions removed ---
+        let prot_positions_after =
+            Positions::<Test>::iter_prefix_values((netuid, protocol_id)).collect::<Vec<_>>();
+        assert!(
+            prot_positions_after.is_empty(),
+            "protocol positions must be removed by do_clear_protocol_liquidity"
+        );
+
+        // --- Assert: V3 data wiped (idempotent even if some maps were empty) ---
+        // Ticks / active tick bitmap
+        assert!(Ticks::<Test>::iter_prefix(netuid).next().is_none());
+        assert!(
+            TickIndexBitmapWords::<Test>::iter_prefix((netuid,))
+                .next()
+                .is_none(),
+            "active tick bitmap words must be cleared"
+        );
+
+        // Fee globals
+        assert!(!FeeGlobalTao::<Test>::contains_key(netuid));
+        assert!(!FeeGlobalAlpha::<Test>::contains_key(netuid));
+
+        // Price / tick / liquidity / flags
+        assert!(!AlphaSqrtPrice::<Test>::contains_key(netuid));
+        assert!(!CurrentTick::<Test>::contains_key(netuid));
+        assert!(!CurrentLiquidity::<Test>::contains_key(netuid));
+        assert!(!SwapV3Initialized::<Test>::contains_key(netuid));
+
+        // Knobs removed
+        assert!(!FeeRate::<Test>::contains_key(netuid));
+        assert!(!EnabledUserLiquidity::<Test>::contains_key(netuid));
+
+        // --- And it's idempotent ---
+        assert_ok!(Pallet::<Test>::do_clear_protocol_liquidity(netuid));
+        assert!(
+            Positions::<Test>::iter_prefix_values((netuid, protocol_id))
+                .next()
+                .is_none()
+        );
+        assert!(Ticks::<Test>::iter_prefix(netuid).next().is_none());
+        assert!(
+            TickIndexBitmapWords::<Test>::iter_prefix((netuid,))
+                .next()
+                .is_none()
+        );
+        assert!(!SwapV3Initialized::<Test>::contains_key(netuid));
+    });
+}
diff --git a/pallets/transaction-fee/src/tests/mock.rs b/pallets/transaction-fee/src/tests/mock.rs
index 8aca06dc5d..9651f0ea2d 100644
--- a/pallets/transaction-fee/src/tests/mock.rs
+++ b/pallets/transaction-fee/src/tests/mock.rs
@@ -15,7 +15,7 @@ pub use pallet_subtensor::*;
 pub use sp_core::U256;
 use sp_core::{ConstU64, H256};
 use sp_runtime::{
-    BuildStorage, KeyTypeId, Perbill,
+    BuildStorage, KeyTypeId, Perbill, Percent,
     testing::TestXt,
     traits::{BlakeTwo256, ConstU32, IdentityLookup, One},
 };
@@ -156,7 +156,8 @@ parameter_types! {
     pub const InitialTempo: u16 = 0;
     pub const SelfOwnership: u64 = 2;
     pub const InitialImmunityPeriod: u16 = 2;
-    pub const InitialMaxAllowedUids: u16 = 2;
+    pub const InitialMinAllowedUids: u16 = 2;
+    pub const InitialMaxAllowedUids: u16 = 4;
     pub const InitialBondsMovingAverage: u64 = 900_000;
     pub const InitialBondsPenalty: u16 = u16::MAX;
     pub const InitialBondsResetOn: bool = false;
@@ -194,7 +195,6 @@
     pub const InitialRAORecycledForRegistration: u64 = 0;
     pub const InitialSenateRequiredStakePercentage: u64 = 2; // 2 percent of total stake
     pub const InitialNetworkImmunityPeriod: u64 = 7200 * 7;
-    pub const InitialNetworkMinAllowedUids: u16 = 128;
     pub const InitialNetworkMinLockCost: u64 = 100_000_000_000;
     pub const InitialSubnetOwnerCut: u16 = 0; // 0%. 100% of rewards go to validators + miners.
     pub const InitialNetworkLockReductionInterval: u64 = 2; // 2 blocks.
@@ -216,6 +216,7 @@ parameter_types! {
     pub const InitialKeySwapOnSubnetCost: u64 = 10_000_000;
     pub const HotkeySwapOnSubnetInterval: u64 = 7 * 24 * 60 * 60 / 12; // 7 days
     pub const LeaseDividendsDistributionInterval: u32 = 100; // 100 blocks
+    pub const MaxImmuneUidsPercentage: Percent = Percent::from_percent(80);
 }

 impl pallet_subtensor::Config for Test {
@@ -239,6 +240,7 @@
     type InitialRho = InitialRho;
     type InitialAlphaSigmoidSteepness = InitialAlphaSigmoidSteepness;
     type InitialKappa = InitialKappa;
+    type InitialMinAllowedUids = InitialMinAllowedUids;
     type InitialMaxAllowedUids = InitialMaxAllowedUids;
     type InitialValidatorPruneLen = InitialValidatorPruneLen;
     type InitialScalingLawPower = InitialScalingLawPower;
@@ -270,7 +272,6 @@
     type InitialRAORecycledForRegistration = InitialRAORecycledForRegistration;
     type InitialSenateRequiredStakePercentage = InitialSenateRequiredStakePercentage;
     type InitialNetworkImmunityPeriod = InitialNetworkImmunityPeriod;
-    type InitialNetworkMinAllowedUids = InitialNetworkMinAllowedUids;
     type InitialNetworkMinLockCost = InitialNetworkMinLockCost;
     type InitialSubnetOwnerCut = InitialSubnetOwnerCut;
     type InitialNetworkLockReductionInterval = InitialNetworkLockReductionInterval;
@@ -293,6 +294,8 @@
     type ProxyInterface = ();
     type LeaseDividendsDistributionInterval = LeaseDividendsDistributionInterval;
     type GetCommitments = ();
+    type MaxImmuneUidsPercentage = MaxImmuneUidsPercentage;
+    type CommitmentsInterface = CommitmentsI;
 }

 parameter_types! {
@@ -419,6 +422,11 @@ impl PrivilegeCmp for OriginPrivilegeCmp {
     }
 }

+pub struct CommitmentsI;
+impl pallet_subtensor::CommitmentsInterface for CommitmentsI {
+    fn purge_netuid(_netuid: NetUid) {}
+}
+
 parameter_types! {
     pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * BlockWeights::get().max_block;
diff --git a/pallets/utility/Cargo.toml b/pallets/utility/Cargo.toml
index 01ecd42166..08df4734c0 100644
--- a/pallets/utility/Cargo.toml
+++ b/pallets/utility/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "pallet-utility"
-version = "38.0.0"
+version = "40.0.0"
 edition.workspace = true
 license = "Apache-2.0"
 description = "FRAME utilities pallet"
@@ -13,20 +13,20 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]

 [dependencies]
-frame-benchmarking = { workspace = true, optional = true }
+codec = { workspace = true }
+frame-benchmarking = { optional = true, workspace = true }
 frame-support.workspace = true
 frame-system.workspace = true
+scale-info = { features = ["derive"], workspace = true }
 sp-core.workspace = true
 sp-io.workspace = true
 sp-runtime.workspace = true
-codec = { workspace = true, features = ["derive"] }
-scale-info = { workspace = true, features = ["derive"] }
 subtensor-macros.workspace = true

 [dev-dependencies]
-pallet-collective.workspace = true
-pallet-root-testing.workspace = true
 pallet-balances = { workspace = true, default-features = true }
+pallet-collective = { workspace = true, default-features = true }
+pallet-root-testing = { workspace = true, default-features = true }
 pallet-timestamp = { workspace = true, default-features = true }
 sp-core = { workspace = true, default-features = true }

@@ -41,24 +41,22 @@ std = [
     "sp-core/std",
     "sp-io/std",
     "sp-runtime/std",
-    "pallet-collective/std",
-    "pallet-root-testing/std",
 ]
 runtime-benchmarks = [
     "frame-benchmarking/runtime-benchmarks",
     "frame-support/runtime-benchmarks",
     "frame-system/runtime-benchmarks",
-    "sp-runtime/runtime-benchmarks",
     "pallet-balances/runtime-benchmarks",
     "pallet-collective/runtime-benchmarks",
     "pallet-timestamp/runtime-benchmarks",
+    "sp-runtime/runtime-benchmarks",
 ]
 try-runtime = [
     "frame-support/try-runtime",
     "frame-system/try-runtime",
-    "sp-runtime/try-runtime",
     "pallet-balances/try-runtime",
     "pallet-collective/try-runtime",
     "pallet-root-testing/try-runtime",
     "pallet-timestamp/try-runtime",
+    "sp-runtime/try-runtime",
 ]
diff --git a/pallets/utility/src/benchmarking.rs b/pallets/utility/src/benchmarking.rs
index 6980552c36..4a9e0ca306 100644
--- a/pallets/utility/src/benchmarking.rs
+++ b/pallets/utility/src/benchmarking.rs
@@ -19,73 +19,105 @@

 #![cfg(feature = "runtime-benchmarks")]

-use super::*;
-use alloc::{vec, vec::Vec};
-use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller};
+use alloc::vec;
+use frame_benchmarking::{benchmarking::add_to_whitelist, v2::*};
 use frame_system::RawOrigin;

+use crate::*;
+
 const SEED: u32 = 0;

 fn assert_last_event<T: Config>(generic_event: <T as Config>::RuntimeEvent) {
     frame_system::Pallet::<T>::assert_last_event(generic_event.into());
 }

-benchmarks! {
-    where_clause { where <T as Config>::PalletsOrigin: Clone }
-    batch {
-        let c in 0 .. 1000;
-        let mut calls: Vec<<T as Config>::RuntimeCall> = Vec::new();
-        for i in 0 .. c {
-            let call = frame_system::Call::remark { remark: vec![] }.into();
-            calls.push(call);
-        }
+#[benchmarks]
+mod benchmark {
+    use super::*;
+
+    #[benchmark]
+    fn batch(c: Linear<0, 1000>) {
+        let calls = vec![frame_system::Call::remark { remark: vec![] }.into(); c as usize];
         let caller = whitelisted_caller();
-    }: _(RawOrigin::Signed(caller), calls)
-    verify {
-        assert_last_event::<T>(Event::BatchCompleted.into())
+
+        #[extrinsic_call]
+        _(RawOrigin::Signed(caller), calls);
+
+        assert_last_event::<T>(Event::BatchCompleted.into());
     }

-    as_derivative {
+    #[benchmark]
+    fn as_derivative() {
         let caller = account("caller", SEED, SEED);
         let call = Box::new(frame_system::Call::remark { remark: vec![] }.into());
         // Whitelist caller account from further DB operations.
         let caller_key = frame_system::Account::<T>::hashed_key_for(&caller);
-        frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into());
-    }: _(RawOrigin::Signed(caller), SEED as u16, call)
-
-    batch_all {
-        let c in 0 .. 1000;
-        let mut calls: Vec<<T as Config>::RuntimeCall> = Vec::new();
-        for i in 0 .. c {
-            let call = frame_system::Call::remark { remark: vec![] }.into();
-            calls.push(call);
-        }
+        add_to_whitelist(caller_key.into());
+
+        #[extrinsic_call]
+        _(RawOrigin::Signed(caller), SEED as u16, call);
+    }
+
+    #[benchmark]
+    fn batch_all(c: Linear<0, 1000>) {
+        let calls = vec![frame_system::Call::remark { remark: vec![] }.into(); c as usize];
+        let caller = whitelisted_caller();
+
+        #[extrinsic_call]
+        _(RawOrigin::Signed(caller), calls);
+
+        assert_last_event::<T>(Event::BatchCompleted.into());
+    }
+
+    #[benchmark]
+    fn dispatch_as() {
+        let caller = account("caller", SEED, SEED);
+        let call = Box::new(frame_system::Call::remark { remark: vec![] }.into());
+        let origin = T::RuntimeOrigin::from(RawOrigin::Signed(caller));
+        let pallets_origin = origin.caller().clone();
+        let pallets_origin = T::PalletsOrigin::from(pallets_origin);
+
+        #[extrinsic_call]
+        _(RawOrigin::Root, Box::new(pallets_origin), call);
+    }
+
+    #[benchmark]
+    fn force_batch(c: Linear<0, 1000>) {
+        let calls = vec![frame_system::Call::remark { remark: vec![] }.into(); c as usize];
         let caller = whitelisted_caller();
-    }: _(RawOrigin::Signed(caller), calls)
-    verify {
-        assert_last_event::<T>(Event::BatchCompleted.into())
+
+        #[extrinsic_call]
+        _(RawOrigin::Signed(caller), calls);
+
+        assert_last_event::<T>(Event::BatchCompleted.into());
     }

-    dispatch_as {
+    #[benchmark]
+    fn dispatch_as_fallible() {
         let caller = account("caller", SEED, SEED);
         let call = Box::new(frame_system::Call::remark { remark: vec![] }.into());
         let origin: T::RuntimeOrigin = RawOrigin::Signed(caller).into();
-        let pallets_origin: <T as Config>::PalletsOrigin = origin.caller().clone();
-        let pallets_origin = Into::<T::PalletsOrigin>::into(pallets_origin);
-    }: _(RawOrigin::Root, Box::new(pallets_origin), call)
-
-    force_batch {
-        let c in 0 .. 1000;
-        let mut calls: Vec<<T as Config>::RuntimeCall> = Vec::new();
-        for i in 0 .. c {
-            let call = frame_system::Call::remark { remark: vec![] }.into();
-            calls.push(call);
-        }
+        let pallets_origin = origin.caller().clone();
+        let pallets_origin = T::PalletsOrigin::from(pallets_origin);
+
+        #[extrinsic_call]
+        _(RawOrigin::Root, Box::new(pallets_origin), call);
+    }
+
+    #[benchmark]
+    fn if_else() {
+        // Failing main call.
+        let main_call = Box::new(frame_system::Call::set_code { code: vec![1] }.into());
+        let fallback_call = Box::new(frame_system::Call::remark { remark: vec![1] }.into());
         let caller = whitelisted_caller();
-    }: _(RawOrigin::Signed(caller), calls)
-    verify {
-        assert_last_event::<T>(Event::BatchCompleted.into())
+
+        #[extrinsic_call]
+        _(RawOrigin::Signed(caller), main_call, fallback_call);
     }

-    impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test);
+    impl_benchmark_test_suite! {
+        Pallet,
+        tests::new_test_ext(),
+        tests::Test
+    }
 }
diff --git a/pallets/utility/src/lib.rs b/pallets/utility/src/lib.rs
index 8ee888889e..bafb7ce9d9 100644
--- a/pallets/utility/src/lib.rs
+++ b/pallets/utility/src/lib.rs
@@ -61,7 +61,10 @@ extern crate alloc;
 use alloc::{boxed::Box, vec::Vec};
 use codec::{Decode, Encode};
 use frame_support::{
-    dispatch::{GetDispatchInfo, PostDispatchInfo, extract_actual_weight},
+    dispatch::{
+        DispatchClass::{Normal, Operational},
+        GetDispatchInfo, PostDispatchInfo, extract_actual_weight,
+    },
     traits::{IsSubType, OriginTrait, UnfilteredDispatchable},
 };
 use sp_core::TypeId;
@@ -122,6 +125,10 @@ pub mod pallet {
         ItemFailed { error: DispatchError },
         /// A call was dispatched.
         DispatchedAs { result: DispatchResult },
+        /// Main call was dispatched.
+        IfElseMainSuccess,
+        /// The fallback call was dispatched.
+        IfElseFallbackCalled { main_error: DispatchError },
     }

     // Align the call size to 1KB. As we are currently compiling the runtime for native/wasm
@@ -135,19 +142,16 @@
     /// The limit on the number of batched calls.
     fn batched_calls_limit() -> u32 {
         let allocator_limit = sp_core::MAX_POSSIBLE_ALLOCATION;
-        let size = core::mem::size_of::<<T as Config>::RuntimeCall>() as u32;
-
-        let align_up = size.saturating_add(CALL_ALIGN.saturating_sub(1));
-        let call_size = align_up
-            .checked_div(CALL_ALIGN)
-            .unwrap_or(0)
-            .saturating_mul(CALL_ALIGN);
-
-        let margin_factor: u32 = 3;
-
-        let after_margin = allocator_limit.checked_div(margin_factor).unwrap_or(0);
-
-        after_margin.checked_div(call_size).unwrap_or(0)
+        let call_size = (core::mem::size_of::<<T as Config>::RuntimeCall>() as u32)
+            .div_ceil(CALL_ALIGN)
+            .checked_mul(CALL_ALIGN)
+            .unwrap_or(u32::MAX);
+        // The margin to take into account vec doubling capacity.
+        let margin_factor = 3;
+
+        allocator_limit
+            .checked_div(margin_factor)
+            .map_or(0, |x| x.checked_div(call_size).unwrap_or(0))
     }
 }
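// NOTE (illustrative, not part of the diff): worked example of the new limit math,
// assuming sp_core::MAX_POSSIBLE_ALLOCATION = 33_554_432 (32 MiB), CALL_ALIGN = 1024,
// and a hypothetical 208-byte RuntimeCall:

let size: u32 = 208; // size_of::<RuntimeCall>() (assumed value)
let call_size = size.div_ceil(1024) * 1024; // align up to CALL_ALIGN
assert_eq!(call_size, 1024);
// allocator_limit / margin_factor / call_size
assert_eq!(33_554_432u32 / 3 / call_size, 10_922); // ≈ 10.9k batched calls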
@@ -190,9 +194,9 @@
     /// event is deposited.
     #[pallet::call_index(0)]
     #[pallet::weight({
-        let dispatch_weight = Pallet::<T>::weight(calls);
+        let (dispatch_weight, dispatch_class, pays) = Pallet::<T>::weight_and_dispatch_class(calls);
         let dispatch_weight = dispatch_weight.saturating_add(T::WeightInfo::batch(calls.len() as u32));
-        (dispatch_weight, DispatchClass::Normal)
+        (dispatch_weight, dispatch_class, pays)
     })]
     pub fn batch(
         origin: OriginFor<T>,
@@ -302,9 +306,9 @@
     /// - O(C) where C is the number of calls to be batched.
     #[pallet::call_index(2)]
     #[pallet::weight({
-        let dispatch_weight = Pallet::<T>::weight(calls);
+        let (dispatch_weight, dispatch_class, pays) = Pallet::<T>::weight_and_dispatch_class(calls);
         let dispatch_weight = dispatch_weight.saturating_add(T::WeightInfo::batch_all(calls.len() as u32));
-        (dispatch_weight, DispatchClass::Normal)
+        (dispatch_weight, dispatch_class, pays)
     })]
     pub fn batch_all(
         origin: OriginFor<T>,
@@ -401,9 +405,9 @@
     /// - O(C) where C is the number of calls to be batched.
     #[pallet::call_index(4)]
     #[pallet::weight({
-        let dispatch_weight = Pallet::<T>::weight(calls);
+        let (dispatch_weight, dispatch_class, pays) = Pallet::<T>::weight_and_dispatch_class(calls);
         let dispatch_weight = dispatch_weight.saturating_add(T::WeightInfo::force_batch(calls.len() as u32));
-        (dispatch_weight, DispatchClass::Normal)
+        (dispatch_weight, dispatch_class, pays)
     })]
     pub fn force_batch(
         origin: OriginFor<T>,
@@ -470,19 +474,158 @@
         let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into());
         res.map(|_| ()).map_err(|e| e.error)
     }
+
+    /// Dispatch a fallback call in the event the main call fails to execute.
+    /// May be called from any origin except `None`.
+    ///
+    /// This function first attempts to dispatch the `main` call.
+    /// If the `main` call fails, the `fallback` is attempted.
+    /// If the fallback is successfully dispatched, the weights of both calls
+    /// are accumulated and an event containing the main call error is deposited.
+    ///
+    /// In the event of a fallback failure, the whole call fails
+    /// with the weights returned.
+    ///
+    /// - `main`: The main call to be dispatched. This is the primary action to execute.
+    /// - `fallback`: The fallback call to be dispatched in case the `main` call fails.
+    ///
+    /// ## Dispatch Logic
+    /// - If the origin is `root`, both the main and fallback calls are executed without
+    ///   applying any origin filters.
+    /// - If the origin is not `root`, the origin filter is applied to both the `main` and
+    ///   `fallback` calls.
+    ///
+    /// ## Use Case
+    /// - Some use cases might involve submitting a `batch` type call in either main, fallback
+    ///   or both.
+    #[pallet::call_index(6)]
+    #[pallet::weight({
+        let main = main.get_dispatch_info();
+        let fallback = fallback.get_dispatch_info();
+        (
+            T::WeightInfo::if_else()
+                .saturating_add(main.call_weight)
+                .saturating_add(fallback.call_weight),
+            if main.class == Operational && fallback.class == Operational { Operational } else { Normal },
+        )
+    })]
+    pub fn if_else(
+        origin: OriginFor<T>,
+        main: Box<<T as Config>::RuntimeCall>,
+        fallback: Box<<T as Config>::RuntimeCall>,
+    ) -> DispatchResultWithPostInfo {
+        // Do not allow the `None` origin.
+        if ensure_none(origin.clone()).is_ok() {
+            return Err(BadOrigin.into());
+        }
+
+        let is_root = ensure_root(origin.clone()).is_ok();
+
+        // Track the weights
+        let mut weight = T::WeightInfo::if_else();
+
+        let main_info = main.get_dispatch_info();
+
+        // Execute the main call first
+        let main_result = if is_root {
+            main.dispatch_bypass_filter(origin.clone())
+        } else {
+            main.dispatch(origin.clone())
+        };
+
+        // Add weight of the main call
+        weight = weight.saturating_add(extract_actual_weight(&main_result, &main_info));
+
+        let Err(main_error) = main_result else {
+            // If the main result is Ok, we skip the fallback logic entirely
+            Self::deposit_event(Event::IfElseMainSuccess);
+            return Ok(Some(weight).into());
+        };
+
+        // If the main call failed, execute the fallback call
+        let fallback_info = fallback.get_dispatch_info();
+
+        let fallback_result = if is_root {
+            fallback.dispatch_bypass_filter(origin.clone())
+        } else {
+            fallback.dispatch(origin)
+        };
+
+        // Add weight of the fallback call
+        weight = weight.saturating_add(extract_actual_weight(&fallback_result, &fallback_info));
+
+        let Err(fallback_error) = fallback_result else {
+            // Fallback succeeded.
+            Self::deposit_event(Event::IfElseFallbackCalled {
+                main_error: main_error.error,
+            });
+            return Ok(Some(weight).into());
+        };
+
+        // Both calls have failed, return fallback error
+        Err(sp_runtime::DispatchErrorWithPostInfo {
+            error: fallback_error.error,
+            post_info: Some(weight).into(),
+        })
+    }
+
+    /// Dispatches a function call with a provided origin.
+    ///
+    /// Almost the same as [`Pallet::dispatch_as`] but forwards any error of the inner call.
+    ///
+    /// The dispatch origin for this call must be _Root_.
+    #[pallet::call_index(7)]
+    #[pallet::weight({
+        let dispatch_info = call.get_dispatch_info();
+        (
+            T::WeightInfo::dispatch_as_fallible()
+                .saturating_add(dispatch_info.call_weight),
+            dispatch_info.class,
+        )
+    })]
+    pub fn dispatch_as_fallible(
+        origin: OriginFor<T>,
+        as_origin: Box<T::PalletsOrigin>,
+        call: Box<<T as Config>::RuntimeCall>,
+    ) -> DispatchResult {
+        ensure_root(origin)?;
+
+        call.dispatch_bypass_filter((*as_origin).into())
+            .map_err(|e| e.error)?;
+
+        Self::deposit_event(Event::DispatchedAs { result: Ok(()) });
+
+        Ok(())
+    }
 }

 impl<T: Config> Pallet<T> {
     /// Get the accumulated `weight` and the dispatch class for the given `calls`.
-    fn weight(calls: &[<T as Config>::RuntimeCall]) -> Weight {
+    fn weight_and_dispatch_class(
+        calls: &[<T as Config>::RuntimeCall],
+    ) -> (Weight, DispatchClass, Pays) {
         let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info());
-        dispatch_infos.fold(Weight::zero(), |total_weight, di| {
-            if di.pays_fee == Pays::Yes {
-                total_weight.saturating_add(di.call_weight)
-            } else {
-                total_weight
-            }
-        })
+        let pays = if dispatch_infos.clone().any(|di| di.pays_fee == Pays::No) {
+            Pays::No
+        } else {
+            Pays::Yes
+        };
+        let (dispatch_weight, dispatch_class) = dispatch_infos.fold(
+            (Weight::zero(), DispatchClass::Operational),
+            |(total_weight, dispatch_class): (Weight, DispatchClass), di| {
+                (
+                    total_weight.saturating_add(di.call_weight),
+                    // If not all are `Operational`, we want to use `DispatchClass::Normal`.
+                    if di.class == DispatchClass::Normal {
+                        di.class
+                    } else {
+                        dispatch_class
+                    },
+                )
+            },
+        );
+
+        (dispatch_weight, dispatch_class, pays)
     }
 }
}
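// NOTE (illustrative, not part of the diff): `weight_and_dispatch_class` folds a batch
// as follows — weights are summed; the class stays `Operational` only while every call
// is `Operational`; and `Pays::No` wins if any call is free. A standalone sketch with
// plain tuples standing in for `DispatchInfo` (hypothetical helper):

fn fold_infos(infos: &[(u64, bool /* operational */, bool /* pays_fee */)]) -> (u64, bool, bool) {
    let any_free = infos.iter().any(|&(_, _, pays)| !pays);
    let (weight, all_operational) = infos
        .iter()
        .fold((0u64, true), |(w, op), &(wi, opi, _)| (w.saturating_add(wi), op && opi));
    (weight, all_operational, !any_free)
}

// [(10, Operational, pays), (5, Normal, free)] → weight 15, class Normal, Pays::No
assert_eq!(fold_infos(&[(10, true, true), (5, false, false)]), (15, false, false));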
+#[test]
+fn if_else_with_root_works() {
+    new_test_ext().execute_with(|| {
+        let k = b"a".to_vec();
+        let call = RuntimeCall::System(frame_system::Call::set_storage {
+            items: vec![(k.clone(), k.clone())],
+        });
+        assert!(!TestBaseCallFilter::contains(&call));
+        assert_eq!(Balances::free_balance(1), 10);
+        assert_eq!(Balances::free_balance(2), 10);
+        assert_ok!(Utility::if_else(
+            RuntimeOrigin::root(),
+            RuntimeCall::Balances(BalancesCall::force_transfer {
+                source: 1,
+                dest: 2,
+                value: 11
+            })
+            .into(),
+            call.into(),
+        ));
+        assert_eq!(Balances::free_balance(1), 10);
+        assert_eq!(Balances::free_balance(2), 10);
+        assert_eq!(storage::unhashed::get_raw(&k), Some(k));
+        System::assert_last_event(
+            utility::Event::IfElseFallbackCalled {
+                main_error: TokenError::FundsUnavailable.into(),
+            }
+            .into(),
+        );
+    });
+}
+
+#[test]
+fn if_else_with_signed_works() {
+    new_test_ext().execute_with(|| {
+        assert_eq!(Balances::free_balance(1), 10);
+        assert_eq!(Balances::free_balance(2), 10);
+        assert_ok!(Utility::if_else(
+            RuntimeOrigin::signed(1),
+            call_transfer(2, 11).into(),
+            call_transfer(2, 5).into()
+        ));
+        assert_eq!(Balances::free_balance(1), 5);
+        assert_eq!(Balances::free_balance(2), 15);
+
+        System::assert_last_event(
+            utility::Event::IfElseFallbackCalled {
+                main_error: TokenError::FundsUnavailable.into(),
+            }
+            .into(),
+        );
+    });
+}
+
+#[test]
+fn if_else_successful_main_call() {
+    new_test_ext().execute_with(|| {
+        assert_eq!(Balances::free_balance(1), 10);
+        assert_eq!(Balances::free_balance(2), 10);
+        assert_ok!(Utility::if_else(
+            RuntimeOrigin::signed(1),
+            call_transfer(2, 9).into(),
+            call_transfer(2, 1).into()
+        ));
+        assert_eq!(Balances::free_balance(1), 1);
+        assert_eq!(Balances::free_balance(2), 19);
+
+        System::assert_last_event(utility::Event::IfElseMainSuccess.into());
+    })
+}
+
+#[test]
+fn dispatch_as_fallible_works() {
+    new_test_ext().execute_with(|| {
+        Balances::force_set_balance(RuntimeOrigin::root(), 666, 100).unwrap();
+        assert_eq!(Balances::free_balance(666), 100);
+        assert_eq!(Balances::free_balance(777), 0);
+        assert_ok!(Utility::dispatch_as_fallible(
+            RuntimeOrigin::root(),
+            Box::new(OriginCaller::system(frame_system::RawOrigin::Signed(666))),
+            Box::new(call_transfer(777, 100))
+        ));
+        assert_eq!(Balances::free_balance(666), 0);
+        assert_eq!(Balances::free_balance(777), 100);
+
+        assert_noop!(
+            Utility::dispatch_as_fallible(
+                RuntimeOrigin::root(),
+                Box::new(OriginCaller::system(frame_system::RawOrigin::Signed(777))),
+                Box::new(RuntimeCall::Timestamp(TimestampCall::set { now: 0 }))
+            ),
+            DispatchError::BadOrigin,
+        );
+    })
+}
+
+#[test]
+fn if_else_failing_fallback_call() {
+    new_test_ext().execute_with(|| {
+        assert_eq!(Balances::free_balance(1), 10);
+        assert_eq!(Balances::free_balance(2), 10);
+        assert_err_ignore_postinfo!(
+            Utility::if_else(
+                RuntimeOrigin::signed(1),
+                call_transfer(2, 11).into(),
+                call_transfer(2, 11).into()
+            ),
+            TokenError::FundsUnavailable
+        );
+        assert_eq!(Balances::free_balance(1), 10);
+        assert_eq!(Balances::free_balance(2), 10);
+    })
+}
+
+#[test]
+fn if_else_with_nested_if_else_works() {
+    new_test_ext().execute_with(|| {
+        assert_eq!(Balances::free_balance(1), 10);
+        assert_eq!(Balances::free_balance(2), 10);
+
+        let main_call = call_transfer(2, 11).into();
+        let fallback_call = call_transfer(2, 5).into();
+
+        let nested_if_else_call = RuntimeCall::Utility(UtilityCall::if_else {
+            main: main_call,
+            fallback: fallback_call,
+        })
+        .into();
+
+        // Nested `if_else` call.
+        assert_ok!(Utility::if_else(
+            RuntimeOrigin::signed(1),
+            nested_if_else_call,
+            call_transfer(2, 7).into()
+        ));
+
+        // The inner if_else fallback is executed.
+        assert_eq!(Balances::free_balance(1), 5);
+        assert_eq!(Balances::free_balance(2), 15);
+
+        // Ensure the correct event was triggered for the main call (nested if_else).
+        System::assert_last_event(utility::Event::IfElseMainSuccess.into());
+    });
+}
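Editorial note: the regenerated weights below are consumed by the pallet through the `WeightInfo` trait. A rough sketch of the usual FRAME wiring for the new `if_else` entry; this is illustrative only, the real attribute lives in pallets/utility/src/lib.rs and may differ:

    #[pallet::weight({
        let main_di = main.get_dispatch_info();
        let fallback_di = fallback.get_dispatch_info();
        // charge the benchmarked overhead plus the heavier of the two inner calls
        T::WeightInfo::if_else()
            .saturating_add(main_di.call_weight.max(fallback_di.call_weight))
    })]
    pub fn if_else(
        origin: OriginFor<T>,
        main: Box<<T as Config>::RuntimeCall>,
        fallback: Box<<T as Config>::RuntimeCall>,
    ) -> DispatchResultWithPostInfo { /* ... */ }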
diff --git a/pallets/utility/src/weights.rs b/pallets/utility/src/weights.rs
index 502f85a3f1..eb1f036087 100644
--- a/pallets/utility/src/weights.rs
+++ b/pallets/utility/src/weights.rs
@@ -18,33 +18,37 @@
 //! Autogenerated weights for `pallet_utility`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`
+//! HOSTNAME: `4563561839a5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: `1024`
 
 // Executed Command:
-// ./target/production/substrate-node
+// frame-omni-bencher
+// v1
 // benchmark
 // pallet
-// --chain=dev
+// --extrinsic=*
+// --runtime=target/production/wbuild/kitchensink-runtime/kitchensink_runtime.wasm
+// --pallet=pallet_utility
+// --header=/__w/polkadot-sdk/polkadot-sdk/substrate/HEADER-APACHE2
+// --output=/__w/polkadot-sdk/polkadot-sdk/substrate/frame/utility/src/weights.rs
+// --wasm-execution=compiled
 // --steps=50
 // --repeat=20
-// --pallet=pallet_utility
+// --heap-pages=4096
+// --template=substrate/.maintain/frame-weight-template.hbs
 // --no-storage-info
-// --no-median-slopes
 // --no-min-squares
-// --extrinsic=*
-// --wasm-execution=compiled
-// --heap-pages=4096
-// --output=./substrate/frame/utility/src/weights.rs
-// --header=./substrate/HEADER-APACHE2
-// --template=./substrate/.maintain/frame-weight-template.hbs
+// --no-median-slopes
+// --genesis-builder-policy=none
+// --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic,pallet_nomination_pools,pallet_remark,pallet_transaction_storage,pallet_election_provider_multi_block,pallet_election_provider_multi_block::signed,pallet_election_provider_multi_block::unsigned,pallet_election_provider_multi_block::verifier
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
 #![allow(unused_imports)]
 #![allow(missing_docs)]
+#![allow(dead_code)]
 
 use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
 use core::marker::PhantomData;
@@ -56,6 +60,8 @@ pub trait WeightInfo {
     fn batch_all(c: u32, ) -> Weight;
     fn dispatch_as() -> Weight;
     fn force_batch(c: u32, ) -> Weight;
+    fn dispatch_as_fallible() -> Weight;
+    fn if_else() -> Weight;
 }
 
 /// Weights for `pallet_utility` using the Substrate node and recommended hardware.
@@ -68,12 +74,12 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
     /// The range of component `c` is `[0, 1000]`.
     fn batch(c: u32, ) -> Weight {
         // Proof Size summary in bytes:
-        //  Measured:  `145`
+        //  Measured:  `0`
         //  Estimated: `3997`
-        // Minimum execution time: 5_312_000 picoseconds.
-        Weight::from_parts(2_694_370, 3997)
-            // Standard Error: 5_055
-            .saturating_add(Weight::from_parts(5_005_941, 0).saturating_mul(c.into()))
+        // Minimum execution time: 3_972_000 picoseconds.
+        Weight::from_parts(4_034_000, 3997)
+            // Standard Error: 2_323
+            .saturating_add(Weight::from_parts(4_914_560, 0).saturating_mul(c.into()))
             .saturating_add(T::DbWeight::get().reads(2_u64))
     }
     /// Storage: `SafeMode::EnteredUntil` (r:1 w:0)
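Editorial note: every batch-style entry in this file is an affine model, a base weight plus a per-call slope in `c`. Sanity-checking the new `batch` numbers for a hypothetical batch of 100 calls (ref_time only):

    // 4_034_000 + 100 * 4_914_560 = 495_490_000 ps ≈ 0.5 ms, plus 2 DB reads
    let w = Weight::from_parts(4_034_000, 3997)
        .saturating_add(Weight::from_parts(4_914_560, 0).saturating_mul(100));
    assert_eq!(w.ref_time(), 495_490_000);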
@@ -82,10 +88,10 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
     /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`)
     fn as_derivative() -> Weight {
         // Proof Size summary in bytes:
-        //  Measured:  `145`
+        //  Measured:  `0`
         //  Estimated: `3997`
-        // Minimum execution time: 9_263_000 picoseconds.
-        Weight::from_parts(9_639_000, 3997)
+        // Minimum execution time: 5_866_000 picoseconds.
+        Weight::from_parts(6_097_000, 3997)
             .saturating_add(T::DbWeight::get().reads(2_u64))
     }
     /// Storage: `SafeMode::EnteredUntil` (r:1 w:0)
@@ -95,20 +101,20 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
     /// The range of component `c` is `[0, 1000]`.
     fn batch_all(c: u32, ) -> Weight {
         // Proof Size summary in bytes:
-        //  Measured:  `145`
+        //  Measured:  `0`
         //  Estimated: `3997`
-        // Minimum execution time: 5_120_000 picoseconds.
-        Weight::from_parts(12_948_874, 3997)
-            // Standard Error: 4_643
-            .saturating_add(Weight::from_parts(5_162_821, 0).saturating_mul(c.into()))
+        // Minimum execution time: 3_983_000 picoseconds.
+        Weight::from_parts(4_075_000, 3997)
+            // Standard Error: 2_176
+            .saturating_add(Weight::from_parts(5_127_263, 0).saturating_mul(c.into()))
             .saturating_add(T::DbWeight::get().reads(2_u64))
     }
     fn dispatch_as() -> Weight {
         // Proof Size summary in bytes:
         //  Measured:  `0`
         //  Estimated: `0`
-        // Minimum execution time: 7_126_000 picoseconds.
-        Weight::from_parts(7_452_000, 0)
+        // Minimum execution time: 5_530_000 picoseconds.
+        Weight::from_parts(5_720_000, 0)
     }
     /// Storage: `SafeMode::EnteredUntil` (r:1 w:0)
     /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
@@ -117,14 +123,33 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
     /// The range of component `c` is `[0, 1000]`.
     fn force_batch(c: u32, ) -> Weight {
         // Proof Size summary in bytes:
-        //  Measured:  `145`
+        //  Measured:  `0`
         //  Estimated: `3997`
-        // Minimum execution time: 5_254_000 picoseconds.
-        Weight::from_parts(4_879_712, 3997)
-            // Standard Error: 4_988
-            .saturating_add(Weight::from_parts(4_955_816, 0).saturating_mul(c.into()))
+        // Minimum execution time: 3_880_000 picoseconds.
+        Weight::from_parts(4_035_000, 3997)
+            // Standard Error: 1_682
+            .saturating_add(Weight::from_parts(4_902_729, 0).saturating_mul(c.into()))
             .saturating_add(T::DbWeight::get().reads(2_u64))
     }
+    fn dispatch_as_fallible() -> Weight {
+        // Proof Size summary in bytes:
+        //  Measured:  `0`
+        //  Estimated: `0`
+        // Minimum execution time: 5_417_000 picoseconds.
+        Weight::from_parts(5_705_000, 0)
+    }
+    /// Storage: `SafeMode::EnteredUntil` (r:1 w:0)
+    /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+    /// Storage: `TxPause::PausedCalls` (r:2 w:0)
+    /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`)
+    fn if_else() -> Weight {
+        // Proof Size summary in bytes:
+        //  Measured:  `0`
+        //  Estimated: `7004`
+        // Minimum execution time: 11_273_000 picoseconds.
+        Weight::from_parts(11_571_000, 7004)
+            .saturating_add(T::DbWeight::get().reads(3_u64))
+    }
 }
 
 // For backwards compatibility and tests.
@@ -136,12 +161,12 @@ impl WeightInfo for () {
     /// The range of component `c` is `[0, 1000]`.
     fn batch(c: u32, ) -> Weight {
         // Proof Size summary in bytes:
-        //  Measured:  `145`
+        //  Measured:  `0`
         //  Estimated: `3997`
-        // Minimum execution time: 5_312_000 picoseconds.
-        Weight::from_parts(2_694_370, 3997)
-            // Standard Error: 5_055
-            .saturating_add(Weight::from_parts(5_005_941, 0).saturating_mul(c.into()))
+        // Minimum execution time: 3_972_000 picoseconds.
+        Weight::from_parts(4_034_000, 3997)
+            // Standard Error: 2_323
+            .saturating_add(Weight::from_parts(4_914_560, 0).saturating_mul(c.into()))
             .saturating_add(RocksDbWeight::get().reads(2_u64))
     }
     /// Storage: `SafeMode::EnteredUntil` (r:1 w:0)
@@ -150,10 +175,10 @@ impl WeightInfo for () {
     /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`)
     fn as_derivative() -> Weight {
         // Proof Size summary in bytes:
-        //  Measured:  `145`
+        //  Measured:  `0`
         //  Estimated: `3997`
-        // Minimum execution time: 9_263_000 picoseconds.
-        Weight::from_parts(9_639_000, 3997)
+        // Minimum execution time: 5_866_000 picoseconds.
+        Weight::from_parts(6_097_000, 3997)
             .saturating_add(RocksDbWeight::get().reads(2_u64))
     }
     /// Storage: `SafeMode::EnteredUntil` (r:1 w:0)
@@ -163,20 +188,20 @@ impl WeightInfo for () {
     /// The range of component `c` is `[0, 1000]`.
     fn batch_all(c: u32, ) -> Weight {
         // Proof Size summary in bytes:
-        //  Measured:  `145`
+        //  Measured:  `0`
         //  Estimated: `3997`
-        // Minimum execution time: 5_120_000 picoseconds.
-        Weight::from_parts(12_948_874, 3997)
-            // Standard Error: 4_643
-            .saturating_add(Weight::from_parts(5_162_821, 0).saturating_mul(c.into()))
+        // Minimum execution time: 3_983_000 picoseconds.
+        Weight::from_parts(4_075_000, 3997)
+            // Standard Error: 2_176
+            .saturating_add(Weight::from_parts(5_127_263, 0).saturating_mul(c.into()))
             .saturating_add(RocksDbWeight::get().reads(2_u64))
     }
     fn dispatch_as() -> Weight {
         // Proof Size summary in bytes:
         //  Measured:  `0`
         //  Estimated: `0`
-        // Minimum execution time: 7_126_000 picoseconds.
-        Weight::from_parts(7_452_000, 0)
+        // Minimum execution time: 5_530_000 picoseconds.
+        Weight::from_parts(5_720_000, 0)
     }
     /// Storage: `SafeMode::EnteredUntil` (r:1 w:0)
     /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
@@ -185,12 +210,31 @@ impl WeightInfo for () {
     /// The range of component `c` is `[0, 1000]`.
     fn force_batch(c: u32, ) -> Weight {
         // Proof Size summary in bytes:
-        //  Measured:  `145`
+        //  Measured:  `0`
         //  Estimated: `3997`
-        // Minimum execution time: 5_254_000 picoseconds.
-        Weight::from_parts(4_879_712, 3997)
-            // Standard Error: 4_988
-            .saturating_add(Weight::from_parts(4_955_816, 0).saturating_mul(c.into()))
+        // Minimum execution time: 3_880_000 picoseconds.
+        Weight::from_parts(4_035_000, 3997)
+            // Standard Error: 1_682
+            .saturating_add(Weight::from_parts(4_902_729, 0).saturating_mul(c.into()))
             .saturating_add(RocksDbWeight::get().reads(2_u64))
     }
+    fn dispatch_as_fallible() -> Weight {
+        // Proof Size summary in bytes:
+        //  Measured:  `0`
+        //  Estimated: `0`
+        // Minimum execution time: 5_417_000 picoseconds.
+        Weight::from_parts(5_705_000, 0)
+    }
+    /// Storage: `SafeMode::EnteredUntil` (r:1 w:0)
+    /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+    /// Storage: `TxPause::PausedCalls` (r:2 w:0)
+    /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`)
+    fn if_else() -> Weight {
+        // Proof Size summary in bytes:
+        //  Measured:  `0`
+        //  Estimated: `7004`
+        // Minimum execution time: 11_273_000 picoseconds.
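+        // Editorial note: reads(3) below matches the storage summary above: one
+        // `SafeMode::EnteredUntil` read plus two `TxPause::PausedCalls` reads
+        // (presumably the main and the fallback call are each filter-checked).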
+        Weight::from_parts(11_571_000, 7004)
+            .saturating_add(RocksDbWeight::get().reads(3_u64))
+    }
 }
diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs
index aee1e04895..df11059c2a 100644
--- a/runtime/src/lib.rs
+++ b/runtime/src/lib.rs
@@ -50,7 +50,7 @@ use sp_core::{
 use sp_runtime::Cow;
 use sp_runtime::generic::Era;
 use sp_runtime::{
-    AccountId32, ApplyExtrinsicResult, ConsensusEngineId, generic, impl_opaque_keys,
+    AccountId32, ApplyExtrinsicResult, ConsensusEngineId, Percent, generic, impl_opaque_keys,
     traits::{
         AccountIdLookup, BlakeTwo256, Block as BlockT, DispatchInfoOf, Dispatchable, One,
         PostDispatchInfoOf, UniqueSaturatedInto, Verify,
@@ -220,7 +220,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
     // `spec_version`, and `authoring_version` are the same between Wasm and native.
     // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use
     // the compatible custom types.
-    spec_version: 315,
+    spec_version: 320,
     impl_version: 1,
     apis: RUNTIME_API_VERSIONS,
     transaction_version: 1,
@@ -490,7 +490,9 @@ impl CanVote<AccountId> for CanVoteToTriumvirate {
     }
 }
 
-use pallet_subtensor::{CollectiveInterface, MemberManagement, ProxyInterface};
+use pallet_subtensor::{
+    CollectiveInterface, CommitmentsInterface, MemberManagement, ProxyInterface,
+};
 pub struct ManageSenateMembers;
 impl MemberManagement<AccountId> for ManageSenateMembers {
     fn add_member(account: &AccountId) -> DispatchResultWithPostInfo {
@@ -884,6 +886,7 @@ impl pallet_proxy::Config for Runtime {
     type CallHasher = BlakeTwo256;
     type AnnouncementDepositBase = AnnouncementDepositBase;
     type AnnouncementDepositFactor = AnnouncementDepositFactor;
+    type BlockNumberProvider = System;
 }
 
 pub struct Proxier;
@@ -910,6 +913,13 @@ impl ProxyInterface<AccountId> for Proxier {
     }
 }
 
+pub struct CommitmentsI;
+impl CommitmentsInterface for CommitmentsI {
+    fn purge_netuid(netuid: NetUid) {
+        pallet_commitments::Pallet::<Runtime>::purge_netuid(netuid);
+    }
+}
+
 parameter_types! {
     pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) *
         BlockWeights::get().max_block;
@@ -1056,7 +1066,12 @@ pub struct ResetBondsOnCommit;
 impl OnMetadataCommitment<AccountId> for ResetBondsOnCommit {
     #[cfg(not(feature = "runtime-benchmarks"))]
     fn on_metadata_commitment(netuid: NetUid, address: &AccountId) {
-        let _ = SubtensorModule::do_reset_bonds(netuid, address);
+        // Reset bonds for each mechanism of this subnet
+        let mechanism_count = SubtensorModule::get_current_mechanism_count(netuid);
+        for mecid in 0..u8::from(mechanism_count) {
+            let netuid_index = SubtensorModule::get_mechanism_storage_index(netuid, mecid.into());
+            let _ = SubtensorModule::do_reset_bonds(netuid_index, address);
+        }
     }
 
     #[cfg(feature = "runtime-benchmarks")]
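Editorial note: `do_reset_bonds` now takes a mechanism-scoped storage index instead of the bare `NetUid`, so one metadata commitment clears bonds in every mechanism of the subnet. A test-style sketch of the intended effect (names taken from this diff; the `Bonds` storage assertion is hypothetical):

    let netuid = NetUid::from(3);
    let count = SubtensorModule::get_current_mechanism_count(netuid); // e.g. 2
    for mecid in 0..u8::from(count) {
        let idx = SubtensorModule::get_mechanism_storage_index(netuid, mecid.into());
        // after a commitment, no bonds should remain under this mechanism index
        assert!(pallet_subtensor::Bonds::<Runtime>::iter_prefix(idx).next().is_none());
    }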
@@ -1146,8 +1161,8 @@ parameter_types! {
     pub const SubtensorInitialTxChildKeyTakeRateLimit: u64 = INITIAL_CHILDKEY_TAKE_RATELIMIT;
     pub const SubtensorInitialRAORecycledForRegistration: u64 = 0; // 0 rao
     pub const SubtensorInitialSenateRequiredStakePercentage: u64 = 1; // 1 percent of total stake
-    pub const SubtensorInitialNetworkImmunity: u64 = 7 * 7200;
-    pub const SubtensorInitialMinAllowedUids: u16 = 128;
+    pub const SubtensorInitialNetworkImmunity: u64 = 1_296_000;
+    pub const SubtensorInitialMinAllowedUids: u16 = 64;
     pub const SubtensorInitialMinLockCost: u64 = 1_000_000_000_000; // 1000 TAO
     pub const SubtensorInitialSubnetOwnerCut: u16 = 11_796; // 18 percent
     // pub const SubtensorInitialSubnetLimit: u16 = 12; // (DEPRECATED)
@@ -1169,6 +1184,7 @@ parameter_types! {
     pub const SubtensorInitialKeySwapOnSubnetCost: u64 = 1_000_000; // 0.001 TAO
     pub const HotkeySwapOnSubnetInterval : BlockNumber = 5 * 24 * 60 * 60 / 12; // 5 days
     pub const LeaseDividendsDistributionInterval: BlockNumber = 100; // 100 blocks
+    pub const MaxImmuneUidsPercentage: Percent = Percent::from_percent(80);
 }
 
 impl pallet_subtensor::Config for Runtime {
@@ -1183,6 +1199,7 @@ impl pallet_subtensor::Config for Runtime {
     type InitialRho = SubtensorInitialRho;
     type InitialAlphaSigmoidSteepness = SubtensorInitialAlphaSigmoidSteepness;
     type InitialKappa = SubtensorInitialKappa;
+    type InitialMinAllowedUids = SubtensorInitialMinAllowedUids;
     type InitialMaxAllowedUids = SubtensorInitialMaxAllowedUids;
     type InitialBondsMovingAverage = SubtensorInitialBondsMovingAverage;
     type InitialBondsPenalty = SubtensorInitialBondsPenalty;
@@ -1223,7 +1240,6 @@ impl pallet_subtensor::Config for Runtime {
     type InitialRAORecycledForRegistration = SubtensorInitialRAORecycledForRegistration;
     type InitialSenateRequiredStakePercentage = SubtensorInitialSenateRequiredStakePercentage;
     type InitialNetworkImmunityPeriod = SubtensorInitialNetworkImmunity;
-    type InitialNetworkMinAllowedUids = SubtensorInitialMinAllowedUids;
     type InitialNetworkMinLockCost = SubtensorInitialMinLockCost;
    type InitialNetworkLockReductionInterval = SubtensorInitialNetworkLockReductionInterval;
     type InitialSubnetOwnerCut = SubtensorInitialSubnetOwnerCut;
@@ -1246,6 +1262,8 @@ impl pallet_subtensor::Config for Runtime {
     type ProxyInterface = Proxier;
     type LeaseDividendsDistributionInterval = LeaseDividendsDistributionInterval;
     type GetCommitments = GetCommitmentsStruct;
+    type MaxImmuneUidsPercentage = MaxImmuneUidsPercentage;
+    type CommitmentsInterface = CommitmentsI;
 }
 
 parameter_types! {
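Editorial note on the retuned constants above: at 12-second blocks (7_200 per day) the network immunity period grows from 7 * 7_200 = 50_400 blocks (about 7 days) to 1_296_000 blocks (about 180 days), and the initial minimum UID count halves from 128 to 64. `MaxImmuneUidsPercentage` is a plain `sp_runtime::Percent`; assuming the pallet applies it to a subnet's UID capacity, the cap works out as:

    let cap = MaxImmuneUidsPercentage::get() * 256u32; // 80% of 256 UIDs ≈ 204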
@@ -2326,6 +2344,10 @@ impl_runtime_apis! {
             SubtensorModule::get_metagraph(netuid)
         }
 
+        fn get_mechagraph(netuid: NetUid, mecid: MechId) -> Option<Metagraph<AccountId32>> {
+            SubtensorModule::get_mechagraph(netuid, mecid)
+        }
+
         fn get_subnet_state(netuid: NetUid) -> Option<SubnetState<AccountId32>> {
             SubtensorModule::get_subnet_state(netuid)
         }
@@ -2334,6 +2356,10 @@ impl_runtime_apis! {
             SubtensorModule::get_all_metagraphs()
         }
 
+        fn get_all_mechagraphs() -> Vec<Option<Metagraph<AccountId32>>> {
+            SubtensorModule::get_all_mechagraphs()
+        }
+
         fn get_all_dynamic_info() -> Vec<Option<DynamicInfo<AccountId32>>> {
             SubtensorModule::get_all_dynamic_info()
         }
@@ -2341,7 +2367,13 @@ impl_runtime_apis! {
         fn get_selective_metagraph(netuid: NetUid, metagraph_indexes: Vec<u16>) -> Option<SelectiveMetagraph<AccountId32>> {
             SubtensorModule::get_selective_metagraph(netuid, metagraph_indexes)
         }
+        fn get_subnet_to_prune() -> Option<NetUid> {
+            pallet_subtensor::Pallet::<Runtime>::get_network_to_prune()
+        }
 
+        fn get_selective_mechagraph(netuid: NetUid, mecid: MechId, metagraph_indexes: Vec<u16>) -> Option<SelectiveMetagraph<AccountId32>> {
+            SubtensorModule::get_selective_mechagraph(netuid, mecid, metagraph_indexes)
+        }
     }
 
     impl subtensor_custom_rpc_runtime_api::StakeInfoRuntimeApi<Block> for Runtime {
diff --git a/scripts/benchmark_action.sh b/scripts/benchmark_action.sh
index 105cbe9bf5..a475211163 100755
--- a/scripts/benchmark_action.sh
+++ b/scripts/benchmark_action.sh
@@ -11,7 +11,7 @@ declare -A DISPATCH_PATHS=(
     [swap]="../pallets/swap/src/pallet/mod.rs"
 )
 
-THRESHOLD=20
+THRESHOLD=40
 MAX_RETRIES=3
 
 SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
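Editorial note: raising THRESHOLD from 20 to 40 widens the allowed deviation before the benchmark gate trips. The actual comparison lives further down in benchmark_action.sh; its assumed shape (a sketch of the semantics, not the script's literal code):

    # fail (and retry, up to MAX_RETRIES) when the measured weight drifts
    # more than THRESHOLD percent from the declared one:
    #   drift_pct=$(( (measured - declared) * 100 / declared ))
    #   (( drift_pct > THRESHOLD )) && schedule_retry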
diff --git a/scripts/install_build_env.sh b/scripts/install_build_env.sh
new file mode 100644
index 0000000000..c9fd8bebca
--- /dev/null
+++ b/scripts/install_build_env.sh
@@ -0,0 +1,101 @@
+#!/bin/bash
+
+# If the binaries were already compiled in CI, skip this script
+if [ -n "$BUILT_IN_CI" ]; then
+    echo "[*] BUILT_IN_CI is set to '$BUILT_IN_CI'. Skipping script..."
+    exit 0
+fi
+
+echo ""
+echo "######################################################################"
+echo "###          Install build environment dependencies               ###"
+echo "######################################################################"
+echo "###  WARNING: DO NOT MODIFY THIS SCRIPT UNLESS YOU KNOW WHY!      ###"
+echo "###                                                               ###"
+echo "###  This script is used by:                                      ###"
+echo "###    • .github/workflows/docker-localnet.yml                    ###"
+echo "###    • Dockerfile-localnet                                      ###"
+echo "###                                                               ###"
+echo "###  Any changes may break CI builds or local Docker environments.###"
+echo "######################################################################"
+echo ""
+
+set -e
+
+echo "[*] Detecting platform..."
+UNAME_OUT="$(uname -s)"
+case "${UNAME_OUT}" in
+    Linux*)  OS=Linux;;
+    Darwin*) OS=Mac;;
+    *)       OS="UNKNOWN:${UNAME_OUT}"
+esac
+
+echo "[+] Platform: $OS"
+
+# Determine if we have root privileges
+if [ "$(id -u)" -eq 0 ]; then
+    SUDO=""
+else
+    if command -v sudo &>/dev/null; then
+        SUDO="sudo"
+    else
+        SUDO=""
+    fi
+fi
+
+# System Dependencies
+if [ "$OS" = "Linux" ]; then
+    echo "[+] Installing dependencies on Linux..."
+
+    if [ -z "$SUDO" ] && [ "$(id -u)" -ne 0 ]; then
+        echo "[!] Warning: no sudo and not root. Skipping apt install."
+    else
+        $SUDO sed -i 's|http://archive.ubuntu.com/ubuntu|http://mirrors.edge.kernel.org/ubuntu|g' /etc/apt/sources.list || true
+        $SUDO apt-get update
+        $SUDO apt-get install -y ca-certificates
+        $SUDO apt-get install -y --no-install-recommends \
+            curl build-essential protobuf-compiler clang git pkg-config libssl-dev llvm libudev-dev \
+            gcc-aarch64-linux-gnu gcc-x86-64-linux-gnu
+    fi
+
+elif [ "$OS" = "Mac" ]; then
+    echo "[+] Installing dependencies on macOS..."
+
+    if ! command -v brew &> /dev/null; then
+        echo "[!] Homebrew not found. Installing..."
+        /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
+        eval "$(/opt/homebrew/bin/brew shellenv)"
+    fi
+
+    brew install protobuf openssl llvm pkg-config
+
+    LDFLAGS="-L$(brew --prefix openssl)/lib"
+    export LDFLAGS
+
+    CPPFLAGS="-I$(brew --prefix openssl)/include"
+    export CPPFLAGS
+
+else
+    echo "[!] Unsupported OS: $OS"
+    exit 1
+fi
+
+# Rust Toolchain
+
+echo "[+] Installing Rust toolchain..."
+curl https://sh.rustup.rs -sSf | sh -s -- -y
+
+# Activate rust in the current shell
+source "$HOME/.cargo/env" || export PATH="$HOME/.cargo/bin:$PATH"
+
+rustup toolchain install 1.88.0 --profile minimal
+rustup default 1.88.0
+
+# Add Rust Targets
+
+echo "[+] Adding Rust targets for wasm + cross-arch binaries..."
+rustup target add wasm32v1-none
+rustup target add aarch64-unknown-linux-gnu
+rustup target add x86_64-unknown-linux-gnu
+
+echo "[✓] Environment setup complete."
\ No newline at end of file
diff --git a/scripts/install_prebuilt_binaries.sh b/scripts/install_prebuilt_binaries.sh
new file mode 100755
index 0000000000..e5b11ebf6d
--- /dev/null
+++ b/scripts/install_prebuilt_binaries.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+# Move the prebuilt binaries for the target architecture into place if they
+# were created in CI; otherwise exit without an error.
+if [ -z "$BUILT_IN_CI" ]; then
+    echo "[*] BUILT_IN_CI is not set. Skipping script..."
+    exit 0
+fi
+
+echo ""
+echo "######################################################################"
+echo "###               Pre-built binary substitution                   ###"
+echo "######################################################################"
+echo "###  WARNING: DO NOT MODIFY THIS SCRIPT UNLESS YOU KNOW WHY!      ###"
+echo "###                                                               ###"
+echo "###  This script is used by:                                      ###"
+echo "###    • .github/workflows/docker-localnet.yml                    ###"
+echo "###    • Dockerfile-localnet                                      ###"
+echo "###                                                               ###"
+echo "###  Any changes may break CI builds or local Docker environments.###"
+echo "######################################################################"
+echo ""
+
+set -e
+
+echo "[*] BUILT_IN_CI is set → using prebuilt binaries."
+echo "[*] Mapping TARGETARCH=${TARGETARCH} to Rust triple..."
+
+# The BUILD_TRIPLE values are defined in `.github/workflows/docker-localnet.yml`
+# under `build:matrix:platform:triple`. If they change in the workflow, update
+# the mapping below (and the `elif [ -d "/build/ci_target" ]` branch that
+# consumes it) to match. We substitute the binaries matching the architecture
+# of the Docker image layer being built.
+if [ "$TARGETARCH" = "amd64" ]; then
+    BUILD_TRIPLE="x86_64-unknown-linux-gnu"
+elif [ "$TARGETARCH" = "arm64" ]; then
+    BUILD_TRIPLE="aarch64-unknown-linux-gnu"
+else
+    echo "[!] Unknown TARGETARCH: ${TARGETARCH}" >&2
+    exit 1
+fi
+
+echo "[*] Using BUILD_TRIPLE=$BUILD_TRIPLE"
+echo "[*] Copying binaries to expected /build/target layout..."
+
+for RUNTIME in fast-runtime non-fast-runtime; do
+    # Kept for future debugging:
+    # echo "::group::/build directory tree"
+    # find /build -type f | sort
+    # echo "::endgroup::"
+
+    mkdir -p /build/target/${RUNTIME}/release/wbuild/node-subtensor-runtime
+    cp -v /build/build/ci_target/${RUNTIME}/${BUILD_TRIPLE}/release/node-subtensor \
+        /build/target/${RUNTIME}/release/node-subtensor
+    cp -v /build/build/ci_target/${RUNTIME}/${BUILD_TRIPLE}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm \
+        /build/target/${RUNTIME}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm
+done
\ No newline at end of file
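Editorial note: `TARGETARCH` is injected by Docker BuildKit during multi-platform builds, and is only visible to the script if the Dockerfile declares `ARG TARGETARCH`. A build that exercises both branches of the mapping above (tag and flags are illustrative):

    docker buildx build --platform linux/amd64,linux/arm64 \
        -f Dockerfile-localnet -t localnet .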
-d "$SPEC_PATH" ]; then mkdir -p "$SPEC_PATH" fi -if [[ $BUILD_BINARY == "1" ]]; then +if [[ "$BUILD_BINARY" == "1" ]]; then echo "*** Building substrate binary..." - CARGO_TARGET_DIR="$BUILD_DIR" cargo build --workspace --profile=release --features "$FEATURES" --manifest-path "$BASE_DIR/Cargo.toml" + + BUILD_CMD=( + cargo build + --workspace + --profile=release + --features "$FEATURES" + --manifest-path "$BASE_DIR/Cargo.toml" + ) + + if [[ -n "$CARGO_BUILD_TARGET" ]]; then + echo "[+] Cross-compiling for target: $CARGO_BUILD_TARGET" + BUILD_CMD+=(--target "$CARGO_BUILD_TARGET") + else + echo "[+] Building for host architecture" + fi + + CARGO_TARGET_DIR="$BUILD_DIR" "${BUILD_CMD[@]}" echo "*** Binary compiled" fi diff --git a/scripts/localnet_patch.sh b/scripts/localnet_patch.sh index f5a84e8348..e3bee8c5b8 100755 --- a/scripts/localnet_patch.sh +++ b/scripts/localnet_patch.sh @@ -19,7 +19,7 @@ if ! grep -q 'pub fn DefaultPendingCooldown() -> u64 {' "$DefaultPend exit 1 fi -if ! grep -q 'TransactionType::SetChildren => 150, // 30 minutes' "$SetChildren"; then +if ! grep -q 'Self::SetChildren => 150, // 30 minutes' "$SetChildren"; then echo "Error: Target string not found in $SetChildren" exit 1 fi @@ -27,6 +27,6 @@ fi # replace perl -0777 -i -pe 's|pub const DurationOfStartCall: u64 = prod_or_fast!\(7 \* 24 \* 60 \* 60 / 12, 10\);|pub const DurationOfStartCall: u64 = prod_or_fast!(5, 10);|' "$DurationOfStartCall" perl -0777 -i -pe 's|pub fn DefaultPendingCooldown\(\) -> u64 \{\s*prod_or_fast!\(7_200, 15\)\s*\}|pub fn DefaultPendingCooldown() -> u64 {\n prod_or_fast!(15, 15)\n }|g' "$DefaultPendingCooldown" -perl -0777 -i -pe 's|TransactionType::SetChildren => 150, // 30 minutes|TransactionType::SetChildren => 15, // 3 min|' "$SetChildren" +perl -0777 -i -pe 's|Self::SetChildren => 150, // 30 minutes|Self::SetChildren => 15, // 3 min|' "$SetChildren" echo "Patch applied successfully."