diff --git a/Cargo.lock b/Cargo.lock index 956dfcda95..7d65c7aac9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7619,6 +7619,7 @@ dependencies = [ "sp-keyring", "sp-keystore", "sp-runtime", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2503-6)", "subtensor-macros", "tle", "w3f-bls", diff --git a/Dockerfile-localnet b/Dockerfile-localnet index fcdd830c15..70232b0553 100644 --- a/Dockerfile-localnet +++ b/Dockerfile-localnet @@ -52,6 +52,10 @@ COPY --from=builder /build/snapshot.json /snapshot.json COPY --from=builder /build/scripts/localnet.sh scripts/localnet.sh RUN chmod +x /scripts/localnet.sh +# Copy WebAssembly artifacts +COPY --from=builder /build/target/fast-runtime/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm target/fast-runtime/release/node_subtensor_runtime.compact.compressed.wasm +COPY --from=builder /build/target/non-fast-runtime/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm target/non-fast-runtime/release/node_subtensor_runtime.compact.compressed.wasm + ## Ubdate certificates RUN apt-get update && apt-get install -y ca-certificates diff --git a/docs/transaction-priority.md b/docs/transaction-priority.md new file mode 100644 index 0000000000..36ebf79e64 --- /dev/null +++ b/docs/transaction-priority.md @@ -0,0 +1,36 @@ +## Transaction Priority + +### Overview +In Subtensor, transaction priority is determined by custom transaction extensions, which alter or override the default Substrate SDK behavior. Extensions affecting transaction priority are: + +- **`ChargeTransactionPaymentWrapper`** (wraps `ChargeTransactionPayment`) +- **`DrandPriority`** + +Substrate SDK combines priorities from all transaction extensions using addition. + +--- + +### 1. `ChargeTransactionPaymentWrapper` +In the Substrate SDK, `ChargeTransactionPayment` normally calculates transaction priority based on: +- **Tip** — an extra fee paid by the sender. 
+- **Weight** — computational complexity of the transaction. +- **Dispatch class** — category of the transaction (`Normal`, `Operational`, `Mandatory`). + +However, in Subtensor, `ChargeTransactionPaymentWrapper` **overrides** this logic. +It replaces the dynamic calculation with a **flat priority scale** based only on the dispatch class. + +#### Current priority values: +| Dispatch Class | Priority Value | Notes | +|---------------------|-------------------|--------------------------------------------------------------| +| `Normal` | `1` | Standard transactions | +| `Mandatory` | `1` | Rarely used, same as `Normal` | +| `Operational` | `10_000_000_000` | Reserved for critical system extrinsics (e.g.: `sudo` calls) | + + +--- + +### 2. `DrandPriority` + +Special pallet_drand priority: 10_000 for `write_pulse` extrinsic. + +--- \ No newline at end of file diff --git a/evm-tests/src/substrate.ts b/evm-tests/src/substrate.ts index 2a627a2c0c..6f3acc866c 100644 --- a/evm-tests/src/substrate.ts +++ b/evm-tests/src/substrate.ts @@ -168,30 +168,31 @@ export async function waitForTransactionCompletion(api: TypedApi, // }) } + export async function getTransactionWatchPromise(tx: Transaction<{}, string, string, void>, signer: PolkadotSigner,) { return new Promise((resolve, reject) => { // store the txHash, then use it in timeout. 
easier to know which tx is not finalized in time let txHash = "" const subscription = tx.signSubmitAndWatch(signer).subscribe({ next(value) { - console.log("Event:", value); txHash = value.txHash // TODO investigate why finalized not for each extrinsic if (value.type === "finalized") { console.log("Transaction is finalized in block:", value.txHash); subscription.unsubscribe(); + clearTimeout(timeoutId); if (!value.ok) { console.log("Transaction threw an error:", value.dispatchError) } // Resolve the promise when the transaction is finalized resolve(); - } }, error(err) { console.error("Transaction failed:", err); subscription.unsubscribe(); + clearTimeout(timeoutId); // Reject the promise in case of an error reject(err); @@ -201,7 +202,7 @@ export async function getTransactionWatchPromise(tx: Transaction<{}, string, str } }); - setTimeout(() => { + const timeoutId = setTimeout(() => { subscription.unsubscribe(); console.log('unsubscribed because of timeout for tx {}', txHash); reject() diff --git a/evm-tests/src/subtensor.ts b/evm-tests/src/subtensor.ts index a2e5d49083..b1ff818038 100644 --- a/evm-tests/src/subtensor.ts +++ b/evm-tests/src/subtensor.ts @@ -32,7 +32,7 @@ export async function addNewSubnetwork(api: TypedApi, hotkey: Key // force set balance for a ss58 address export async function forceSetBalanceToSs58Address(api: TypedApi, ss58Address: string) { const alice = getAliceSigner() - const balance = tao(1e8) + const balance = tao(1e10) const internalCall = api.tx.Balances.force_set_balance({ who: MultiAddress.Id(ss58Address), new_free: balance }) const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) diff --git a/evm-tests/test/uid.precompile.lookup.test.ts b/evm-tests/test/evm-uid.precompile.lookup.test.ts similarity index 100% rename from evm-tests/test/uid.precompile.lookup.test.ts rename to evm-tests/test/evm-uid.precompile.lookup.test.ts diff --git a/evm-tests/test/neuron.precompile.reveal-weights.test.ts 
b/evm-tests/test/neuron.precompile.reveal-weights.test.ts index 8045ac18f1..4ac63468db 100644 --- a/evm-tests/test/neuron.precompile.reveal-weights.test.ts +++ b/evm-tests/test/neuron.precompile.reveal-weights.test.ts @@ -11,9 +11,9 @@ import { generateRandomEthersWallet } from "../src/utils" import { convertH160ToPublicKey } from "../src/address-utils" import { blake2AsU8a } from "@polkadot/util-crypto" import { - forceSetBalanceToEthAddress, forceSetBalanceToSs58Address, addNewSubnetwork, setCommitRevealWeightsEnabled, setWeightsSetRateLimit, burnedRegister, + forceSetBalanceToEthAddress, forceSetBalanceToSs58Address, addNewSubnetwork, setWeightsSetRateLimit, burnedRegister, setTempo, setCommitRevealWeightsInterval, - startCall + startCall, } from "../src/subtensor" // hardcode some values for reveal hash @@ -52,6 +52,7 @@ describe("Test neuron precompile reveal weights", () => { const coldkey = getRandomSubstrateKeypair(); let api: TypedApi + let commitEpoch: number; // sudo account alice as signer let alice: PolkadotSigner; @@ -65,13 +66,11 @@ describe("Test neuron precompile reveal weights", () => { await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(coldkey.publicKey)) await forceSetBalanceToEthAddress(api, wallet.address) let netuid = await addNewSubnetwork(api, hotkey, coldkey) + // await disableCommitRevealWeights(api, netuid) await startCall(api, netuid, coldkey) console.log("test the case on subnet ", netuid) - // enable commit reveal feature - await setCommitRevealWeightsEnabled(api, netuid, true) - // set it as 0, we can set the weight anytime await setWeightsSetRateLimit(api, netuid, BigInt(0)) const ss58Address = convertH160ToSS58(wallet.address) @@ -90,8 +89,15 @@ describe("Test neuron precompile reveal weights", () => { const subnetId = totalNetworks - 1 const commitHash = getCommitHash(subnetId, wallet.address) const contract = new ethers.Contract(INEURON_ADDRESS, INeuronABI, wallet); - const tx = await contract.commitWeights(subnetId, 
commitHash) - await tx.wait() + try { + const tx = await contract.commitWeights(subnetId, commitHash) + await tx.wait() + } catch (e) { + console.log("commitWeights failed", e) + } + + const commitBlock = await api.query.System.Number.getValue() + commitEpoch = Math.trunc(commitBlock / (100 + 1)) const ss58Address = convertH160ToSS58(wallet.address) @@ -108,9 +114,19 @@ describe("Test neuron precompile reveal weights", () => { const netuid = totalNetworks - 1 const contract = new ethers.Contract(INEURON_ADDRESS, INeuronABI, wallet); // set tempo or epoch large, then enough time to reveal weight - await setTempo(api, netuid, 60000) - // set interval epoch as 0, we can reveal at the same epoch - await setCommitRevealWeightsInterval(api, netuid, BigInt(0)) + await setTempo(api, netuid, 100) + // set interval epoch as 1, it is the minimum value now + await setCommitRevealWeightsInterval(api, netuid, BigInt(1)) + + while (true) { + const currentBlock = await api.query.System.Number.getValue() + const currentEpoch = Math.trunc(currentBlock / (100 + 1)) + // wait for one second for fast blocks + if (currentEpoch > commitEpoch) { + break + } + await new Promise(resolve => setTimeout(resolve, 1000)) + } const tx = await contract.revealWeights( netuid, @@ -120,6 +136,7 @@ describe("Test neuron precompile reveal weights", () => { version_key ); await tx.wait() + const ss58Address = convertH160ToSS58(wallet.address) // check the weight commit is removed after reveal successfully diff --git a/node/src/benchmarking.rs b/node/src/benchmarking.rs index df21d5bef4..ad2abfc935 100644 --- a/node/src/benchmarking.rs +++ b/node/src/benchmarking.rs @@ -5,8 +5,8 @@ use crate::client::FullClient; use node_subtensor_runtime as runtime; -use node_subtensor_runtime::check_nonce; use node_subtensor_runtime::pallet_subtensor; +use node_subtensor_runtime::{check_nonce, transaction_payment_wrapper}; use runtime::{BalancesCall, SystemCall}; use sc_cli::Result; use sc_client_api::BlockBackend; @@ 
-123,21 +123,27 @@ pub fn create_benchmark_extrinsic( .checked_next_power_of_two() .map(|c| c / 2) .unwrap_or(2) as u64; - let extra: runtime::TransactionExtensions = ( - frame_system::CheckNonZeroSender::::new(), - frame_system::CheckSpecVersion::::new(), - frame_system::CheckTxVersion::::new(), - frame_system::CheckGenesis::::new(), - frame_system::CheckEra::::from(sp_runtime::generic::Era::mortal( - period, - best_block.saturated_into(), - )), - check_nonce::CheckNonce::::from(nonce), - frame_system::CheckWeight::::new(), - pallet_transaction_payment::ChargeTransactionPayment::::from(0), - pallet_subtensor::SubtensorTransactionExtension::::new(), - frame_metadata_hash_extension::CheckMetadataHash::::new(true), - ); + let extra: runtime::TransactionExtensions = + ( + frame_system::CheckNonZeroSender::::new(), + frame_system::CheckSpecVersion::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckEra::::from(sp_runtime::generic::Era::mortal( + period, + best_block.saturated_into(), + )), + check_nonce::CheckNonce::::from(nonce), + frame_system::CheckWeight::::new(), + transaction_payment_wrapper::ChargeTransactionPaymentWrapper::new( + pallet_transaction_payment::ChargeTransactionPayment::::from(0), + ), + pallet_subtensor::transaction_extension::SubtensorTransactionExtension::< + runtime::Runtime, + >::new(), + pallet_drand::drand_priority::DrandPriority::::new(), + frame_metadata_hash_extension::CheckMetadataHash::::new(true), + ); let raw_payload = runtime::SignedPayload::from_raw( call.clone(), @@ -152,6 +158,7 @@ pub fn create_benchmark_extrinsic( (), (), (), + (), None, ), ); diff --git a/node/src/command.rs b/node/src/command.rs index eb261bedfa..847d6faba2 100644 --- a/node/src/command.rs +++ b/node/src/command.rs @@ -318,14 +318,14 @@ fn customise_config(arg_matches: &ArgMatches, config: Configuration) -> Configur // If the operator did **not** supply `--rpc-max-subscriptions-per-connection` set to 
high value. config.rpc.max_subs_per_conn = match arg_matches - .value_source("rpc-max-subscriptions-per-connection") + .value_source("rpc_max_subscriptions_per_connection") { Some(ValueSource::CommandLine) => cli.run.rpc_params.rpc_max_subscriptions_per_connection, _ => 10000, }; // If the operator did **not** supply `--rpc-max-connections` set to high value. - config.rpc.max_connections = match arg_matches.value_source("rpc-max-connections") { + config.rpc.max_connections = match arg_matches.value_source("rpc_max_connections") { Some(ValueSource::CommandLine) => cli.run.rpc_params.rpc_max_connections, _ => 10000, }; diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 10bc00c5ae..03a20a5f21 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -107,8 +107,6 @@ pub mod pallet { BondsMovingAverageMaxReached, /// Only root can set negative sigmoid steepness values NegativeSigmoidSteepness, - /// Reveal Peroid is not within the valid range. - RevealPeriodOutOfBounds, } /// Enum for specifying the type of precompile operation. 
#[derive( @@ -1311,14 +1309,10 @@ pub mod pallet { Error::::SubnetDoesNotExist ); - const MAX_COMMIT_REVEAL_PEROIDS: u64 = 100; - ensure!( - interval <= MAX_COMMIT_REVEAL_PEROIDS, - Error::::RevealPeriodOutOfBounds - ); - - pallet_subtensor::Pallet::::set_reveal_period(netuid, interval); log::debug!("SetWeightCommitInterval( netuid: {netuid:?}, interval: {interval:?} ) "); + + pallet_subtensor::Pallet::::set_reveal_period(netuid, interval)?; + Ok(()) } @@ -1673,6 +1667,21 @@ pub mod pallet { pallet_subtensor::Pallet::::set_commit_reveal_weights_version(version); Ok(()) } + + /// Sets the number of immune owner neurons + #[pallet::call_index(72)] + #[pallet::weight(Weight::from_parts(15_000_000, 0) + .saturating_add(::DbWeight::get().reads(1_u64)) + .saturating_add(::DbWeight::get().writes(1_u64)))] + pub fn sudo_set_owner_immune_neuron_limit( + origin: OriginFor, + netuid: NetUid, + immune_neurons: u16, + ) -> DispatchResult { + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + pallet_subtensor::Pallet::::set_owner_immune_neuron_limit(netuid, immune_neurons)?; + Ok(()) + } } } diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index 754befc805..5290d3ddfc 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -1120,7 +1120,7 @@ fn test_sudo_set_commit_reveal_weights_enabled() { let netuid = NetUid::from(1); add_network(netuid, 10); - let to_be_set: bool = true; + let to_be_set: bool = false; let init_value: bool = SubtensorModule::get_commit_reveal_weights_enabled(netuid); assert_ok!(AdminUtils::sudo_set_commit_reveal_weights_enabled( @@ -1459,7 +1459,7 @@ fn sudo_set_commit_reveal_weights_interval() { netuid, too_high ), - Error::::RevealPeriodOutOfBounds + pallet_subtensor::Error::::RevealPeriodTooLarge ); let to_be_set = 55; diff --git a/pallets/commitments/src/lib.rs b/pallets/commitments/src/lib.rs index 7e3b1704a0..34192b6fa2 100644 --- 
a/pallets/commitments/src/lib.rs +++ b/pallets/commitments/src/lib.rs @@ -208,7 +208,7 @@ pub mod pallet { Weight::from_parts(33_480_000, 0) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)), - DispatchClass::Operational, + DispatchClass::Normal, Pays::No ))] pub fn set_commitment( diff --git a/pallets/drand/Cargo.toml b/pallets/drand/Cargo.toml index c6a0705627..269e993d02 100644 --- a/pallets/drand/Cargo.toml +++ b/pallets/drand/Cargo.toml @@ -27,6 +27,7 @@ frame-system.workspace = true sp-core.workspace = true sp-io.workspace = true sp-runtime.workspace = true +sp-std.workspace = true # arkworks dependencies sp-ark-bls12-381.workspace = true ark-bls12-381 = { workspace = true, features = ["curve"] } @@ -55,6 +56,7 @@ std = [ "frame-system/std", "scale-info/std", "sp-core/std", + "sp-std/std", "sp-io/std", "sp-keystore/std", "sp-keyring/std", diff --git a/pallets/drand/src/drand_priority.rs b/pallets/drand/src/drand_priority.rs new file mode 100644 index 0000000000..c63ffa5803 --- /dev/null +++ b/pallets/drand/src/drand_priority.rs @@ -0,0 +1,89 @@ +use crate::{Call, Config}; +use codec::{Decode, DecodeWithMemTracking, Encode}; +use frame_support::dispatch::{DispatchInfo, PostDispatchInfo}; +use frame_support::pallet_prelude::Weight; +use frame_support::traits::IsSubType; +use scale_info::TypeInfo; +use sp_runtime::traits::{ + DispatchInfoOf, DispatchOriginOf, Dispatchable, Implication, TransactionExtension, + ValidateResult, +}; +use sp_runtime::transaction_validity::{ + TransactionPriority, TransactionSource, TransactionValidityError, ValidTransaction, +}; +use sp_std::marker::PhantomData; +use subtensor_macros::freeze_struct; + +pub type RuntimeCallFor = ::RuntimeCall; + +#[freeze_struct("d0d094192bd6390e")] +#[derive(Default, Encode, Decode, DecodeWithMemTracking, Clone, Eq, PartialEq, TypeInfo)] +pub struct DrandPriority(pub PhantomData); + +impl sp_std::fmt::Debug for DrandPriority { + fn fmt(&self, f: &mut 
sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "DrandPriority") + } +} + +impl DrandPriority { + pub fn new() -> Self { + Self(PhantomData) + } + + fn get_drand_priority() -> TransactionPriority { + 10_000u64 + } +} + +impl TransactionExtension> + for DrandPriority +where + ::RuntimeCall: + Dispatchable, + ::RuntimeCall: IsSubType>, +{ + const IDENTIFIER: &'static str = "DrandPriority"; + type Implicit = (); + type Val = (); + type Pre = (); + + fn weight(&self, _call: &RuntimeCallFor) -> Weight { + // TODO: benchmark transaction extension + Weight::zero() + } + + fn validate( + &self, + origin: DispatchOriginOf>, + call: &RuntimeCallFor, + _info: &DispatchInfoOf>, + _len: usize, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Implication, + _source: TransactionSource, + ) -> ValidateResult> { + match call.is_sub_type() { + Some(Call::write_pulse { .. }) => { + let validity = ValidTransaction { + priority: Self::get_drand_priority(), + ..Default::default() + }; + + Ok((validity, (), origin)) + } + _ => Ok((Default::default(), (), origin)), + } + } + + fn prepare( + self, + _val: Self::Val, + _origin: &DispatchOriginOf>, + _call: &RuntimeCallFor, + _info: &DispatchInfoOf>, + _len: usize, + ) -> Result { + Ok(()) + } +} diff --git a/pallets/drand/src/lib.rs b/pallets/drand/src/lib.rs index 0acf8ec493..145aeb6657 100644 --- a/pallets/drand/src/lib.rs +++ b/pallets/drand/src/lib.rs @@ -58,6 +58,7 @@ use sp_runtime::{ }; pub mod bls12_381; +pub mod drand_priority; pub mod migrations; pub mod types; pub mod utils; @@ -404,9 +405,9 @@ pub mod pallet { /// * `origin`: the root user /// * `config`: the beacon configuration #[pallet::call_index(1)] - #[pallet::weight(Weight::from_parts(8_766_000, 0) + #[pallet::weight((Weight::from_parts(8_766_000, 0) .saturating_add(T::DbWeight::get().reads(0_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)))] + .saturating_add(T::DbWeight::get().writes(2_u64)), DispatchClass::Operational))] pub fn 
set_beacon_config( origin: OriginFor, config_payload: BeaconConfigurationPayload>, @@ -425,9 +426,9 @@ pub mod pallet { /// allows the root user to set the oldest stored round #[pallet::call_index(2)] - #[pallet::weight(Weight::from_parts(5_370_000, 0) + #[pallet::weight((Weight::from_parts(5_370_000, 0) .saturating_add(T::DbWeight::get().reads(0_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)))] + .saturating_add(T::DbWeight::get().writes(1_u64)), DispatchClass::Operational))] pub fn set_oldest_stored_round(origin: OriginFor, oldest_round: u64) -> DispatchResult { ensure_root(origin)?; OldestStoredRound::::put(oldest_round); diff --git a/pallets/registry/src/lib.rs b/pallets/registry/src/lib.rs index e4e99e4459..a4f7571bea 100644 --- a/pallets/registry/src/lib.rs +++ b/pallets/registry/src/lib.rs @@ -113,7 +113,7 @@ pub mod pallet { #[pallet::call_index(0)] #[pallet::weight(( T::WeightInfo::set_identity(), - DispatchClass::Operational + DispatchClass::Normal ))] pub fn set_identity( origin: OriginFor, diff --git a/pallets/subtensor/src/benchmarks.rs b/pallets/subtensor/src/benchmarks.rs index 6a2a35e3c0..5cdb47685d 100644 --- a/pallets/subtensor/src/benchmarks.rs +++ b/pallets/subtensor/src/benchmarks.rs @@ -60,6 +60,7 @@ mod pallet_benchmarks { Subtensor::::set_network_registration_allowed(netuid, true); Subtensor::::set_max_registrations_per_block(netuid, 4096); Subtensor::::set_target_registrations_per_interval(netuid, 4096); + Subtensor::::set_commit_reveal_weights_enabled(netuid, false); let mut seed: u32 = 1; let mut dests = Vec::new(); diff --git a/pallets/subtensor/src/coinbase/reveal_commits.rs b/pallets/subtensor/src/coinbase/reveal_commits.rs index 029b77227c..e7bc6dc008 100644 --- a/pallets/subtensor/src/coinbase/reveal_commits.rs +++ b/pallets/subtensor/src/coinbase/reveal_commits.rs @@ -45,9 +45,9 @@ impl Pallet { let reveal_epoch = cur_epoch.saturating_sub(reveal_period); // Clean expired commits - for (epoch, _) in 
CRV3WeightCommitsV2::::iter_prefix(netuid) { + for (epoch, _) in TimelockedWeightCommits::::iter_prefix(netuid) { if epoch < reveal_epoch { - CRV3WeightCommitsV2::::remove(netuid, epoch); + TimelockedWeightCommits::::remove(netuid, epoch); } } @@ -57,7 +57,7 @@ impl Pallet { return Ok(()); } - let mut entries = CRV3WeightCommitsV2::::take(netuid, reveal_epoch); + let mut entries = TimelockedWeightCommits::::take(netuid, reveal_epoch); let mut unrevealed = VecDeque::new(); // Keep popping items off the front of the queue until we successfully reveal a commit. @@ -185,11 +185,11 @@ impl Pallet { continue; } - Self::deposit_event(Event::CRV3WeightsRevealed(netuid, who)); + Self::deposit_event(Event::TimelockedWeightsRevealed(netuid, who)); } if !unrevealed.is_empty() { - CRV3WeightCommitsV2::::insert(netuid, reveal_epoch, unrevealed); + TimelockedWeightCommits::::insert(netuid, reveal_epoch, unrevealed); } Ok(()) diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index 551883653b..dcdab8072e 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -426,6 +426,47 @@ impl Pallet { (prop_alpha_dividends, tao_dividends) } + fn get_immune_owner_hotkeys(netuid: NetUid, coldkey: &T::AccountId) -> Vec { + // Gather (block, uid, hotkey) only for hotkeys that have a UID and a registration block. 
+ let mut triples: Vec<(u64, u16, T::AccountId)> = OwnedHotkeys::::get(coldkey) + .into_iter() + .filter_map(|hotkey| { + // Uids must exist, filter_map ignores hotkeys without UID + Uids::::get(netuid, &hotkey).map(|uid| { + let block = BlockAtRegistration::::get(netuid, uid); + (block, uid, hotkey) + }) + }) + .collect(); + + // Sort by BlockAtRegistration (descending), then by uid (ascending) + // Recent registration is priority so that we can let older keys expire (get non-immune) + triples.sort_by(|(b1, u1, _), (b2, u2, _)| b2.cmp(b1).then(u1.cmp(u2))); + + // Keep first ImmuneOwnerUidsLimit + let limit = ImmuneOwnerUidsLimit::::get(netuid).into(); + if triples.len() > limit { + triples.truncate(limit); + } + + // Project to just hotkeys + let mut immune_hotkeys: Vec = + triples.into_iter().map(|(_, _, hk)| hk).collect(); + + // Insert subnet owner hotkey in the beginning of the list if valid and not + // already present + if let Ok(owner_hk) = SubnetOwnerHotkey::::try_get(netuid) { + if Uids::::get(netuid, &owner_hk).is_some() && !immune_hotkeys.contains(&owner_hk) { + immune_hotkeys.insert(0, owner_hk); + if immune_hotkeys.len() > limit { + immune_hotkeys.truncate(limit); + } + } + } + + immune_hotkeys + } + pub fn distribute_dividends_and_incentives( netuid: NetUid, owner_cut: AlphaCurrency, @@ -454,17 +495,20 @@ impl Pallet { } // Distribute mining incentives. + let subnet_owner_coldkey = SubnetOwner::::get(netuid); + let owner_hotkeys = Self::get_immune_owner_hotkeys(netuid, &subnet_owner_coldkey); + log::debug!("incentives: owner hotkeys: {owner_hotkeys:?}"); for (hotkey, incentive) in incentives { log::debug!("incentives: hotkey: {incentive:?}"); - if let Ok(owner_hotkey) = SubnetOwnerHotkey::::try_get(netuid) { - if hotkey == owner_hotkey { - log::debug!( - "incentives: hotkey: {hotkey:?} is SN owner hotkey, skipping {incentive:?}" - ); - continue; // Skip/burn miner-emission for SN owner hotkey. 
- } + // Skip/burn miner-emission for immune keys + if owner_hotkeys.contains(&hotkey) { + log::debug!( + "incentives: hotkey: {hotkey:?} is SN owner hotkey or associated hotkey, skipping {incentive:?}" + ); + continue; } + // Increase stake for miner. Self::increase_stake_for_hotkey_and_coldkey_on_subnet( &hotkey.clone(), diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index ce3d3fd208..2f302c2a5e 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -607,7 +607,7 @@ impl Pallet { } // ---------- v3 ------------------------------------------------------ - for (_epoch, q) in CRV3WeightCommitsV2::::iter_prefix(netuid) { + for (_epoch, q) in TimelockedWeightCommits::::iter_prefix(netuid) { for (who, cb, ..) in q.iter() { if !Self::is_commit_expired(netuid, *cb) { if let Some(i) = uid_of(who) { diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index ea4beb3f2b..ef41b07c78 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -8,28 +8,20 @@ use frame_system::{self as system, ensure_signed}; pub use pallet::*; -use codec::{Decode, DecodeWithMemTracking, Encode}; +use codec::{Decode, Encode}; use frame_support::sp_runtime::transaction_validity::InvalidTransaction; -use frame_support::sp_runtime::transaction_validity::ValidTransaction; use frame_support::{ - dispatch::{self, DispatchInfo, DispatchResult, DispatchResultWithPostInfo, PostDispatchInfo}, + dispatch::{self, DispatchResult, DispatchResultWithPostInfo}, ensure, pallet_macros::import_section, pallet_prelude::*, - traits::{IsSubType, tokens::fungible}, + traits::tokens::fungible, }; use pallet_balances::Call as BalancesCall; // use pallet_scheduler as Scheduler; use scale_info::TypeInfo; use sp_core::Get; -use sp_runtime::{ - DispatchError, - traits::{ - AsSystemOriginSigner, DispatchInfoOf, Dispatchable, Implication, PostDispatchInfoOf, - TransactionExtension, 
ValidateResult, - }, - transaction_validity::{TransactionValidity, TransactionValidityError}, -}; +use sp_runtime::{DispatchError, transaction_validity::TransactionValidityError}; use sp_std::marker::PhantomData; use subtensor_runtime_common::{AlphaCurrency, Currency, NetUid, TaoCurrency}; @@ -55,6 +47,7 @@ use macros::{config, dispatches, errors, events, genesis, hooks}; #[cfg(test)] mod tests; +pub mod transaction_extension; // apparently this is stabilized since rust 1.36 extern crate alloc; @@ -113,6 +106,11 @@ pub mod pallet { /// Minimum balance required to perform a coldkey swap pub const MIN_BALANCE_TO_PERFORM_COLDKEY_SWAP: TaoCurrency = TaoCurrency::new(100_000_000); // 0.1 TAO in RAO + /// Minimum commit reveal periods + pub const MIN_COMMIT_REVEAL_PEROIDS: u64 = 1; + /// Maximum commit reveal periods + pub const MAX_COMMIT_REVEAL_PEROIDS: u64 = 100; + #[pallet::pallet] #[pallet::without_storage_info] #[pallet::storage_version(STORAGE_VERSION)] @@ -768,7 +766,7 @@ pub mod pallet { #[pallet::type_value] /// Default value for weight commit/reveal enabled. pub fn DefaultCommitRevealWeightsEnabled() -> bool { - false + true } #[pallet::type_value] /// Default value for weight commit/reveal version. 
@@ -1464,6 +1462,26 @@ pub mod pallet { pub type SubtokenEnabled = StorageMap<_, Identity, NetUid, bool, ValueQuery, DefaultFalse>; + #[pallet::type_value] + /// Default value for burn keys limit + pub fn DefaultImmuneOwnerUidsLimit() -> u16 { + 1 + } + #[pallet::type_value] + /// Maximum value for burn keys limit + pub fn MaxImmuneOwnerUidsLimit() -> u16 { + 10 + } + #[pallet::type_value] + /// Minimum value for burn keys limit + pub fn MinImmuneOwnerUidsLimit() -> u16 { + 1 + } + #[pallet::storage] + /// --- MAP ( netuid ) --> Burn key limit + pub type ImmuneOwnerUidsLimit = + StorageMap<_, Identity, NetUid, u16, ValueQuery, DefaultImmuneOwnerUidsLimit>; + /// ======================================= /// ==== Subnetwork Consensus Storage ==== /// ======================================= @@ -1664,6 +1682,23 @@ pub mod pallet { OptionQuery, >; #[pallet::storage] + /// MAP (netuid, epoch) → VecDeque<(who, commit_block, ciphertext, reveal_round)> + /// Stores a queue of weight commits for an account on a given subnet. + pub type TimelockedWeightCommits = StorageDoubleMap< + _, + Twox64Concat, + NetUid, + Twox64Concat, + u64, // epoch key + VecDeque<( + T::AccountId, + u64, // commit_block + BoundedVec>, + RoundNumber, + )>, + ValueQuery, + >; + #[pallet::storage] /// MAP (netuid, epoch) → VecDeque<(who, ciphertext, reveal_round)> /// DEPRECATED for CRV3WeightCommitsV2 pub type CRV3WeightCommits = StorageDoubleMap< @@ -1681,7 +1716,7 @@ pub mod pallet { >; #[pallet::storage] /// MAP (netuid, epoch) → VecDeque<(who, commit_block, ciphertext, reveal_round)> - /// Stores a queue of v3 commits for an account on a given netuid. + /// DEPRECATED for TimelockedWeightCommits pub type CRV3WeightCommitsV2 = StorageDoubleMap< _, Twox64Concat, @@ -1803,40 +1838,6 @@ pub mod pallet { // ---- Subtensor helper functions. impl Pallet { - /// Returns the transaction priority for setting weights. 
- pub fn get_priority_set_weights(hotkey: &T::AccountId, netuid: NetUid) -> u64 { - if let Ok(uid) = Self::get_uid_for_net_and_hotkey(netuid, hotkey) { - // TODO rethink this. - let _stake = Self::get_inherited_for_hotkey_on_subnet(hotkey, netuid); - let current_block_number: u64 = Self::get_current_block_as_u64(); - let default_priority: u64 = - current_block_number.saturating_sub(Self::get_last_update_for_uid(netuid, uid)); - return default_priority.saturating_add(u32::MAX as u64); - } - 0 - } - - // FIXME this function is used both to calculate for alpha stake amount as well as tao - // amount - /// Returns the transaction priority for stake operations. - pub fn get_priority_staking( - coldkey: &T::AccountId, - hotkey: &T::AccountId, - stake_amount: u64, - ) -> u64 { - match LastColdkeyHotkeyStakeBlock::::get(coldkey, hotkey) { - Some(last_stake_block) => { - let current_block_number = Self::get_current_block_as_u64(); - let default_priority = current_block_number.saturating_sub(last_stake_block); - - default_priority - .saturating_add(u32::MAX as u64) - .saturating_add(stake_amount) - } - None => stake_amount, - } - } - /// Is the caller allowed to set weights pub fn check_weights_min_stake(hotkey: &T::AccountId, netuid: NetUid) -> bool { // Blacklist weights transactions for low stake peers. 
@@ -1879,22 +1880,6 @@ pub mod pallet { } } -/************************************************************ - CallType definition -************************************************************/ -#[derive(Debug, PartialEq, Default)] -pub enum CallType { - SetWeights, - AddStake, - RemoveStake, - AddDelegate, - Register, - Serve, - RegisterNetwork, - #[default] - Other, -} - #[derive(Debug, PartialEq)] pub enum CustomTransactionError { ColdkeyInSwapSchedule, @@ -1952,533 +1937,6 @@ impl From for TransactionValidityError { } } -#[freeze_struct("2e02eb32e5cb25d3")] -#[derive(Default, Encode, Decode, DecodeWithMemTracking, Clone, Eq, PartialEq, TypeInfo)] -pub struct SubtensorTransactionExtension(pub PhantomData); - -impl sp_std::fmt::Debug for SubtensorTransactionExtension { - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "SubtensorTransactionExtension") - } -} - -impl SubtensorTransactionExtension -where - ::RuntimeCall: - Dispatchable, - ::RuntimeCall: IsSubType>, -{ - pub fn new() -> Self { - Self(Default::default()) - } - - pub fn get_priority_vanilla() -> u64 { - // Return high priority so that every extrinsic except set_weights function will - // have a higher priority than the set_weights call - u64::MAX - } - - pub fn get_priority_set_weights(who: &T::AccountId, netuid: NetUid) -> u64 { - Pallet::::get_priority_set_weights(who, netuid) - } - - pub fn get_priority_staking( - coldkey: &T::AccountId, - hotkey: &T::AccountId, - stake_amount: u64, - ) -> u64 { - Pallet::::get_priority_staking(coldkey, hotkey, stake_amount) - } - - pub fn check_weights_min_stake(who: &T::AccountId, netuid: NetUid) -> bool { - Pallet::::check_weights_min_stake(who, netuid) - } - - pub fn validity_ok(priority: u64) -> ValidTransaction { - ValidTransaction { - priority, - ..Default::default() - } - } - - pub fn result_to_validity(result: Result<(), Error>, priority: u64) -> TransactionValidity { - if let Err(err) = result { - Err(match err { - 
Error::::AmountTooLow => CustomTransactionError::StakeAmountTooLow.into(), - Error::::SubnetNotExists => CustomTransactionError::SubnetDoesntExist.into(), - Error::::NotEnoughBalanceToStake => CustomTransactionError::BalanceTooLow.into(), - Error::::HotKeyAccountNotExists => { - CustomTransactionError::HotkeyAccountDoesntExist.into() - } - Error::::NotEnoughStakeToWithdraw => { - CustomTransactionError::NotEnoughStakeToWithdraw.into() - } - Error::::InsufficientLiquidity => { - CustomTransactionError::InsufficientLiquidity.into() - } - Error::::SlippageTooHigh => CustomTransactionError::SlippageTooHigh.into(), - Error::::TransferDisallowed => CustomTransactionError::TransferDisallowed.into(), - Error::::HotKeyNotRegisteredInNetwork => { - CustomTransactionError::HotKeyNotRegisteredInNetwork.into() - } - Error::::InvalidIpAddress => CustomTransactionError::InvalidIpAddress.into(), - Error::::ServingRateLimitExceeded => { - CustomTransactionError::ServingRateLimitExceeded.into() - } - Error::::InvalidPort => CustomTransactionError::InvalidPort.into(), - _ => CustomTransactionError::BadRequest.into(), - }) - } else { - Ok(ValidTransaction { - priority, - ..Default::default() - }) - } - } -} - -impl - TransactionExtension<::RuntimeCall> - for SubtensorTransactionExtension -where - ::RuntimeCall: - Dispatchable, - ::RuntimeOrigin: AsSystemOriginSigner + Clone, - ::RuntimeCall: IsSubType>, - ::RuntimeCall: IsSubType>, -{ - const IDENTIFIER: &'static str = "SubtensorTransactionExtension"; - - type Implicit = (); - type Val = Option; - type Pre = Option; - - fn weight(&self, _call: &::RuntimeCall) -> Weight { - // TODO: benchmark transaction extension - Weight::zero() - } - - fn validate( - &self, - origin: ::RuntimeOrigin, - call: &::RuntimeCall, - _info: &DispatchInfoOf<::RuntimeCall>, - _len: usize, - _self_implicit: Self::Implicit, - _inherited_implication: &impl Implication, - _source: TransactionSource, - ) -> ValidateResult::RuntimeCall> { - // Ensure the 
transaction is signed, else we just skip the extension. - let Some(who) = origin.as_system_origin_signer() else { - return Ok((Default::default(), None, origin)); - }; - - match call.is_sub_type() { - Some(Call::commit_weights { netuid, .. }) => { - if Self::check_weights_min_stake(who, *netuid) { - let priority: u64 = Self::get_priority_set_weights(who, *netuid); - let validity = Self::validity_ok(priority); - Ok((validity, Some(who.clone()), origin)) - } else { - Err(CustomTransactionError::StakeAmountTooLow.into()) - } - } - Some(Call::reveal_weights { - netuid, - uids, - values, - salt, - version_key, - }) => { - if Self::check_weights_min_stake(who, *netuid) { - let provided_hash = Pallet::::get_commit_hash( - who, - *netuid, - uids, - values, - salt, - *version_key, - ); - match Pallet::::find_commit_block_via_hash(provided_hash) { - Some(commit_block) => { - if Pallet::::is_reveal_block_range(*netuid, commit_block) { - let priority: u64 = Self::get_priority_set_weights(who, *netuid); - let validity = Self::validity_ok(priority); - Ok((validity, Some(who.clone()), origin)) - } else { - Err(CustomTransactionError::CommitBlockNotInRevealRange.into()) - } - } - None => Err(CustomTransactionError::CommitNotFound.into()), - } - } else { - Err(CustomTransactionError::StakeAmountTooLow.into()) - } - } - Some(Call::batch_reveal_weights { - netuid, - uids_list, - values_list, - salts_list, - version_keys, - }) => { - if Self::check_weights_min_stake(who, *netuid) { - let num_reveals = uids_list.len(); - if num_reveals == values_list.len() - && num_reveals == salts_list.len() - && num_reveals == version_keys.len() - { - let provided_hashs = (0..num_reveals) - .map(|i| { - Pallet::::get_commit_hash( - who, - *netuid, - uids_list.get(i).unwrap_or(&Vec::new()), - values_list.get(i).unwrap_or(&Vec::new()), - salts_list.get(i).unwrap_or(&Vec::new()), - *version_keys.get(i).unwrap_or(&0_u64), - ) - }) - .collect::>(); - - let batch_reveal_block = provided_hashs - .iter() - 
.filter_map(|hash| Pallet::::find_commit_block_via_hash(*hash)) - .collect::>(); - - if provided_hashs.len() == batch_reveal_block.len() { - if Pallet::::is_batch_reveal_block_range(*netuid, batch_reveal_block) - { - let priority: u64 = Self::get_priority_set_weights(who, *netuid); - let validity = Self::validity_ok(priority); - Ok((validity, Some(who.clone()), origin)) - } else { - Err(CustomTransactionError::CommitBlockNotInRevealRange.into()) - } - } else { - Err(CustomTransactionError::CommitNotFound.into()) - } - } else { - Err(CustomTransactionError::InputLengthsUnequal.into()) - } - } else { - Err(CustomTransactionError::StakeAmountTooLow.into()) - } - } - Some(Call::set_weights { netuid, .. }) => { - if Self::check_weights_min_stake(who, *netuid) { - let priority: u64 = Self::get_priority_set_weights(who, *netuid); - let validity = Self::validity_ok(priority); - Ok((validity, Some(who.clone()), origin)) - } else { - Err(CustomTransactionError::StakeAmountTooLow.into()) - } - } - Some(Call::set_tao_weights { netuid, hotkey, .. }) => { - if Self::check_weights_min_stake(hotkey, *netuid) { - let priority: u64 = Self::get_priority_set_weights(hotkey, *netuid); - let validity = Self::validity_ok(priority); - Ok((validity, Some(who.clone()), origin)) - } else { - Err(CustomTransactionError::StakeAmountTooLow.into()) - } - } - Some(Call::commit_crv3_weights { - netuid, - reveal_round, - .. - }) => { - if Self::check_weights_min_stake(who, *netuid) { - if *reveal_round < pallet_drand::LastStoredRound::::get() { - return Err(CustomTransactionError::InvalidRevealRound.into()); - } - let priority: u64 = Pallet::::get_priority_set_weights(who, *netuid); - let validity = Self::validity_ok(priority); - Ok((validity, Some(who.clone()), origin)) - } else { - Err(CustomTransactionError::StakeAmountTooLow.into()) - } - } - Some(Call::commit_timelocked_weights { - netuid, - reveal_round, - .. 
- }) => { - if Self::check_weights_min_stake(who, *netuid) { - if *reveal_round < pallet_drand::LastStoredRound::::get() { - return Err(CustomTransactionError::InvalidRevealRound.into()); - } - let priority: u64 = Pallet::::get_priority_set_weights(who, *netuid); - let validity = Self::validity_ok(priority); - Ok((validity, Some(who.clone()), origin)) - } else { - Err(CustomTransactionError::StakeAmountTooLow.into()) - } - } - Some(Call::add_stake { - hotkey, - netuid: _, - amount_staked, - }) => { - if ColdkeySwapScheduled::::contains_key(who) { - return Err(CustomTransactionError::ColdkeyInSwapSchedule.into()); - } - let validity = Self::validity_ok(Self::get_priority_staking( - who, - hotkey, - (*amount_staked).into(), - )); - Ok((validity, Some(who.clone()), origin)) - } - Some(Call::add_stake_limit { - hotkey, - netuid: _, - amount_staked, - .. - }) => { - if ColdkeySwapScheduled::::contains_key(who) { - return Err(CustomTransactionError::ColdkeyInSwapSchedule.into()); - } - - let validity = Self::validity_ok(Self::get_priority_staking( - who, - hotkey, - (*amount_staked).into(), - )); - Ok((validity, Some(who.clone()), origin)) - } - Some(Call::remove_stake { - hotkey, - netuid: _, - amount_unstaked, - }) => { - let validity = Self::validity_ok(Self::get_priority_staking( - who, - hotkey, - (*amount_unstaked).into(), - )); - Ok((validity, Some(who.clone()), origin)) - } - Some(Call::remove_stake_limit { - hotkey, - netuid: _, - amount_unstaked, - .. 
- }) => { - let validity = Self::validity_ok(Self::get_priority_staking( - who, - hotkey, - (*amount_unstaked).into(), - )); - Ok((validity, Some(who.clone()), origin)) - } - Some(Call::move_stake { - origin_hotkey, - destination_hotkey: _, - origin_netuid: _, - destination_netuid: _, - alpha_amount, - }) => { - if ColdkeySwapScheduled::::contains_key(who) { - return Err(CustomTransactionError::ColdkeyInSwapSchedule.into()); - } - let validity = Self::validity_ok(Self::get_priority_staking( - who, - origin_hotkey, - (*alpha_amount).into(), - )); - Ok((validity, Some(who.clone()), origin)) - } - Some(Call::transfer_stake { - destination_coldkey: _, - hotkey, - origin_netuid: _, - destination_netuid: _, - alpha_amount, - }) => { - if ColdkeySwapScheduled::::contains_key(who) { - return Err(CustomTransactionError::ColdkeyInSwapSchedule.into()); - } - let validity = Self::validity_ok(Self::get_priority_staking( - who, - hotkey, - (*alpha_amount).into(), - )); - Ok((validity, Some(who.clone()), origin)) - } - Some(Call::swap_stake { - hotkey, - origin_netuid: _, - destination_netuid: _, - alpha_amount, - }) => { - if ColdkeySwapScheduled::::contains_key(who) { - return Err(CustomTransactionError::ColdkeyInSwapSchedule.into()); - } - let validity = Self::validity_ok(Self::get_priority_staking( - who, - hotkey, - (*alpha_amount).into(), - )); - Ok((validity, Some(who.clone()), origin)) - } - Some(Call::swap_stake_limit { - hotkey, - origin_netuid: _, - destination_netuid: _, - alpha_amount, - .. - }) => { - if ColdkeySwapScheduled::::contains_key(who) { - return Err(CustomTransactionError::ColdkeyInSwapSchedule.into()); - } - - let validity = Self::validity_ok(Self::get_priority_staking( - who, - hotkey, - (*alpha_amount).into(), - )); - Ok((validity, Some(who.clone()), origin)) - } - Some(Call::register { netuid, .. } | Call::burned_register { netuid, .. 
}) => { - if ColdkeySwapScheduled::::contains_key(who) { - return Err(CustomTransactionError::ColdkeyInSwapSchedule.into()); - } - - let registrations_this_interval = - Pallet::::get_registrations_this_interval(*netuid); - let max_registrations_per_interval = - Pallet::::get_target_registrations_per_interval(*netuid); - if registrations_this_interval >= (max_registrations_per_interval.saturating_mul(3)) - { - // If the registration limit for the interval is exceeded, reject the transaction - return Err(CustomTransactionError::RateLimitExceeded.into()); - } - let validity = ValidTransaction { - priority: Self::get_priority_vanilla(), - ..Default::default() - }; - Ok((validity, Some(who.clone()), origin)) - } - Some(Call::register_network { .. }) => { - let validity = Self::validity_ok(Self::get_priority_vanilla()); - Ok((validity, Some(who.clone()), origin)) - } - Some(Call::dissolve_network { .. }) => { - if ColdkeySwapScheduled::::contains_key(who) { - Err(CustomTransactionError::ColdkeyInSwapSchedule.into()) - } else { - let validity = Self::validity_ok(Self::get_priority_vanilla()); - Ok((validity, Some(who.clone()), origin)) - } - } - Some(Call::serve_axon { - netuid, - version, - ip, - port, - ip_type, - protocol, - placeholder1, - placeholder2, - }) => { - // Fully validate the user input - Self::result_to_validity( - Pallet::::validate_serve_axon( - who, - *netuid, - *version, - *ip, - *port, - *ip_type, - *protocol, - *placeholder1, - *placeholder2, - ), - Self::get_priority_vanilla(), - ) - .map(|validity| (validity, Some(who.clone()), origin.clone())) - } - _ => { - if let Some( - BalancesCall::transfer_keep_alive { .. } - | BalancesCall::transfer_all { .. } - | BalancesCall::transfer_allow_death { .. 
}, - ) = call.is_sub_type() - { - if ColdkeySwapScheduled::::contains_key(who) { - return Err(CustomTransactionError::ColdkeyInSwapSchedule.into()); - } - } - let validity = Self::validity_ok(Self::get_priority_vanilla()); - Ok((validity, Some(who.clone()), origin)) - } - } - } - - // NOTE: Add later when we put in a pre and post dispatch step. - fn prepare( - self, - val: Self::Val, - _origin: &::RuntimeOrigin, - call: &::RuntimeCall, - _info: &DispatchInfoOf<::RuntimeCall>, - _len: usize, - ) -> Result { - // The transaction is not signed, given val is None, so we just skip this step. - if val.is_none() { - return Ok(None); - } - - match call.is_sub_type() { - Some(Call::add_stake { .. }) => Ok(Some(CallType::AddStake)), - Some(Call::remove_stake { .. }) => Ok(Some(CallType::RemoveStake)), - Some(Call::set_weights { .. }) => Ok(Some(CallType::SetWeights)), - Some(Call::commit_weights { .. }) => Ok(Some(CallType::SetWeights)), - Some(Call::reveal_weights { .. }) => Ok(Some(CallType::SetWeights)), - Some(Call::register { .. }) => Ok(Some(CallType::Register)), - Some(Call::serve_axon { .. }) => Ok(Some(CallType::Serve)), - Some(Call::serve_axon_tls { .. }) => Ok(Some(CallType::Serve)), - Some(Call::register_network { .. }) => Ok(Some(CallType::RegisterNetwork)), - _ => Ok(Some(CallType::Other)), - } - } - - fn post_dispatch( - pre: Self::Pre, - _info: &DispatchInfoOf<::RuntimeCall>, - _post_info: &mut PostDispatchInfoOf<::RuntimeCall>, - _len: usize, - _result: &dispatch::DispatchResult, - ) -> Result<(), TransactionValidityError> { - // Skip this step if the transaction is not signed, meaning pre is None. - let call_type = match pre { - Some(call_type) => call_type, - None => return Ok(()), - }; - - match call_type { - CallType::SetWeights => { - log::debug!("Not Implemented!"); - } - CallType::AddStake => { - log::debug!("Not Implemented! Need to add potential transaction fees here."); - } - CallType::RemoveStake => { - log::debug!("Not Implemented! 
Need to add potential transaction fees here."); - } - CallType::Register => { - log::debug!("Not Implemented!"); - } - _ => { - log::debug!("Not Implemented!"); - } - } - - Ok(()) - } -} - use sp_std::vec; // TODO: unravel this rats nest, for some reason rustc thinks this is unused even though it's diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index aff5e8cd7a..35439479ab 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -279,7 +279,7 @@ mod dispatches { /// - Attempting to commit when the user has more than the allowed limit of unrevealed commits. /// #[pallet::call_index(99)] - #[pallet::weight((Weight::from_parts(62_300_000, 0) + #[pallet::weight((Weight::from_parts(77_750_000, 0) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn commit_crv3_weights( @@ -693,7 +693,7 @@ mod dispatches { /// - Attempting to set prometheus information withing the rate limit min. 
/// #[pallet::call_index(4)] - #[pallet::weight((Weight::from_parts(36_090_000, 0) + #[pallet::weight((Weight::from_parts(33_010_000, 0) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] pub fn serve_axon( @@ -939,7 +939,7 @@ mod dispatches { #[pallet::call_index(70)] #[pallet::weight((Weight::from_parts(275_300_000, 0) .saturating_add(T::DbWeight::get().reads(47)) - .saturating_add(T::DbWeight::get().writes(37)), DispatchClass::Operational, Pays::No))] + .saturating_add(T::DbWeight::get().writes(37)), DispatchClass::Normal, Pays::No))] pub fn swap_hotkey( origin: OriginFor, hotkey: T::AccountId, @@ -1195,7 +1195,7 @@ mod dispatches { #[pallet::call_index(59)] #[pallet::weight((Weight::from_parts(235_400_000, 0) .saturating_add(T::DbWeight::get().reads(36)) - .saturating_add(T::DbWeight::get().writes(52)), DispatchClass::Operational, Pays::No))] + .saturating_add(T::DbWeight::get().writes(52)), DispatchClass::Normal, Pays::No))] pub fn register_network(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_register_network(origin, &hotkey, 1, None) } @@ -1283,7 +1283,7 @@ mod dispatches { #[pallet::call_index(67)] #[pallet::weight((Weight::from_parts(119_000_000, 0) .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(31)), DispatchClass::Operational, Pays::Yes))] + .saturating_add(T::DbWeight::get().writes(31)), DispatchClass::Normal, Pays::Yes))] pub fn set_children( origin: T::RuntimeOrigin, hotkey: T::AccountId, @@ -1329,7 +1329,7 @@ mod dispatches { #[pallet::call_index(73)] #[pallet::weight((Weight::from_parts(37_830_000, 0) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Operational, Pays::Yes))] + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::Yes))] pub fn schedule_swap_coldkey( origin: OriginFor, new_coldkey: T::AccountId, @@ -1404,7 +1404,7 
@@ mod dispatches { #[pallet::call_index(74)] #[pallet::weight((Weight::from_parts(119_000_000, 0) .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(31)), DispatchClass::Operational, Pays::Yes))] + .saturating_add(T::DbWeight::get().writes(31)), DispatchClass::Normal, Pays::Yes))] pub fn schedule_dissolve_network( _origin: OriginFor, _netuid: NetUid, @@ -1540,7 +1540,7 @@ mod dispatches { #[pallet::call_index(79)] #[pallet::weight((Weight::from_parts(234_200_000, 0) .saturating_add(T::DbWeight::get().reads(35)) - .saturating_add(T::DbWeight::get().writes(51)), DispatchClass::Operational, Pays::No))] + .saturating_add(T::DbWeight::get().writes(51)), DispatchClass::Normal, Pays::No))] pub fn register_network_with_identity( origin: OriginFor, hotkey: T::AccountId, @@ -1577,7 +1577,7 @@ mod dispatches { #[pallet::call_index(83)] #[pallet::weight((Weight::from_parts(28_830_000, 0) .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(0)), DispatchClass::Operational, Pays::Yes))] + .saturating_add(T::DbWeight::get().writes(0)), DispatchClass::Normal, Pays::Yes))] pub fn unstake_all(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_unstake_all(origin, hotkey) } @@ -1639,7 +1639,7 @@ mod dispatches { #[pallet::call_index(85)] #[pallet::weight((Weight::from_parts(164_300_000, 0) .saturating_add(T::DbWeight::get().reads(15_u64)) - .saturating_add(T::DbWeight::get().writes(7_u64)), DispatchClass::Operational, Pays::Yes))] + .saturating_add(T::DbWeight::get().writes(7_u64)), DispatchClass::Normal, Pays::Yes))] pub fn move_stake( origin: T::RuntimeOrigin, origin_hotkey: T::AccountId, @@ -1682,7 +1682,7 @@ mod dispatches { #[pallet::call_index(86)] #[pallet::weight((Weight::from_parts(160_300_000, 0) .saturating_add(T::DbWeight::get().reads(13_u64)) - .saturating_add(T::DbWeight::get().writes(6_u64)), DispatchClass::Operational, Pays::Yes))] + 
.saturating_add(T::DbWeight::get().writes(6_u64)), DispatchClass::Normal, Pays::Yes))] pub fn transfer_stake( origin: T::RuntimeOrigin, destination_coldkey: T::AccountId, @@ -1725,7 +1725,7 @@ mod dispatches { Weight::from_parts(351_300_000, 0) .saturating_add(T::DbWeight::get().reads(37_u64)) .saturating_add(T::DbWeight::get().writes(22_u64)), - DispatchClass::Operational, + DispatchClass::Normal, Pays::Yes ))] pub fn swap_stake( @@ -1898,7 +1898,7 @@ mod dispatches { Weight::from_parts(411_500_000, 0) .saturating_add(T::DbWeight::get().reads(37_u64)) .saturating_add(T::DbWeight::get().writes(22_u64)), - DispatchClass::Operational, + DispatchClass::Normal, Pays::Yes ))] pub fn swap_stake_limit( @@ -1932,7 +1932,7 @@ mod dispatches { #[pallet::call_index(91)] #[pallet::weight(( Weight::from_parts(27_150_000, 0).saturating_add(T::DbWeight::get().reads_writes(3, 3)), - DispatchClass::Operational, + DispatchClass::Normal, Pays::Yes ))] pub fn try_associate_hotkey( @@ -1957,7 +1957,7 @@ mod dispatches { #[pallet::call_index(92)] #[pallet::weight(( Weight::from_parts(29_780_000, 0).saturating_add(T::DbWeight::get().reads_writes(4, 2)), - DispatchClass::Operational, + DispatchClass::Normal, Pays::Yes ))] pub fn start_call(origin: T::RuntimeOrigin, netuid: NetUid) -> DispatchResult { @@ -1995,7 +1995,7 @@ mod dispatches { #[pallet::call_index(93)] #[pallet::weight(( Weight::from_parts(3_000_000, 0).saturating_add(T::DbWeight::get().reads_writes(2, 1)), - DispatchClass::Operational, + DispatchClass::Normal, Pays::Yes ))] pub fn associate_evm_key( @@ -2021,7 +2021,7 @@ mod dispatches { #[pallet::call_index(101)] #[pallet::weight(( Weight::from_parts(92_600_000, 0).saturating_add(T::DbWeight::get().reads_writes(7, 4)), - DispatchClass::Operational, + DispatchClass::Normal, Pays::Yes ))] pub fn recycle_alpha( @@ -2046,7 +2046,7 @@ mod dispatches { #[pallet::call_index(102)] #[pallet::weight(( Weight::from_parts(90_880_000, 0).saturating_add(T::DbWeight::get().reads_writes(7, 
3)), - DispatchClass::Operational, + DispatchClass::Normal, Pays::Yes ))] pub fn burn_alpha( @@ -2201,7 +2201,7 @@ mod dispatches { /// * commit_reveal_version (`u16`): /// - The client (bittensor-drand) version #[pallet::call_index(113)] - #[pallet::weight((Weight::from_parts(65_780_000, 0) + #[pallet::weight((Weight::from_parts(64_530_000, 0) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn commit_timelocked_weights( diff --git a/pallets/subtensor/src/macros/errors.rs b/pallets/subtensor/src/macros/errors.rs index 71750e7534..e6d9c231d1 100644 --- a/pallets/subtensor/src/macros/errors.rs +++ b/pallets/subtensor/src/macros/errors.rs @@ -244,5 +244,11 @@ mod errors { SymbolAlreadyInUse, /// Incorrect commit-reveal version. IncorrectCommitRevealVersion, + /// Reveal period is too large. + RevealPeriodTooLarge, + /// Reveal period is too small. + RevealPeriodTooSmall, + /// Generic error for out-of-range parameter value + InvalidValue, } } diff --git a/pallets/subtensor/src/macros/events.rs b/pallets/subtensor/src/macros/events.rs index d959ccd6e0..2fab5ecdb4 100644 --- a/pallets/subtensor/src/macros/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -399,5 +399,19 @@ mod events { /// /// - **version**: The required version. CommitRevealVersionSet(u16), + + /// Timelocked weights have been successfully committed. + /// + /// - **who**: The account ID of the user committing the weights. + /// - **netuid**: The network identifier. + /// - **commit_hash**: The hash representing the committed weights. + /// - **reveal_round**: The round at which weights can be revealed. + TimelockedWeightsCommitted(T::AccountId, NetUid, H256, u64), + + /// Timelocked Weights have been successfully revealed. + /// + /// - **netuid**: The network identifier. + /// - **who**: The account ID of the user revealing the weights. 
+ TimelockedWeightsRevealed(NetUid, T::AccountId), } } diff --git a/pallets/subtensor/src/macros/hooks.rs b/pallets/subtensor/src/macros/hooks.rs index 7083c24e5d..4bf7a6b115 100644 --- a/pallets/subtensor/src/macros/hooks.rs +++ b/pallets/subtensor/src/macros/hooks.rs @@ -129,7 +129,13 @@ mod hooks { // Migrate subnet symbols to fix the shift after subnet 81 .saturating_add(migrations::migrate_subnet_symbols::migrate_subnet_symbols::()) // Migrate CRV3 add commit_block - .saturating_add(migrations::migrate_crv3_commits_add_block::migrate_crv3_commits_add_block::()); + .saturating_add(migrations::migrate_crv3_commits_add_block::migrate_crv3_commits_add_block::()) + // Migrate Commit-Reveal Settings + .saturating_add(migrations::migrate_commit_reveal_settings::migrate_commit_reveal_settings::()) + //Migrate CRV3 to TimelockedCommits + .saturating_add(migrations::migrate_crv3_v2_to_timelocked::migrate_crv3_v2_to_timelocked::()) + // Migrate to fix root counters + .saturating_add(migrations::migrate_fix_root_tao_and_alpha_in::migrate_fix_root_tao_and_alpha_in::()); weight } diff --git a/pallets/subtensor/src/migrations/migrate_commit_reveal_settings.rs b/pallets/subtensor/src/migrations/migrate_commit_reveal_settings.rs new file mode 100644 index 0000000000..54df469600 --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_commit_reveal_settings.rs @@ -0,0 +1,64 @@ +use alloc::string::String; + +use crate::MIN_COMMIT_REVEAL_PEROIDS; +use frame_support::IterableStorageMap; +use frame_support::{traits::Get, weights::Weight}; +use subtensor_runtime_common::NetUid; + +use super::*; + +pub fn migrate_commit_reveal_settings() -> Weight { + let migration_name = b"migrate_commit_reveal_settings".to_vec(); + + // Initialize the weight with one read operation. + let mut weight = T::DbWeight::get().reads(1); + + // Check if the migration has already run + if HasMigrationRun::::get(&migration_name) { + log::info!( + "Migration '{:?}' has already run. 
Skipping.", + String::from_utf8_lossy(&migration_name) + ); + return weight; + } + log::info!( + "Running migration '{}'", + String::from_utf8_lossy(&migration_name) + ); + + let netuids: Vec = as IterableStorageMap>::iter() + .map(|(netuid, _)| netuid) + .collect(); + weight = weight.saturating_add( + T::DbWeight::get() + .reads(netuids.len() as u64) + .saturating_mul(2), + ); + + for netuid in netuids.iter() { + if netuid.is_root() { + continue; + } + if !CommitRevealWeightsEnabled::::get(*netuid) { + CommitRevealWeightsEnabled::::insert(*netuid, true); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + } + + if RevealPeriodEpochs::::get(*netuid) == 0 { + RevealPeriodEpochs::::insert(*netuid, MIN_COMMIT_REVEAL_PEROIDS); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + } + } + + // Mark the migration as completed + HasMigrationRun::::insert(&migration_name, true); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!( + "Migration '{:?}' completed.", + String::from_utf8_lossy(&migration_name) + ); + + // Return the migration weight. + weight +} diff --git a/pallets/subtensor/src/migrations/migrate_crv3_v2_to_timelocked.rs b/pallets/subtensor/src/migrations/migrate_crv3_v2_to_timelocked.rs new file mode 100644 index 0000000000..7ae5a2529c --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_crv3_v2_to_timelocked.rs @@ -0,0 +1,36 @@ +use super::*; +use frame_support::{traits::Get, weights::Weight}; +use log; +use scale_info::prelude::string::String; +use sp_std::vec::Vec; + +// --------------- Migration ------------------------------------------ +/// Moves every (netuid, epoch) queue from `CRV3WeightCommitsV2` into +/// `TimelockedWeightCommits`. Identical key/value layout → pure move. 
+pub fn migrate_crv3_v2_to_timelocked() -> Weight { + let mig_name: Vec = b"crv3_v2_to_timelocked_v1".to_vec(); + let mut total_weight = T::DbWeight::get().reads(1); + + if HasMigrationRun::::get(&mig_name) { + log::info!( + "Migration '{}' already executed - skipping", + String::from_utf8_lossy(&mig_name) + ); + return total_weight; + } + log::info!("Running migration '{}'", String::from_utf8_lossy(&mig_name)); + + for (netuid, epoch, old_q) in CRV3WeightCommitsV2::::drain() { + total_weight = total_weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); + TimelockedWeightCommits::::insert(netuid, epoch, old_q); + } + + HasMigrationRun::::insert(&mig_name, true); + total_weight = total_weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!( + "Migration '{}' completed", + String::from_utf8_lossy(&mig_name) + ); + total_weight +} diff --git a/pallets/subtensor/src/migrations/migrate_fix_root_tao_and_alpha_in.rs b/pallets/subtensor/src/migrations/migrate_fix_root_tao_and_alpha_in.rs new file mode 100644 index 0000000000..f20d71c302 --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_fix_root_tao_and_alpha_in.rs @@ -0,0 +1,57 @@ +use super::migrate_init_total_issuance::migrate_init_total_issuance; +use super::*; +use alloc::string::String; + +pub fn migrate_fix_root_tao_and_alpha_in() -> Weight { + let migration_name = b"migrate_fix_root_tao_and_alpha_in".to_vec(); + let mut weight = T::DbWeight::get().reads(1); + + if HasMigrationRun::::get(&migration_name) { + log::info!( + "Migration '{:?}' has already run. 
Skipping.", + String::from_utf8_lossy(&migration_name) + ); + return weight; + } + + log::info!( + "Running migration '{}'", + String::from_utf8_lossy(&migration_name) + ); + + // Update counters (unstaked more than stake) + let total_staked = 2_109_761_275_100_688_u64; + let total_unstaked = 2_179_659_173_851_658_u64; + let reserve_diff = total_unstaked.saturating_sub(total_staked); + let volume_diff = (total_unstaked as u128).saturating_add(total_staked as u128); + SubnetTAO::::mutate(NetUid::ROOT, |amount| { + *amount = amount.saturating_sub(TaoCurrency::from(reserve_diff)); + }); + SubnetAlphaIn::::mutate(NetUid::ROOT, |amount| { + *amount = amount.saturating_add(AlphaCurrency::from(reserve_diff)); + }); + SubnetAlphaOut::::mutate(NetUid::ROOT, |amount| { + *amount = amount.saturating_sub(AlphaCurrency::from(reserve_diff)); + }); + SubnetVolume::::mutate(NetUid::ROOT, |amount| { + *amount = amount.saturating_add(volume_diff); + }); + TotalStake::::mutate(|amount| { + *amount = amount.saturating_sub(TaoCurrency::from(reserve_diff)); + }); + + weight = weight.saturating_add(T::DbWeight::get().writes(5)); + + HasMigrationRun::::insert(&migration_name, true); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!( + target: "runtime", + "Migration '{}' completed successfully.", + String::from_utf8_lossy(&migration_name) + ); + + // We need to run the total issuance migration to update the total issuance + // when the root subnet TAO has been updated. 
+ migrate_init_total_issuance::().saturating_add(weight) +} diff --git a/pallets/subtensor/src/migrations/mod.rs b/pallets/subtensor/src/migrations/mod.rs index cdf142357b..65dda18125 100644 --- a/pallets/subtensor/src/migrations/mod.rs +++ b/pallets/subtensor/src/migrations/mod.rs @@ -6,14 +6,17 @@ use sp_io::hashing::twox_128; use sp_io::storage::clear_prefix; pub mod migrate_chain_identity; pub mod migrate_coldkey_swap_scheduled; +pub mod migrate_commit_reveal_settings; pub mod migrate_commit_reveal_v2; pub mod migrate_create_root_network; pub mod migrate_crv3_commits_add_block; +pub mod migrate_crv3_v2_to_timelocked; pub mod migrate_delete_subnet_21; pub mod migrate_delete_subnet_3; pub mod migrate_disable_commit_reveal; pub mod migrate_fix_is_network_member; pub mod migrate_fix_root_subnet_tao; +pub mod migrate_fix_root_tao_and_alpha_in; pub mod migrate_identities_v2; pub mod migrate_init_total_issuance; pub mod migrate_orphaned_storage_items; diff --git a/pallets/subtensor/src/subnets/weights.rs b/pallets/subtensor/src/subnets/weights.rs index 935bbe8667..737329ccb7 100644 --- a/pallets/subtensor/src/subnets/weights.rs +++ b/pallets/subtensor/src/subnets/weights.rs @@ -1,6 +1,8 @@ use super::*; use crate::epoch::math::*; +use crate::{Error, MAX_COMMIT_REVEAL_PEROIDS, MIN_COMMIT_REVEAL_PEROIDS}; use codec::Compact; +use frame_support::dispatch::DispatchResult; use safe_math::*; use sp_core::{ConstU32, H256}; use sp_runtime::{ @@ -9,7 +11,6 @@ use sp_runtime::{ }; use sp_std::{collections::vec_deque::VecDeque, vec}; use subtensor_runtime_common::NetUid; - impl Pallet { /// ---- The implementation for committing weight hashes. /// @@ -212,8 +213,8 @@ impl Pallet { /// 4. Rejects the call when the hotkey already has ≥ 10 unrevealed commits in /// the current epoch. /// 5. Appends `(hotkey, commit_block, commit, reveal_round)` to - /// `CRV3WeightCommitsV2[netuid][epoch]`. - /// 6. Emits `CRV3WeightsCommitted` with the Blake2 hash of `commit`. 
+ /// `TimelockedWeightCommits[netuid][epoch]`. + /// 6. Emits `TimelockedWeightsCommitted` with the Blake2 hash of `commit`. /// 7. Updates `LastUpdateForUid` so subsequent rate-limit checks include this /// commit. /// @@ -225,7 +226,7 @@ impl Pallet { /// * `TooManyUnrevealedCommits` – Caller already has 10 unrevealed commits. /// /// # Events - /// * `CRV3WeightsCommitted(hotkey, netuid, commit_hash)` – Fired after the commit is successfully stored. + /// * `TimelockedWeightsCommitted(hotkey, netuid, commit_hash, reveal_round)` – Fired after the commit is successfully stored. pub fn do_commit_timelocked_weights( origin: T::RuntimeOrigin, netuid: NetUid, @@ -271,7 +272,7 @@ impl Pallet { false => Self::get_epoch_index(netuid, cur_block), }; - CRV3WeightCommitsV2::::try_mutate(netuid, cur_epoch, |commits| -> DispatchResult { + TimelockedWeightCommits::::try_mutate(netuid, cur_epoch, |commits| -> DispatchResult { // 7. Verify that the number of unrevealed commits is within the allowed limit. let unrevealed_commits_for_who = commits @@ -289,10 +290,11 @@ impl Pallet { commits.push_back((who.clone(), cur_block, commit, reveal_round)); // 9. Emit the WeightsCommitted event - Self::deposit_event(Event::CRV3WeightsCommitted( + Self::deposit_event(Event::TimelockedWeightsCommitted( who.clone(), netuid, commit_hash, + reveal_round, )); // 10. Update the last commit block for the hotkey's UID. 
@@ -1061,9 +1063,21 @@ impl Pallet { (first_reveal_block, last_reveal_block) } - pub fn set_reveal_period(netuid: NetUid, reveal_period: u64) { + pub fn set_reveal_period(netuid: NetUid, reveal_period: u64) -> DispatchResult { + ensure!( + reveal_period <= MAX_COMMIT_REVEAL_PEROIDS, + Error::::RevealPeriodTooLarge + ); + + ensure!( + reveal_period >= MIN_COMMIT_REVEAL_PEROIDS, + Error::::RevealPeriodTooSmall + ); + RevealPeriodEpochs::::insert(netuid, reveal_period); + Self::deposit_event(Event::CommitRevealPeriodsSet(netuid, reveal_period)); + Ok(()) } pub fn get_reveal_period(netuid: NetUid) -> u64 { RevealPeriodEpochs::::get(netuid) diff --git a/pallets/subtensor/src/tests/children.rs b/pallets/subtensor/src/tests/children.rs index 41d25c8aea..2ffc21ea1d 100644 --- a/pallets/subtensor/src/tests/children.rs +++ b/pallets/subtensor/src/tests/children.rs @@ -2637,7 +2637,8 @@ fn test_childkey_set_weights_single_parent() { new_test_ext(1).execute_with(|| { let subnet_owner_coldkey = U256::from(1001); let subnet_owner_hotkey = U256::from(1002); - let netuid = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); + let netuid = + add_dynamic_network_disable_commit_reveal(&subnet_owner_hotkey, &subnet_owner_coldkey); Tempo::::insert(netuid, 1); // Define hotkeys @@ -2746,7 +2747,8 @@ fn test_set_weights_no_parent() { new_test_ext(1).execute_with(|| { let subnet_owner_coldkey = U256::from(1001); let subnet_owner_hotkey = U256::from(1002); - let netuid = add_dynamic_network(&subnet_owner_hotkey, &subnet_owner_coldkey); + let netuid = + add_dynamic_network_disable_commit_reveal(&subnet_owner_hotkey, &subnet_owner_coldkey); let hotkey: U256 = U256::from(2); let spare_hk: U256 = U256::from(3); @@ -3568,7 +3570,7 @@ fn test_dividend_distribution_with_children() { fn test_dynamic_parent_child_relationships() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(1); - add_network(netuid, 1, 0); + add_network_disable_commit_reveal(netuid, 1, 0); // Define 
hotkeys and coldkeys let parent = U256::from(1); diff --git a/pallets/subtensor/src/tests/coinbase.rs b/pallets/subtensor/src/tests/coinbase.rs index f74565c44e..927f98e2fa 100644 --- a/pallets/subtensor/src/tests/coinbase.rs +++ b/pallets/subtensor/src/tests/coinbase.rs @@ -1759,6 +1759,7 @@ fn test_incentive_to_subnet_owner_is_burned() { let other_ck = U256::from(2); let other_hk = U256::from(3); + Owner::::insert(other_hk, other_ck); let netuid = add_dynamic_network(&subnet_owner_hk, &subnet_owner_ck); @@ -1798,6 +1799,195 @@ fn test_incentive_to_subnet_owner_is_burned() { }); } +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::coinbase::test_incentive_to_subnet_owners_hotkey_is_burned --exact --show-output --nocapture +#[test] +fn test_incentive_to_subnet_owners_hotkey_is_burned() { + new_test_ext(1).execute_with(|| { + let subnet_owner_ck = U256::from(0); + let subnet_owner_hk = U256::from(1); + + // Other hk owned by owner + let other_hk = U256::from(3); + Owner::::insert(other_hk, subnet_owner_ck); + OwnedHotkeys::::insert(subnet_owner_ck, vec![subnet_owner_hk, other_hk]); + + let netuid = add_dynamic_network(&subnet_owner_hk, &subnet_owner_ck); + Uids::::insert(netuid, other_hk, 1); + + // Set the burn key limit to 2 + ImmuneOwnerUidsLimit::::insert(netuid, 2); + + let pending_tao: u64 = 1_000_000_000; + let pending_alpha = AlphaCurrency::ZERO; // None to valis + let owner_cut = AlphaCurrency::ZERO; + let mut incentives: BTreeMap = BTreeMap::new(); + + // Give incentive to other_hk + incentives.insert(other_hk, 10_000_000.into()); + + // Give incentives to subnet_owner_hk + incentives.insert(subnet_owner_hk, 10_000_000.into()); + + // Verify stake before + let subnet_owner_stake_before = + SubtensorModule::get_stake_for_hotkey_on_subnet(&subnet_owner_hk, netuid); + assert_eq!(subnet_owner_stake_before, 0.into()); + let other_stake_before = SubtensorModule::get_stake_for_hotkey_on_subnet(&other_hk, netuid); + 
assert_eq!(other_stake_before, 0.into()); + + // Distribute dividends and incentives + SubtensorModule::distribute_dividends_and_incentives( + netuid, + owner_cut, + incentives, + BTreeMap::new(), + BTreeMap::new(), + ); + + // Verify stake after + let subnet_owner_stake_after = + SubtensorModule::get_stake_for_hotkey_on_subnet(&subnet_owner_hk, netuid); + assert_eq!(subnet_owner_stake_after, 0.into()); + let other_stake_after = SubtensorModule::get_stake_for_hotkey_on_subnet(&other_hk, netuid); + assert_eq!(other_stake_after, 0.into()); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::coinbase::test_incentive_to_subnet_owners_hotkey_is_burned_with_limit --exact --show-output --nocapture +#[test] +fn test_incentive_to_subnet_owners_hotkey_is_burned_with_limit() { + new_test_ext(1).execute_with(|| { + let subnet_owner_ck = U256::from(0); + let subnet_owner_hk = U256::from(1); + + // Other hk owned by owner + let other_hk = U256::from(3); + Owner::::insert(other_hk, subnet_owner_ck); + OwnedHotkeys::::insert(subnet_owner_ck, vec![subnet_owner_hk, other_hk]); + + let netuid = add_dynamic_network(&subnet_owner_hk, &subnet_owner_ck); + Uids::::insert(netuid, other_hk, 1); + + // Set the burn key limit to 1 - testing the limits + ImmuneOwnerUidsLimit::::insert(netuid, 1); + + let pending_tao: u64 = 1_000_000_000; + let pending_alpha = AlphaCurrency::ZERO; // None to valis + let owner_cut = AlphaCurrency::ZERO; + let mut incentives: BTreeMap = BTreeMap::new(); + + // Give incentive to other_hk + incentives.insert(other_hk, 10_000_000.into()); + + // Give incentives to subnet_owner_hk + incentives.insert(subnet_owner_hk, 10_000_000.into()); + + // Verify stake before + let subnet_owner_stake_before = + SubtensorModule::get_stake_for_hotkey_on_subnet(&subnet_owner_hk, netuid); + assert_eq!(subnet_owner_stake_before, 0.into()); + let other_stake_before = SubtensorModule::get_stake_for_hotkey_on_subnet(&other_hk, netuid); + 
assert_eq!(other_stake_before, 0.into()); + + // Distribute dividends and incentives + SubtensorModule::distribute_dividends_and_incentives( + netuid, + owner_cut, + incentives, + BTreeMap::new(), + BTreeMap::new(), + ); + + // Verify stake after + let subnet_owner_stake_after = + SubtensorModule::get_stake_for_hotkey_on_subnet(&subnet_owner_hk, netuid); + assert_eq!(subnet_owner_stake_after, 0.into()); + let other_stake_after = SubtensorModule::get_stake_for_hotkey_on_subnet(&other_hk, netuid); + + // Testing the limit - should not be burned + assert!(other_stake_after > 0.into()); + }); +} + +// Test that if number of sn owner hotkeys is greater than ImmuneOwnerUidsLimit, then the ones with +// higher BlockAtRegistration are used to burn +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::coinbase::test_burn_key_sorting --exact --show-output --nocapture +#[test] +fn test_burn_key_sorting() { + new_test_ext(1).execute_with(|| { + let subnet_owner_ck = U256::from(0); + let subnet_owner_hk = U256::from(1); + + // Other hk owned by owner + let other_hk_1 = U256::from(3); + let other_hk_2 = U256::from(4); + let other_hk_3 = U256::from(5); + Owner::::insert(other_hk_1, subnet_owner_ck); + Owner::::insert(other_hk_2, subnet_owner_ck); + Owner::::insert(other_hk_3, subnet_owner_ck); + OwnedHotkeys::::insert( + subnet_owner_ck, + vec![subnet_owner_hk, other_hk_1, other_hk_2, other_hk_3], + ); + + let netuid = add_dynamic_network(&subnet_owner_hk, &subnet_owner_ck); + + // Set block of registration and UIDs for other hotkeys + // HK1 has block of registration 2 + // HK2 and HK3 have the same block of registration 1, so they are sorted by UID + // Set HK2 UID = 3 and HK3 UID = 2 so that HK3 is burned and HK2 is not + // Summary: HK1 and HK3 should be burned, HK2 should not be. + // Let's test it now. 
+ BlockAtRegistration::::insert(netuid, 1, 2); + BlockAtRegistration::::insert(netuid, 3, 1); + BlockAtRegistration::::insert(netuid, 2, 1); + Uids::::insert(netuid, other_hk_1, 1); + Uids::::insert(netuid, other_hk_2, 3); + Uids::::insert(netuid, other_hk_3, 2); + + // Set the burn key limit to 3 because we also have sn owner + ImmuneOwnerUidsLimit::::insert(netuid, 3); + + let pending_tao: u64 = 1_000_000_000; + let pending_alpha = AlphaCurrency::ZERO; // None to valis + let owner_cut = AlphaCurrency::ZERO; + let mut incentives: BTreeMap = BTreeMap::new(); + + // Give incentive to hotkeys + incentives.insert(other_hk_1, 10_000_000.into()); + incentives.insert(other_hk_2, 10_000_000.into()); + incentives.insert(other_hk_3, 10_000_000.into()); + + // Give incentives to subnet_owner_hk + incentives.insert(subnet_owner_hk, 10_000_000.into()); + + // Distribute dividends and incentives + SubtensorModule::distribute_dividends_and_incentives( + netuid, + owner_cut, + incentives, + BTreeMap::new(), + BTreeMap::new(), + ); + + // SN owner is burned + let subnet_owner_stake_after = + SubtensorModule::get_stake_for_hotkey_on_subnet(&subnet_owner_hk, netuid); + assert_eq!(subnet_owner_stake_after, 0.into()); + + // Testing the limits - HK1 and HK3 should be burned, HK2 should not be burned + let other_stake_after_1 = + SubtensorModule::get_stake_for_hotkey_on_subnet(&other_hk_1, netuid); + let other_stake_after_2 = + SubtensorModule::get_stake_for_hotkey_on_subnet(&other_hk_2, netuid); + let other_stake_after_3 = + SubtensorModule::get_stake_for_hotkey_on_subnet(&other_hk_3, netuid); + assert_eq!(other_stake_after_1, 0.into()); + assert!(other_stake_after_2 > 0.into()); + assert_eq!(other_stake_after_3, 0.into()); + }); +} + #[test] fn test_calculate_dividend_distribution_totals() { new_test_ext(1).execute_with(|| { @@ -2254,7 +2444,7 @@ fn test_drain_pending_emission_no_miners_all_drained() { #[test] fn test_drain_pending_emission_zero_emission() { 
new_test_ext(1).execute_with(|| { - let netuid = add_dynamic_network(&U256::from(1), &U256::from(2)); + let netuid = add_dynamic_network_disable_commit_reveal(&U256::from(1), &U256::from(2)); let hotkey = U256::from(3); let coldkey = U256::from(4); let miner_hk = U256::from(5); @@ -2332,6 +2522,7 @@ fn test_run_coinbase_not_started() { let sn_owner_ck = U256::from(8); add_network_without_emission_block(netuid, tempo, 0); + SubtensorModule::set_commit_reveal_weights_enabled(netuid, false); assert_eq!(FirstEmissionBlockNumber::::get(netuid), None); SubnetOwner::::insert(netuid, sn_owner_ck); @@ -2420,6 +2611,7 @@ fn test_run_coinbase_not_started_start_after() { let sn_owner_ck = U256::from(8); add_network_without_emission_block(netuid, tempo, 0); + SubtensorModule::set_commit_reveal_weights_enabled(netuid, false); assert_eq!(FirstEmissionBlockNumber::::get(netuid), None); SubnetOwner::::insert(netuid, sn_owner_ck); diff --git a/pallets/subtensor/src/tests/epoch.rs b/pallets/subtensor/src/tests/epoch.rs index bdf675648b..25b4c48781 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -159,7 +159,7 @@ fn init_run_epochs( bonds_penalty: u16, ) { // === Create the network - add_network(netuid, u16::MAX - 1, 0); // set higher tempo to avoid built-in epoch, then manual epoch instead + add_network_disable_commit_reveal(netuid, u16::MAX - 1, 0); // set higher tempo to avoid built-in epoch, then manual epoch instead // === Set bonds penalty SubtensorModule::set_bonds_penalty(netuid, bonds_penalty); @@ -560,7 +560,7 @@ fn test_1_graph() { let hotkey = U256::from(0); let uid: u16 = 0; let stake_amount: u64 = 1_000_000_000; - add_network(netuid, u16::MAX - 1, 0); // set higher tempo to avoid built-in epoch, then manual epoch instead + add_network_disable_commit_reveal(netuid, u16::MAX - 1, 0); // set higher tempo to avoid built-in epoch, then manual epoch instead SubtensorModule::set_max_allowed_uids(netuid, 1); 
SubtensorModule::add_balance_to_coldkey_account( &coldkey, @@ -630,7 +630,7 @@ fn test_10_graph() { // each with 1 stake and self weights. let n: usize = 10; let netuid = NetUid::from(1); - add_network(netuid, u16::MAX - 1, 0); // set higher tempo to avoid built-in epoch, then manual epoch instead + add_network_disable_commit_reveal(netuid, u16::MAX - 1, 0); // set higher tempo to avoid built-in epoch, then manual epoch instead SubtensorModule::set_max_allowed_uids(netuid, n as u16); for i in 0..10 { add_node(netuid, U256::from(i), U256::from(i), i as u16, 1) @@ -1004,7 +1004,7 @@ fn test_bonds() { let max_stake: u64 = 4; let stakes: Vec = vec![1, 2, 3, 4, 0, 0, 0, 0]; let block_number = System::block_number(); - add_network(netuid, tempo, 0); + add_network_disable_commit_reveal(netuid, tempo, 0); SubtensorModule::set_max_allowed_uids( netuid, n ); assert_eq!(SubtensorModule::get_max_allowed_uids(netuid), n); SubtensorModule::set_max_registrations_per_block( netuid, n ); @@ -1351,7 +1351,7 @@ fn test_active_stake() { let tempo: u16 = 1; let block_number: u64 = System::block_number(); let stake: u64 = 1; - add_network(netuid, tempo, 0); + add_network_disable_commit_reveal(netuid, tempo, 0); SubtensorModule::set_max_allowed_uids(netuid, n); assert_eq!(SubtensorModule::get_max_allowed_uids(netuid), n); SubtensorModule::set_max_registrations_per_block(netuid, n); @@ -1567,7 +1567,7 @@ fn test_outdated_weights() { let tempo: u16 = 0; let mut block_number: u64 = System::block_number(); let stake: u64 = 1; - add_network(netuid, tempo, 0); + add_network_disable_commit_reveal(netuid, tempo, 0); SubtensorModule::set_max_allowed_uids(netuid, n); SubtensorModule::set_weights_set_rate_limit(netuid, 0); SubtensorModule::set_max_registrations_per_block(netuid, n); @@ -1757,7 +1757,7 @@ fn test_zero_weights() { let tempo: u16 = u16::MAX - 1; // high tempo to skip automatic epochs in on_initialize, use manual epochs instead let mut block_number: u64 = 0; let stake: u64 = 1; - 
add_network(netuid, tempo, 0); + add_network_disable_commit_reveal(netuid, tempo, 0); SubtensorModule::set_max_allowed_uids(netuid, n); SubtensorModule::set_weights_set_rate_limit(netuid, 0); SubtensorModule::set_max_registrations_per_block(netuid, n); @@ -1960,7 +1960,7 @@ fn test_deregistered_miner_bonds() { let high_tempo: u16 = u16::MAX - 1; // high tempo to skip automatic epochs in on_initialize, use manual epochs instead let stake: u64 = 1; - add_network(netuid, high_tempo, 0); + add_network_disable_commit_reveal(netuid, high_tempo, 0); SubtensorModule::set_max_allowed_uids(netuid, n); SubtensorModule::set_weights_set_rate_limit(netuid, 0); SubtensorModule::set_max_registrations_per_block(netuid, n); @@ -2669,7 +2669,7 @@ pub fn assert_approx_eq(left: I32F32, right: I32F32, epsilon: I32F32) { fn setup_yuma_3_scenario(netuid: NetUid, n: u16, sparse: bool, max_stake: u64, stakes: Vec) { let block_number = System::block_number(); let tempo: u16 = 1; // high tempo to skip automatic epochs in on_initialize, use manual epochs instead - add_network(netuid, tempo, 0); + add_network_disable_commit_reveal(netuid, tempo, 0); SubtensorModule::set_max_allowed_uids(netuid, n); assert_eq!(SubtensorModule::get_max_allowed_uids(netuid), n); @@ -3567,7 +3567,7 @@ fn test_epoch_masks_incoming_to_sniped_uid_prevents_inheritance() { let reveal: u64 = 2; add_network(netuid, tempo, 0); - SubtensorModule::set_reveal_period(netuid, reveal); + assert_ok!(SubtensorModule::set_reveal_period(netuid, reveal)); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); SubtensorModule::set_max_allowed_uids(netuid, 3); SubtensorModule::set_target_registrations_per_interval(netuid, u16::MAX); @@ -3709,7 +3709,7 @@ fn test_epoch_does_not_mask_outside_window_but_masks_inside() { let reveal: u16 = 2; add_network(netuid, tempo, 0); - SubtensorModule::set_reveal_period(netuid, reveal as u64); + assert_ok!(SubtensorModule::set_reveal_period(netuid, reveal as u64)); 
SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); SubtensorModule::set_target_registrations_per_interval(netuid, u16::MAX); diff --git a/pallets/subtensor/src/tests/migration.rs b/pallets/subtensor/src/tests/migration.rs index e62dacf89e..e93aab7669 100644 --- a/pallets/subtensor/src/tests/migration.rs +++ b/pallets/subtensor/src/tests/migration.rs @@ -64,7 +64,7 @@ fn test_migration_transfer_nets_to_foundation() { add_network(11.into(), 1, 0); log::info!("{:?}", SubtensorModule::get_subnet_owner(1.into())); - //assert_eq!(SubtensorModule::::get_subnet_owner(1), ); + //assert_eq!(SubtensorModule::::get_subnet_owner(1), ); // Run the migration to transfer ownership let hex = @@ -862,6 +862,45 @@ fn test_migrate_fix_root_subnet_tao() { }); } +// cargo test --package pallet-subtensor --lib -- tests::migration::test_migrate_fix_root_tao_and_alpha_in --exact --show-output +#[test] +fn test_migrate_fix_root_tao_and_alpha_in() { + new_test_ext(1).execute_with(|| { + const MIGRATION_NAME: &str = "migrate_fix_root_tao_and_alpha_in"; + + // Set counters initially + let initial_value = 1_000_000_000_000; + SubnetTAO::::insert(NetUid::ROOT, TaoCurrency::from(initial_value)); + SubnetAlphaIn::::insert(NetUid::ROOT, AlphaCurrency::from(initial_value)); + SubnetAlphaOut::::insert(NetUid::ROOT, AlphaCurrency::from(initial_value)); + SubnetVolume::::insert(NetUid::ROOT, initial_value as u128); + TotalStake::::set(TaoCurrency::from(initial_value)); + + assert!( + !HasMigrationRun::::get(MIGRATION_NAME.as_bytes().to_vec()), + "Migration should not have run yet" + ); + + // Run the migration + let weight = + crate::migrations::migrate_fix_root_tao_and_alpha_in::migrate_fix_root_tao_and_alpha_in::(); + + // Verify the migration ran correctly + assert!( + HasMigrationRun::::get(MIGRATION_NAME.as_bytes().to_vec()), + "Migration should be marked as run" + ); + assert!(!weight.is_zero(), "Migration weight should be non-zero"); + + // Verify counters have changed + 
assert!(SubnetTAO::::get(NetUid::ROOT) != initial_value.into()); + assert!(SubnetAlphaIn::::get(NetUid::ROOT) != initial_value.into()); + assert!(SubnetAlphaOut::::get(NetUid::ROOT) != initial_value.into()); + assert!(SubnetVolume::::get(NetUid::ROOT) != initial_value as u128); + assert!(TotalStake::::get() != initial_value.into()); + }); +} + #[test] fn test_migrate_subnet_symbols() { new_test_ext(1).execute_with(|| { @@ -1132,3 +1171,207 @@ fn test_migrate_disable_commit_reveal() { ); }); } + +#[test] +fn test_migrate_commit_reveal_settings() { + new_test_ext(1).execute_with(|| { + const MIGRATION_NAME: &str = "migrate_commit_reveal_settings"; + + // Set up some networks first + let netuid1: u16 = 1; + let netuid2: u16 = 2; + // Add networks to simulate existing networks + add_network(netuid1.into(), 1, 0); + add_network(netuid2.into(), 1, 0); + + // Ensure the storage items use default values initially (but aren't explicitly set) + // Since these are ValueQuery storage items, they return defaults even when not set + assert_eq!(RevealPeriodEpochs::::get(NetUid::from(netuid1)), 1u64); + assert_eq!(RevealPeriodEpochs::::get(NetUid::from(netuid2)), 1u64); + assert!(CommitRevealWeightsEnabled::::get(NetUid::from(netuid1))); + assert!(CommitRevealWeightsEnabled::::get(NetUid::from(netuid2))); + + // Check migration hasn't run + assert!(!HasMigrationRun::::get(MIGRATION_NAME.as_bytes().to_vec())); + + // Run migration + let weight = crate::migrations::migrate_commit_reveal_settings::migrate_commit_reveal_settings::(); + + // Check migration has been marked as run + assert!(HasMigrationRun::::get(MIGRATION_NAME.as_bytes().to_vec())); + + // Verify RevealPeriodEpochs was set correctly + assert_eq!(RevealPeriodEpochs::::get(NetUid::from(netuid1)), 1u64); + assert_eq!(RevealPeriodEpochs::::get(NetUid::from(netuid2)), 1u64); + + // Verify CommitRevealWeightsEnabled was set correctly + assert!(CommitRevealWeightsEnabled::::get(NetUid::from(netuid1))); + 
assert!(CommitRevealWeightsEnabled::::get(NetUid::from(netuid2))); + }); +} + +#[test] +fn test_migrate_commit_reveal_settings_already_run() { + new_test_ext(1).execute_with(|| { + const MIGRATION_NAME: &str = "migrate_commit_reveal_settings"; + // Mark migration as already run + HasMigrationRun::::insert(MIGRATION_NAME.as_bytes().to_vec(), true); + + // Run migration + let weight = crate::migrations::migrate_commit_reveal_settings::migrate_commit_reveal_settings::(); + + // Should only have read weight for checking migration status + let expected_weight = ::DbWeight::get().reads(1); + assert_eq!(weight, expected_weight); + }); +} + +#[test] +fn test_migrate_commit_reveal_settings_no_networks() { + new_test_ext(1).execute_with(|| { + const MIGRATION_NAME: &str = "migrate_commit_reveal_settings"; + + // Check migration hasn't run + assert!(!HasMigrationRun::::get(MIGRATION_NAME.as_bytes().to_vec())); + + // Run migration + let weight = crate::migrations::migrate_commit_reveal_settings::migrate_commit_reveal_settings::(); + + // Check migration has been marked as run + assert!(HasMigrationRun::::get(MIGRATION_NAME.as_bytes().to_vec())); + + // Check that weight calculation is correct (no networks, so no additional reads/writes) + // 1 read for migration check + 0 reads for networks + 0 writes for storage + 1 write for migration flag + let expected_weight = ::DbWeight::get().reads(1) + ::DbWeight::get().writes(1); + assert_eq!(weight, expected_weight); + }); +} + +#[test] +fn test_migrate_commit_reveal_settings_multiple_networks() { + new_test_ext(1).execute_with(|| { + const MIGRATION_NAME: &str = "migrate_commit_reveal_settings"; + + // Set up multiple networks + let netuids = vec![1u16, 2u16, 3u16, 10u16, 42u16]; + for netuid in &netuids { + add_network((*netuid).into(), 1, 0); + } + + // Run migration + let weight = crate::migrations::migrate_commit_reveal_settings::migrate_commit_reveal_settings::(); + + // Verify all networks have correct settings + for netuid 
in &netuids { + assert_eq!(RevealPeriodEpochs::::get(NetUid::from(*netuid)), 1u64); + assert!(CommitRevealWeightsEnabled::::get(NetUid::from(*netuid))); + } + + // Check migration has been marked as run + assert!(HasMigrationRun::::get(MIGRATION_NAME.as_bytes().to_vec())); + }); +} + +#[test] +fn test_migrate_commit_reveal_settings_values_access() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + add_network(netuid.into(), 1, 0); + + // Run migration + crate::migrations::migrate_commit_reveal_settings::migrate_commit_reveal_settings::(); + + // Test that we can access the values using the pallet functions + assert_eq!( + SubtensorModule::get_reveal_period(NetUid::from(netuid)), + 1u64 + ); + + // Test direct storage access + assert_eq!(RevealPeriodEpochs::::get(NetUid::from(netuid)), 1u64); + assert!(CommitRevealWeightsEnabled::::get(NetUid::from( + netuid + ))); + }); +} + +#[test] +fn test_migrate_crv3_v2_to_timelocked() { + new_test_ext(1).execute_with(|| { + // ------------------------------ + // 0. Constants / helpers + // ------------------------------ + const MIG_NAME: &[u8] = b"crv3_v2_to_timelocked_v1"; + let netuid = NetUid::from(99); + let epoch: u64 = 7; + + // ------------------------------ + // 1. 
Simulate OLD storage (4‑tuple; V2 layout) + // ------------------------------ + let who: U256 = U256::from(0xdeadbeef_u64); + let commit_block: u64 = 12345; + let ciphertext: BoundedVec> = + vec![1u8, 2, 3].try_into().unwrap(); + let round: RoundNumber = 9; + + let old_queue: VecDeque<_> = + VecDeque::from(vec![(who, commit_block, ciphertext.clone(), round)]); + + // Insert under the deprecated alias + CRV3WeightCommitsV2::::insert(netuid, epoch, old_queue.clone()); + + // Sanity: entry decodes under old alias + assert_eq!( + CRV3WeightCommitsV2::::get(netuid, epoch), + old_queue, + "pre-migration: old queue should be present" + ); + + // Destination should be empty pre-migration + assert!( + TimelockedWeightCommits::::get(netuid, epoch).is_empty(), + "pre-migration: destination should be empty" + ); + + assert!( + !HasMigrationRun::::get(MIG_NAME.to_vec()), + "migration flag should be false before run" + ); + + // ------------------------------ + // 2. Run migration + // ------------------------------ + let w = crate::migrations::migrate_crv3_v2_to_timelocked::migrate_crv3_v2_to_timelocked::< + Test, + >(); + assert!(!w.is_zero(), "weight must be non-zero"); + + // ------------------------------ + // 3. 
Verify results + // ------------------------------ + assert!( + HasMigrationRun::::get(MIG_NAME.to_vec()), + "migration flag not set" + ); + + // Old storage must be empty (drained) + assert!( + CRV3WeightCommitsV2::::get(netuid, epoch).is_empty(), + "old queue should have been drained" + ); + + // New storage must match exactly + let new_q = TimelockedWeightCommits::::get(netuid, epoch); + assert_eq!( + new_q, old_queue, + "migrated queue must exactly match the old queue" + ); + + // Verify the front element matches what we inserted + let (who2, commit_block2, cipher2, round2) = new_q.front().cloned().unwrap(); + assert_eq!(who2, who); + assert_eq!(commit_block2, commit_block); + assert_eq!(cipher2, ciphertext); + assert_eq!(round2, round); + }); +} diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs index fd7dcbab45..8aa6fe6cdd 100644 --- a/pallets/subtensor/src/tests/mock.rs +++ b/pallets/subtensor/src/tests/mock.rs @@ -870,6 +870,19 @@ pub fn add_dynamic_network_without_emission_block(hotkey: &U256, coldkey: &U256) netuid } +#[allow(dead_code)] +pub fn add_dynamic_network_disable_commit_reveal(hotkey: &U256, coldkey: &U256) -> NetUid { + let netuid = add_dynamic_network(hotkey, coldkey); + SubtensorModule::set_commit_reveal_weights_enabled(netuid, false); + netuid +} + +#[allow(dead_code)] +pub fn add_network_disable_commit_reveal(netuid: NetUid, tempo: u16, _modality: u16) { + add_network(netuid, tempo, _modality); + SubtensorModule::set_commit_reveal_weights_enabled(netuid, false); +} + // Helper function to set up a neuron with stake #[allow(dead_code)] pub fn setup_neuron_with_stake(netuid: NetUid, hotkey: U256, coldkey: U256, stake: TaoCurrency) { diff --git a/pallets/subtensor/src/tests/registration.rs b/pallets/subtensor/src/tests/registration.rs index 67c3fd3c4d..7ccb591620 100644 --- a/pallets/subtensor/src/tests/registration.rs +++ b/pallets/subtensor/src/tests/registration.rs @@ -12,7 +12,8 @@ use 
subtensor_runtime_common::{AlphaCurrency, Currency as CurrencyT, NetUid}; use super::mock; use super::mock::*; -use crate::{AxonInfoOf, CustomTransactionError, Error, SubtensorTransactionExtension}; +use crate::transaction_extension::SubtensorTransactionExtension; +use crate::{AxonInfoOf, CustomTransactionError, Error}; /******************************************** subscribing::subscribe() tests diff --git a/pallets/subtensor/src/tests/serving.rs b/pallets/subtensor/src/tests/serving.rs index c874831fcb..b4173a8ebb 100644 --- a/pallets/subtensor/src/tests/serving.rs +++ b/pallets/subtensor/src/tests/serving.rs @@ -2,6 +2,7 @@ use super::mock::*; use crate::Error; +use crate::transaction_extension::SubtensorTransactionExtension; use crate::*; use frame_support::assert_noop; use frame_support::pallet_prelude::Weight; @@ -11,7 +12,7 @@ use frame_support::{ }; use frame_system::{Config, RawOrigin}; use sp_core::U256; -use sp_runtime::traits::TxBaseImplication; +use sp_runtime::traits::{DispatchInfoOf, TransactionExtension, TxBaseImplication}; mod test { use std::net::{Ipv4Addr, Ipv6Addr}; @@ -1402,10 +1403,10 @@ fn test_serve_axon_validate() { placeholder2, }); - let info: crate::DispatchInfo = - crate::DispatchInfoOf::<::RuntimeCall>::default(); + let info: DispatchInfo = + DispatchInfoOf::<::RuntimeCall>::default(); - let extension = crate::SubtensorTransactionExtension::::new(); + let extension = SubtensorTransactionExtension::::new(); // Submit to the signed extension validate function let result_bad = extension.validate( RawOrigin::Signed(hotkey).into(), diff --git a/pallets/subtensor/src/tests/staking.rs b/pallets/subtensor/src/tests/staking.rs index 9fa9ffa2c0..8345d24fff 100644 --- a/pallets/subtensor/src/tests/staking.rs +++ b/pallets/subtensor/src/tests/staking.rs @@ -10,6 +10,7 @@ use pallet_subtensor_swap::Call as SwapCall; use pallet_subtensor_swap::tick::TickIndex; use safe_math::FixedExt; use sp_core::{Get, H256, U256}; +use 
sp_runtime::traits::Dispatchable; use substrate_fixed::traits::FromFixed; use substrate_fixed::types::{I96F32, I110F18, U64F64, U96F32}; use subtensor_runtime_common::{AlphaCurrency, Currency as CurrencyT, NetUid, TaoCurrency}; diff --git a/pallets/subtensor/src/tests/swap_coldkey.rs b/pallets/subtensor/src/tests/swap_coldkey.rs index 1f1b9b113b..54bdc253ce 100644 --- a/pallets/subtensor/src/tests/swap_coldkey.rs +++ b/pallets/subtensor/src/tests/swap_coldkey.rs @@ -2,6 +2,7 @@ use approx::assert_abs_diff_eq; use codec::Encode; +use frame_support::dispatch::DispatchInfo; use frame_support::error::BadOrigin; use frame_support::traits::OnInitialize; use frame_support::traits::schedule::DispatchTime; @@ -10,6 +11,7 @@ use frame_support::weights::Weight; use frame_support::{assert_err, assert_noop, assert_ok}; use frame_system::{Config, RawOrigin}; use sp_core::{Get, H256, U256}; +use sp_runtime::traits::{DispatchInfoOf, TransactionExtension}; use sp_runtime::{DispatchError, traits::TxBaseImplication}; use substrate_fixed::types::U96F32; use subtensor_runtime_common::{AlphaCurrency, Currency, SubnetInfo, TaoCurrency}; @@ -17,9 +19,9 @@ use subtensor_swap_interface::{OrderType, SwapHandler}; use super::mock; use super::mock::*; +use crate::transaction_extension::SubtensorTransactionExtension; use crate::*; use crate::{Call, ColdkeySwapScheduleDuration, Error}; - // // SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_coldkey -- test_swap_total_hotkey_coldkey_stakes_this_interval --exact --nocapture // #[test] // fn test_swap_total_hotkey_coldkey_stakes_this_interval() { @@ -2245,9 +2247,9 @@ fn test_coldkey_in_swap_schedule_prevents_funds_usage() { assert!(ColdkeySwapScheduled::::contains_key(who)); // Setup the extension - let info: crate::DispatchInfo = - crate::DispatchInfoOf::<::RuntimeCall>::default(); - let extension = crate::SubtensorTransactionExtension::::new(); + let info: DispatchInfo = + DispatchInfoOf::<::RuntimeCall>::default(); + let extension = 
SubtensorTransactionExtension::::new(); // Try each call @@ -2567,9 +2569,9 @@ fn test_coldkey_in_swap_schedule_prevents_critical_calls() { assert!(ColdkeySwapScheduled::::contains_key(who)); // Setup the extension - let info: crate::DispatchInfo = - crate::DispatchInfoOf::<::RuntimeCall>::default(); - let extension = crate::SubtensorTransactionExtension::::new(); + let info: DispatchInfo = + DispatchInfoOf::<::RuntimeCall>::default(); + let extension = SubtensorTransactionExtension::::new(); // Try each call diff --git a/pallets/subtensor/src/tests/weights.rs b/pallets/subtensor/src/tests/weights.rs index 7befc1c668..4784c6b00e 100644 --- a/pallets/subtensor/src/tests/weights.rs +++ b/pallets/subtensor/src/tests/weights.rs @@ -2,6 +2,7 @@ use ark_serialize::CanonicalDeserialize; use ark_serialize::CanonicalSerialize; +use frame_support::dispatch::DispatchInfo; use frame_support::{ assert_err, assert_ok, dispatch::{DispatchClass, DispatchResult, GetDispatchInfo, Pays}, @@ -13,6 +14,7 @@ use scale_info::prelude::collections::HashMap; use sha2::Digest; use sp_core::Encode; use sp_core::{Get, H256, U256}; +use sp_runtime::traits::{DispatchInfoOf, TransactionExtension}; use sp_runtime::{ BoundedVec, DispatchError, traits::{BlakeTwo256, ConstU32, Hash, TxBaseImplication}, @@ -32,8 +34,8 @@ use w3f_bls::EngineBLS; use super::mock; use super::mock::*; use crate::coinbase::reveal_commits::{LegacyWeightsTlockPayload, WeightsTlockPayload}; +use crate::transaction_extension::SubtensorTransactionExtension; use crate::*; - /*************************** pub fn set_weights() tests *****************************/ @@ -100,10 +102,10 @@ fn test_set_rootweights_validate() { // Verify stake is less than minimum assert!(SubtensorModule::get_total_stake_for_hotkey(&hotkey) < min_stake); - let info: crate::DispatchInfo = - crate::DispatchInfoOf::<::RuntimeCall>::default(); + let info: DispatchInfo = + DispatchInfoOf::<::RuntimeCall>::default(); - let extension = 
crate::SubtensorTransactionExtension::::new(); + let extension = SubtensorTransactionExtension::::new(); // Submit to the signed extension validate function let result_no_stake = extension.validate( RawOrigin::Signed(who).into(), @@ -250,8 +252,8 @@ fn test_commit_weights_validate() { SubtensorModule::set_stake_threshold(min_stake_with_slippage.to_u64() + 1); // Submit to the signed extension validate function - let info = crate::DispatchInfoOf::<::RuntimeCall>::default(); - let extension = crate::SubtensorTransactionExtension::::new(); + let info = DispatchInfoOf::<::RuntimeCall>::default(); + let extension = SubtensorTransactionExtension::::new(); // Submit to the signed extension validate function let result_no_stake = extension.validate( RawOrigin::Signed(who).into(), @@ -371,10 +373,10 @@ fn test_set_weights_validate() { // Verify stake is less than minimum assert!(SubtensorModule::get_total_stake_for_hotkey(&hotkey) < min_stake); - let info: crate::DispatchInfo = - crate::DispatchInfoOf::<::RuntimeCall>::default(); + let info: DispatchInfo = + DispatchInfoOf::<::RuntimeCall>::default(); - let extension = crate::SubtensorTransactionExtension::::new(); + let extension = SubtensorTransactionExtension::::new(); // Submit to the signed extension validate function let result_no_stake = extension.validate( RawOrigin::Signed(who).into(), @@ -472,10 +474,10 @@ fn test_reveal_weights_validate() { // Verify stake is less than minimum assert!(SubtensorModule::get_total_stake_for_hotkey(&hotkey) < min_stake); - let info: crate::DispatchInfo = - crate::DispatchInfoOf::<::RuntimeCall>::default(); + let info: DispatchInfo = + DispatchInfoOf::<::RuntimeCall>::default(); - let extension = crate::SubtensorTransactionExtension::::new(); + let extension = SubtensorTransactionExtension::::new(); // Submit to the signed extension validate function let result_no_stake = extension.validate( RawOrigin::Signed(who).into(), @@ -654,9 +656,9 @@ fn test_batch_reveal_weights_validate() { 
// Set the minimum stake SubtensorModule::set_stake_threshold(min_stake.into()); - let info: crate::DispatchInfo = - crate::DispatchInfoOf::<::RuntimeCall>::default(); - let extension = crate::SubtensorTransactionExtension::::new(); + let info: DispatchInfo = + DispatchInfoOf::<::RuntimeCall>::default(); + let extension = SubtensorTransactionExtension::::new(); // Test 1: StakeAmountTooLow - Verify stake is less than minimum assert!(SubtensorModule::get_total_stake_for_hotkey(&hotkey) < min_stake); @@ -812,6 +814,7 @@ fn test_set_weights_is_root_error() { let weights = vec![1]; let version_key: u64 = 0; let hotkey = U256::from(1); + SubtensorModule::set_commit_reveal_weights_enabled(NetUid::ROOT, false); assert_err!( SubtensorModule::set_weights( @@ -834,7 +837,7 @@ fn test_weights_err_no_validator_permit() { let hotkey_account_id = U256::from(55); let netuid = NetUid::from(1); let tempo: u16 = 13; - add_network(netuid, tempo, 0); + add_network_disable_commit_reveal(netuid, tempo, 0); SubtensorModule::set_min_allowed_weights(netuid, 0); SubtensorModule::set_max_allowed_uids(netuid, 3); SubtensorModule::set_max_weight_limit(netuid, u16::MAX); @@ -882,7 +885,7 @@ fn test_set_stake_threshold_failed() { let hotkey = U256::from(0); let coldkey = U256::from(0); - add_network(netuid, 1, 0); + add_network_disable_commit_reveal(netuid, 1, 0); register_ok_neuron(netuid, hotkey, coldkey, 2143124); SubtensorModule::set_stake_threshold(20_000_000_000_000); SubtensorModule::add_balance_to_coldkey_account(&hotkey, u64::MAX); @@ -944,8 +947,8 @@ fn test_weights_version_key() { let netuid0 = NetUid::from(1); let netuid1 = NetUid::from(2); - add_network(netuid0, 1, 0); - add_network(netuid1, 1, 0); + add_network_disable_commit_reveal(netuid0, 1, 0); + add_network_disable_commit_reveal(netuid1, 1, 0); register_ok_neuron(netuid0, hotkey, coldkey, 2143124); register_ok_neuron(netuid1, hotkey, coldkey, 3124124); @@ -1020,7 +1023,7 @@ fn test_weights_err_setting_weights_too_fast() { let 
hotkey_account_id = U256::from(55); let netuid = NetUid::from(1); let tempo: u16 = 13; - add_network(netuid, tempo, 0); + add_network_disable_commit_reveal(netuid, tempo, 0); SubtensorModule::set_min_allowed_weights(netuid, 0); SubtensorModule::set_max_allowed_uids(netuid, 3); SubtensorModule::set_max_weight_limit(netuid, u16::MAX); @@ -1173,7 +1176,7 @@ fn test_weights_err_max_weight_limit() { // Add network. let netuid = NetUid::from(1); let tempo: u16 = 100; - add_network(netuid, tempo, 0); + add_network_disable_commit_reveal(netuid, tempo, 0); // Set params. SubtensorModule::set_max_allowed_uids(netuid, 5); @@ -1262,6 +1265,7 @@ fn test_no_signature() { new_test_ext(0).execute_with(|| { let uids: Vec = vec![]; let values: Vec = vec![]; + SubtensorModule::set_commit_reveal_weights_enabled(1.into(), false); let result = SubtensorModule::set_weights(RuntimeOrigin::none(), 1.into(), uids, values, 0); assert_eq!(result, Err(DispatchError::BadOrigin)); }); @@ -1346,7 +1350,7 @@ fn test_set_weight_not_enough_values() { let tempo: u16 = 13; let salt: Vec = vec![1, 2, 3, 4, 5, 6, 7, 8]; let account_id = U256::from(1); - add_network(netuid, tempo, 0); + add_network_disable_commit_reveal(netuid, tempo, 0); register_ok_neuron(netuid, account_id, U256::from(2), 100000); let neuron_uid: u16 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &U256::from(1)) @@ -1409,7 +1413,7 @@ fn test_set_weight_too_many_uids() { new_test_ext(0).execute_with(|| { let netuid = NetUid::from(1); let tempo: u16 = 13; - add_network(netuid, tempo, 0); + add_network_disable_commit_reveal(netuid, tempo, 0); register_ok_neuron(1.into(), U256::from(1), U256::from(2), 100_000); let neuron_uid: u16 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &U256::from(1)) @@ -3194,10 +3198,10 @@ fn test_reveal_at_exact_epoch() { 1.into(), ); - let reveal_periods: Vec = vec![0, 1, 2, 7, 40, 86, 100]; + let reveal_periods: Vec = vec![1, 2, 7, 40, 86, 100]; for &reveal_period in &reveal_periods { - 
SubtensorModule::set_reveal_period(netuid, reveal_period); + assert_ok!(SubtensorModule::set_reveal_period(netuid, reveal_period)); let salt: Vec = vec![42; 8]; let commit_hash: H256 = BlakeTwo256::hash_of(&( @@ -3333,7 +3337,7 @@ fn test_tempo_and_reveal_period_change_during_commit_reveal_process() { let initial_tempo: u16 = 100; let initial_reveal_period: u64 = 1; add_network(netuid, initial_tempo, 0); - SubtensorModule::set_reveal_period(netuid, initial_reveal_period); + assert_ok!(SubtensorModule::set_reveal_period(netuid, initial_reveal_period)); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); SubtensorModule::set_weights_set_rate_limit(netuid, 0); @@ -3377,7 +3381,7 @@ fn test_tempo_and_reveal_period_change_during_commit_reveal_process() { let new_tempo: u16 = 50; let new_reveal_period: u64 = 2; SubtensorModule::set_tempo(netuid, new_tempo); - SubtensorModule::set_reveal_period(netuid, new_reveal_period); + assert_ok!(SubtensorModule::set_reveal_period(netuid, new_reveal_period)); log::info!( "Changed tempo to {new_tempo} and reveal period to {new_reveal_period}" ); @@ -3431,10 +3435,11 @@ fn test_tempo_and_reveal_period_change_during_commit_reveal_process() { let new_tempo_after_reveal: u16 = 200; let new_reveal_period_after_reveal: u64 = 1; SubtensorModule::set_tempo(netuid, new_tempo_after_reveal); - SubtensorModule::set_reveal_period(netuid, new_reveal_period_after_reveal); - log::info!( - "Changed tempo to {new_tempo_after_reveal} and reveal period to {new_reveal_period_after_reveal} after reveal" - ); + assert_ok!(SubtensorModule::set_reveal_period( + netuid, + new_reveal_period_after_reveal + )); + log::info!("Changed tempo to {new_tempo_after_reveal} and reveal period to {new_reveal_period_after_reveal} after reveal"); // Step 5: Commit again let new_salt: Vec = vec![43; 8]; @@ -3621,7 +3626,7 @@ fn test_reveal_at_exact_block() { let tempo: u16 = 360; System::set_block_number(0); - add_network(netuid, tempo, 0); + 
add_network_disable_commit_reveal(netuid, tempo, 0); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); SubtensorModule::set_weights_set_rate_limit(netuid, 0); @@ -3631,24 +3636,10 @@ fn test_reveal_at_exact_block() { SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); - let reveal_periods: Vec = vec![ - 0, - 1, - 2, - 5, - 19, - 21, - 30, - 77, - 104, - 833, - 1999, - 36398, - u32::MAX as u64, - ]; + let reveal_periods: Vec = vec![1, 2, 5, 19, 21, 30, 77]; for &reveal_period in &reveal_periods { - SubtensorModule::set_reveal_period(netuid, reveal_period); + assert_ok!(SubtensorModule::set_reveal_period(netuid, reveal_period)); // Step 1: Commit weights let salt: Vec = vec![42 + (reveal_period % 100) as u16; 8]; @@ -4401,7 +4392,7 @@ fn test_highly_concurrent_commits_and_reveals_with_multiple_hotkeys() { add_network(netuid, initial_tempo, 0); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); SubtensorModule::set_weights_set_rate_limit(netuid, 0); - SubtensorModule::set_reveal_period(netuid, initial_reveal_period); + assert_ok!(SubtensorModule::set_reveal_period(netuid, initial_reveal_period)); SubtensorModule::set_max_registrations_per_block(netuid, u16::MAX); SubtensorModule::set_target_registrations_per_interval(netuid, u16::MAX); @@ -4512,7 +4503,7 @@ fn test_highly_concurrent_commits_and_reveals_with_multiple_hotkeys() { // ==== Modify Network Parameters During Commits ==== SubtensorModule::set_tempo(netuid, 150); - SubtensorModule::set_reveal_period(netuid, 7); + assert_ok!(SubtensorModule::set_reveal_period(netuid, 7)); log::info!("Changed tempo to 150 and reveal_period to 7 during commits."); step_epochs(3, netuid); @@ -4558,7 +4549,7 @@ fn test_highly_concurrent_commits_and_reveals_with_multiple_hotkeys() { // ==== Change Network Parameters Again ==== SubtensorModule::set_tempo(netuid, 200); - SubtensorModule::set_reveal_period(netuid, 10); + 
assert_ok!(SubtensorModule::set_reveal_period(netuid, 10)); log::info!("Changed tempo to 200 and reveal_period to 10 after initial reveals."); step_epochs(10, netuid); @@ -5011,7 +5002,7 @@ fn test_reveal_crv3_commits_success() { SubtensorModule::set_stake_threshold(0); SubtensorModule::set_weights_set_rate_limit(netuid, 0); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); - SubtensorModule::set_reveal_period(netuid, 3); + assert_ok!(SubtensorModule::set_reveal_period(netuid, 3)); let neuron_uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey1) .expect("Failed to get neuron UID for hotkey1"); @@ -5163,7 +5154,7 @@ fn test_reveal_crv3_commits_cannot_reveal_after_reveal_epoch() { register_ok_neuron(netuid, hotkey2, U256::from(4), 100_000); SubtensorModule::set_weights_set_rate_limit(netuid, 0); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); - SubtensorModule::set_reveal_period(netuid, 3); + assert_ok!(SubtensorModule::set_reveal_period(netuid, 3)); let neuron_uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey1) .expect("Failed to get neuron UID for hotkey1"); @@ -5303,7 +5294,7 @@ fn test_do_commit_crv3_weights_success() { let cur_epoch = SubtensorModule::get_epoch_index(netuid, SubtensorModule::get_current_block_as_u64()); - let commits = CRV3WeightCommitsV2::::get(netuid, cur_epoch); + let commits = TimelockedWeightCommits::::get(netuid, cur_epoch); assert_eq!(commits.len(), 1); assert_eq!(commits[0].0, hotkey); assert_eq!(commits[0].2, commit_data); @@ -5619,7 +5610,7 @@ fn test_reveal_crv3_commits_multiple_commits_some_fail_some_succeed() { register_ok_neuron(netuid, hotkey1, U256::from(3), 100_000); register_ok_neuron(netuid, hotkey2, U256::from(4), 100_000); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); - SubtensorModule::set_reveal_period(netuid, 1); + assert_ok!(SubtensorModule::set_reveal_period(netuid, 1)); SubtensorModule::set_weights_set_rate_limit(netuid, 0); // 
Prepare a valid payload for hotkey1 @@ -5742,7 +5733,7 @@ fn test_reveal_crv3_commits_do_set_weights_failure() { add_network(netuid, 5, 0); register_ok_neuron(netuid, hotkey, U256::from(2), 100_000); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); - SubtensorModule::set_reveal_period(netuid, 3); + assert_ok!(SubtensorModule::set_reveal_period(netuid, 3)); SubtensorModule::set_weights_set_rate_limit(netuid, 0); // Prepare payload with mismatched uids and values lengths @@ -5828,7 +5819,7 @@ fn test_reveal_crv3_commits_payload_decoding_failure() { add_network(netuid, 5, 0); register_ok_neuron(netuid, hotkey, U256::from(2), 100_000); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); - SubtensorModule::set_reveal_period(netuid, 3); + assert_ok!(SubtensorModule::set_reveal_period(netuid, 3)); SubtensorModule::set_weights_set_rate_limit(netuid, 0); let invalid_payload = vec![0u8; 10]; // Not a valid encoding of WeightsTlockPayload @@ -5906,7 +5897,7 @@ fn test_reveal_crv3_commits_signature_deserialization_failure() { add_network(netuid, 5, 0); register_ok_neuron(netuid, hotkey, U256::from(2), 100_000); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); - SubtensorModule::set_reveal_period(netuid, 3); + assert_ok!(SubtensorModule::set_reveal_period(netuid, 3)); SubtensorModule::set_weights_set_rate_limit(netuid, 0); let version_key = SubtensorModule::get_weights_version_key(netuid); @@ -6052,7 +6043,7 @@ fn test_reveal_crv3_commits_with_incorrect_identity_message() { add_network(netuid, 5, 0); register_ok_neuron(netuid, hotkey, U256::from(2), 100_000); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); - SubtensorModule::set_reveal_period(netuid, 1); + assert_ok!(SubtensorModule::set_reveal_period(netuid, 1)); SubtensorModule::set_weights_set_rate_limit(netuid, 0); // Prepare a valid payload but use incorrect identity message during encryption @@ -6140,7 +6131,7 @@ fn 
test_multiple_commits_by_same_hotkey_within_limit() { add_network(netuid, 5, 0); register_ok_neuron(netuid, hotkey, U256::from(2), 100_000); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); - SubtensorModule::set_reveal_period(netuid, 1); + assert_ok!(SubtensorModule::set_reveal_period(netuid, 1)); SubtensorModule::set_weights_set_rate_limit(netuid, 0); for i in 0..10 { @@ -6158,7 +6149,7 @@ fn test_multiple_commits_by_same_hotkey_within_limit() { let cur_epoch = SubtensorModule::get_epoch_index(netuid, SubtensorModule::get_current_block_as_u64()); - let commits = CRV3WeightCommitsV2::::get(netuid, cur_epoch); + let commits = TimelockedWeightCommits::::get(netuid, cur_epoch); assert_eq!( commits.len(), 10, @@ -6178,7 +6169,7 @@ fn test_reveal_crv3_commits_removes_past_epoch_commits() { add_network(netuid, /*tempo*/ 5, 0); register_ok_neuron(netuid, hotkey, U256::from(2), 100_000); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); - SubtensorModule::set_reveal_period(netuid, 1); // reveal_period = 1 epoch + assert_ok!(SubtensorModule::set_reveal_period(netuid, 1)); // reveal_period = 1 epoch SubtensorModule::set_weights_set_rate_limit(netuid, 0); // --------------------------------------------------------------------- @@ -6192,7 +6183,7 @@ fn test_reveal_crv3_commits_removes_past_epoch_commits() { for &epoch in &[past_epoch, reveal_epoch] { let bounded_commit = vec![epoch as u8; 5].try_into().expect("bounded vec"); - assert_ok!(CRV3WeightCommitsV2::::try_mutate( + assert_ok!(TimelockedWeightCommits::::try_mutate( netuid, epoch, |q| -> DispatchResult { @@ -6203,8 +6194,8 @@ fn test_reveal_crv3_commits_removes_past_epoch_commits() { } // Sanity – both epochs presently hold a commit. 
- assert!(!CRV3WeightCommitsV2::::get(netuid, past_epoch).is_empty()); - assert!(!CRV3WeightCommitsV2::::get(netuid, reveal_epoch).is_empty()); + assert!(!TimelockedWeightCommits::::get(netuid, past_epoch).is_empty()); + assert!(!TimelockedWeightCommits::::get(netuid, reveal_epoch).is_empty()); // --------------------------------------------------------------------- // Run the reveal pass WITHOUT a pulse – only expiry housekeeping runs. @@ -6213,13 +6204,13 @@ fn test_reveal_crv3_commits_removes_past_epoch_commits() { // past_epoch (< reveal_epoch) must be gone assert!( - CRV3WeightCommitsV2::::get(netuid, past_epoch).is_empty(), + TimelockedWeightCommits::::get(netuid, past_epoch).is_empty(), "expired epoch {past_epoch} should be cleared" ); // reveal_epoch queue is *kept* because its commit could still be revealed later. assert!( - !CRV3WeightCommitsV2::::get(netuid, reveal_epoch).is_empty(), + !TimelockedWeightCommits::::get(netuid, reveal_epoch).is_empty(), "reveal-epoch {reveal_epoch} must be retained until commit can be revealed" ); }); @@ -6235,7 +6226,7 @@ fn test_reveal_crv3_commits_multiple_valid_commits_all_processed() { // ───── network parameters ─────────────────────────────────────────── add_network(netuid, 5, 0); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); - SubtensorModule::set_reveal_period(netuid, 1); + assert_ok!(SubtensorModule::set_reveal_period(netuid, 1)); SubtensorModule::set_weights_set_rate_limit(netuid, 0); SubtensorModule::set_stake_threshold(0); SubtensorModule::set_max_registrations_per_block(netuid, 100); @@ -6351,7 +6342,7 @@ fn test_reveal_crv3_commits_max_neurons() { // ───── network parameters ─────────────────────────────────────────── add_network(netuid, 5, 0); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); - SubtensorModule::set_reveal_period(netuid, 1); + assert_ok!(SubtensorModule::set_reveal_period(netuid, 1)); SubtensorModule::set_weights_set_rate_limit(netuid, 0); 
SubtensorModule::set_stake_threshold(0); SubtensorModule::set_max_registrations_per_block(netuid, 10_000); @@ -6580,7 +6571,7 @@ fn test_reveal_crv3_commits_hotkey_check() { SubtensorModule::set_stake_threshold(0); SubtensorModule::set_weights_set_rate_limit(netuid, 0); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); - SubtensorModule::set_reveal_period(netuid, 3); + assert_ok!(SubtensorModule::set_reveal_period(netuid, 3)); let neuron_uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey1) .expect("Failed to get neuron UID for hotkey1"); @@ -6697,7 +6688,7 @@ fn test_reveal_crv3_commits_hotkey_check() { SubtensorModule::set_stake_threshold(0); SubtensorModule::set_weights_set_rate_limit(netuid, 0); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); - SubtensorModule::set_reveal_period(netuid, 3); + assert_ok!(SubtensorModule::set_reveal_period(netuid, 3)); let neuron_uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey1) .expect("Failed to get neuron UID for hotkey1"); @@ -6846,7 +6837,7 @@ fn test_reveal_crv3_commits_retry_on_missing_pulse() { add_network(netuid, 5, 0); register_ok_neuron(netuid, hotkey, U256::from(3), 100_000); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); - SubtensorModule::set_reveal_period(netuid, 3); + assert_ok!(SubtensorModule::set_reveal_period(netuid, 3)); SubtensorModule::set_weights_set_rate_limit(netuid, 0); SubtensorModule::set_stake_threshold(0); @@ -6895,7 +6886,7 @@ fn test_reveal_crv3_commits_retry_on_missing_pulse() { )); // epoch in which commit was stored - let stored_epoch = CRV3WeightCommitsV2::::iter_prefix(netuid) + let stored_epoch = TimelockedWeightCommits::::iter_prefix(netuid) .next() .map(|(e, _)| e) .expect("commit stored"); @@ -6909,7 +6900,7 @@ fn test_reveal_crv3_commits_retry_on_missing_pulse() { // run *one* block inside reveal epoch without pulse → commit should stay queued step_block(1); assert!( - 
!CRV3WeightCommitsV2::::get(netuid, stored_epoch).is_empty(), + !TimelockedWeightCommits::::get(netuid, stored_epoch).is_empty(), "commit must remain queued when pulse is missing" ); @@ -6937,7 +6928,7 @@ fn test_reveal_crv3_commits_retry_on_missing_pulse() { assert!(!weights.is_empty(), "weights must be set after pulse"); assert!( - CRV3WeightCommitsV2::::get(netuid, stored_epoch).is_empty(), + TimelockedWeightCommits::::get(netuid, stored_epoch).is_empty(), "queue should be empty after successful reveal" ); }); @@ -6961,7 +6952,7 @@ fn test_reveal_crv3_commits_legacy_payload_success() { SubtensorModule::set_stake_threshold(0); SubtensorModule::set_weights_set_rate_limit(netuid, 0); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); - SubtensorModule::set_reveal_period(netuid, 3); + assert_ok!(SubtensorModule::set_reveal_period(netuid, 3)); let uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey1).unwrap(); let uid2 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey2).unwrap(); @@ -7080,7 +7071,7 @@ fn test_reveal_crv3_commits_legacy_payload_success() { // commit should be gone assert!( - CRV3WeightCommitsV2::::get(netuid, commit_epoch).is_empty(), + TimelockedWeightCommits::::get(netuid, commit_epoch).is_empty(), "commit storage should be cleaned after reveal" ); }); diff --git a/pallets/subtensor/src/transaction_extension.rs b/pallets/subtensor/src/transaction_extension.rs new file mode 100644 index 0000000000..deb42efabf --- /dev/null +++ b/pallets/subtensor/src/transaction_extension.rs @@ -0,0 +1,373 @@ +use crate::{ + BalancesCall, Call, ColdkeySwapScheduled, Config, CustomTransactionError, Error, Pallet, +}; +use codec::{Decode, DecodeWithMemTracking, Encode}; +use frame_support::dispatch::{DispatchInfo, PostDispatchInfo}; +use frame_support::pallet_prelude::Weight; +use frame_support::traits::IsSubType; +use scale_info::TypeInfo; +use sp_runtime::traits::{ + AsSystemOriginSigner, DispatchInfoOf, Dispatchable, 
Implication, TransactionExtension, + ValidateResult, +}; +use sp_runtime::transaction_validity::{ + TransactionSource, TransactionValidity, TransactionValidityError, ValidTransaction, +}; +use sp_std::marker::PhantomData; +use sp_std::vec::Vec; +use subtensor_macros::freeze_struct; +use subtensor_runtime_common::NetUid; + +#[freeze_struct("2e02eb32e5cb25d3")] +#[derive(Default, Encode, Decode, DecodeWithMemTracking, Clone, Eq, PartialEq, TypeInfo)] +pub struct SubtensorTransactionExtension(pub PhantomData); + +impl sp_std::fmt::Debug for SubtensorTransactionExtension { + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "SubtensorTransactionExtension") + } +} + +impl SubtensorTransactionExtension +where + ::RuntimeCall: + Dispatchable, + ::RuntimeCall: IsSubType>, +{ + pub fn new() -> Self { + Self(Default::default()) + } + pub fn validity_ok(priority: u64) -> ValidTransaction { + ValidTransaction { + priority, + ..Default::default() + } + } + + pub fn check_weights_min_stake(who: &T::AccountId, netuid: NetUid) -> bool { + Pallet::::check_weights_min_stake(who, netuid) + } + + pub fn result_to_validity(result: Result<(), Error>, priority: u64) -> TransactionValidity { + if let Err(err) = result { + Err(match err { + Error::::AmountTooLow => CustomTransactionError::StakeAmountTooLow.into(), + Error::::SubnetNotExists => CustomTransactionError::SubnetDoesntExist.into(), + Error::::NotEnoughBalanceToStake => CustomTransactionError::BalanceTooLow.into(), + Error::::HotKeyAccountNotExists => { + CustomTransactionError::HotkeyAccountDoesntExist.into() + } + Error::::NotEnoughStakeToWithdraw => { + CustomTransactionError::NotEnoughStakeToWithdraw.into() + } + Error::::InsufficientLiquidity => { + CustomTransactionError::InsufficientLiquidity.into() + } + Error::::SlippageTooHigh => CustomTransactionError::SlippageTooHigh.into(), + Error::::TransferDisallowed => CustomTransactionError::TransferDisallowed.into(), + 
Error::::HotKeyNotRegisteredInNetwork => { + CustomTransactionError::HotKeyNotRegisteredInNetwork.into() + } + Error::::InvalidIpAddress => CustomTransactionError::InvalidIpAddress.into(), + Error::::ServingRateLimitExceeded => { + CustomTransactionError::ServingRateLimitExceeded.into() + } + Error::::InvalidPort => CustomTransactionError::InvalidPort.into(), + _ => CustomTransactionError::BadRequest.into(), + }) + } else { + Ok(ValidTransaction { + priority, + ..Default::default() + }) + } + } +} + +impl + TransactionExtension<::RuntimeCall> + for SubtensorTransactionExtension +where + ::RuntimeCall: + Dispatchable, + ::RuntimeOrigin: AsSystemOriginSigner + Clone, + ::RuntimeCall: IsSubType>, + ::RuntimeCall: IsSubType>, +{ + const IDENTIFIER: &'static str = "SubtensorTransactionExtension"; + + type Implicit = (); + type Val = Option; + type Pre = (); + + fn weight(&self, _call: &::RuntimeCall) -> Weight { + // TODO: benchmark transaction extension + Weight::zero() + } + + fn validate( + &self, + origin: ::RuntimeOrigin, + call: &::RuntimeCall, + _info: &DispatchInfoOf<::RuntimeCall>, + _len: usize, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Implication, + _source: TransactionSource, + ) -> ValidateResult::RuntimeCall> { + // Ensure the transaction is signed, else we just skip the extension. + let Some(who) = origin.as_system_origin_signer() else { + return Ok((Default::default(), None, origin)); + }; + + match call.is_sub_type() { + Some(Call::commit_weights { netuid, .. 
}) => { + if Self::check_weights_min_stake(who, *netuid) { + Ok((Default::default(), Some(who.clone()), origin)) + } else { + Err(CustomTransactionError::StakeAmountTooLow.into()) + } + } + Some(Call::reveal_weights { + netuid, + uids, + values, + salt, + version_key, + }) => { + if Self::check_weights_min_stake(who, *netuid) { + let provided_hash = Pallet::::get_commit_hash( + who, + *netuid, + uids, + values, + salt, + *version_key, + ); + match Pallet::::find_commit_block_via_hash(provided_hash) { + Some(commit_block) => { + if Pallet::::is_reveal_block_range(*netuid, commit_block) { + Ok((Default::default(), Some(who.clone()), origin)) + } else { + Err(CustomTransactionError::CommitBlockNotInRevealRange.into()) + } + } + None => Err(CustomTransactionError::CommitNotFound.into()), + } + } else { + Err(CustomTransactionError::StakeAmountTooLow.into()) + } + } + Some(Call::batch_reveal_weights { + netuid, + uids_list, + values_list, + salts_list, + version_keys, + }) => { + if Self::check_weights_min_stake(who, *netuid) { + let num_reveals = uids_list.len(); + if num_reveals == values_list.len() + && num_reveals == salts_list.len() + && num_reveals == version_keys.len() + { + let provided_hashes = (0..num_reveals) + .map(|i| { + Pallet::::get_commit_hash( + who, + *netuid, + uids_list.get(i).unwrap_or(&Vec::new()), + values_list.get(i).unwrap_or(&Vec::new()), + salts_list.get(i).unwrap_or(&Vec::new()), + *version_keys.get(i).unwrap_or(&0_u64), + ) + }) + .collect::>(); + + let batch_reveal_block = provided_hashes + .iter() + .filter_map(|hash| Pallet::::find_commit_block_via_hash(*hash)) + .collect::>(); + + if provided_hashes.len() == batch_reveal_block.len() { + if Pallet::::is_batch_reveal_block_range(*netuid, batch_reveal_block) + { + Ok((Default::default(), Some(who.clone()), origin)) + } else { + Err(CustomTransactionError::CommitBlockNotInRevealRange.into()) + } + } else { + Err(CustomTransactionError::CommitNotFound.into()) + } + } else { + 
Err(CustomTransactionError::InputLengthsUnequal.into()) + } + } else { + Err(CustomTransactionError::StakeAmountTooLow.into()) + } + } + Some(Call::set_weights { netuid, .. }) => { + if Self::check_weights_min_stake(who, *netuid) { + Ok((Default::default(), Some(who.clone()), origin)) + } else { + Err(CustomTransactionError::StakeAmountTooLow.into()) + } + } + Some(Call::set_tao_weights { netuid, hotkey, .. }) => { + if Self::check_weights_min_stake(hotkey, *netuid) { + Ok((Default::default(), Some(who.clone()), origin)) + } else { + Err(CustomTransactionError::StakeAmountTooLow.into()) + } + } + Some(Call::commit_crv3_weights { + netuid, + reveal_round, + .. + }) => { + if Self::check_weights_min_stake(who, *netuid) { + if *reveal_round < pallet_drand::LastStoredRound::::get() { + return Err(CustomTransactionError::InvalidRevealRound.into()); + } + Ok((Default::default(), Some(who.clone()), origin)) + } else { + Err(CustomTransactionError::StakeAmountTooLow.into()) + } + } + Some(Call::commit_timelocked_weights { + netuid, + reveal_round, + .. + }) => { + if Self::check_weights_min_stake(who, *netuid) { + if *reveal_round < pallet_drand::LastStoredRound::::get() { + return Err(CustomTransactionError::InvalidRevealRound.into()); + } + Ok((Default::default(), Some(who.clone()), origin)) + } else { + Err(CustomTransactionError::StakeAmountTooLow.into()) + } + } + Some(Call::add_stake { .. }) => { + if ColdkeySwapScheduled::::contains_key(who) { + return Err(CustomTransactionError::ColdkeyInSwapSchedule.into()); + } + + Ok((Default::default(), Some(who.clone()), origin)) + } + Some(Call::add_stake_limit { .. }) => { + if ColdkeySwapScheduled::::contains_key(who) { + return Err(CustomTransactionError::ColdkeyInSwapSchedule.into()); + } + + Ok((Default::default(), Some(who.clone()), origin)) + } + Some(Call::remove_stake { .. }) => Ok((Default::default(), Some(who.clone()), origin)), + Some(Call::remove_stake_limit { .. 
}) => { + Ok((Default::default(), Some(who.clone()), origin)) + } + Some(Call::move_stake { .. }) => { + if ColdkeySwapScheduled::::contains_key(who) { + return Err(CustomTransactionError::ColdkeyInSwapSchedule.into()); + } + Ok((Default::default(), Some(who.clone()), origin)) + } + Some(Call::transfer_stake { .. }) => { + if ColdkeySwapScheduled::::contains_key(who) { + return Err(CustomTransactionError::ColdkeyInSwapSchedule.into()); + } + + Ok((Default::default(), Some(who.clone()), origin)) + } + Some(Call::swap_stake { .. }) => { + if ColdkeySwapScheduled::::contains_key(who) { + return Err(CustomTransactionError::ColdkeyInSwapSchedule.into()); + } + Ok((Default::default(), Some(who.clone()), origin)) + } + Some(Call::swap_stake_limit { .. }) => { + if ColdkeySwapScheduled::::contains_key(who) { + return Err(CustomTransactionError::ColdkeyInSwapSchedule.into()); + } + Ok((Default::default(), Some(who.clone()), origin)) + } + Some(Call::register { netuid, .. } | Call::burned_register { netuid, .. }) => { + if ColdkeySwapScheduled::::contains_key(who) { + return Err(CustomTransactionError::ColdkeyInSwapSchedule.into()); + } + + let registrations_this_interval = + Pallet::::get_registrations_this_interval(*netuid); + let max_registrations_per_interval = + Pallet::::get_target_registrations_per_interval(*netuid); + if registrations_this_interval >= (max_registrations_per_interval.saturating_mul(3)) + { + // If the registration limit for the interval is exceeded, reject the transaction + return Err(CustomTransactionError::RateLimitExceeded.into()); + } + + Ok((Default::default(), Some(who.clone()), origin)) + } + Some(Call::dissolve_network { .. 
}) => { + if ColdkeySwapScheduled::::contains_key(who) { + Err(CustomTransactionError::ColdkeyInSwapSchedule.into()) + } else { + Ok((Default::default(), Some(who.clone()), origin)) + } + } + Some(Call::serve_axon { + netuid, + version, + ip, + port, + ip_type, + protocol, + placeholder1, + placeholder2, + }) => { + // Fully validate the user input + Self::result_to_validity( + Pallet::::validate_serve_axon( + who, + *netuid, + *version, + *ip, + *port, + *ip_type, + *protocol, + *placeholder1, + *placeholder2, + ), + 0u64, + ) + .map(|validity| (validity, Some(who.clone()), origin.clone())) + } + _ => { + if let Some( + BalancesCall::transfer_keep_alive { .. } + | BalancesCall::transfer_all { .. } + | BalancesCall::transfer_allow_death { .. }, + ) = call.is_sub_type() + { + if ColdkeySwapScheduled::::contains_key(who) { + return Err(CustomTransactionError::ColdkeyInSwapSchedule.into()); + } + } + + Ok((Default::default(), Some(who.clone()), origin)) + } + } + } + + // NOTE: Add later when we put in a pre and post dispatch step. + fn prepare( + self, + _val: Self::Val, + _origin: &::RuntimeOrigin, + _call: &::RuntimeCall, + _info: &DispatchInfoOf<::RuntimeCall>, + _len: usize, + ) -> Result { + Ok(()) + } +} diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index c7b93535f3..9fd6d27de7 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -804,4 +804,29 @@ impl Pallet { Err(_) => None, } } + + /// Set the per-subnet limit (for the given `netuid`) on the number of **owner-immune** + /// neurons (UIDs). + /// + /// The value must lie within the inclusive bounds defined by [`MinImmuneOwnerUidsLimit`] + /// and [`MaxImmuneOwnerUidsLimit`]. If the bound check fails, this returns + /// [`Error::::InvalidValue`] and leaves storage unchanged. + /// + /// # Parameters + /// - `netuid`: Identifier of the subnet to update. 
+ /// - `limit`: New inclusive upper bound for the count of owner-immune UIDs on this subnet. + /// + /// # Returns + /// - `Ok(())` on success (value written to storage). + /// - `Err(Error::::InvalidValue)` if `limit` is outside `[MinImmuneOwnerUidsLimit, MaxImmuneOwnerUidsLimit]`. + pub fn set_owner_immune_neuron_limit(netuid: NetUid, limit: u16) -> DispatchResult { + ensure!( + limit >= MinImmuneOwnerUidsLimit::::get() + && limit <= MaxImmuneOwnerUidsLimit::::get(), + Error::::InvalidValue + ); + + ImmuneOwnerUidsLimit::::insert(netuid, limit); + Ok(()) + } } diff --git a/pallets/swap/src/pallet/impls.rs b/pallets/swap/src/pallet/impls.rs index 5ddc6efaaf..69bf3eacbb 100644 --- a/pallets/swap/src/pallet/impls.rs +++ b/pallets/swap/src/pallet/impls.rs @@ -885,7 +885,7 @@ impl Pallet { liquidity: u64, ) -> Result<(Position, u64, u64), Error> { ensure!( - Self::count_positions(netuid, coldkey_account_id) <= T::MaxPositions::get() as usize, + Self::count_positions(netuid, coldkey_account_id) < T::MaxPositions::get() as usize, Error::::MaxPositionsExceeded ); diff --git a/pallets/swap/src/pallet/tests.rs b/pallets/swap/src/pallet/tests.rs index 4c3a890c9b..845acd957a 100644 --- a/pallets/swap/src/pallet/tests.rs +++ b/pallets/swap/src/pallet/tests.rs @@ -342,6 +342,40 @@ fn test_add_liquidity_basic() { }); } +#[test] +fn test_add_liquidity_max_limit_enforced() { + new_test_ext().execute_with(|| { + let netuid = NetUid::from(1); + let liquidity = 2_000_000_000_u64; + assert_ok!(Pallet::::maybe_initialize_v3(netuid)); + + let limit = MaxPositions::get() as usize; + + for _ in 0..limit { + Pallet::::do_add_liquidity( + netuid, + &OK_COLDKEY_ACCOUNT_ID, + &OK_HOTKEY_ACCOUNT_ID, + TickIndex::MIN, + TickIndex::MAX, + liquidity, + ) + .unwrap(); + } + + let test_result = Pallet::::do_add_liquidity( + netuid, + &OK_COLDKEY_ACCOUNT_ID, + &OK_HOTKEY_ACCOUNT_ID, + TickIndex::MIN, + TickIndex::MAX, + liquidity, + ); + + assert_err!(test_result, Error::::MaxPositionsExceeded); + 
}); +} + #[test] fn test_add_liquidity_out_of_bounds() { new_test_ext().execute_with(|| { diff --git a/pallets/utility/src/lib.rs b/pallets/utility/src/lib.rs index 294836677d..8ee888889e 100644 --- a/pallets/utility/src/lib.rs +++ b/pallets/utility/src/lib.rs @@ -190,9 +190,9 @@ pub mod pallet { /// event is deposited. #[pallet::call_index(0)] #[pallet::weight({ - let (dispatch_weight, dispatch_class) = Pallet::::weight_and_dispatch_class(calls); + let dispatch_weight = Pallet::::weight(calls); let dispatch_weight = dispatch_weight.saturating_add(T::WeightInfo::batch(calls.len() as u32)); - (dispatch_weight, dispatch_class) + (dispatch_weight, DispatchClass::Normal) })] pub fn batch( origin: OriginFor, @@ -302,9 +302,9 @@ pub mod pallet { /// - O(C) where C is the number of calls to be batched. #[pallet::call_index(2)] #[pallet::weight({ - let (dispatch_weight, dispatch_class) = Pallet::::weight_and_dispatch_class(calls); + let dispatch_weight = Pallet::::weight(calls); let dispatch_weight = dispatch_weight.saturating_add(T::WeightInfo::batch_all(calls.len() as u32)); - (dispatch_weight, dispatch_class) + (dispatch_weight, DispatchClass::Normal) })] pub fn batch_all( origin: OriginFor, @@ -401,9 +401,9 @@ pub mod pallet { /// - O(C) where C is the number of calls to be batched. #[pallet::call_index(4)] #[pallet::weight({ - let (dispatch_weight, dispatch_class) = Pallet::::weight_and_dispatch_class(calls); + let dispatch_weight = Pallet::::weight(calls); let dispatch_weight = dispatch_weight.saturating_add(T::WeightInfo::force_batch(calls.len() as u32)); - (dispatch_weight, dispatch_class) + (dispatch_weight, DispatchClass::Normal) })] pub fn force_batch( origin: OriginFor, @@ -474,29 +474,15 @@ pub mod pallet { impl Pallet { /// Get the accumulated `weight` and the dispatch class for the given `calls`. 
- fn weight_and_dispatch_class( - calls: &[::RuntimeCall], - ) -> (Weight, DispatchClass) { + fn weight(calls: &[::RuntimeCall]) -> Weight { let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()); - let (dispatch_weight, dispatch_class) = dispatch_infos.fold( - (Weight::zero(), DispatchClass::Operational), - |(total_weight, dispatch_class), di| { - ( - if di.pays_fee == Pays::Yes { - total_weight.saturating_add(di.call_weight) - } else { - total_weight - }, - if di.class == DispatchClass::Normal { - di.class - } else { - dispatch_class - }, - ) - }, - ); - - (dispatch_weight, dispatch_class) + dispatch_infos.fold(Weight::zero(), |total_weight, di| { + if di.pays_fee == Pays::Yes { + total_weight.saturating_add(di.call_weight) + } else { + total_weight + } + }) } } } diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 56a263a813..60687d3a30 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -12,6 +12,7 @@ use core::num::NonZeroU64; pub mod check_nonce; mod migrations; +pub mod transaction_payment_wrapper; extern crate alloc; @@ -149,8 +150,12 @@ impl frame_system::offchain::CreateSignedTransaction frame_system::CheckEra::::from(Era::Immortal), check_nonce::CheckNonce::::from(nonce).into(), frame_system::CheckWeight::::new(), - pallet_transaction_payment::ChargeTransactionPayment::::from(0), - pallet_subtensor::SubtensorTransactionExtension::::new(), + ChargeTransactionPaymentWrapper::new( + pallet_transaction_payment::ChargeTransactionPayment::::from(0), + ), + pallet_subtensor::transaction_extension::SubtensorTransactionExtension::::new( + ), + pallet_drand::drand_priority::DrandPriority::::new(), frame_metadata_hash_extension::CheckMetadataHash::::new(true), ); @@ -215,7 +220,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. 
// This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 302, + spec_version: 306, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -1258,6 +1263,7 @@ impl pallet_subtensor_swap::Config for Runtime { type WeightInfo = pallet_subtensor_swap::weights::DefaultWeight; } +use crate::transaction_payment_wrapper::ChargeTransactionPaymentWrapper; use sp_runtime::BoundedVec; pub struct AuraPalletIntrf; @@ -1626,8 +1632,9 @@ pub type TransactionExtensions = ( frame_system::CheckEra, check_nonce::CheckNonce, frame_system::CheckWeight, - pallet_transaction_payment::ChargeTransactionPayment, - pallet_subtensor::SubtensorTransactionExtension, + ChargeTransactionPaymentWrapper, + pallet_subtensor::transaction_extension::SubtensorTransactionExtension, + pallet_drand::drand_priority::DrandPriority, frame_metadata_hash_extension::CheckMetadataHash, ); diff --git a/runtime/src/transaction_payment_wrapper.rs b/runtime/src/transaction_payment_wrapper.rs new file mode 100644 index 0000000000..f299c52497 --- /dev/null +++ b/runtime/src/transaction_payment_wrapper.rs @@ -0,0 +1,161 @@ +use crate::Weight; +use codec::{Decode, DecodeWithMemTracking, Encode}; +use frame_election_provider_support::private::sp_arithmetic::traits::SaturatedConversion; +use frame_support::dispatch::{DispatchClass, DispatchInfo, PostDispatchInfo}; +use frame_support::pallet_prelude::TypeInfo; +use pallet_transaction_payment::{ChargeTransactionPayment, Config, Pre, Val}; +use sp_runtime::DispatchResult; +use sp_runtime::traits::{ + DispatchInfoOf, DispatchOriginOf, Dispatchable, Implication, PostDispatchInfoOf, + TransactionExtension, TransactionExtensionMetadata, ValidateResult, +}; +use sp_runtime::transaction_validity::{ + TransactionPriority, TransactionSource, TransactionValidity, TransactionValidityError, +}; +use sp_std::vec::Vec; +use subtensor_macros::freeze_struct; + 
+#[freeze_struct("5f10cb9db06873c0")]
+#[derive(Encode, Decode, DecodeWithMemTracking, Clone, Eq, PartialEq, TypeInfo)]
+#[scale_info(skip_type_params(T))]
+pub struct ChargeTransactionPaymentWrapper<T: Config> {
+    charge_transaction_payment: ChargeTransactionPayment<T>,
+}
+
+impl<T: Config> core::fmt::Debug for ChargeTransactionPaymentWrapper<T> {
+    #[cfg(feature = "std")]
+    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+        write!(f, "ChargeTransactionPaymentWrapper",)
+    }
+    #[cfg(not(feature = "std"))]
+    fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result {
+        Ok(())
+    }
+}
+
+impl<T: Config> ChargeTransactionPaymentWrapper<T> {
+    pub fn new(charge_transaction_payment: ChargeTransactionPayment<T>) -> Self {
+        Self {
+            charge_transaction_payment,
+        }
+    }
+}
+
+impl<T: Config> TransactionExtension<T::RuntimeCall> for ChargeTransactionPaymentWrapper<T>
+where
+    T::RuntimeCall: Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>,
+{
+    const IDENTIFIER: &'static str = "ChargeTransactionPaymentWrapper";
+    type Implicit = ();
+    type Val = Val<T>;
+    type Pre = Pre<T>;
+
+    fn weight(&self, call: &T::RuntimeCall) -> Weight {
+        self.charge_transaction_payment.weight(call)
+    }
+
+    fn validate(
+        &self,
+        origin: DispatchOriginOf<T::RuntimeCall>,
+        call: &T::RuntimeCall,
+        info: &DispatchInfoOf<T::RuntimeCall>,
+        len: usize,
+        self_implicit: Self::Implicit,
+        inherited_implication: &impl Implication,
+        source: TransactionSource,
+    ) -> ValidateResult<Self::Val, T::RuntimeCall> {
+        let inner_validate = self.charge_transaction_payment.validate(
+            origin,
+            call,
+            info,
+            len,
+            self_implicit,
+            inherited_implication,
+            source,
+        );
+
+        match inner_validate {
+            Ok((mut valid_transaction, val, origin)) => {
+                let overridden_priority = {
+                    match info.class {
+                        DispatchClass::Normal => 1u64,
+                        DispatchClass::Mandatory => {
+                            // Mandatory extrinsics should be prohibited (e.g. by the [`CheckWeight`]
+                            // extensions), but just to be safe let's return the same priority as `Normal` here.
+                            1u64
+                        }
+                        DispatchClass::Operational => {
+                            // System calls
+                            10_000_000_000u64
+                        }
+                    }
+                    .saturated_into::<TransactionPriority>()
+                };
+
+                valid_transaction.priority = overridden_priority;
+
+                Ok((valid_transaction, val, origin))
+            }
+            Err(err) => Err(err),
+        }
+    }
+
+    fn prepare(
+        self,
+        val: Self::Val,
+        origin: &DispatchOriginOf<T::RuntimeCall>,
+        call: &T::RuntimeCall,
+        info: &DispatchInfoOf<T::RuntimeCall>,
+        len: usize,
+    ) -> Result<Self::Pre, TransactionValidityError> {
+        self.charge_transaction_payment
+            .prepare(val, origin, call, info, len)
+    }
+    fn metadata() -> Vec<TransactionExtensionMetadata> {
+        ChargeTransactionPayment::<T>::metadata()
+    }
+    fn post_dispatch_details(
+        pre: Self::Pre,
+        info: &DispatchInfoOf<T::RuntimeCall>,
+        post_info: &PostDispatchInfoOf<T::RuntimeCall>,
+        len: usize,
+        result: &DispatchResult,
+    ) -> Result<Weight, TransactionValidityError> {
+        ChargeTransactionPayment::<T>::post_dispatch_details(pre, info, post_info, len, result)
+    }
+
+    fn post_dispatch(
+        pre: Self::Pre,
+        info: &DispatchInfoOf<T::RuntimeCall>,
+        post_info: &mut PostDispatchInfoOf<T::RuntimeCall>,
+        len: usize,
+        result: &DispatchResult,
+    ) -> Result<(), TransactionValidityError> {
+        ChargeTransactionPayment::<T>::post_dispatch(pre, info, post_info, len, result)
+    }
+
+    fn bare_validate(
+        call: &T::RuntimeCall,
+        info: &DispatchInfoOf<T::RuntimeCall>,
+        len: usize,
+    ) -> TransactionValidity {
+        ChargeTransactionPayment::<T>::bare_validate(call, info, len)
+    }
+
+    fn bare_validate_and_prepare(
+        call: &T::RuntimeCall,
+        info: &DispatchInfoOf<T::RuntimeCall>,
+        len: usize,
+    ) -> Result<(), TransactionValidityError> {
+        ChargeTransactionPayment::<T>::bare_validate_and_prepare(call, info, len)
+    }
+
+    fn bare_post_dispatch(
+        info: &DispatchInfoOf<T::RuntimeCall>,
+        post_info: &mut PostDispatchInfoOf<T::RuntimeCall>,
+        len: usize,
+        result: &DispatchResult,
+    ) -> Result<(), TransactionValidityError> {
+        ChargeTransactionPayment::<T>::bare_post_dispatch(info, post_info, len, result)
+    }
+}