Merge pull request #429 from opentensor/fix-remaining-direct-indexing
Remove more direct indexing
sam0x17 authored May 17, 2024
2 parents 0120e42 + 7d2c624 commit d5d20a3
Showing 2 changed files with 36 additions and 23 deletions.
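The diff continues the earlier clippy::indexing_slicing cleanups: direct indexing (weights[i]), which panics on an out-of-bounds index, is replaced by iterator filtering plus .get()/.get_mut() with an .expect() message recording why the bound holds. A minimal, self-contained sketch of that pattern, with illustrative names rather than code from the repository:

fn accumulate(pairs: &[(u16, u16)], n: usize) -> Vec<u32> {
    let mut totals = vec![0u32; n];
    // Filter first so the index is known to be in range before it is used.
    for (uid, value) in pairs.iter().filter(|(uid, _)| (*uid as usize) < n) {
        // The filter establishes uid < n, so the expect documents an invariant
        // instead of hiding a panic path inside an index expression.
        *totals
            .get_mut(*uid as usize)
            .expect("uid is filtered to be less than n; qed") += *value as u32;
    }
    totals
}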
46 changes: 32 additions & 14 deletions pallets/subtensor/src/epoch.rs
@@ -734,7 +734,6 @@ impl<T: Config> Pallet<T> {
     }
 
     /// Output unnormalized sparse weights, input weights are assumed to be row max-upscaled in u16.
-    #[allow(clippy::indexing_slicing)]
     pub fn get_weights_sparse(netuid: u16) -> Vec<Vec<(u16, I32F32)>> {
         let n: usize = Self::get_subnetwork_n(netuid) as usize;
         let mut weights: Vec<Vec<(u16, I32F32)>> = vec![vec![]; n];
@@ -743,52 +742,71 @@ impl<T: Config> Pallet<T> {
                 .filter(|(uid_i, _)| *uid_i < n as u16)
         {
             for (uid_j, weight_ij) in weights_i.iter().filter(|(uid_j, _)| *uid_j < n as u16) {
-                weights[uid_i as usize].push((*uid_j, I32F32::from_num(*weight_ij)));
+                weights
+                    .get_mut(uid_i as usize)
+                    .expect("uid_i is filtered to be less than n; qed")
+                    .push((*uid_j, I32F32::from_num(*weight_ij)));
             }
         }
         weights
     }
 
     /// Output unnormalized weights in [n, n] matrix, input weights are assumed to be row max-upscaled in u16.
-    #[allow(clippy::indexing_slicing)]
     pub fn get_weights(netuid: u16) -> Vec<Vec<I32F32>> {
         let n: usize = Self::get_subnetwork_n(netuid) as usize;
         let mut weights: Vec<Vec<I32F32>> = vec![vec![I32F32::from_num(0.0); n]; n];
-        for (uid_i, weights_i) in
+        for (uid_i, weights_vec) in
             <Weights<T> as IterableStorageDoubleMap<u16, u16, Vec<(u16, u16)>>>::iter_prefix(netuid)
                 .filter(|(uid_i, _)| *uid_i < n as u16)
         {
-            for (uid_j, weight_ij) in weights_i {
-                weights[uid_i as usize][uid_j as usize] = I32F32::from_num(weight_ij);
+            for (uid_j, weight_ij) in weights_vec
+                .into_iter()
+                .filter(|(uid_j, _)| *uid_j < n as u16)
+            {
+                *weights
+                    .get_mut(uid_i as usize)
+                    .expect("uid_i is filtered to be less than n; qed")
+                    .get_mut(uid_j as usize)
+                    .expect("uid_j is filtered to be less than n; qed") =
+                    I32F32::from_num(weight_ij);
             }
         }
         weights
     }
 
     /// Output unnormalized sparse bonds, input bonds are assumed to be column max-upscaled in u16.
-    #[allow(clippy::indexing_slicing)]
     pub fn get_bonds_sparse(netuid: u16) -> Vec<Vec<(u16, I32F32)>> {
         let n: usize = Self::get_subnetwork_n(netuid) as usize;
         let mut bonds: Vec<Vec<(u16, I32F32)>> = vec![vec![]; n];
-        for (uid_i, bonds_i) in
+        for (uid_i, bonds_vec) in
             <Bonds<T> as IterableStorageDoubleMap<u16, u16, Vec<(u16, u16)>>>::iter_prefix(netuid)
                 .filter(|(uid_i, _)| *uid_i < n as u16)
         {
-            for (uid_j, bonds_ij) in bonds_i {
-                bonds[uid_i as usize].push((uid_j, I32F32::from_num(bonds_ij)));
+            for (uid_j, bonds_ij) in bonds_vec {
+                bonds
+                    .get_mut(uid_i as usize)
+                    .expect("uid_i is filtered to be less than n; qed")
+                    .push((uid_j, I32F32::from_num(bonds_ij)));
            }
        }
        bonds
    }
 
     /// Output unnormalized bonds in [n, n] matrix, input bonds are assumed to be column max-upscaled in u16.
-    #[allow(clippy::indexing_slicing)]
     pub fn get_bonds(netuid: u16) -> Vec<Vec<I32F32>> {
         let n: usize = Self::get_subnetwork_n(netuid) as usize;
         let mut bonds: Vec<Vec<I32F32>> = vec![vec![I32F32::from_num(0.0); n]; n];
-        for (uid_i, bonds_i) in
+        for (uid_i, bonds_vec) in
             <Bonds<T> as IterableStorageDoubleMap<u16, u16, Vec<(u16, u16)>>>::iter_prefix(netuid)
                 .filter(|(uid_i, _)| *uid_i < n as u16)
         {
-            for (uid_j, bonds_ij) in bonds_i {
-                bonds[uid_i as usize][uid_j as usize] = I32F32::from_num(bonds_ij);
+            for (uid_j, bonds_ij) in bonds_vec.into_iter().filter(|(uid_j, _)| *uid_j < n as u16) {
+                *bonds
+                    .get_mut(uid_i as usize)
+                    .expect("uid_i has been filtered to be less than n; qed")
+                    .get_mut(uid_j as usize)
+                    .expect("uid_j has been filtered to be less than n; qed") =
+                    I32F32::from_num(bonds_ij);
             }
         }
         bonds
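For the dense [n, n] variants (get_weights, get_bonds), the same idea is applied to both indices: uid_i and uid_j are each filtered against n, and the cell is then reached through nested get_mut calls. A standalone sketch of that shape, again with illustrative names rather than pallet code:

fn fill_matrix(entries: &[(u16, u16, u16)], n: usize) -> Vec<Vec<u32>> {
    let mut matrix = vec![vec![0u32; n]; n];
    for (i, j, value) in entries
        .iter()
        .filter(|(i, j, _)| (*i as usize) < n && (*j as usize) < n)
    {
        // Both indices were filtered above, so each expect records a proven bound.
        *matrix
            .get_mut(*i as usize)
            .expect("i is filtered to be less than n; qed")
            .get_mut(*j as usize)
            .expect("j is filtered to be less than n; qed") = *value as u32;
    }
    matrix
}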
13 changes: 4 additions & 9 deletions pallets/subtensor/src/registration.rs
@@ -2,7 +2,6 @@ use super::*;
 use frame_support::storage::IterableStorageDoubleMap;
 use sp_core::{Get, H256, U256};
 use sp_io::hashing::{keccak_256, sha2_256};
-use sp_runtime::MultiAddress;
 use system::pallet_prelude::BlockNumberFor;
 
 const LOG_TARGET: &str = "runtime::subtensor::registration";
@@ -528,18 +527,14 @@ impl<T: Config> Pallet<T> {
         hash_as_vec
     }
 
-    #[allow(clippy::indexing_slicing)]
     pub fn hash_block_and_hotkey(block_hash_bytes: &[u8; 32], hotkey: &T::AccountId) -> H256 {
-        // Get the public key from the account id.
-        let hotkey_pubkey: MultiAddress<T::AccountId, ()> = MultiAddress::Id(hotkey.clone());
-        let binding = hotkey_pubkey.encode();
-        // Skip extra 0th byte.
-        let hotkey_bytes: &[u8] = binding[1..].as_ref();
+        let binding = hotkey.encode();
+        // Safe because Substrate guarantees that all AccountId types are at least 32 bytes
+        let (hotkey_bytes, _) = binding.split_at(32);
         let mut full_bytes = [0u8; 64];
         let (first_half, second_half) = full_bytes.split_at_mut(32);
         first_half.copy_from_slice(block_hash_bytes);
-        // Safe because Substrate guarantees that all AccountId types are at least 32 bytes
-        second_half.copy_from_slice(&hotkey_bytes[..32]);
+        second_half.copy_from_slice(hotkey_bytes);
         let keccak_256_seal_hash_vec: [u8; 32] = keccak_256(&full_bytes[..]);
 
         H256::from_slice(&keccak_256_seal_hash_vec)
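The registration.rs change drops the MultiAddress round-trip (and its skip-the-0th-byte slicing) and instead takes the first 32 bytes of the SCALE-encoded hotkey directly via split_at. The buffer assembly itself is plain std Rust; a small sketch of just the concatenation step, assuming the encoded key is at least 32 bytes (the keccak hashing call is omitted):

fn concat_block_and_key(block_hash: &[u8; 32], encoded_key: &[u8]) -> [u8; 64] {
    // split_at panics if encoded_key is shorter than 32 bytes, mirroring the
    // pallet's assumption that every AccountId encodes to at least 32 bytes.
    let (key_bytes, _) = encoded_key.split_at(32);
    let mut full_bytes = [0u8; 64];
    let (first_half, second_half) = full_bytes.split_at_mut(32);
    first_half.copy_from_slice(block_hash);
    second_half.copy_from_slice(key_bytes);
    full_bytes
}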
