
Addressing first part of reviews
FOUCHARD Denis committed Feb 19, 2024
1 parent ace93e4 commit ac39ced
Showing 14 changed files with 323 additions and 310 deletions.
4 changes: 2 additions & 2 deletions examples/plot_int_alignment.py
@@ -135,13 +135,13 @@

 parcels = compute_parcels(niimg=template_train[0], mask=masker, n_parcels=100, n_jobs=5)
 model = IndividualizedNeuralTuning(
-    n_jobs=8, alignment_method="parcellation", n_components=None
+    n_jobs=8, alignment_method="parcelation", n_components=20
 )
 model.fit(train_data, parcels=parcels, verbose=False)
 train_stimulus = np.copy(model.shared_response)
 train_tuning = np.linalg.pinv(train_stimulus) @ model.denoised_signal[-1]
 model_bis = IndividualizedNeuralTuning(
-    n_jobs=8, alignment_method="parcellation", n_components=None
+    n_jobs=8, alignment_method="parcelation", n_components=20
 )
 model_bis.fit(test_data, parcels=parcels, verbose=False)
 test_stimulus = np.copy(model_bis.shared_response)
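For context: the example fits one IndividualizedNeuralTuning model per split, estimates the left-out subject's tuning on the training split, and in a later, unshown step recombines it with the test-split stimulus. A minimal sketch of that matrix logic under the INT decomposition signal ≈ stimulus @ tuning (the final prediction step is an assumption, consistent with the quantities the hunk computes):

import numpy as np

# Shapes: stimulus (n_timepoints, k), tuning (k, n_voxels).
# Tuning of the left-out subject, estimated on the training split:
train_tuning = np.linalg.pinv(train_stimulus) @ model.denoised_signal[-1]
# Hypothetical final step: predict that subject's test-split signal by
# pairing the test-split stimulus with the training tuning.
pred = test_stimulus @ train_tuning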
2 changes: 1 addition & 1 deletion examples/plot_srm_alignment.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-

"""
-Hyperalignment-base prediction using Feilong Ma's IndividualNeuralTuning Model.
+Hyperalignment-base prediction using Hugo Richard's FastSRM.
See article : https://doi.org/10.1162/imag_a_00032
==========================
37 changes: 19 additions & 18 deletions fmralign/alignment_methods.py
Expand Up @@ -481,7 +481,7 @@ class IndividualizedNeuralTuning(Alignment):

     def __init__(
         self,
-        decomp_method=None,
+        decomp_method="pca",
         n_components=None,
         alignment_method="searchlight",
         n_jobs=1,
@@ -492,7 +492,7 @@ def __init__(
         Parameters:
         --------
         decomp_method : str
-            The decomposition method to use. If None, "pca" will be used. Can be ["pca", "procrustes"] Default is None.
+            The decomposition method to use. Can be ["pca", "pcav1", "procrustes"] Default is "pca".
         alignment_method : str
             The alignment method to use. Can be either "searchlight" or "parcelation", Default is "searchlight".
         n_components : int
@@ -535,7 +535,8 @@ def _tuning_estimator(shared_response, target):
         Parameters:
         --------
         shared_response : array-like
-            The shared response matrix.
+            The shared response matrix of shape (n_timepoints, k)
+            where k is the dimension of the sources latent space.
         target : array-like
             The target matrix.
         latent_dim : int, optional
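In effect, the tuning step solves shared_response @ T = target for each subject's tuning matrix T. A minimal NumPy sketch; the pseudo-inverse is used here (as in the example script above), whereas the method's np.linalg.inv assumes a square shared response, i.e. k == n_timepoints:

import numpy as np

def tuning_estimator(shared_response, target):
    # shared_response: (n_timepoints, k); target: (n_timepoints, n_voxels).
    # Returns T of shape (k, n_voxels) with shared_response @ T ≈ target.
    return np.linalg.pinv(shared_response) @ target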
@@ -551,31 +552,31 @@ def _tuning_estimator(shared_response, target):
         return np.linalg.inv(shared_response).dot(target)

     @staticmethod
-    def _stimulus_estimator(full_signal, n_t, n_s, latent_dim=None, scaling=True):
+    def _stimulus_estimator(full_signal, n_subjects, latent_dim=None, scaling=True):
         """
         Estimates the stimulus matrix for the Individualized Neural Tuning model.
         Parameters:
         --------
         full_signal : numpy.ndarray
-            The full signal of shape (n_t, n_s).
-        n_t : int
-            The number of time points.
-        n_s : int
+            Concatenated signal for all subjects,
+            of shape (n_timepoints, n_subjects * n_voxels).
+        n_subjects : int
             The number of subjects.
         latent_dim : int, optional
             The number of latent dimensions to use. Defaults to None.
         scaling : bool, optional
             Whether to scale the stimulus matrix sources. Defaults to True.
         """
+        n_timepoints = full_signal.shape[0]
         if scaling:
             U = svd_pca(full_signal)
         else:
             U, _, _ = safe_svd(full_signal)
-        if latent_dim is not None and latent_dim < n_t:
+        if latent_dim is not None and latent_dim < n_timepoints:
             U = U[:, :latent_dim]

-        stimulus = np.sqrt(n_s) * U
+        stimulus = np.sqrt(n_subjects) * U
         stimulus = stimulus.astype(np.float32)
         return stimulus
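Put differently, the stimulus estimate is the left singular subspace of the concatenated signal, truncated and rescaled. A standalone sketch, substituting numpy.linalg.svd for the package's svd_pca/safe_svd helpers (so the scaling branch is simplified away):

import numpy as np

def stimulus_estimator(full_signal, n_subjects, latent_dim=None):
    # full_signal: (n_timepoints, n_subjects * n_voxels).
    U, _, _ = np.linalg.svd(full_signal, full_matrices=False)
    if latent_dim is not None and latent_dim < full_signal.shape[0]:
        U = U[:, :latent_dim]  # keep the first latent_dim components
    # sqrt(n_subjects) keeps the shared response on a per-subject scale.
    return (np.sqrt(n_subjects) * U).astype(np.float32)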

@@ -639,10 +640,10 @@ def fit(

         X_ = np.array(X, copy=True, dtype=np.float32)

-        self.n_s, self.n_t, self.n_v = X_.shape
+        self.n_subjects, self.n_time_points, self.n_voxels = X_.shape

-        self.tuning_data = np.empty(self.n_s, dtype=np.float32)
-        self.denoised_signal = np.empty(self.n_s, dtype=np.float32)
+        self.tuning_data = np.empty(self.n_subjects, dtype=np.float32)
+        self.denoised_signal = np.empty(self.n_subjects, dtype=np.float32)

         if searchlights is None:
             self.regions = parcels
@@ -670,15 +671,15 @@

         full_signal = np.concatenate(self.denoised_signal, axis=1)
         self.shared_response = self._stimulus_estimator(
-            full_signal, self.n_t, self.n_s, self.n_components
+            full_signal, self.n_subjects, self.n_components
         )
         if tuning:
             self.tuning_data = Parallel(n_jobs=self.n_jobs)(
                 delayed(self._tuning_estimator)(
                     self.shared_response,
                     self.denoised_signal[i],
                 )
-                for i in range(self.n_s)
+                for i in range(self.n_subjects)
             )

         return self
@@ -690,13 +691,13 @@ def transform(self, X, verbose=False):
         Parameters:
         --------
         X : array-like
-            The test data of shape (n_subjects, n_samples, n_voxels).
+            The test data of shape (n_subjects, n_timepoints, n_voxels).
         verbose : bool(optional)
             Whether to print progress information. Defaults to False.
         Returns:
         --------
-        array-like: The transformed data of shape (n_subjects, n_samples, n_voxels).
+        array-like: The transformed data of shape (n_subjects, n_timepoints, n_voxels).
         """

         full_signal = np.concatenate(X, axis=1, dtype=np.float32)
@@ -705,7 +706,7 @@ def transform(self, X, verbose=False):
print("Predict : Computing stimulus matrix...")

stimulus_ = self._stimulus_estimator(
full_signal, self.n_t, self.n_s, self.n_components
full_signal, self.n_time_points, self.n_subjects, self.n_components
)

if verbose:
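Note that after the signature change, this transform call still passes four positional arguments, so self.n_time_points lands in the n_subjects slot and self.n_subjects in latent_dim. The presumably intended call (an assumption, mirroring the updated call in fit above):

stimulus_ = self._stimulus_estimator(
    full_signal, self.n_subjects, self.n_components
)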
8 changes: 2 additions & 6 deletions fmralign/fetch_example_data.py
@@ -43,9 +43,7 @@ def fetch_ibc_subjects_contrasts(subjects, data_dir=None, verbose=1):
"""
# The URLs can be retrieved from the nilearn account on OSF
if subjects == "all":
subjects = [
"sub-{i:02d}" for i in [1, 2, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15]
]
subjects = ["sub-{i:02d}" for i in [1, 2, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15]]
dataset_name = "ibc"
data_dir = get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)

@@ -64,9 +62,7 @@ def fetch_ibc_subjects_contrasts(subjects, data_dir=None, verbose=1):
     )
     metadata_df = pd.read_csv(metadata_path[0])
     conditions = metadata_df.condition.unique()
-    metadata_df["path"] = metadata_df["path"].str.replace(
-        "path_to_dir", data_dir
-    )
+    metadata_df["path"] = metadata_df["path"].str.replace("path_to_dir", data_dir)
     # filter the dataframe to return only rows relevant for subjects argument
     metadata_df = metadata_df[metadata_df.subject.isin(subjects)]

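Incidentally, both sides of the first hunk in this file appear to lack the f prefix on the string literal, so subjects would hold thirteen copies of the literal text "sub-{i:02d}" rather than the subject labels. A sketch of the presumably intended line:

subjects = [f"sub-{i:02d}" for i in [1, 2, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15]]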
48 changes: 24 additions & 24 deletions fmralign/generate_data.py
@@ -5,9 +5,9 @@


 def generate_dummy_signal(
-    n_s: int,
-    n_t: int,
-    n_v: int,
+    n_subjects: int,
+    n_timepoints: int,
+    n_voxels: int,
     S_std=1,
     latent_dim=None,
     T_mean=0,
@@ -56,61 +56,61 @@ def generate_dummy_signal(
         Tuning matrices.
     """
     if latent_dim is None:
-        latent_dim = n_t
+        latent_dim = n_timepoints

     rng = np.random.RandomState(seed=seed)

     if generative_method == "custom":
-        sigma = n_s * np.arange(1, latent_dim + 1)
+        sigma = n_subjects * np.arange(1, latent_dim + 1)
         # np.random.shuffle(sigma)
         # Generate common signal matrix
-        S_train = S_std * np.random.randn(n_t, latent_dim)
+        S_train = S_std * np.random.randn(n_timepoints, latent_dim)
         # Normalize each row to have unit norm
         S_train = S_train / np.linalg.norm(S_train, axis=0, keepdims=True)
         S_train = S_train @ np.diag(sigma)
-        S_test = S_std * np.random.randn(n_t, latent_dim)
+        S_test = S_std * np.random.randn(n_timepoints, latent_dim)
         S_test = S_test / np.linalg.norm(S_test, axis=0, keepdims=True)
         S_test = S_test @ np.diag(sigma)

     elif generative_method == "fastsrm":
         Sigma = rng.dirichlet(np.ones(latent_dim), 1).flatten()
-        S_train = np.sqrt(Sigma)[:, None] * rng.randn(n_t, latent_dim)
-        S_test = np.sqrt(Sigma)[:, None] * rng.randn(n_t, latent_dim)
+        S_train = np.sqrt(Sigma)[:, None] * rng.randn(n_timepoints, latent_dim)
+        S_test = np.sqrt(Sigma)[:, None] * rng.randn(n_timepoints, latent_dim)

     elif generative_method == "multiviewica":
-        S_train = np.random.laplace(size=(n_t, latent_dim))
-        S_test = np.random.laplace(size=(n_t, latent_dim))
+        S_train = np.random.laplace(size=(n_timepoints, latent_dim))
+        S_test = np.random.laplace(size=(n_timepoints, latent_dim))

     else:
         raise ValueError("Unknown generative method")

     # Generate indiivdual spatial components
     data_train, data_test = [], []
     Ts = []
-    for _ in range(n_s):
+    for _ in range(n_subjects):
         if generative_method == "custom" or generative_method == "multiviewica":
-            W = T_mean + T_std * np.random.randn(latent_dim, n_v)
+            W = T_mean + T_std * np.random.randn(latent_dim, n_voxels)
         else:
-            W = projection(rng.randn(latent_dim, n_v))
+            W = projection(rng.randn(latent_dim, n_voxels))

         Ts.append(W)
         X_train = S_train @ W
-        N = np.random.randn(n_t, n_v)
-        N = (
-            N
+        noise = np.random.randn(n_timepoints, n_voxels)
+        noise = (
+            noise
             * np.linalg.norm(X_train)
-            / (SNR * np.linalg.norm(N, axis=0, keepdims=True))
+            / (SNR * np.linalg.norm(noise, axis=0, keepdims=True))
         )
-        X_train += N
+        X_train += noise
         data_train.append(X_train)
         X_test = S_test @ W
-        N = np.random.randn(n_t, n_v)
-        N = (
-            N
+        noise = np.random.randn(n_timepoints, n_voxels)
+        noise = (
+            noise
             * np.linalg.norm(X_test)
-            / (SNR * np.linalg.norm(N, axis=0, keepdims=True))
+            / (SNR * np.linalg.norm(noise, axis=0, keepdims=True))
         )
-        X_test += N
+        X_test += noise
         data_test.append(X_test)

     data_train = np.array(data_train)
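The renamed noise block enforces a fixed signal-to-noise ratio: each noise column is rescaled so its norm equals the signal's Frobenius norm divided by SNR. A standalone sketch of that rule:

import numpy as np

def add_noise(X, SNR, rng=np.random):
    # X: (n_timepoints, n_voxels) clean signal. Each noise column is
    # rescaled so its norm is ||X||_F / SNR, as in the diff above.
    noise = rng.randn(*X.shape)
    noise *= np.linalg.norm(X) / (SNR * np.linalg.norm(noise, axis=0, keepdims=True))
    return X + noise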