Commit: fix typo
FabianIsensee committed Sep 18, 2023
1 parent b4e97fe commit 913705f
Showing 5 changed files with 17 additions and 17 deletions.
12 changes: 6 additions & 6 deletions nnunetv2/inference/data_iterators.py
@@ -28,7 +28,7 @@ def preprocess_fromfiles_save_to_queue(list_of_lists: List[List[str]],
label_manager = plans_manager.get_label_manager(dataset_json)
preprocessor = configuration_manager.preprocessor_class(verbose=verbose)
for idx in range(len(list_of_lists)):
- data, seg, data_properites = preprocessor.run_case(list_of_lists[idx],
+ data, seg, data_properties = preprocessor.run_case(list_of_lists[idx],
list_of_segs_from_prev_stage_files[
idx] if list_of_segs_from_prev_stage_files is not None else None,
plans_manager,
@@ -40,7 +40,7 @@ def preprocess_fromfiles_save_to_queue(list_of_lists: List[List[str]],

data = torch.from_numpy(data).contiguous().float()

- item = {'data': data, 'data_properites': data_properites,
+ item = {'data': data, 'data_properties': data_properties,
'ofile': output_filenames_truncated[idx] if output_filenames_truncated is not None else None}
success = False
while not success:
@@ -150,7 +150,7 @@ def generate_train_batch(self):
# if we have a segmentation from the previous stage we have to process it together with the images so that we
# can crop it appropriately (if needed). Otherwise it would just be resized to the shape of the data after
# preprocessing and then there might be misalignments
- data, seg, data_properites = self.preprocessor.run_case(files, seg_prev_stage, self.plans_manager,
+ data, seg, data_properties = self.preprocessor.run_case(files, seg_prev_stage, self.plans_manager,
self.configuration_manager,
self.dataset_json)
if seg_prev_stage is not None:
@@ -159,7 +159,7 @@ def generate_train_batch(self):

data = torch.from_numpy(data)

- return {'data': data, 'data_properites': data_properites, 'ofile': ofile}
+ return {'data': data, 'data_properties': data_properties, 'ofile': ofile}


class PreprocessAdapterFromNpy(DataLoader):
@@ -207,7 +207,7 @@ def generate_train_batch(self):

data = torch.from_numpy(data)

- return {'data': data, 'data_properites': props, 'ofile': ofname}
+ return {'data': data, 'data_properties': props, 'ofile': ofname}


def preprocess_fromnpy_save_to_queue(list_of_images: List[np.ndarray],
@@ -238,7 +238,7 @@ def preprocess_fromnpy_save_to_queue(list_of_images: List[np.ndarray],

data = torch.from_numpy(data).contiguous().float()

- item = {'data': data, 'data_properites': list_of_image_properties[idx],
+ item = {'data': data, 'data_properties': list_of_image_properties[idx],
'ofile': truncated_ofnames[idx] if truncated_ofnames is not None else None}
success = False
while not success:
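For context, the key being renamed above is part of the item contract the preprocessing iterators hand to the predictor: each element is a plain dict with 'data', 'data_properties' and 'ofile'. Below is a minimal, self-contained sketch of that structure; the array shape and the properties dict are made-up placeholders, not values produced by nnU-Net.

```python
import numpy as np
import torch

# Placeholder inputs, purely illustrative: a single-channel 3D volume and a
# hypothetical properties dict of the kind an image reader would return.
dummy_data = np.zeros((1, 32, 32, 32), dtype=np.float32)
dummy_properties = {'spacing': [1.0, 1.0, 1.0]}

# The contract after this commit: the key is 'data_properties' (not 'data_properites').
item = {
    'data': torch.from_numpy(dummy_data).contiguous().float(),  # network input tensor
    'data_properties': dummy_properties,                         # metadata needed later to export the prediction
    'ofile': None,                                                # None -> result is returned instead of written
}
```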
4 changes: 2 additions & 2 deletions nnunetv2/inference/examples.py
@@ -80,7 +80,7 @@
img4, props4 = SimpleITKIO().read_images([join(nnUNet_raw, 'Dataset003_Liver/imagesTs/liver_144_0000.nii.gz')])


- # each element returned by data_iterator must be a dict with 'data', 'ofile' and 'data_properites' keys!
+ # each element returned by data_iterator must be a dict with 'data', 'ofile' and 'data_properties' keys!
# If 'ofile' is None, the result will be returned instead of written to a file
# the iterator is responsible for performing the correct preprocessing!
# note how the iterator here does not use multiprocessing -> preprocessing will be done in the main thread!
@@ -95,7 +95,7 @@ def my_iterator(list_of_input_arrs, list_of_input_props):
predictor.plans_manager,
predictor.configuration_manager,
predictor.dataset_json)
- yield {'data': torch.from_numpy(data).contiguous().pin_memory(), 'data_properites': p, 'ofile': None}
+ yield {'data': torch.from_numpy(data).contiguous().pin_memory(), 'data_properties': p, 'ofile': None}


ret = predictor.predict_from_data_iterator(my_iterator([img, img2, img3, img4], [props, props2, props3, props4]),
8 changes: 4 additions & 4 deletions nnunetv2/inference/predict_from_raw_data.py
@@ -333,7 +333,7 @@ def predict_from_data_iterator(self,
save_probabilities: bool = False,
num_processes_segmentation_export: int = default_num_processes):
"""
- each element returned by data_iterator must be a dict with 'data', 'ofile' and 'data_properites' keys!
+ each element returned by data_iterator must be a dict with 'data', 'ofile' and 'data_properties' keys!
If 'ofile' is None, the result will be returned instead of written to a file
"""
with multiprocessing.get_context("spawn").Pool(num_processes_segmentation_export) as export_pool:
@@ -354,7 +354,7 @@ def predict_from_data_iterator(self,

print(f'perform_everything_on_gpu: {self.perform_everything_on_gpu}')

- properties = preprocessed['data_properites']
+ properties = preprocessed['data_properties']

# let's not get into a runaway situation where the GPU predicts so fast that the disk has to b swamped with
# npy files
@@ -430,14 +430,14 @@ def predict_single_npy_array(self, input_image: np.ndarray, image_properties: di
if self.verbose:
print('resampling to original shape')
if output_file_truncated is not None:
- export_prediction_from_logits(predicted_logits, dct['data_properites'], self.configuration_manager,
+ export_prediction_from_logits(predicted_logits, dct['data_properties'], self.configuration_manager,
self.plans_manager, self.dataset_json, output_file_truncated,
save_or_return_probabilities)
else:
ret = convert_predicted_logits_to_segmentation_with_correct_shape(predicted_logits, self.plans_manager,
self.configuration_manager,
self.label_manager,
- dct['data_properites'],
+ dct['data_properties'],
return_probabilities=
save_or_return_probabilities)
if save_or_return_probabilities:
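As the hunk above shows, predict_from_data_iterator now looks up preprocessed['data_properties'], so a custom iterator that still yields the old misspelled key would fail with a KeyError. Code that has to work on both sides of this rename could read the properties defensively; the helper below is a hedged sketch of such a fallback and is not part of the repository.

```python
def get_properties(preprocessed: dict) -> dict:
    """Return the image properties from an inference item, tolerating the pre-commit spelling."""
    if 'data_properties' in preprocessed:
        return preprocessed['data_properties']
    # Fallback for items produced by code written against versions before this typo fix.
    return preprocessed['data_properites']


# Example with an old-style item (placeholder values only).
old_item = {'data': None, 'data_properites': {'spacing': [1.0, 1.0, 1.0]}, 'ofile': None}
print(get_properties(old_item))
```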
4 changes: 2 additions & 2 deletions nnunetv2/inference/readme.md
@@ -184,7 +184,7 @@ cons:
img2, props2 = SimpleITKIO().read_images([join(nnUNet_raw, 'Dataset003_Liver/imagesTs/liver_146_0000.nii.gz')])
img3, props3 = SimpleITKIO().read_images([join(nnUNet_raw, 'Dataset003_Liver/imagesTs/liver_145_0000.nii.gz')])
img4, props4 = SimpleITKIO().read_images([join(nnUNet_raw, 'Dataset003_Liver/imagesTs/liver_144_0000.nii.gz')])
- # each element returned by data_iterator must be a dict with 'data', 'ofile' and 'data_properites' keys!
+ # each element returned by data_iterator must be a dict with 'data', 'ofile' and 'data_properties' keys!
# If 'ofile' is None, the result will be returned instead of written to a file
# the iterator is responsible for performing the correct preprocessing!
# note how the iterator here does not use multiprocessing -> preprocessing will be done in the main thread!
@@ -199,7 +199,7 @@ cons:
predictor.plans_manager,
predictor.configuration_manager,
predictor.dataset_json)
- yield {'data': torch.from_numpy(data).contiguous().pin_memory(), 'data_properites': p, 'ofile': None}
+ yield {'data': torch.from_numpy(data).contiguous().pin_memory(), 'data_properties': p, 'ofile': None}
ret = predictor.predict_from_data_iterator(my_iterator([img, img2, img3, img4], [props, props2, props3, props4]),
save_probabilities=False, num_processes_segmentation_export=3)
```
6 changes: 3 additions & 3 deletions nnunetv2/preprocessing/preprocessors/default_preprocessor.py
@@ -128,17 +128,17 @@ def run_case(self, image_files: List[str], seg_file: Union[str, None], plans_man
rw = plans_manager.image_reader_writer_class()

# load image(s)
- data, data_properites = rw.read_images(image_files)
+ data, data_properties = rw.read_images(image_files)

# if possible, load seg
if seg_file is not None:
seg, _ = rw.read_seg(seg_file)
else:
seg = None

- data, seg = self.run_case_npy(data, seg, data_properites, plans_manager, configuration_manager,
+ data, seg = self.run_case_npy(data, seg, data_properties, plans_manager, configuration_manager,
dataset_json)
- return data, seg, data_properites
+ return data, seg, data_properties

def run_case_save(self, output_filename_truncated: str, image_files: List[str], seg_file: str,
plans_manager: PlansManager, configuration_manager: ConfigurationManager,
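Since the commit's whole purpose is replacing the misspelled key and variable name across the five files above, a quick local check that no occurrence of the old spelling remains can be useful. The snippet below is only an illustration and assumes it is run from a checkout root containing the nnunetv2 package; it is not part of the repository.

```python
from pathlib import Path

# Scan Python and Markdown sources for the pre-commit spelling.
old, new = 'data_properites', 'data_properties'
hits = []
for pattern in ('*.py', '*.md'):
    for path in Path('nnunetv2').rglob(pattern):
        for lineno, line in enumerate(path.read_text(encoding='utf-8').splitlines(), start=1):
            if old in line:
                hits.append((str(path), lineno))

print(hits if hits else f'no remaining occurrences of {old!r}; everything uses {new!r}')
```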
