From fba12f4be7d4ac9ed1df8dd57ff16d5e85d1a9d3 Mon Sep 17 00:00:00 2001 From: akshitt Date: Wed, 4 May 2022 15:05:41 +0530 Subject: [PATCH 1/2] OOD files added --- .../custom_dataset-checkpoint.py | 650 ++++++++++++++++++ .../custom_dataset_medmnist-checkpoint.py | 326 +++++++++ .../dermamnist-checkpoint.py | 204 ++++++ .../.ipynb_checkpoints/medmnist-checkpoint.py | 146 ++++ .../organmnist-checkpoint.py | 139 ++++ .../organmnist3-checkpoint.py | 196 ++++++ .../pneumoniamnist-checkpoint.py | 158 +++++ trust/utils/dermamnist.py | 286 ++++++++ trust/utils/organmnist.py | 132 ++++ trust/utils/organmnist3.py | 196 ++++++ trust/utils/pneumoniamnist.py | 158 +++++ 11 files changed, 2591 insertions(+) create mode 100644 trust/utils/.ipynb_checkpoints/custom_dataset-checkpoint.py create mode 100644 trust/utils/.ipynb_checkpoints/custom_dataset_medmnist-checkpoint.py create mode 100644 trust/utils/.ipynb_checkpoints/dermamnist-checkpoint.py create mode 100644 trust/utils/.ipynb_checkpoints/medmnist-checkpoint.py create mode 100644 trust/utils/.ipynb_checkpoints/organmnist-checkpoint.py create mode 100644 trust/utils/.ipynb_checkpoints/organmnist3-checkpoint.py create mode 100644 trust/utils/.ipynb_checkpoints/pneumoniamnist-checkpoint.py create mode 100644 trust/utils/dermamnist.py create mode 100644 trust/utils/organmnist.py create mode 100644 trust/utils/organmnist3.py create mode 100644 trust/utils/pneumoniamnist.py diff --git a/trust/utils/.ipynb_checkpoints/custom_dataset-checkpoint.py b/trust/utils/.ipynb_checkpoints/custom_dataset-checkpoint.py new file mode 100644 index 0000000..a920b70 --- /dev/null +++ b/trust/utils/.ipynb_checkpoints/custom_dataset-checkpoint.py @@ -0,0 +1,650 @@ +import numpy as np +import os +import torch +import torchvision +from sklearn import datasets +from torch.utils.data import Dataset +from torchvision import datasets, transforms +import PIL.Image as Image +from .utils import * +np.random.seed(42) +torch.manual_seed(42) + +class DataHandler_MNIST(Dataset): + """ + Data Handler to load MNIST dataset. 
+ This class extends :class:`torch.utils.data.Dataset` to handle + loading data even without labels + + Parameters + ---------- + X: numpy array + Data to be loaded + y: numpy array, optional + Labels to be loaded (default: None) + select: bool + True if loading data without labels, False otherwise + """ + + def __init__(self, X, Y=None, select=True, use_test_transform=False): + """ + Constructor + """ + self.select = select + self.use_test_transform=use_test_transform + self.training_gen_transform = transforms.Compose([transforms.Resize((32, 32)), transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) + self.test_gen_transform = transforms.Compose([transforms.Resize((32, 32)), transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) + if not self.select: + self.X = X + self.targets = Y + else: + self.X = X + + def __getitem__(self, index): + if not self.select: + x, y = self.X[index], self.targets[index] + x = Image.fromarray(x) + if self.use_test_transform: + x = self.test_gen_transform(x) + else: + x = self.training_gen_transform(x) + if(x.shape[0]==1): x = torch.repeat_interleave(x, 3, 0) + y=y.long() + return (x, y.long()) + + else: + x = self.X[index] + x = Image.fromarray(x) + if self.use_test_transform: + x = self.test_gen_transform(x) + else: + x = self.training_gen_transform(x) + if(x.shape[0]==1): x = torch.repeat_interleave(x, 3, 0) + return x + + def __len__(self): + return len(self.X) + +class DataHandler_CIFAR10(Dataset): + """ + Data Handler to load CIFAR10 dataset. + This class extends :class:`torch.utils.data.Dataset` to handle + loading data even without labels + + Parameters + ---------- + X: numpy array + Data to be loaded + y: numpy array, optional + Labels to be loaded (default: None) + select: bool + True if loading data without labels, False otherwise + """ + + def __init__(self, X, Y=None, select=True, use_test_transform = False): + """ + Constructor + """ + self.select = select + if(use_test_transform): + transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))]) + else: + transform = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))]) + if not self.select: + self.X = X + self.targets = Y + self.transform = transform + else: + self.X = X + self.transform = transform + + def __getitem__(self, index): + if not self.select: + x, y = self.X[index], self.targets[index] + x = Image.fromarray(x) + x = self.transform(x) + return (x, y) + + else: + x = self.X[index] + x = Image.fromarray(x) + x = self.transform(x) + return x + + def __len__(self): + return len(self.X) + +class DataHandler_SVHN(Dataset): + """ + Data Handler to load SVHN dataset. 
+ This class extends :class:`torch.utils.data.Dataset` to handle + loading data even without labels + + Parameters + ---------- + X: numpy array + Data to be loaded + y: numpy array, optional + Labels to be loaded (default: None) + select: bool + True if loading data without labels, False otherwise + """ + + def __init__(self, X, Y=None, select=True, use_test_transform=False): + """ + Constructor + """ + self.select = select + self.use_test_transform=use_test_transform + self.training_gen_transform = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) + self.test_gen_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) # ImageNet mean/std + if not self.select: + self.X = X + self.targets = Y + else: + self.X = X + + def __getitem__(self, index): + if not self.select: + x, y = self.X[index], self.targets[index] + x = Image.fromarray(np.transpose(x, (1, 2, 0))) + if self.use_test_transform: + x = self.test_gen_transform(x) + else: + x = self.training_gen_transform(x) + return (x, y) + + else: + x = self.X[index] + x = Image.fromarray(np.transpose(x, (1, 2, 0))) + if self.use_test_transform: + x = self.test_gen_transform(x) + else: + x = self.training_gen_transform(x) + return x + + def __len__(self): + return len(self.X) + +class DataHandler_UTKFace(Dataset): + """ + Data Handler to load UTKFace dataset. + This class extends :class:`torch.utils.data.Dataset` to handle + loading data even without labels + + Parameters + ---------- + X: numpy array + Data to be loaded + y: numpy array, optional + Labels to be loaded (default: None) + select: bool + True if loading data without labels, False otherwise + """ + def __init__(self, X, Y=None, select=True, use_test_transform = False): + """ + Constructor + """ + self.select = select + if(use_test_transform): + transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) # ImageNet mean/std + else: + transform = transforms.Compose([transforms.RandomCrop(200, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) # ImageNet mean/std + if not self.select: + self.X = X + self.targets = Y + self.transform = transform + else: + self.X = X + self.transform = transform + + def __getitem__(self, index): + if not self.select: + x, y = self.X[index], self.targets[index] + x = Image.fromarray(np.transpose(x, (1,2,0))) + x = self.transform(x) + return (x, y) + + else: + x = self.X[index] + x = Image.fromarray(x) + x = self.transform(x) + return x + + def __len__(self): + return len(self.X) + +class DuplicateChannels(object): + """ + Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor. + Converts a PIL Image or numpy.ndarray (H x W x C) in the range + [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0] + if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1) + or if the numpy.ndarray has dtype = np.uint8 + In the other cases, tensors are returned without scaling. + """ + + def __call__(self, pic): + """ + Args: + pic (PIL Image or numpy.ndarray): Image to be converted to tensor. + Returns: + Tensor: Converted image. 
+ """ + return torch.repeat_interleave(pic.unsqueeze(1), 3, 1).float() + + def __repr__(self): + return self.__class__.__name__ + '()' + +def getOODtargets(targets, sel_cls_idx, ood_cls_id): + + ood_targets = [] + targets_list = list(targets) + for i in range(len(targets_list)): + if(targets_list[i] in list(sel_cls_idx)): + ood_targets.append(targets_list[i]) + else: + ood_targets.append(ood_cls_id) + print("num ood samples: ", ood_targets.count(ood_cls_id)) + return torch.Tensor(ood_targets) + +def create_ood_data(dset_name, fullset, testset, split_cfg, num_cls, augVal): + + np.random.seed(42) + train_idx = [] + val_idx = [] + lake_idx = [] + test_idx = [] + selected_classes = np.array(list(range(split_cfg['num_cls_idc']))) + for i in range(num_cls): #all_classes + if(dset_name=="mnist"): + full_idx_class = list(torch.where(torch.Tensor(fullset.targets.float()) == i)[0].cpu().numpy()) + else: + full_idx_class = list(torch.where(torch.Tensor(fullset.targets) == i)[0].cpu().numpy()) + if(i in selected_classes): + if(dset_name=="mnist"): + test_idx_class = list(torch.where(torch.Tensor(testset.targets.float()) == i)[0].cpu().numpy()) + else: + test_idx_class = list(torch.where(torch.Tensor(testset.targets) == i)[0].cpu().numpy()) + test_idx += test_idx_class + class_train_idx = list(np.random.choice(np.array(full_idx_class), size=split_cfg['per_idc_train'], replace=False)) + train_idx += class_train_idx + remain_idx = list(set(full_idx_class) - set(class_train_idx)) + class_val_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_idc_val'], replace=False)) + remain_idx = list(set(remain_idx) - set(class_val_idx)) + class_lake_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_idc_lake'], replace=False)) + else: + class_train_idx = list(np.random.choice(np.array(full_idx_class), size=split_cfg['per_ood_train'], replace=False)) #always 0 + remain_idx = list(set(full_idx_class) - set(class_train_idx)) + class_val_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_ood_val'], replace=False)) #Only for CG ood val has samples + remain_idx = list(set(remain_idx) - set(class_val_idx)) + class_lake_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_ood_lake'], replace=False)) #many ood samples in lake + + if(augVal and (i in selected_classes)): #augment with samples only from the imbalanced classes + train_idx += class_val_idx + val_idx += class_val_idx + lake_idx += class_lake_idx + if(dset_name=="mnist"): + train_set = SubsetWithTargetsSingleChannel(fullset, train_idx, torch.Tensor(fullset.targets.float())[train_idx]) + val_set = SubsetWithTargetsSingleChannel(fullset, val_idx, torch.Tensor(fullset.targets.float())[val_idx]) + lake_set = SubsetWithTargetsSingleChannel(fullset, lake_idx, getOODtargets(torch.Tensor(fullset.targets.float())[lake_idx], selected_classes, split_cfg['num_cls_idc'])) + test_set = SubsetWithTargetsSingleChannel(testset, test_idx, torch.Tensor(testset.targets.float())[test_idx]) + else: + train_set = SubsetWithTargets(fullset, train_idx, torch.Tensor(fullset.targets)[train_idx]) + val_set = SubsetWithTargets(fullset, val_idx, torch.Tensor(fullset.targets)[val_idx]) + lake_set = SubsetWithTargets(fullset, lake_idx, getOODtargets(torch.Tensor(fullset.targets)[lake_idx], selected_classes, split_cfg['num_cls_idc'])) + test_set = SubsetWithTargets(testset, test_idx, torch.Tensor(testset.targets)[test_idx]) + + return train_set, val_set, test_set, lake_set, selected_classes + +def create_class_imb(dset_name, 
fullset, split_cfg, num_cls, augVal): + np.random.seed(42) + train_idx = [] + val_idx = [] + lake_idx = [] + if(dset_name=="mnist"): selected_classes=np.array([5,8]) + else: selected_classes = np.random.choice(np.arange(num_cls), size=split_cfg['num_cls_imbalance'], replace=False) #classes to imbalance + for i in range(num_cls): #all_classes + if(dset_name=="mnist"): + full_idx_class = list(torch.where(torch.Tensor(fullset.targets.float()) == i)[0].cpu().numpy()) + elif(dset_name=="svhn"): + full_idx_class = list(torch.where(torch.Tensor(fullset.labels) == i)[0].cpu().numpy()) + else: + full_idx_class = list(torch.where(torch.Tensor(fullset.targets) == i)[0].cpu().numpy()) + if(i in selected_classes): + class_train_idx = list(np.random.choice(np.array(full_idx_class), size=split_cfg['per_imbclass_train'], replace=False)) + remain_idx = list(set(full_idx_class) - set(class_train_idx)) + class_val_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_imbclass_val'], replace=False)) + remain_idx = list(set(remain_idx) - set(class_val_idx)) + class_lake_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_imbclass_lake'], replace=False)) + else: + class_train_idx = list(np.random.choice(np.array(full_idx_class), size=split_cfg['per_class_train'], replace=False)) + remain_idx = list(set(full_idx_class) - set(class_train_idx)) + class_val_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_class_val'], replace=False)) + remain_idx = list(set(remain_idx) - set(class_val_idx)) + class_lake_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_class_lake'], replace=False)) + + train_idx += class_train_idx + if(augVal and (i in selected_classes)): #augment with samples only from the imbalanced classes + train_idx += class_val_idx + val_idx += class_val_idx + lake_idx += class_lake_idx + if(dset_name=="mnist"): + train_set = SubsetWithTargetsSingleChannel(fullset, train_idx, torch.Tensor(fullset.targets.float())[train_idx]) + val_set = SubsetWithTargetsSingleChannel(fullset, val_idx, torch.Tensor(fullset.targets.float())[val_idx]) + lake_set = SubsetWithTargetsSingleChannel(fullset, lake_idx, torch.Tensor(fullset.targets.float())[lake_idx]) + elif(dset_name=="svhn"): + train_set = SubsetWithTargets(fullset, train_idx, torch.Tensor(fullset.labels)[train_idx]) + val_set = SubsetWithTargets(fullset, val_idx, torch.Tensor(fullset.labels)[val_idx]) + lake_set = SubsetWithTargets(fullset, lake_idx, torch.Tensor(fullset.labels)[lake_idx]) + else: + train_set = SubsetWithTargets(fullset, train_idx, torch.Tensor(fullset.targets)[train_idx]) + val_set = SubsetWithTargets(fullset, val_idx, torch.Tensor(fullset.targets)[val_idx]) + lake_set = SubsetWithTargets(fullset, lake_idx, torch.Tensor(fullset.targets)[lake_idx]) + + return train_set, val_set, lake_set, selected_classes + +def getDuplicateData(dset_name, fullset, split_cfg): + num_rep=split_cfg['num_rep'] + if(dset_name=="mnist"): + # X = np.resize(fullset.data.float().cpu().numpy(), (len(fullset),32,32)) + X = fullset.data.numpy() + y = torch.from_numpy(np.array(fullset.targets.float())) + elif(dset_name == "svhn"): + X = fullset.data + y = torch.from_numpy(np.array(fullset.labels)) + else: + X = fullset.data + y = torch.from_numpy(np.array(fullset.targets)) + X_tr = X[:split_cfg['train_size']] + y_tr = y[:split_cfg['train_size']] + X_unlabeled = X[split_cfg['train_size']:len(X)-split_cfg['val_size']] + y_unlabeled = y[split_cfg['train_size']:len(X)-split_cfg['val_size']] + X_val = 
X[len(X)-split_cfg['val_size']:] + y_val = y[len(X)-split_cfg['val_size']:] + X_unlabeled_rep = np.repeat(X_unlabeled[:split_cfg['lake_subset_repeat_size']], num_rep, axis=0) + y_unlabeled_rep = np.repeat(y_unlabeled[:split_cfg['lake_subset_repeat_size']], num_rep, axis=0) + assert((X_unlabeled_rep[0]==X_unlabeled_rep[num_rep-1]).all()) + assert((y_unlabeled_rep[0]==y_unlabeled_rep[num_rep-1]).all()) + X_unlabeled_rep = np.concatenate((X_unlabeled_rep, X_unlabeled[split_cfg['lake_subset_repeat_size']:split_cfg['lake_size']]), axis=0) + y_unlabeled_rep = torch.from_numpy(np.concatenate((y_unlabeled_rep, y_unlabeled[split_cfg['lake_subset_repeat_size']:split_cfg['lake_size']]), axis=0)) + if(dset_name=="mnist"): + train_set = DataHandler_MNIST(X_tr, y_tr, False) + lake_set = DataHandler_MNIST(X_unlabeled_rep, y_unlabeled_rep, False) + val_set = DataHandler_MNIST(X_val, y_val, False) + elif(dset_name=="svhn"): + train_set = DataHandler_SVHN(X_tr, y_tr, False) + lake_set = DataHandler_SVHN(X_unlabeled_rep, y_unlabeled_rep, False) + val_set = DataHandler_SVHN(X_val, y_val, False) + else: + train_set = DataHandler_CIFAR10(X_tr, y_tr, False) + lake_set = DataHandler_CIFAR10(X_unlabeled_rep, y_unlabeled_rep, False) + val_set = DataHandler_CIFAR10(X_val, y_val, False) + return X_tr, y_tr, X_val, y_val, X_unlabeled_rep, y_unlabeled_rep, train_set, val_set, lake_set + +def getVanillaData(dset_name, fullset, split_cfg): + if(dset_name=="mnist"): + # X = np.resize(fullset.data.float().cpu().numpy(), (len(fullset),32,32)) + X = fullset.data.numpy() + y = torch.from_numpy(np.array(fullset.targets.float())) + else: + X = fullset.data + y = torch.from_numpy(np.array(fullset.targets)) + X_tr = X[:split_cfg['train_size']] + y_tr = y[:split_cfg['train_size']] + X_unlabeled = X[split_cfg['train_size']:len(X)-split_cfg['val_size']] + y_unlabeled = y[split_cfg['train_size']:len(X)-split_cfg['val_size']] + X_val = X[len(X)-split_cfg['val_size']:] + y_val = y[len(X)-split_cfg['val_size']:] + if(dset_name=="mnist"): + train_set = DataHandler_MNIST(X_tr, y_tr, False) + lake_set = DataHandler_MNIST(X_unlabeled, y_unlabeled, False) + val_set = DataHandler_MNIST(X_val, y_val, False) + else: + train_set = DataHandler_CIFAR10(X_tr, y_tr, False) + lake_set = DataHandler_CIFAR10(X_unlabeled, y_unlabeled, False) + val_set = DataHandler_CIFAR10(X_val, y_val, False) + return X_tr, y_tr, X_val, y_val, X_unlabeled[:split_cfg['lake_size']], y_unlabeled[:split_cfg['lake_size']], train_set, val_set, lake_set + +def create_perclass_imb(dset_name, fullset, split_cfg, num_cls, augVal): + np.random.seed(42) + train_idx = [] + val_idx = [] + lake_idx = [] + selected_classes=split_cfg['sel_cls_idx'] + for i in range(num_cls): #all_classes + full_idx_class = list(torch.where(torch.Tensor(fullset.targets) == i)[0].cpu().numpy()) + class_train_idx = list(np.random.choice(np.array(full_idx_class), size=split_cfg['per_class_train'][i], replace=False)) + remain_idx = list(set(full_idx_class) - set(class_train_idx)) + class_val_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_class_val'][i], replace=False)) + remain_idx = list(set(remain_idx) - set(class_val_idx)) + class_lake_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_class_lake'][i], replace=False)) + + train_idx += class_train_idx + if(augVal and (i in selected_classes)): #augment with samples only from the imbalanced classes + train_idx += class_val_idx + val_idx += class_val_idx + lake_idx += class_lake_idx + train_set = 
SubsetWithTargets(fullset, train_idx, torch.Tensor(fullset.targets)[train_idx]) + val_set = SubsetWithTargets(fullset, val_idx, torch.Tensor(fullset.targets)[val_idx]) + lake_set = SubsetWithTargets(fullset, lake_idx, torch.Tensor(fullset.targets)[lake_idx]) + + return train_set, val_set, lake_set, selected_classes + +def load_dataset_custom(datadir, dset_name, feature, split_cfg, augVal=False, dataAug=True): + """ + Loads a common dataset with additional options to create class imbalances, out-of-distribution classes, and redundancies. + + Parameters + ---------- + datadir : string + The root directory in which the data is stored (or should be downloaded) + dset_name : string + The name of the dataset. This should be one of 'cifar10', 'mnist', 'svhn', 'cifar100', 'breast-density'. + feature : string + The modification that should be applied to the dataset. This should be one of 'classimb', 'ood', 'duplicate', 'vanilla' + split_cfg : dict + Contains information relating to the dataset splits that should be created. Some of the keys for this dictionary are as follows: + 'per_imbclass_train': int + The number of examples in the train set for each imbalanced class (classimb) + 'per_imbclass_val': int + The number of examples in the validation set for each imbalanced class (classimb) + 'per_imbclass_lake': int + The number of examples in the lake set for each imbalanced class (classimb) + 'per_class_train': int + The number of examples in the train set for each balanced class (classimb) + 'per_class_val': int + The number of examples in the validation set for each balanced class (classimb) + 'per_class_lake': int + The number of examples in the lake set for each balanced class (classimb) + 'sel_cls_idx': list + A list of classes that are affected by class imbalance. (classimb) + 'train_size': int + The size of the train set (vanilla, duplicate) + 'val_size': int + The size of the validation set (vanilla, duplicate) + 'lake_size': int + The size of the lake set (vanilla, duplicate) + 'num_rep': int + The number of times to repeat a selection in the lake set (duplicate) + 'lake_subset_repeat_size': int + The size of the repeated selection in the lake set (duplicate) + 'num_cls_imbalance': int + The number of classes to randomly affect by class imbalance. (classimb) + 'num_cls_idc': int + The number of in-distribution classes to keep (ood) + 'per_idc_train': int + The number of in-distribution examples to keep in the train set per class (ood) + 'per_idc_val': int + The number of in-distribution examples to keep in the validation set per class (ood) + 'per_idc_lake': int + The number of in-distribution examples to keep in the lake set per class (ood) + 'per_ood_train': int + The number of OOD examples to keep in the train set per class (ood) + 'per_ood_val': int + The number of OOD examples to keep in the validation set per class (ood) + 'per_ood_lake': int + The number of OOD examples to keep in the lake set per class (ood) + augVal : bool, optional + If True, the train set will also contain affected classes from the validation set. The default is False. + dataAug : bool, optional + If True, the all but the test set will be affected by random cropping and random horizontal flip. The default is True. + + Returns + ------- + tuple + Returns a train set, validation set, test set, lake set, and number of classes. Amount of returned items depends on specific configuration. 
+ Each set is an instance of torch.utils.data.Dataset + """ + + if(not(os.path.exists(datadir))): + os.mkdir(datadir) + + if(dset_name=="cifar10"): + num_cls=10 + cifar_test_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))]) + if(dataAug): + cifar_transform = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))]) + else: + cifar_transform = cifar_test_transform + + fullset = torchvision.datasets.CIFAR10(root=datadir, train=True, download=True, transform=cifar_transform) + test_set = torchvision.datasets.CIFAR10(root=datadir, train=False, download=True, transform=cifar_test_transform) + if(feature=="classimb"): + if("sel_cls_idx" in split_cfg): + train_set, val_set, lake_set, imb_cls_idx = create_perclass_imb(dset_name, fullset, split_cfg, num_cls, augVal) + else: + train_set, val_set, lake_set, imb_cls_idx = create_class_imb(dset_name, fullset, split_cfg, num_cls, augVal) + print("CIFAR-10 Custom dataset stats: Train size: ", len(train_set), "Val size: ", len(val_set), "Lake size: ", len(lake_set)) + return train_set, val_set, test_set, lake_set, imb_cls_idx, num_cls + if(feature=="ood"): + train_set, val_set, test_set, lake_set, ood_cls_idx = create_ood_data(dset_name, fullset, test_set, split_cfg, num_cls, augVal) + print("CIFAR-10 Custom dataset stats: Train size: ", len(train_set), "Val size: ", len(val_set), "Lake size: ", len(lake_set), "Test set: ", len(test_set)) + return train_set, val_set, test_set, lake_set, ood_cls_idx, split_cfg['num_cls_idc'] + if(feature=="vanilla"): + X_tr, y_tr, X_val, y_val, X_unlabeled, y_unlabeled, train_set, val_set, lake_set = getVanillaData(dset_name, fullset, split_cfg) + print("CIFAR-10 Custom dataset stats: Train size: ", len(train_set), "Val size: ", len(val_set), "Lake size: ", len(lake_set)) + return train_set, val_set, test_set, lake_set, num_cls + + if(feature=="duplicate"): + X_tr, y_tr, X_val, y_val, X_unlabeled_rep, y_unlabeled_rep, train_set, val_set, lake_set = getDuplicateData(dset_name, fullset, split_cfg) + print("CIFAR-10 Custom dataset stats: Train size: ", len(train_set), "Val size: ", len(val_set), "Lake size: ", len(lake_set)) + return train_set, val_set, test_set, lake_set, num_cls + + if(dset_name=="mnist"): + num_cls=10 + mnist_test_transform = transforms.Compose([transforms.Resize((32, 32)), transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) + if(dataAug): + mnist_transform = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) + else: + mnist_transform = mnist_test_transform + fullset = torchvision.datasets.MNIST(root=datadir, train=True, download=True, transform=mnist_transform) + test_set = torchvision.datasets.MNIST(root=datadir, train=False, download=True, transform=mnist_test_transform) + # fullset.data = torch.repeat_interleave(fullset.data.unsqueeze(1), 3, 1).float() + if(feature=="classimb"): + if("sel_cls_idx" in split_cfg): + train_set, val_set, lake_set, imb_cls_idx = create_perclass_imb(dset_name, fullset, split_cfg, num_cls, augVal) + else: + train_set, val_set, lake_set, imb_cls_idx = create_class_imb(dset_name, fullset, split_cfg, num_cls, augVal) + print("MNIST Custom dataset stats: Train size: ", len(train_set), "Val size: ", len(val_set), "Lake size: ", len(lake_set)) + return train_set, val_set, test_set, 
lake_set, imb_cls_idx, num_cls + if(feature=="ood"): + train_set, val_set, test_set, lake_set, ood_cls_idx = create_ood_data(dset_name, fullset, test_set, split_cfg, num_cls, augVal) + print("MNIST Custom dataset stats: Train size: ", len(train_set), "Val size: ", len(val_set), "Lake size: ", len(lake_set), "Test set: ", len(test_set)) + return train_set, val_set, test_set, lake_set, ood_cls_idx, split_cfg['num_cls_idc'] + if(feature=="vanilla"): + X_tr, y_tr, X_val, y_val, X_unlabeled, y_unlabeled, train_set, val_set, lake_set = getVanillaData(dset_name, fullset, split_cfg) + print("MNIST Custom dataset stats: Train size: ", len(train_set), "Val size: ", len(val_set), "Lake size: ", len(lake_set)) + return train_set, val_set, test_set, lake_set, num_cls + + if(feature=="duplicate"): + X_tr, y_tr, X_val, y_val, X_unlabeled_rep, y_unlabeled_rep, train_set, val_set, lake_set = getDuplicateData(dset_name, fullset, split_cfg) + print("MNIST Custom dataset stats: Train size: ", len(train_set), "Val size: ", len(val_set), "Lake size: ", len(lake_set)) + return train_set, val_set, test_set, lake_set, num_cls + + if(dset_name=="svhn"): + num_cls=10 + SVHN_test_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) + if(dataAug): + SVHN_transform = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) + else: + SVHN_transform = SVHN_test_transform + + fullset = torchvision.datasets.SVHN(root=datadir, split="train", download=True, transform=SVHN_transform) + test_set = torchvision.datasets.SVHN(root=datadir, split="test", download=True, transform=SVHN_test_transform) + if(feature=="classimb"): + if("sel_cls_idx" in split_cfg): + train_set, val_set, lake_set, imb_cls_idx = create_perclass_imb(dset_name, fullset, split_cfg, num_cls, augVal) + else: + train_set, val_set, lake_set, imb_cls_idx = create_class_imb(dset_name, fullset, split_cfg, num_cls, augVal) + print("SVHN Custom dataset stats: Train size: ", len(train_set), "Val size: ", len(val_set), "Lake size: ", len(lake_set)) + return train_set, val_set, test_set, lake_set, imb_cls_idx, num_cls + if(feature=="ood"): + train_set, val_set, test_set, lake_set, ood_cls_idx = create_ood_data(dset_name, fullset, test_set, split_cfg, num_cls, augVal) + print("SVHN Custom dataset stats: Train size: ", len(train_set), "Val size: ", len(val_set), "Lake size: ", len(lake_set), "Test set: ", len(test_set)) + return train_set, val_set, test_set, lake_set, ood_cls_idx, split_cfg['num_cls_idc'] + if(feature=="vanilla"): + X_tr, y_tr, X_val, y_val, X_unlabeled, y_unlabeled, train_set, val_set, lake_set = getVanillaData(dset_name, fullset, split_cfg) + print("SVHN Custom dataset stats: Train size: ", len(train_set), "Val size: ", len(val_set), "Lake size: ", len(lake_set)) + return train_set, val_set, test_set, lake_set, num_cls + + if(feature=="duplicate"): + X_tr, y_tr, X_val, y_val, X_unlabeled_rep, y_unlabeled_rep, train_set, val_set, lake_set = getDuplicateData(dset_name, fullset, split_cfg) + print("SVHN Custom dataset stats: Train size: ", len(train_set), "Val size: ", len(val_set), "Lake size: ", len(lake_set)) + return train_set, val_set, test_set, lake_set, num_cls + + if(dset_name=="cifar100"): + num_cls=100 + cifar100_transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) + ]) + fullset = 
torchvision.datasets.CIFAR100(root=datadir, train=True, download=True, transform=cifar100_transform) + test_set = torchvision.datasets.CIFAR100(root=datadir, train=False, download=True, transform=cifar100_transform) + if(feature=="classimb"): + if("sel_cls_idx" in split_cfg): + train_set, val_set, lake_set, imb_cls_idx = create_perclass_imb(dset_name, fullset, split_cfg, num_cls, augVal) + else: + train_set, val_set, lake_set, imb_cls_idx = create_class_imb(dset_name, fullset, split_cfg, num_cls, augVal) + print("CIFAR-100 Custom dataset stats: Train size: ", len(train_set), "Val size: ", len(val_set), "Lake size: ", len(lake_set)) + return train_set, val_set, test_set, lake_set, imb_cls_idx, num_cls + if(feature=="ood"): + train_set, val_set, test_set, lake_set, ood_cls_idx = create_ood_data(dset_name, fullset, test_set, split_cfg, num_cls, augVal) + print("CIFAR-100 Custom dataset stats: Train size: ", len(train_set), "Val size: ", len(val_set), "Lake size: ", len(lake_set), "Test set: ", len(test_set)) + return train_set, val_set, test_set, lake_set, ood_cls_idx, num_cls + + if(feature=="vanilla"): + X_tr, y_tr, X_val, y_val, X_unlabeled, y_unlabeled, train_set, val_set, lake_set = getVanillaData(dset_name, fullset, split_cfg) + print("CIFAR-100 Custom dataset stats: Train size: ", len(train_set), "Val size: ", len(val_set), "Lake size: ", len(lake_set)) + return train_set, val_set, test_set, lake_set, num_cls + + if(feature=="duplicate"): + X_tr, y_tr, X_val, y_val, X_unlabeled_rep, y_unlabeled_rep, train_set, val_set, lake_set = getDuplicateData(dset_name, fullset, split_cfg) + print("CIFAR-100 Custom dataset stats: Train size: ", len(train_set), "Val size: ", len(val_set), "Lake size: ", len(lake_set)) + return train_set, val_set, test_set, lake_set, num_cls + + + if(dset_name=="breast_density"): + num_cls=4 + input_size=224 + data_transforms = { + 'train': transforms.Compose([ + transforms.RandomResizedCrop(input_size), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) + ]), + 'test': transforms.Compose([ + transforms.Resize(input_size), + transforms.CenterCrop(input_size), + transforms.ToTensor(), + transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) + ]), + } + + fullset = datasets.ImageFolder(os.path.join(datadir, 'train'), data_transforms['train']) + test_set = datasets.ImageFolder(os.path.join(datadir, 'test'), data_transforms['test']) + if(feature=="classimb"): + train_set, val_set, lake_set, imb_cls_idx = create_perclass_imb(dset_name, fullset, split_cfg, num_cls, augVal) + print("Breast-density Custom dataset stats: Train size: ", len(train_set), "Val size: ", len(val_set), "Lake size: ", len(lake_set)) + return train_set, val_set, test_set, lake_set, imb_cls_idx, num_cls \ No newline at end of file diff --git a/trust/utils/.ipynb_checkpoints/custom_dataset_medmnist-checkpoint.py b/trust/utils/.ipynb_checkpoints/custom_dataset_medmnist-checkpoint.py new file mode 100644 index 0000000..efe5a73 --- /dev/null +++ b/trust/utils/.ipynb_checkpoints/custom_dataset_medmnist-checkpoint.py @@ -0,0 +1,326 @@ +''' +Customized dataset loading code for medical datasets at http://medmnist.com/ +''' + +import numpy as np +import os +import torch +import torchvision +from sklearn import datasets +from sklearn.model_selection import train_test_split +from sklearn.preprocessing import StandardScaler +from torch.utils.data import Dataset, random_split +from torchvision import datasets, transforms 
+import PIL.Image as Image +from .utils import * +from .medmnist import PathMNIST, ChestMNIST, DermaMNIST, OCTMNIST, PneumoniaMNIST, RetinaMNIST, BreastMNIST, OrganMNISTAxial, OrganMNISTCoronal, OrganMNISTSagittal +np.random.seed(42) +torch.manual_seed(42) + +from torch.utils.data import Dataset + +name_to_class = { + "pathmnist": (PathMNIST,9), + "chestmnist": (ChestMNIST,14), + "dermamnist": (DermaMNIST,7), + "octmnist": (OCTMNIST,4), + "pneumoniamnist": (PneumoniaMNIST,2), + "retinamnist": (RetinaMNIST,5), + "breastmnist": (BreastMNIST,2), + "axial_organmnist": (OrganMNISTAxial,11), + "coronal_organmnist": (OrganMNISTCoronal,11), + "sagittal_organmnist": (OrganMNISTSagittal,11), + } + + +def create_class_imb(dset_name, fullset, split_cfg, num_cls, augVal): + np.random.seed(42) + train_idx = [] + val_idx = [] + lake_idx = [] + selected_classes = np.random.choice(np.arange(num_cls), size=split_cfg['num_cls_imbalance'], replace=False) #classes to imbalance + for i in range(num_cls): #all_classes + full_idx_class = list(torch.where(torch.Tensor(fullset.targets) == i)[0].cpu().numpy()) + if(i in selected_classes): + class_train_idx = list(np.random.choice(np.array(full_idx_class), size=split_cfg['per_imbclass_train'], replace=False)) + remain_idx = list(set(full_idx_class) - set(class_train_idx)) + class_val_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_imbclass_val'], replace=False)) + remain_idx = list(set(remain_idx) - set(class_val_idx)) + class_lake_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_imbclass_lake'], replace=False)) + else: + class_train_idx = list(np.random.choice(np.array(full_idx_class), size=split_cfg['per_class_train'], replace=False)) + remain_idx = list(set(full_idx_class) - set(class_train_idx)) + class_val_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_class_val'], replace=False)) + remain_idx = list(set(remain_idx) - set(class_val_idx)) + class_lake_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_class_lake'], replace=False)) + + train_idx += class_train_idx + if(augVal and (i in selected_classes)): #augment with samples only from the imbalanced classes + train_idx += class_val_idx + val_idx += class_val_idx + lake_idx += class_lake_idx + train_set = SubsetWithTargets(fullset, train_idx, torch.Tensor(fullset.targets)[train_idx]) + val_set = SubsetWithTargets(fullset, val_idx, torch.Tensor(fullset.targets)[val_idx]) + lake_set = SubsetWithTargets(fullset, lake_idx, torch.Tensor(fullset.targets)[lake_idx]) + return train_set, val_set, lake_set, selected_classes + + +def create_class_imb_bio(dset_name, fullset, split_cfg, num_cls, augVal): + np.random.seed(42) + train_idx = [] + val_idx = [] + lake_idx = [] + selected_classes=split_cfg['sel_cls_idx'] + for i in range(num_cls): #all_classes + full_idx_class = list(torch.where(torch.Tensor(fullset.targets) == i)[0].cpu().numpy()) + if(i in selected_classes): + class_train_idx = list(np.random.choice(np.array(full_idx_class), size=split_cfg['per_imbclass_train'][i], replace=False)) + remain_idx = list(set(full_idx_class) - set(class_train_idx)) + class_val_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_imbclass_val'][i], replace=False)) + remain_idx = list(set(remain_idx) - set(class_val_idx)) + class_lake_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_imbclass_lake'][i], replace=False)) + else: + class_train_idx = list(np.random.choice(np.array(full_idx_class), 
size=split_cfg['per_class_train'][i], replace=False)) + remain_idx = list(set(full_idx_class) - set(class_train_idx)) + class_val_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_class_val'][i], replace=False)) + remain_idx = list(set(remain_idx) - set(class_val_idx)) + class_lake_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_class_lake'][i], replace=False)) + + train_idx += class_train_idx + if(augVal and (i in selected_classes)): #augment with samples only from the imbalanced classes + train_idx += class_val_idx + val_idx += class_val_idx + lake_idx += class_lake_idx + train_set = SubsetWithTargets(fullset, train_idx, torch.Tensor(fullset.targets)[train_idx]) + val_set = SubsetWithTargets(fullset, val_idx, torch.Tensor(fullset.targets)[val_idx]) + lake_set = SubsetWithTargets(fullset, lake_idx, torch.Tensor(fullset.targets)[lake_idx]) + return train_set, val_set, lake_set, selected_classes + +def create_class_imb_bio_with_testset(dset_name, fullset, testset, split_cfg, num_cls, augVal): + np.random.seed(42) + train_idx = [] + val_idx = [] + lake_idx = [] + test_idx = [] + selected_classes=split_cfg['sel_cls_idx'] + for i in range(num_cls): #all_classes + full_idx_class = list(torch.where(torch.Tensor(fullset.targets) == i)[0].cpu().numpy()) + test_idx_class = list(torch.where(torch.Tensor(testset.targets) == i)[0].cpu().numpy()) + if(i in selected_classes): + class_train_idx = list(np.random.choice(np.array(full_idx_class), size=split_cfg['per_imbclass_train'][i], replace=False)) + remain_idx = list(set(full_idx_class) - set(class_train_idx)) + class_val_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_imbclass_val'][i], replace=False)) + remain_idx = list(set(remain_idx) - set(class_val_idx)) + class_lake_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_imbclass_lake'][i], replace=False)) + class_test_idx = list(np.random.choice(np.array(test_idx_class), size=split_cfg['per_imbclass_test'][i], replace=False)) + else: + class_train_idx = list(np.random.choice(np.array(full_idx_class), size=split_cfg['per_class_train'][i], replace=False)) + remain_idx = list(set(full_idx_class) - set(class_train_idx)) + class_val_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_class_val'][i], replace=False)) + remain_idx = list(set(remain_idx) - set(class_val_idx)) + class_lake_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_class_lake'][i], replace=False)) + class_test_idx = list(np.random.choice(np.array(test_idx_class), size=split_cfg['per_class_test'][i], replace=False)) + + train_idx += class_train_idx + test_idx += class_test_idx + if(augVal and (i in selected_classes)): #augment with samples only from the imbalanced classes + train_idx += class_val_idx + val_idx += class_val_idx + lake_idx += class_lake_idx + train_set = SubsetWithTargets(fullset, train_idx, torch.Tensor(fullset.targets)[train_idx]) + val_set = SubsetWithTargets(fullset, val_idx, torch.Tensor(fullset.targets)[val_idx]) + lake_set = SubsetWithTargets(fullset, lake_idx, torch.Tensor(fullset.targets)[lake_idx]) + test_set = SubsetWithTargets(testset, test_idx, torch.Tensor(testset.targets)[test_idx]) + return train_set, val_set, lake_set, test_set, selected_classes + +def create_longtail(dset_name, fullset, split_cfg, num_cls, augVal): + np.random.seed(42) + train_idx = [] + val_idx = [] + lake_idx = [] + selected_classes=split_cfg['sel_cls_idx'] + for i in range(num_cls): #all_classes + full_idx_class = 
list(torch.where(torch.Tensor(fullset.targets) == i)[0].cpu().numpy()) + if(i in selected_classes): + class_train_idx = list(np.random.choice(np.array(full_idx_class), size=split_cfg['per_imbclass_train'][i], replace=False)) + remain_idx = list(set(full_idx_class) - set(class_train_idx)) + class_val_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_imbclass_val'][i], replace=False)) + remain_idx = list(set(remain_idx) - set(class_val_idx)) + class_lake_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_imbclass_lake'][i], replace=False)) + else: + class_train_idx = list(np.random.choice(np.array(full_idx_class), size=split_cfg['per_class_train'][i], replace=False)) + remain_idx = list(set(full_idx_class) - set(class_train_idx)) + class_val_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_class_val'][i], replace=False)) + remain_idx = list(set(remain_idx) - set(class_val_idx)) + class_lake_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_class_lake'][i], replace=False)) + + train_idx += class_train_idx + if(augVal and (i in selected_classes)): #augment with samples only from the imbalanced classes + train_idx += class_val_idx + val_idx += class_val_idx + lake_idx += class_lake_idx + train_set = SubsetWithTargets(fullset, train_idx, torch.Tensor(fullset.targets)[train_idx]) + val_set = SubsetWithTargets(fullset, val_idx, torch.Tensor(fullset.targets)[val_idx]) + lake_set = SubsetWithTargets(fullset, lake_idx, torch.Tensor(fullset.targets)[lake_idx]) + return train_set, val_set, lake_set, selected_classes + +def load_biodataset_custom(datadir, dset_name, feature, split_cfg, augVal=False, dataAug=True): + """ + Loads a biomedical dataset with additional options to create class imbalances, out-of-distribution classes, and redundancies. + + Parameters + ---------- + datadir : string + The root directory in which the data is stored (or should be downloaded) + dset_name : string + The name of the dataset. This should be one of 'cifar10', 'mnist', 'svhn', 'cifar100', 'breast-density'. + feature : string + The modification that should be applied to the dataset. This should be one of 'classimb', 'ood', 'duplicate', 'vanilla' + split_cfg : dict + Contains information relating to the dataset splits that should be created. Some of the keys for this dictionary are as follows: + 'per_imbclass_train': int + The number of examples in the train set for each imbalanced class (classimb) + 'per_imbclass_val': int + The number of examples in the validation set for each imbalanced class (classimb) + 'per_imbclass_lake': int + The number of examples in the lake set for each imbalanced class (classimb) + 'per_class_train': int + The number of examples in the train set for each balanced class (classimb + 'per_class_val': int + The number of examples in the validation set for each balanced class (classimb) + 'per_class_lake': int + The number of examples in the lake set for each balanced class (classimb) + 'sel_cls_idx': list + A list of classes that are affected by class imbalance. 
(classimb) + 'train_size': int + The size of the train set (vanilla, duplicate) + 'val_size': int + The size of the validation set (vanilla, duplicate) + 'lake_size': int + The size of the lake set (vanilla, duplicate) + 'num_rep': int + The number of times to repeat a selection in the lake set (duplicate) + 'lake_subset_repeat_size': int + The size of the repeated selection in the lake set (duplicate) + 'num_cls_imbalance': int + The number of classes to randomly affect by class imbalance. (classimb) + 'num_cls_idc': int + The number of in-distribution classes to keep (ood) + 'per_idc_train': int + The number of in-distribution examples to keep in the train set per class (ood) + 'per_idc_val': int + The number of in-distribution examples to keep in the validation set per class (ood) + 'per_idc_lake': int + The number of in-distribution examples to keep in the lake set per class (ood) + 'per_ood_train': int + The number of OOD examples to keep in the train set per class (ood) + 'per_ood_val': int + The number of OOD examples to keep in the validation set per class (ood) + 'per_ood_lake': int + The number of OOD examples to keep in the lake set per class (ood) + augVal : bool, optional + If True, the train set will also contain affected classes from the validation set. The default is False. + dataAug : bool, optional + If True, the all but the test set will be affected by random cropping and random horizontal flip. The default is True. + + Returns + ------- + tuple + Returns a train set, validation set, test set, lake set, and number of classes. Amount of returned items depends on specific configuration. + Each set is an instance of torch.utils.data.Dataset + """ + if(not(os.path.exists(datadir))): + os.mkdir(datadir) + + if(dset_name[-5:]=="mnist"): + num_cls=name_to_class[dset_name][1] + datadir = datadir + input_size = 32 + data_transforms = { + 'train' : transforms.Compose([ + transforms.RandomHorizontalFlip(), + transforms.RandomVerticalFlip(), + transforms.ToTensor(), + transforms.Normalize(mean=[0.5],std=[0.5]) + ]), + 'test' : transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(mean=[0.5],std=[0.5]) + ]) + } + + Dataclass = name_to_class[dset_name][0] + fullset = Dataclass(root=datadir,split="train",transform=data_transforms['train'],download=False) + test_set = Dataclass(root=datadir,split="test",transform=data_transforms['test'],download=False) + + if(feature=="classimb"): + train_set, val_set, lake_set, imb_cls_idx = create_class_imb_bio(dset_name, fullset, split_cfg, num_cls, augVal) + print(dset_name+" Custom dataset stats: Train size: ", len(train_set), "Val size: ", len(val_set), "Lake size: ", len(lake_set)) + return train_set, val_set, test_set, lake_set, imb_cls_idx, num_cls + elif(feature=="longtail"): + train_set, val_set, lake_set, imb_cls_idx = create_longtail(dset_name, fullset, split_cfg, num_cls, augVal) + print(dset_name+" Custom dataset stats: Train size: ", len(train_set), "Val size: ", len(val_set), "Lake size: ", len(lake_set)) + return train_set, val_set, test_set, lake_set, imb_cls_idx, num_cls + + if(dset_name=="breast_cancer"): + num_cls=2 + data_dir = datadir + input_size=224 + data_transforms = { + 'train': transforms.Compose([ + transforms.RandomResizedCrop(input_size), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) + ]), + 'test': transforms.Compose([ + transforms.Resize(input_size), + transforms.CenterCrop(input_size), + transforms.ToTensor(), + 
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) + ]), + } + + fullset = datasets.ImageFolder(os.path.join(data_dir, 'train'), data_transforms['train']) + test_set = datasets.ImageFolder(os.path.join(data_dir, 'test'), data_transforms['test']) + if(feature=="classimb"): + train_set, val_set, lake_set, imb_cls_idx = create_class_imb_bio(dset_name, fullset, split_cfg, num_cls, augVal) + print("Breast-Cancer Custom dataset stats: Train size: ", len(train_set), "Val size: ", len(val_set), "Lake size: ", len(lake_set)) + return train_set, val_set, test_set, lake_set, imb_cls_idx, num_cls + elif(feature=="longtail"): + train_set, val_set, lake_set, imb_cls_idx = create_longtail(dset_name, fullset, split_cfg, num_cls, augVal) + print("Breast-Cancer Custom dataset stats: Train size: ", len(train_set), "Val size: ", len(val_set), "Lake size: ", len(lake_set)) + return train_set, val_set, test_set, lake_set, imb_cls_idx, num_cls + + + + if(dset_name=="breast_density"): + num_cls=4 + data_dir = datadir + input_size=224 + data_transforms = { + 'train': transforms.Compose([ + transforms.RandomResizedCrop(input_size), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) + ]), + 'test': transforms.Compose([ + transforms.Resize(input_size), + transforms.CenterCrop(input_size), + transforms.ToTensor(), + transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) + ]), + } + + fullset = datasets.ImageFolder(os.path.join(data_dir, 'train'), data_transforms['train']) + test_set = datasets.ImageFolder(os.path.join(data_dir, 'test'), data_transforms['test']) + if(feature=="classimb"): + train_set, val_set, lake_set, imb_cls_idx = create_class_imb_bio(dset_name, fullset, split_cfg, num_cls, augVal) + print("Breast-density Custom dataset stats: Train size: ", len(train_set), "Val size: ", len(val_set), "Lake size: ", len(lake_set)) + return train_set, val_set, test_set, lake_set, imb_cls_idx, num_cls + elif(feature=="longtail"): + train_set, val_set, lake_set, imb_cls_idx = create_longtail(dset_name, fullset, split_cfg, num_cls, augVal) + print("Breast-density Custom dataset stats: Train size: ", len(train_set), "Val size: ", len(val_set), "Lake size: ", len(lake_set)) + return train_set, val_set, test_set, lake_set, imb_cls_idx, num_cls + \ No newline at end of file diff --git a/trust/utils/.ipynb_checkpoints/dermamnist-checkpoint.py b/trust/utils/.ipynb_checkpoints/dermamnist-checkpoint.py new file mode 100644 index 0000000..3ae147d --- /dev/null +++ b/trust/utils/.ipynb_checkpoints/dermamnist-checkpoint.py @@ -0,0 +1,204 @@ +import numpy as np +import os +import torch +import torchvision +from sklearn import datasets +from torchvision import datasets, transforms +import PIL.Image as Image +from .utils import * +np.random.seed(42) +torch.manual_seed(42) +from torchvision.datasets import cifar +from torch.utils.data import Dataset, Subset, ConcatDataset, DataLoader + +class DermaDataset(Dataset): + def __init__(self, data, root='/mnt/data2/akshit/data/', transform=None): + self.root = root + self.transform = transform + self.images = data['images'] + self.targets = data['labels'].flatten() + + + def __len__(self): + return len(self.targets) + + def __getitem__(self, idx): + img = Image.fromarray(np.uint8(self.images[idx])).convert('RGB') + if self.transform: + img = self.transform(img) + label = self.targets[idx] + return img, label + + +def getOODtargets(targets, sel_cls_idx, ood_cls_id): + + ood_targets = [] 
+ targets_list = list(targets) + for i in range(len(targets_list)): + if(targets_list[i] in list(sel_cls_idx)): + ood_targets.append(targets_list[i]) + else: + ood_targets.append(ood_cls_id) + print("num ood samples: ", ood_targets.count(ood_cls_id)) + return torch.Tensor(ood_targets) + +def create_ood_data(fullset, testset, split_cfg, num_cls, augVal): + + np.random.seed(42) + train_idx = [] + val_idx = [] + lake_idx = [] + test_idx = [] + selected_classes = np.array(list(range(split_cfg['num_cls_idc']))) + for i in range(num_cls): #all_classes + full_idx_class = list(torch.where(torch.Tensor(fullset.targets) == i)[0].cpu().numpy()) + if(i in selected_classes): + test_idx_class = list(torch.where(torch.Tensor(testset.targets) == i)[0].cpu().numpy()) + test_idx += test_idx_class + class_train_idx = list(np.random.choice(np.array(full_idx_class), size=split_cfg['per_idc_train'], replace=True)) + train_idx += class_train_idx + remain_idx = list(set(full_idx_class) - set(class_train_idx)) + class_val_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_idc_val'], replace=True)) + remain_idx = list(set(remain_idx) - set(class_val_idx)) + class_lake_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_idc_lake'], replace=True)) + else: + class_train_idx = list(np.random.choice(np.array(full_idx_class), size=split_cfg['per_ood_train'], replace=False)) #always 0 + remain_idx = list(set(full_idx_class) - set(class_train_idx)) + class_val_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_ood_val'], replace=False)) #Only for CG ood val has samples + remain_idx = list(set(remain_idx) - set(class_val_idx)) + class_lake_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_ood_lake'], replace=False)) #many ood samples in lake + + if(augVal and (i in selected_classes)): #augment with samples only from the imbalanced classes + train_idx += class_val_idx + val_idx += class_val_idx + lake_idx += class_lake_idx + + train_set = SubsetWithTargets(fullset, train_idx, torch.Tensor(fullset.targets)[train_idx]) + val_set = SubsetWithTargets(fullset, val_idx, torch.Tensor(fullset.targets)[val_idx]) + lake_set = SubsetWithTargets(fullset, lake_idx, getOODtargets(torch.Tensor(fullset.targets)[lake_idx], selected_classes, split_cfg['num_cls_idc'])) + test_set = SubsetWithTargets(testset, test_idx, torch.Tensor(testset.targets)[test_idx]) + + return train_set, val_set, test_set, lake_set, selected_classes + +############ +# OOD TYPE 1 +############ + +def load_dataset_custom_1(datadir, feature, split_cfg, augVal=False, dataAug=True): + + num_cls = 8 + path = '/mnt/data2/akshit/' + download_path = '/mnt/data2/akshit/data/cifar10' + train_data = np.load(f'{path}data/dermamnist/dm_train.npz', allow_pickle=True) + val_data = np.load(f'{path}data/dermamnist/dm_val.npz', allow_pickle=True) + test_data = np.load(f'{path}data/dermamnist/dm_test_balanced.npz', allow_pickle=True) + ptrain={ + 'images': np.concatenate((train_data['images'],val_data['images'])), + 'labels': np.concatenate((train_data['labels'],val_data['labels'])) + } + + # Define the number of classes in our modified CIFAR10, which is 6. 
+    cifar_training_transform = transforms.Compose([transforms.RandomCrop(28), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
+    cifar_test_transform = transforms.Compose([transforms.Resize(28), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
+    cifar_label_transform = lambda x: 7
+
+    # Get the dataset objects from PyTorch. Here, CIFAR10 is loaded from download_path (download=False),
+    # and the transform is applied when points are retrieved.
+    cifar10_full_train = cifar.CIFAR10(download_path, train=True, download=False, transform=cifar_training_transform, target_transform=cifar_label_transform)
+    cifar10_test = cifar.CIFAR10(download_path, train=False, download=False, transform=cifar_test_transform, target_transform=cifar_label_transform)
+
+    derma_full_train = DermaDataset(data=ptrain, transform=cifar_training_transform)
+    derma_test = DermaDataset(data=test_data, transform=cifar_test_transform)
+
+    fullset = ConcatDataset([derma_full_train, cifar10_full_train])
+    fullset.targets = np.append(derma_full_train.targets, [7 for i in range(50000)]) #all 50000 CIFAR-10 train images get the OOD label
+    test_set = derma_test
+
+    if(feature=="ood"):
+        train_set, val_set, test_set, lake_set, ood_cls_idx = create_ood_data(fullset, test_set, split_cfg, num_cls, augVal)
+        print("Custom dataset stats: Train size: ", len(train_set), "Val size: ", len(val_set), "Lake size: ", len(lake_set), "Test set: ", len(test_set))
+        return train_set, val_set, test_set, lake_set, ood_cls_idx, split_cfg['num_cls_idc']
+
+#############
+## Taking examples from different classes instead of oversampling
+#############
+def create_ood_data_2(fullset, testset, split_cfg, num_cls, augVal):
+
+    np.random.seed(42)
+    train_idx = []
+    val_idx = []
+    lake_idx = []
+    test_idx = []
+    selected_classes = np.array(list(range(split_cfg['num_cls_idc'])))
+    for i in range(num_cls): #all_classes
+        full_idx_class = list(torch.where(torch.Tensor(fullset.targets) == i)[0].cpu().numpy())
+        if(i in selected_classes):
+            test_idx_class = list(torch.where(torch.Tensor(testset.targets) == i)[0].cpu().numpy())
+            test_idx += test_idx_class
+            class_train_idx = list(np.random.choice(np.array(full_idx_class), size=split_cfg['per_idc_train'], replace=False))
+            train_idx += class_train_idx
+            remain_idx = list(set(full_idx_class) - set(class_train_idx))
+
+            class_val_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_idc_val'], replace=False))
+            remain_idx = list(set(remain_idx) - set(class_val_idx))
+            ## Taking examples from different classes instead of oversampling
+            if len(remain_idx) >= split_cfg['per_idc_lake']:
+                class_lake_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_idc_lake'], replace=False))
+            else:
+                # fewer than per_idc_lake samples are left for this class: take all of them, without replacement
+                class_lake_idx = list(remain_idx)

Date: Tue, 17 May 2022 13:18:22 +0530
Subject: [PATCH 2/2] Tutorials added

---
 tutorials/trust_ood_demo_dermamnist.ipynb | 2466 +++++++++++++++++++++
 tutorials/trust_ood_demo_organmnist.ipynb | 1796 +++++++++++++++
 2 files changed, 4262 insertions(+)
 create mode 100644 tutorials/trust_ood_demo_dermamnist.ipynb
 create mode 100644 tutorials/trust_ood_demo_organmnist.ipynb

diff --git a/tutorials/trust_ood_demo_dermamnist.ipynb b/tutorials/trust_ood_demo_dermamnist.ipynb
new file mode 100644
index 0000000..e99edb9
--- /dev/null
+++ b/tutorials/trust_ood_demo_dermamnist.ipynb
@@ -0,0 +1,2466 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Avoiding Unrelated Out-of-Distribution Images "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Importing Libraries"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import h5py\n",
+    "import time\n",
+    "import random\n",
+    "import datetime\n",
+    "import copy\n",
+    "import numpy as np\n",
+    "import os\n",
+    "import csv\n",
+    "import json\n",
+    "import subprocess\n",
+    "import sys\n",
+    "import PIL.Image as Image\n",
+    "import torch\n",
+    "import torch.backends.cudnn as cudnn\n",
+    "import torch.nn as nn\n",
+    "import torch.nn.functional as F\n",
+    "import torch.optim as optim\n",
+    "import torchvision\n",
+    "import torchvision.models as models\n",
+    "from torchvision.datasets import cifar\n",
+    "from matplotlib import pyplot as plt\n",
+    "sys.path.append('/mnt/data2/akshit/distil/')\n",
+    "sys.path.append('/mnt/data2/akshit/trust/')\n",
+    "from distil.utils.models.resnet import ResNet18\n",
+    "from trust.utils.dermamnist import load_dataset_custom_2 as load_dataset_custom\n",
+    "from torch.utils.data import Dataset, Subset, ConcatDataset, DataLoader\n",
+    "from torch.utils.data import Subset\n",
+    "from torch.autograd import Variable\n",
+    "import tqdm\n",
+    "from math import floor\n",
+    "from sklearn.metrics.pairwise import cosine_similarity, pairwise_distances\n",
+    "from distil.active_learning_strategies.scmi import SCMI\n",
+    "from distil.active_learning_strategies.smi import SMI\n",
+    "from distil.active_learning_strategies.badge import BADGE\n",
+    "from distil.active_learning_strategies.entropy_sampling import EntropySampling\n",
+    "from distil.active_learning_strategies.gradmatch_active import GradMatchActive\n",
+    "from distil.active_learning_strategies.glister import GLISTER\n",
+    "from trust.strategies.random_sampling import RandomSampling\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Defining Parameters"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "device_id = 0\n",
+    "magnification = 1\n",
+    "device = \"cuda:\"+str(device_id) if torch.cuda.is_available() else \"cpu\"\n",
+    "datkbuildPath = \"./datk/build\"\n",
+    "exePath = \"cifarSubsetSelector\"\n",
+    "print(\"Using Device:\", device)\n",
+    "doublePrecision = True\n",
+    "linearLayer = True\n",
+    "miscls = True\n",
+    "# handler = DataHandler_CIFAR10\n",
+    "augTarget = True\n",
+    "embedding_type = \"gradients\"\n",
+    "\n",
+    "# Run configuration used by initModelPath and train_model_al below; the values follow the\n",
+    "# logged outputs in this notebook. datadir is only passed through to the loader (the\n",
+    "# dermamnist loaders in this patch use their own hard-coded data paths).\n",
+    "data_name = 'dermamnist'\n",
+    "datadir = 'data/'\n",
+    "feature = 'ood'\n",
+    "model_name = 'ResNet18'\n",
+    "learning_rate = 0.01\n",
+    "run = 'fkna_3'\n",
+    "computeClassErrorLog = True\n",
+    "\n",
+    "num_cls=7 \n",
+    "budget=70\n",
+    "num_epochs = int(10)\n",
+    "split_cfg = {'num_cls_idc':7, 
'per_idc_train':15, 'per_idc_val':2, 'per_idc_lake':180, 'per_ood_train':0, 'per_ood_val':0, 'per_ood_lake':5000}\n", + "\n", + "initModelPath = \"/mnt/data2/akshit/Derma/weights/\" + data_name + \"_\" + feature + \"_\" + model_name + \"_\" + str(learning_rate) + \"_\" + str(split_cfg[\"per_idc_train\"]) + \"_\" + str(split_cfg[\"per_idc_val\"]) + \"_\" + str(split_cfg[\"num_cls_idc\"])\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Helper Functions" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def model_eval_loss(data_loader, model, criterion):\n", + " total_loss = 0\n", + " with torch.no_grad():\n", + " for batch_idx, (inputs, targets) in enumerate(data_loader):\n", + " inputs, targets = inputs.to(device), targets.to(device, non_blocking=True)\n", + " outputs = model(inputs)\n", + " loss = criterion(outputs, targets)\n", + " total_loss += loss.item()\n", + " return total_loss\n", + "\n", + "def init_weights(m):\n", + "# torch.manual_seed(35)\n", + " if isinstance(m, nn.Conv2d):\n", + " torch.nn.init.xavier_uniform_(m.weight)\n", + " elif isinstance(m, nn.Linear):\n", + " torch.nn.init.xavier_uniform_(m.weight)\n", + " m.bias.data.fill_(0.01)\n", + "\n", + "def weight_reset(m):\n", + " if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n", + " m.reset_parameters()\n", + " \n", + "def create_model(name, num_cls, device, embedding_type):\n", + " if name == 'ResNet18':\n", + " if embedding_type == \"gradients\":\n", + " model = ResNet18(num_cls)\n", + " else:\n", + " model = models.resnet18()\n", + " elif name == 'MnistNet':\n", + " model = MnistNet()\n", + " elif name == 'ResNet164':\n", + " model = ResNet164(num_cls)\n", + " model.apply(init_weights)\n", + " model = model.to(device)\n", + " return model\n", + "\n", + "def loss_function():\n", + " criterion = nn.CrossEntropyLoss()\n", + " criterion_nored = nn.CrossEntropyLoss(reduction='none')\n", + " return criterion, criterion_nored\n", + "\n", + "def optimizer_with_scheduler(model, num_epochs, learning_rate, m=0.9, wd=5e-4):\n", + " optimizer = optim.SGD(model.parameters(), lr=learning_rate,\n", + " momentum=m, weight_decay=wd)\n", + " scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)\n", + " return optimizer, scheduler\n", + "\n", + "def optimizer_without_scheduler(model, learning_rate, m=0.9, wd=5e-4):\n", + "# optimizer = optim.Adam(model.parameters(),weight_decay=wd)\n", + " optimizer = optim.SGD(model.parameters(), lr=learning_rate,\n", + " momentum=m, weight_decay=wd)\n", + " return optimizer\n", + "\n", + "def generate_cumulative_timing(mod_timing):\n", + " tmp = 0\n", + " mod_cum_timing = np.zeros(len(mod_timing))\n", + " for i in range(len(mod_timing)):\n", + " tmp += mod_timing[i]\n", + " mod_cum_timing[i] = tmp\n", + " return mod_cum_timing/3600\n", + "\n", + "def find_err_per_class(test_set, val_set, final_val_classifications, final_val_predictions, final_tst_classifications, \n", + " final_tst_predictions, saveDir, prefix):\n", + " #find queries from the validation set that are erroneous\n", + "# saveDir = os.path.join(saveDir, prefix)\n", + "# if(not(os.path.exists(saveDir))):\n", + "# os.mkdir(saveDir)\n", + " val_err_idx = list(np.where(np.array(final_val_classifications) == False)[0])\n", + " tst_err_idx = list(np.where(np.array(final_tst_classifications) == False)[0])\n", + " val_class_err_idxs = []\n", + " tst_err_log = []\n", + " val_err_log = []\n", + " for i in 
range(num_cls):\n", + " if(feature==\"ood\"): tst_class_idxs = list(torch.where(torch.Tensor(test_set.targets.float()) == i)[0].cpu().numpy())\n", + " if(feature==\"classimb\"): tst_class_idxs = list(torch.where(torch.Tensor(test_set.targets) == i)[0].cpu().numpy())\n", + " val_class_idxs = list(torch.where(torch.Tensor(val_set.targets.float()) == i)[0].cpu().numpy())\n", + " #err classifications per class\n", + " val_err_class_idx = set(val_err_idx).intersection(set(val_class_idxs))\n", + " tst_err_class_idx = set(tst_err_idx).intersection(set(tst_class_idxs))\n", + " if(len(val_class_idxs)>0):\n", + " val_error_perc = round((len(val_err_class_idx)/len(val_class_idxs))*100,2)\n", + " else:\n", + " val_error_perc = 0\n", + " \n", + " tst_error_perc = round((len(tst_err_class_idx)/len(tst_class_idxs))*100,2)\n", + " print(\"val, test error% for class \", i, \" : \", val_error_perc, tst_error_perc)\n", + " val_class_err_idxs.append(val_err_class_idx)\n", + " tst_err_log.append(tst_error_perc)\n", + " val_err_log.append(val_error_perc)\n", + " tst_err_log.append(sum(tst_err_log)/len(tst_err_log))\n", + " val_err_log.append(sum(val_err_log)/len(val_err_log))\n", + " return tst_err_log, val_err_log, val_class_err_idxs\n", + "\n", + "def aug_train_subset(train_set, lake_set, true_lake_set, subset, lake_subset_idxs, budget, augrandom=False):\n", + " all_lake_idx = list(range(len(lake_set)))\n", + " if(not(len(subset)==budget) and augrandom):\n", + " print(\"Budget not filled, adding \", str(int(budget) - len(subset)), \" randomly.\")\n", + " remain_budget = int(budget) - len(subset)\n", + " remain_lake_idx = list(set(all_lake_idx) - set(subset))\n", + " random_subset_idx = list(np.random.choice(np.array(remain_lake_idx), size=int(remain_budget), replace=False))\n", + " subset += random_subset_idx\n", + " lake_ss = SubsetWithTargets(true_lake_set, subset, torch.Tensor(true_lake_set.targets.float())[subset])\n", + " if(feature==\"ood\"): \n", + " ood_lake_idx = list(set(lake_subset_idxs)-set(subset))\n", + " private_set = SubsetWithTargets(true_lake_set, ood_lake_idx, torch.Tensor(np.array([split_cfg['num_cls_idc']]*len(ood_lake_idx))).float())\n", + " remain_lake_idx = list(set(all_lake_idx) - set(lake_subset_idxs))\n", + " remain_lake_set = SubsetWithTargets(lake_set, remain_lake_idx, torch.Tensor(lake_set.targets.float())[remain_lake_idx])\n", + " remain_true_lake_set = SubsetWithTargets(true_lake_set, remain_lake_idx, torch.Tensor(true_lake_set.targets.float())[remain_lake_idx])\n", + " print(len(lake_ss),len(remain_lake_set),len(lake_set))\n", + " if(feature!=\"ood\"): assert((len(lake_ss)+len(remain_lake_set))==len(lake_set))\n", + " aug_train_set = torch.utils.data.ConcatDataset([train_set, lake_ss])\n", + " if(feature==\"ood\"): \n", + " return aug_train_set, remain_lake_set, remain_true_lake_set, private_set, lake_ss\n", + " else:\n", + " return aug_train_set, remain_lake_set, remain_true_lake_set, lake_ss\n", + " \n", + "def getQuerySet(val_set, val_class_err_idxs, imb_cls_idx, miscls):\n", + " miscls_idx = []\n", + " if(miscls):\n", + " for i in range(len(val_class_err_idxs)):\n", + " if i in imb_cls_idx:\n", + " miscls_idx += val_class_err_idxs[i]\n", + " print(\"total misclassified ex from imb classes: \", len(miscls_idx))\n", + " else:\n", + " for i in imb_cls_idx:\n", + " imb_cls_samples = list(torch.where(torch.Tensor(val_set.targets.float()) == i)[0].cpu().numpy())\n", + " miscls_idx += imb_cls_samples\n", + " print(\"total samples from imb classes as targets: \", 
len(miscls_idx))\n", + " return Subset(val_set, miscls_idx)\n", + "\n", + "def getPrivateSet(lake_set, subset, private_set):\n", + " #augment prev private set and current subset\n", + " new_private_set = SubsetWithTargets(lake_set, subset, torch.Tensor(lake_set.targets.float())[subset])\n", + "# new_private_set = Subset(lake_set, subset)\n", + " total_private_set = torch.utils.data.ConcatDataset([private_set, new_private_set])\n", + " return total_private_set\n", + "\n", + "def remove_ood_points(lake_set, subset, idc_idx):\n", + " idx_subset = []\n", + " subset_cls = torch.Tensor(lake_set.targets.float())[subset]\n", + " for i in idc_idx:\n", + " idc_subset_idx = list(torch.where(subset_cls == i)[0].cpu().numpy())\n", + " idx_subset += list(np.array(subset)[idc_subset_idx])\n", + " print(len(idx_subset),\"/\",len(subset), \" idc points.\")\n", + " return idx_subset\n", + "\n", + "def getPerClassSel(lake_set, subset, num_cls):\n", + " perClsSel = []\n", + " subset_cls = torch.Tensor(lake_set.targets.float())[subset]\n", + " for i in range(num_cls):\n", + " cls_subset_idx = list(torch.where(subset_cls == i)[0].cpu().numpy())\n", + " perClsSel.append(len(cls_subset_idx))\n", + " return perClsSel\n", + "\n", + "\n", + "def train_model_al(datkbuildPath, exePath, num_epochs, dataset_name, datadir, feature, model_name, budget, split_cfg, learning_rate, run,\n", + " device, computeErrorLog, strategy=\"SIM\", sf=\"\"):\n", + "# torch.manual_seed(42)\n", + "# np.random.seed(42)\n", + " print(strategy, sf)\n", + " #load the dataset based on type of feature\n", + " train_set, val_set, test_set, lake_set, sel_cls_idx, num_cls = load_dataset_custom(datadir, feature, split_cfg, False, True)\n", + " print(\"selected classes are: \", sel_cls_idx)\n", + "\n", + " if(feature==\"ood\"): num_cls+=1 #Add one class for OOD class\n", + " N = len(train_set)\n", + " trn_batch_size = 20\n", + " val_batch_size = 10\n", + " tst_batch_size = 100\n", + "\n", + " trainloader = torch.utils.data.DataLoader(train_set, batch_size=trn_batch_size,\n", + " shuffle=True, pin_memory=True)\n", + "\n", + " valloader = torch.utils.data.DataLoader(val_set, batch_size=val_batch_size, \n", + " shuffle=False, pin_memory=True)\n", + "\n", + " tstloader = torch.utils.data.DataLoader(test_set, batch_size=tst_batch_size,\n", + " shuffle=False, pin_memory=True)\n", + " \n", + " lakeloader = torch.utils.data.DataLoader(lake_set, batch_size=tst_batch_size,\n", + " shuffle=False, pin_memory=True)\n", + " true_lake_set = copy.deepcopy(lake_set)\n", + " # Budget for subset selection\n", + " bud = budget\n", + " \n", + " # Variables to store accuracies\n", + " fulltrn_losses = np.zeros(num_epochs)\n", + " val_losses = np.zeros(num_epochs)\n", + " tst_losses = np.zeros(num_epochs)\n", + " timing = np.zeros(num_epochs)\n", + " val_acc = np.zeros(num_epochs)\n", + " full_trn_acc = np.zeros(num_epochs)\n", + " tst_acc = np.zeros(num_epochs)\n", + " final_tst_predictions = []\n", + " final_tst_classifications = []\n", + " best_val_acc = -1\n", + " csvlog = []\n", + " val_csvlog = []\n", + " # Results logging file\n", + " print_every = 3\n", + " all_logs_dir = './SMI_active_learning_results/' + dataset_name + '/' + feature + '/'+ sf + '/' + str(bud) + '/' + str(run)\n", + " print(\"Saving results to: \", all_logs_dir)\n", + " subprocess.run([\"mkdir\", \"-p\", all_logs_dir])\n", + " exp_name = dataset_name + \"_\" + feature + \"_\" + strategy + \"_\" + str(len(sel_cls_idx)) +\"_\" + sf + '_budget:' + str(bud) + '_epochs:' + str(num_epochs) + '_linear:' 
+ str(linearLayer) + '_runs' + str(run)\n", + " print(exp_name)\n", + " res_dict = {\"dataset\":data_name, \n", + " \"feature\":feature, \n", + " \"sel_func\":sf,\n", + " \"sel_budget\":budget, \n", + " \"num_selections\":num_epochs, \n", + " \"model\":model_name, \n", + " \"learning_rate\":learning_rate, \n", + " \"setting\":split_cfg, \n", + " \"all_class_acc\":None, \n", + " \"test_acc\":[],\n", + " \"sel_per_cls\":[], \n", + " \"sel_cls_idx\":sel_cls_idx.tolist()}\n", + " # Model Creation\n", + " model = create_model(model_name, num_cls, device, embedding_type)\n", + " model1 = create_model(model_name, num_cls, device, embedding_type)\n", + " \n", + " # Loss Functions\n", + " criterion, criterion_nored = loss_function()\n", + " \n", + " strategy_args = {'batch_size': 20, 'device':'cuda', 'num_partitions':1, 'wrapped_strategy_class': None, \n", + " 'embedding_type':'gradients', 'keep_embedding':False, 'budget':'budget'}\n", + " unlabeled_lake_set = LabeledToUnlabeledDataset(lake_set)\n", + " if(strategy == \"AL\"):\n", + " if(sf==\"badge\"):\n", + " strategy_sel = BADGE(train_set, unlabeled_lake_set, model, num_cls, strategy_args)\n", + " elif(sf==\"us\"):\n", + " strategy_sel = EntropySampling(train_set, unlabeled_lake_set, model, num_cls, strategy_args)\n", + " elif(sf==\"glister\" or sf==\"glister-tss\"):\n", + " strategy_sel = GLISTER(train_set, unlabeled_lake_set, model, num_cls, strategy_args, val_set, typeOf='rand', lam=0.1)\n", + " elif(sf==\"gradmatch-tss\"):\n", + " strategy_sel = GradMatchActive(train_set, unlabeled_lake_set, model, num_cls, strategy_args, val_set)\n", + " elif(sf==\"coreset\"):\n", + " strategy_sel = CoreSet(train_set, unlabeled_lake_set, model, num_cls, strategy_args)\n", + " elif(sf==\"leastconf\"):\n", + " strategy_sel = LeastConfidence(train_set, unlabeled_lake_set, model, num_cls, strategy_args)\n", + " elif(sf==\"margin\"):\n", + " strategy_sel = MarginSampling(train_set, unlabeled_lake_set, model, num_cls, strategy_args)\n", + " if(strategy == \"SIM\"):\n", + " if(sf.endswith(\"mic\")):\n", + " strategy_args['scmi_function'] = sf.split(\"mic\")[0] + \"cmi\"\n", + " strategy_sel = SCMI(train_set, unlabeled_lake_set, val_set, val_set, model, num_cls, strategy_args)\n", + " elif(sf.endswith(\"mi\")):\n", + " strategy_args['smi_function'] = sf\n", + " strategy_sel = SMI(train_set, unlabeled_lake_set, val_set, model, num_cls, strategy_args)\n", + " if(strategy == \"random\"):\n", + " strategy_sel = RandomSampling(train_set, unlabeled_lake_set, model, num_cls, strategy_args)\n", + " \n", + " strategy_args['verbose'] = False\n", + " strategy_args['optimizer'] = \"LazyGreedy\"\n", + "\n", + " # Getting the optimizer and scheduler\n", + "# optimizer, scheduler = optimizer_with_scheduler(model, num_epochs, learning_rate)\n", + " optimizer = optimizer_without_scheduler(model, learning_rate)\n", + " private_set = []\n", + "\n", + " for i in range(num_epochs):\n", + " print(\"AL epoch: \", i)\n", + " tst_loss = 0\n", + " tst_correct = 0\n", + " tst_total = 0\n", + " val_loss = 0\n", + " val_correct = 0\n", + " val_total = 0\n", + " \n", + " if(i==0):\n", + " print(\"initial training epoch\")\n", + " if(os.path.exists(initModelPath)):\n", + " model.load_state_dict(torch.load(initModelPath, map_location=device))\n", + " print(\"Init model loaded from disk, skipping init training: \", initModelPath)\n", + " model.eval()\n", + " with torch.no_grad():\n", + " final_val_predictions = []\n", + " final_val_classifications = []\n", + " for batch_idx, (inputs, targets) in 
enumerate(valloader):\n", + " inputs, targets = inputs.to(device), targets.to(device, non_blocking=True)\n", + " outputs = model(inputs)\n", + " loss = criterion(outputs, targets)\n", + " val_loss += loss.item()\n", + " if(feature==\"ood\"): \n", + " _, predicted = outputs[...,:-1].max(1)\n", + " else:\n", + " _, predicted = outputs.max(1)\n", + " val_total += targets.size(0)\n", + " val_correct += predicted.eq(targets).sum().item()\n", + " final_val_predictions += list(predicted.cpu().numpy())\n", + " final_val_classifications += list(predicted.eq(targets).cpu().numpy())\n", + " \n", + " final_tst_predictions = []\n", + " final_tst_classifications = []\n", + " for batch_idx, (inputs, targets) in enumerate(tstloader):\n", + " inputs, targets = inputs.to(device), targets.to(device, non_blocking=True)\n", + " outputs = model(inputs)\n", + " loss = criterion(outputs, targets)\n", + " tst_loss += loss.item()\n", + " if(feature==\"ood\"): \n", + " _, predicted = outputs[...,:-1].max(1)\n", + " else:\n", + " _, predicted = outputs.max(1)\n", + " tst_total += targets.size(0)\n", + " tst_correct += predicted.eq(targets).sum().item()\n", + " final_tst_predictions += list(predicted.cpu().numpy())\n", + " final_tst_classifications += list(predicted.eq(targets).cpu().numpy()) \n", + " best_val_acc = (val_correct/val_total)\n", + " val_acc[i] = val_correct / val_total\n", + " tst_acc[i] = tst_correct / tst_total\n", + " val_losses[i] = val_loss\n", + " tst_losses[i] = tst_loss\n", + " res_dict[\"test_acc\"].append(tst_acc[i])\n", + " continue\n", + " else:\n", + " unlabeled_lake_set = LabeledToUnlabeledDataset(lake_set)\n", + " strategy_sel.update_data(train_set, unlabeled_lake_set)\n", + " #compute the error log before every selection\n", + " if(computeErrorLog):\n", + " tst_err_log, val_err_log, val_class_err_idxs = find_err_per_class(test_set, val_set, final_val_classifications, final_val_predictions, final_tst_classifications, final_tst_predictions, all_logs_dir, sf+\"_\"+str(bud))\n", + " csvlog.append(tst_err_log)\n", + " val_csvlog.append(val_err_log)\n", + " ####SIM####\n", + " if(strategy==\"SIM\" or strategy==\"SF\"):\n", + " if(sf.endswith(\"mi\")):\n", + " if(feature==\"classimb\"):\n", + " #make a dataloader for the misclassifications - only for experiments with targets\n", + " miscls_set = getQuerySet(val_set, val_class_err_idxs, sel_cls_idx, miscls)\n", + " strategy_sel.update_queries(miscls_set)\n", + " elif(sf.endswith(\"mic\")): #configured for the OOD setting\n", + " print(\"val set targets: \", val_set.targets)\n", + " strategy_sel.update_queries(val_set) #In-dist samples are in Val \n", + " if(len(private_set)!=0):\n", + " print(\"private set targets: \", private_set.targets)\n", + " strategy_sel.update_privates(private_set)\n", + "\n", + " ###AL###\n", + " elif(strategy==\"AL\"):\n", + " if(sf==\"glister-tss\" or sf==\"gradmatch-tss\"):\n", + " miscls_set = getQuerySet(val_set, val_class_err_idxs, sel_cls_idx, miscls)\n", + " strategy_sel.update_queries(miscls_set)\n", + " print(\"reinit AL with targeted miscls samples\")\n", + " \n", + " elif(strategy==\"random\"):\n", + " subset = np.random.choice(np.array(list(range(len(lake_set)))), size=budget, replace=False)\n", + " \n", + " strategy_sel.update_model(model)\n", + " subset = strategy_sel.select(budget)\n", + "# print(\"True targets of subset: \", torch.Tensor(true_lake_set.targets.float())[subset])\n", + "# hypothesized_targets = strategy_sel.predict(unlabeled_lake_set)\n", + "# print(\"Hypothesized targets of subset: \", 
hypothesized_targets)\n", + " lake_subset_idxs = subset #indices wrt to lake that need to be removed from the lake\n", + " if(feature==\"ood\"): #remove ood points from the subset\n", + " subset = remove_ood_points(true_lake_set, subset, sel_cls_idx)\n", + " \n", + " print(\"selEpoch: %d, Selection Ended at:\" % (i), str(datetime.datetime.now()))\n", + " perClsSel = getPerClassSel(true_lake_set, lake_subset_idxs, num_cls)\n", + " res_dict['sel_per_cls'].append(perClsSel)\n", + " \n", + " #augment the train_set with selected indices from the lake\n", + " if(feature==\"classimb\"):\n", + " train_set, lake_set, true_lake_set, add_val_set = aug_train_subset(train_set, lake_set, true_lake_set, subset, lake_subset_idxs, budget, True) #aug train with random if budget is not filled\n", + " if(augTarget): val_set = ConcatWithTargets(val_set, add_val_set)\n", + " elif(feature==\"ood\"):\n", + " train_set, lake_set, true_lake_set, new_private_set, add_val_set = aug_train_subset(train_set, lake_set, true_lake_set, subset, lake_subset_idxs, budget)\n", + " train_set = torch.utils.data.ConcatDataset([train_set, new_private_set]) #Add the OOD samples with a common OOD class\n", + " val_set = ConcatWithTargets(val_set, add_val_set)\n", + " if(len(private_set)!=0):\n", + " private_set = ConcatWithTargets(private_set, new_private_set)\n", + " else:\n", + " private_set = new_private_set\n", + " else:\n", + " train_set, lake_set, true_lake_set = aug_train_subset(train_set, lake_set, true_lake_set, subset, lake_subset_idxs, budget)\n", + " print(\"After augmentation, size of train_set: \", len(train_set), \" lake set: \", len(lake_set), \" val set: \", len(val_set))\n", + " \n", + "# Reinit train and lake loaders with new splits and reinit the model\n", + " trainloader = torch.utils.data.DataLoader(train_set, batch_size=trn_batch_size, shuffle=True, pin_memory=True)\n", + " lakeloader = torch.utils.data.DataLoader(lake_set, batch_size=tst_batch_size, shuffle=False, pin_memory=True)\n", + "\n", + " if(augTarget):\n", + " valloader = torch.utils.data.DataLoader(val_set, batch_size=len(val_set), shuffle=False, pin_memory=True)\n", + " model = create_model(model_name, num_cls, device, strategy_args['embedding_type'])\n", + " optimizer = optimizer_without_scheduler(model, learning_rate)\n", + " \n", + " #Start training\n", + " start_time = time.time()\n", + " num_ep=1\n", + " while(full_trn_acc[i]<0.99 and num_ep<300):\n", + " model.train()\n", + " for batch_idx, (inputs, targets) in enumerate(trainloader):\n", + " inputs, targets = inputs.to(device), targets.to(device, non_blocking=True)\n", + " # Variables in Pytorch are differentiable.\n", + " inputs, target = Variable(inputs), Variable(inputs)\n", + " # This will zero out the gradients for this batch.\n", + " optimizer.zero_grad()\n", + " outputs = model(inputs)\n", + " loss = criterion(outputs, targets)\n", + " loss.backward()\n", + " optimizer.step()\n", + "# scheduler.step()\n", + " \n", + " full_trn_loss = 0\n", + " full_trn_correct = 0\n", + " full_trn_total = 0\n", + " model.eval()\n", + " with torch.no_grad():\n", + " for batch_idx, (inputs, targets) in enumerate(trainloader):\n", + " inputs, targets = inputs.to(device), targets.to(device, non_blocking=True)\n", + " outputs = model(inputs)\n", + " loss = criterion(outputs, targets)\n", + " full_trn_loss += loss.item()\n", + " _, predicted = outputs.max(1)\n", + " full_trn_total += targets.size(0)\n", + " full_trn_correct += predicted.eq(targets).sum().item()\n", + " full_trn_acc[i] = full_trn_correct / 
full_trn_total\n", + " print(\"Selection Epoch \", i, \" Training epoch [\" , num_ep, \"]\" , \" Training Acc: \", full_trn_acc[i], end=\"\\r\")\n", + " num_ep+=1\n", + " timing[i] = time.time() - start_time\n", + " with torch.no_grad():\n", + " final_val_predictions = []\n", + " final_val_classifications = []\n", + " for batch_idx, (inputs, targets) in enumerate(valloader): #Compute Val accuracy\n", + " inputs, targets = inputs.to(device), targets.to(device, non_blocking=True)\n", + " outputs = model(inputs)\n", + " loss = criterion(outputs, targets)\n", + " val_loss += loss.item()\n", + " if(feature==\"ood\"): \n", + " _, predicted = outputs[...,:-1].max(1)\n", + " else:\n", + " _, predicted = outputs.max(1)\n", + " val_total += targets.size(0)\n", + " val_correct += predicted.eq(targets).sum().item()\n", + " final_val_predictions += list(predicted.cpu().numpy())\n", + " final_val_classifications += list(predicted.eq(targets).cpu().numpy())\n", + "\n", + " final_tst_predictions = []\n", + " final_tst_classifications = []\n", + " for batch_idx, (inputs, targets) in enumerate(tstloader): #Compute test accuracy\n", + " inputs, targets = inputs.to(device), targets.to(device, non_blocking=True)\n", + " outputs = model(inputs)\n", + " loss = criterion(outputs, targets)\n", + " tst_loss += loss.item()\n", + " if(feature==\"ood\"): \n", + " _, predicted = outputs[...,:-1].max(1)\n", + " else:\n", + " _, predicted = outputs.max(1)\n", + " tst_total += targets.size(0)\n", + " tst_correct += predicted.eq(targets).sum().item()\n", + " final_tst_predictions += list(predicted.cpu().numpy())\n", + " final_tst_classifications += list(predicted.eq(targets).cpu().numpy()) \n", + " val_acc[i] = val_correct / val_total\n", + " tst_acc[i] = tst_correct / tst_total\n", + " val_losses[i] = val_loss\n", + " fulltrn_losses[i] = full_trn_loss\n", + " tst_losses[i] = tst_loss\n", + " full_val_acc = list(np.array(val_acc))\n", + " full_timing = list(np.array(timing))\n", + " res_dict[\"test_acc\"].append(tst_acc[i])\n", + " print('Epoch:', i + 1, 'FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time:', full_trn_loss, full_trn_acc[i], val_loss, val_acc[i], tst_loss, tst_acc[i], timing[i])\n", + " if(i==0):\n", + " torch.save(model.state_dict(), initModelPath) #save initial train model if not present\n", + " if(computeErrorLog):\n", + " tst_err_log, val_err_log, val_class_err_idxs = find_err_per_class(test_set, val_set, final_val_classifications, final_val_predictions, final_tst_classifications, final_tst_predictions, all_logs_dir, sf+\"_\"+str(bud))\n", + " csvlog.append(tst_err_log)\n", + " val_csvlog.append(val_err_log)\n", + " print(csvlog)\n", + " res_dict[\"all_class_acc\"] = csvlog\n", + " res_dict[\"all_val_class_acc\"] = val_csvlog\n", + " with open(os.path.join(all_logs_dir, exp_name+\".csv\"), \"w\") as f:\n", + " writer = csv.writer(f)\n", + " writer.writerows(csvlog)\n", + " #save results dir with test acc and per class selections\n", + " with open(os.path.join(all_logs_dir, exp_name+\".json\"), 'w') as fp:\n", + " json.dump(res_dict, fp)\n", + " plt.xlabel('AL epochs')\n", + " plt.ylabel('Test Accuracy')\n", + " plt.plot(tst_acc, label=f'{strategy}-{sf}')\n", + " plt.title('Budget:'+str(budget)+' Trainset:'+ str(split_cfg['per_idc_train']))\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### FL2MI" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "SIM 
fl2mi\n", + "num ood samples: 5000\n", + "Custom dataset stats: Train size: 105 Val size: 14 Lake size: 6071 Test set: 245\n", + "selected classes are: [0 1 2 3 4 5 6]\n", + "Saving results to: ./SMI_active_learning_results/dermamnist/ood/fl2mi/70/fkna_3\n", + "dermamnist_ood_SIM_7_fl2mi_budget:70_epochs:10_linear:True_runsfkna_3\n", + "AL epoch: 0\n", + "initial training epoch\n", + "Init model loaded from disk, skipping init training: /mnt/data2/akshit/Derma/weights/dermamnist_ood_ResNet18_0.01_15_2_7\n", + "AL epoch: 1\n", + "val, test error% for class 0 : 0.0 57.14\n", + "val, test error% for class 1 : 100.0 68.57\n", + "val, test error% for class 2 : 100.0 94.29\n", + "val, test error% for class 3 : 0.0 62.86\n", + "val, test error% for class 4 : 100.0 40.0\n", + "val, test error% for class 5 : 100.0 71.43\n", + "val, test error% for class 6 : 100.0 65.71\n", + "65 / 70 idc points.\n", + "selEpoch: 1, Selection Ended at: 2022-01-30 19:29:13.601104\n", + "65 6001 6071\n", + "After augmentation, size of train_set: 175 lake set: 6001 val set: 79\n", + "Selection Epoch 1 Training epoch [ 1 ] Training Acc: 0.24571428571428572\n", + "Selection Epoch 1 Training epoch [ 2 ] Training Acc: 0.2342857142857143\n", + "Selection Epoch 1 Training epoch [ 3 ] Training Acc: 0.10857142857142857\n", + "Selection Epoch 1 Training epoch [ 4 ] Training Acc: 0.6228571428571429\n", + "Selection Epoch 1 Training epoch [ 5 ] Training Acc: 0.56\n", + "Selection Epoch 1 Training epoch [ 6 ] Training Acc: 0.9085714285714286\n", + "Selection Epoch 1 Training epoch [ 7 ] Training Acc: 0.9428571428571428\n", + "Selection Epoch 1 Training epoch [ 8 ] Training Acc: 0.96\n", + "Selection Epoch 1 Training epoch [ 9 ] Training Acc: 0.8742857142857143\n", + "Selection Epoch 1 Training epoch [ 10 ] Training Acc: 0.9314285714285714\n", + "Selection Epoch 1 Training epoch [ 11 ] Training Acc: 0.9885714285714285\n", + "Selection Epoch 1 Training epoch [ 12 ] Training Acc: 0.9314285714285714\n", + "Selection Epoch 1 Training epoch [ 13 ] Training Acc: 0.9828571428571429\n", + "Selection Epoch 1 Training epoch [ 14 ] Training Acc: 0.9828571428571429\n", + "Selection Epoch 1 Training epoch [ 15 ] Training Acc: 0.9885714285714285\n", + "Selection Epoch 1 Training epoch [ 16 ] Training Acc: 0.9942857142857143\n", + "Epoch: 2 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.26401002751663327 0.9942857142857143 0.6622737050056458 0.8860759493670886 9.055487871170044 0.37142857142857144 18.861233234405518\n", + "AL epoch: 2\n", + "val, test error% for class 0 : 4.55 65.71\n", + "val, test error% for class 1 : 15.38 54.29\n", + "val, test error% for class 2 : 7.69 71.43\n", + "val, test error% for class 3 : 11.11 68.57\n", + "val, test error% for class 4 : 22.22 71.43\n", + "val, test error% for class 5 : 0.0 42.86\n", + "val, test error% for class 6 : 66.67 65.71\n", + "69 / 70 idc points.\n", + "selEpoch: 2, Selection Ended at: 2022-01-30 19:30:01.165050\n", + "69 5931 6001\n", + "After augmentation, size of train_set: 245 lake set: 5931 val set: 148\n", + "Selection Epoch 2 Training epoch [ 1 ] Training Acc: 0.1510204081632653\n", + "Selection Epoch 2 Training epoch [ 2 ] Training Acc: 0.23265306122448978\n", + "Selection Epoch 2 Training epoch [ 3 ] Training Acc: 0.3142857142857143\n", + "Selection Epoch 2 Training epoch [ 4 ] Training Acc: 0.363265306122449\n", + "Selection Epoch 2 Training epoch [ 5 ] Training Acc: 0.39591836734693875\n", + "Selection Epoch 2 Training epoch [ 6 ] Training Acc: 0.3346938775510204\n", + 
"Selection Epoch 2 Training epoch [ 7 ] Training Acc: 0.34285714285714286\n", + "Selection Epoch 2 Training epoch [ 8 ] Training Acc: 0.4775510204081633\n", + "Selection Epoch 2 Training epoch [ 9 ] Training Acc: 0.37551020408163266\n", + "Selection Epoch 2 Training epoch [ 10 ] Training Acc: 0.5102040816326531\n", + "Selection Epoch 2 Training epoch [ 11 ] Training Acc: 0.6408163265306123\n", + "Selection Epoch 2 Training epoch [ 12 ] Training Acc: 0.5673469387755102\n", + "Selection Epoch 2 Training epoch [ 13 ] Training Acc: 0.7306122448979592\n", + "Selection Epoch 2 Training epoch [ 14 ] Training Acc: 0.4816326530612245\n", + "Selection Epoch 2 Training epoch [ 15 ] Training Acc: 0.7346938775510204\n", + "Selection Epoch 2 Training epoch [ 16 ] Training Acc: 0.8530612244897959\n", + "Selection Epoch 2 Training epoch [ 17 ] Training Acc: 0.8285714285714286\n", + "Selection Epoch 2 Training epoch [ 18 ] Training Acc: 0.8244897959183674\n", + "Selection Epoch 2 Training epoch [ 19 ] Training Acc: 0.746938775510204\n", + "Selection Epoch 2 Training epoch [ 20 ] Training Acc: 0.8857142857142857\n", + "Selection Epoch 2 Training epoch [ 21 ] Training Acc: 0.9061224489795918\n", + "Selection Epoch 2 Training epoch [ 22 ] Training Acc: 0.8571428571428571\n", + "Selection Epoch 2 Training epoch [ 23 ] Training Acc: 0.926530612244898\n", + "Selection Epoch 2 Training epoch [ 24 ] Training Acc: 0.9102040816326531\n", + "Selection Epoch 2 Training epoch [ 25 ] Training Acc: 0.8244897959183674\n", + "Selection Epoch 2 Training epoch [ 26 ] Training Acc: 0.9183673469387755\n", + "Selection Epoch 2 Training epoch [ 27 ] Training Acc: 0.8244897959183674\n", + "Selection Epoch 2 Training epoch [ 28 ] Training Acc: 0.8734693877551021\n", + "Selection Epoch 2 Training epoch [ 29 ] Training Acc: 0.9346938775510204\n", + "Selection Epoch 2 Training epoch [ 30 ] Training Acc: 0.9183673469387755\n", + "Selection Epoch 2 Training epoch [ 31 ] Training Acc: 0.8938775510204081\n", + "Selection Epoch 2 Training epoch [ 32 ] Training Acc: 0.8775510204081632\n", + "Selection Epoch 2 Training epoch [ 33 ] Training Acc: 0.9591836734693877\n", + "Selection Epoch 2 Training epoch [ 34 ] Training Acc: 0.9918367346938776\n", + "Epoch: 3 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.5280177568492945 0.9918367346938776 0.3218691051006317 0.918918918918919 11.65410041809082 0.39183673469387753 56.265161991119385\n", + "AL epoch: 3\n", + "val, test error% for class 0 : 2.78 42.86\n", + "val, test error% for class 1 : 4.35 48.57\n", + "val, test error% for class 2 : 10.53 82.86\n", + "val, test error% for class 3 : 6.25 65.71\n", + "val, test error% for class 4 : 11.76 60.0\n", + "val, test error% for class 5 : 9.68 60.0\n", + "val, test error% for class 6 : 33.33 65.71\n", + "34 / 70 idc points.\n", + "selEpoch: 3, Selection Ended at: 2022-01-30 19:31:25.109276\n", + "34 5861 5931\n", + "After augmentation, size of train_set: 315 lake set: 5861 val set: 182\n", + "Selection Epoch 3 Training epoch [ 1 ] Training Acc: 0.326984126984127\n", + "Selection Epoch 3 Training epoch [ 2 ] Training Acc: 0.4380952380952381\n", + "Selection Epoch 3 Training epoch [ 3 ] Training Acc: 0.4095238095238095\n", + "Selection Epoch 3 Training epoch [ 4 ] Training Acc: 0.43174603174603177\n", + "Selection Epoch 3 Training epoch [ 5 ] Training Acc: 0.5015873015873016\n", + "Selection Epoch 3 Training epoch [ 6 ] Training Acc: 0.5619047619047619\n", + "Selection Epoch 3 Training epoch [ 7 ] Training Acc: 0.6095238095238096\n", + 
"Selection Epoch 3 Training epoch [ 8 ] Training Acc: 0.5619047619047619\n", + "Selection Epoch 3 Training epoch [ 9 ] Training Acc: 0.6063492063492063\n", + "Selection Epoch 3 Training epoch [ 10 ] Training Acc: 0.6476190476190476\n", + "Selection Epoch 3 Training epoch [ 11 ] Training Acc: 0.6825396825396826\n", + "Selection Epoch 3 Training epoch [ 12 ] Training Acc: 0.6698412698412698\n", + "Selection Epoch 3 Training epoch [ 13 ] Training Acc: 0.8317460317460318\n", + "Selection Epoch 3 Training epoch [ 14 ] Training Acc: 0.8031746031746032\n", + "Selection Epoch 3 Training epoch [ 15 ] Training Acc: 0.9079365079365079\n", + "Selection Epoch 3 Training epoch [ 16 ] Training Acc: 0.8539682539682539\n", + "Selection Epoch 3 Training epoch [ 17 ] Training Acc: 0.8253968253968254\n", + "Selection Epoch 3 Training epoch [ 18 ] Training Acc: 0.8666666666666667\n", + "Selection Epoch 3 Training epoch [ 19 ] Training Acc: 0.9682539682539683\n", + "Selection Epoch 3 Training epoch [ 20 ] Training Acc: 0.9714285714285714\n", + "Selection Epoch 3 Training epoch [ 21 ] Training Acc: 0.8285714285714286\n", + "Selection Epoch 3 Training epoch [ 22 ] Training Acc: 0.7936507936507936\n", + "Selection Epoch 3 Training epoch [ 23 ] Training Acc: 0.9174603174603174\n", + "Selection Epoch 3 Training epoch [ 24 ] Training Acc: 0.9841269841269841\n", + "Selection Epoch 3 Training epoch [ 25 ] Training Acc: 0.9777777777777777\n", + "Selection Epoch 3 Training epoch [ 26 ] Training Acc: 0.9492063492063492\n", + "Selection Epoch 3 Training epoch [ 27 ] Training Acc: 0.9682539682539683\n", + "Selection Epoch 3 Training epoch [ 28 ] Training Acc: 0.9777777777777777\n", + "Selection Epoch 3 Training epoch [ 29 ] Training Acc: 0.9714285714285714\n", + "Selection Epoch 3 Training epoch [ 30 ] Training Acc: 0.9555555555555556\n", + "Selection Epoch 3 Training epoch [ 31 ] Training Acc: 0.9936507936507937\n", + "Epoch: 4 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.422930464905221 0.9936507936507937 0.2534492313861847 0.945054945054945 11.837823390960693 0.3673469387755102 66.24585270881653\n", + "AL epoch: 4\n", + "val, test error% for class 0 : 0.0 37.14\n", + "val, test error% for class 1 : 7.69 62.86\n", + "val, test error% for class 2 : 3.12 71.43\n", + "val, test error% for class 3 : 19.05 88.57\n", + "val, test error% for class 4 : 4.76 65.71\n", + "val, test error% for class 5 : 3.12 54.29\n", + "val, test error% for class 6 : 11.11 62.86\n", + "70 / 70 idc points.\n", + "selEpoch: 4, Selection Ended at: 2022-01-30 19:32:59.265434\n", + "70 5791 5861\n", + "After augmentation, size of train_set: 385 lake set: 5791 val set: 252\n", + "Selection Epoch 4 Training epoch [ 1 ] Training Acc: 0.11688311688311688\n", + "Selection Epoch 4 Training epoch [ 2 ] Training Acc: 0.2623376623376623\n", + "Selection Epoch 4 Training epoch [ 3 ] Training Acc: 0.45194805194805193\n", + "Selection Epoch 4 Training epoch [ 4 ] Training Acc: 0.5428571428571428\n", + "Selection Epoch 4 Training epoch [ 5 ] Training Acc: 0.45194805194805193\n", + "Selection Epoch 4 Training epoch [ 6 ] Training Acc: 0.41818181818181815\n", + "Selection Epoch 4 Training epoch [ 7 ] Training Acc: 0.5636363636363636\n", + "Selection Epoch 4 Training epoch [ 8 ] Training Acc: 0.5688311688311688\n", + "Selection Epoch 4 Training epoch [ 9 ] Training Acc: 0.5896103896103896\n", + "Selection Epoch 4 Training epoch [ 10 ] Training Acc: 0.5948051948051948\n", + "Selection Epoch 4 Training epoch [ 11 ] Training Acc: 0.7272727272727273\n", + 
"Selection Epoch 4 Training epoch [ 12 ] Training Acc: 0.7194805194805195\n", + "Selection Epoch 4 Training epoch [ 13 ] Training Acc: 0.6779220779220779\n", + "Selection Epoch 4 Training epoch [ 14 ] Training Acc: 0.8441558441558441\n", + "Selection Epoch 4 Training epoch [ 15 ] Training Acc: 0.8155844155844156\n", + "Selection Epoch 4 Training epoch [ 16 ] Training Acc: 0.8103896103896104\n", + "Selection Epoch 4 Training epoch [ 17 ] Training Acc: 0.7506493506493507\n", + "Selection Epoch 4 Training epoch [ 18 ] Training Acc: 0.8597402597402597\n", + "Selection Epoch 4 Training epoch [ 19 ] Training Acc: 0.9090909090909091\n", + "Selection Epoch 4 Training epoch [ 20 ] Training Acc: 0.6831168831168831\n", + "Selection Epoch 4 Training epoch [ 21 ] Training Acc: 0.7402597402597403\n", + "Selection Epoch 4 Training epoch [ 22 ] Training Acc: 0.9324675324675324\n", + "Selection Epoch 4 Training epoch [ 23 ] Training Acc: 0.9142857142857143\n", + "Selection Epoch 4 Training epoch [ 24 ] Training Acc: 0.9376623376623376\n", + "Selection Epoch 4 Training epoch [ 25 ] Training Acc: 0.9012987012987013\n", + "Selection Epoch 4 Training epoch [ 26 ] Training Acc: 0.7974025974025974\n", + "Selection Epoch 4 Training epoch [ 27 ] Training Acc: 0.9844155844155844\n", + "Selection Epoch 4 Training epoch [ 28 ] Training Acc: 0.9818181818181818\n", + "Selection Epoch 4 Training epoch [ 29 ] Training Acc: 0.9896103896103896\n", + "Selection Epoch 4 Training epoch [ 30 ] Training Acc: 0.987012987012987\n", + "Selection Epoch 4 Training epoch [ 31 ] Training Acc: 0.9896103896103896\n", + "Selection Epoch 4 Training epoch [ 32 ] Training Acc: 0.9948051948051948\n", + "Epoch: 5 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.363018142816145 0.9948051948051948 0.22979700565338135 0.9603174603174603 9.733516931533813 0.40408163265306124 82.98356890678406\n", + "AL epoch: 5\n", + "val, test error% for class 0 : 1.79 68.57\n", + "val, test error% for class 1 : 5.13 68.57\n", + "val, test error% for class 2 : 4.55 80.0\n", + "val, test error% for class 3 : 0.0 57.14\n", + "val, test error% for class 4 : 7.41 74.29\n", + "val, test error% for class 5 : 0.0 22.86\n", + "val, test error% for class 6 : 23.08 45.71\n", + "69 / 70 idc points.\n", + "selEpoch: 5, Selection Ended at: 2022-01-30 19:34:49.251567\n", + "69 5721 5791\n", + "After augmentation, size of train_set: 455 lake set: 5721 val set: 321\n", + "Selection Epoch 5 Training epoch [ 1 ] Training Acc: 0.27472527472527475\n", + "Selection Epoch 5 Training epoch [ 2 ] Training Acc: 0.4065934065934066\n", + "Selection Epoch 5 Training epoch [ 3 ] Training Acc: 0.389010989010989\n", + "Selection Epoch 5 Training epoch [ 4 ] Training Acc: 0.45274725274725275\n", + "Selection Epoch 5 Training epoch [ 5 ] Training Acc: 0.5208791208791209\n", + "Selection Epoch 5 Training epoch [ 6 ] Training Acc: 0.5164835164835165\n", + "Selection Epoch 5 Training epoch [ 7 ] Training Acc: 0.46813186813186813\n", + "Selection Epoch 5 Training epoch [ 8 ] Training Acc: 0.5406593406593406\n", + "Selection Epoch 5 Training epoch [ 9 ] Training Acc: 0.6395604395604395\n", + "Selection Epoch 5 Training epoch [ 10 ] Training Acc: 0.6373626373626373\n", + "Selection Epoch 5 Training epoch [ 11 ] Training Acc: 0.6593406593406593\n", + "Selection Epoch 5 Training epoch [ 12 ] Training Acc: 0.6417582417582418\n", + "Selection Epoch 5 Training epoch [ 13 ] Training Acc: 0.7582417582417582\n", + "Selection Epoch 5 Training epoch [ 14 ] Training Acc: 0.7538461538461538\n", + 
"Selection Epoch 5 Training epoch [ 15 ] Training Acc: 0.8483516483516483\n", + "Selection Epoch 5 Training epoch [ 16 ] Training Acc: 0.810989010989011\n", + "Selection Epoch 5 Training epoch [ 17 ] Training Acc: 0.832967032967033\n", + "Selection Epoch 5 Training epoch [ 18 ] Training Acc: 0.8571428571428571\n", + "Selection Epoch 5 Training epoch [ 19 ] Training Acc: 0.810989010989011\n", + "Selection Epoch 5 Training epoch [ 20 ] Training Acc: 0.8637362637362638\n", + "Selection Epoch 5 Training epoch [ 21 ] Training Acc: 0.9274725274725275\n", + "Selection Epoch 5 Training epoch [ 22 ] Training Acc: 0.8901098901098901\n", + "Selection Epoch 5 Training epoch [ 23 ] Training Acc: 0.9626373626373627\n", + "Selection Epoch 5 Training epoch [ 24 ] Training Acc: 0.9186813186813186\n", + "Selection Epoch 5 Training epoch [ 25 ] Training Acc: 0.9230769230769231\n", + "Selection Epoch 5 Training epoch [ 26 ] Training Acc: 0.9164835164835164\n", + "Selection Epoch 5 Training epoch [ 27 ] Training Acc: 0.9428571428571428\n", + "Selection Epoch 5 Training epoch [ 28 ] Training Acc: 0.8901098901098901\n", + "Selection Epoch 5 Training epoch [ 29 ] Training Acc: 0.9978021978021978\n", + "Epoch: 6 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.32946652022656053 0.9978021978021978 0.15124733746051788 0.9626168224299065 6.823351621627808 0.47346938775510206 90.22029900550842\n", + "AL epoch: 6\n", + "val, test error% for class 0 : 1.64 48.57\n", + "val, test error% for class 1 : 2.13 51.43\n", + "val, test error% for class 2 : 3.51 65.71\n", + "val, test error% for class 3 : 3.23 65.71\n", + "val, test error% for class 4 : 6.67 60.0\n", + "val, test error% for class 5 : 3.9 40.0\n", + "val, test error% for class 6 : 11.11 37.14\n", + "70 / 70 idc points.\n", + "selEpoch: 6, Selection Ended at: 2022-01-30 19:36:46.328098\n", + "70 5651 5721\n", + "After augmentation, size of train_set: 525 lake set: 5651 val set: 391\n", + "Selection Epoch 6 Training epoch [ 1 ] Training Acc: 0.2571428571428571\n", + "Selection Epoch 6 Training epoch [ 2 ] Training Acc: 0.3238095238095238\n", + "Selection Epoch 6 Training epoch [ 3 ] Training Acc: 0.46285714285714286\n", + "Selection Epoch 6 Training epoch [ 4 ] Training Acc: 0.44761904761904764\n", + "Selection Epoch 6 Training epoch [ 5 ] Training Acc: 0.4990476190476191\n", + "Selection Epoch 6 Training epoch [ 6 ] Training Acc: 0.5733333333333334\n", + "Selection Epoch 6 Training epoch [ 7 ] Training Acc: 0.5657142857142857\n", + "Selection Epoch 6 Training epoch [ 8 ] Training Acc: 0.49523809523809526\n", + "Selection Epoch 6 Training epoch [ 9 ] Training Acc: 0.40190476190476193\n", + "Selection Epoch 6 Training epoch [ 10 ] Training Acc: 0.5542857142857143\n", + "Selection Epoch 6 Training epoch [ 11 ] Training Acc: 0.6666666666666666\n", + "Selection Epoch 6 Training epoch [ 12 ] Training Acc: 0.7142857142857143\n", + "Selection Epoch 6 Training epoch [ 13 ] Training Acc: 0.7714285714285715\n", + "Selection Epoch 6 Training epoch [ 14 ] Training Acc: 0.7314285714285714\n", + "Selection Epoch 6 Training epoch [ 15 ] Training Acc: 0.7638095238095238\n", + "Selection Epoch 6 Training epoch [ 16 ] Training Acc: 0.9352380952380952\n", + "Selection Epoch 6 Training epoch [ 17 ] Training Acc: 0.9295238095238095\n", + "Selection Epoch 6 Training epoch [ 18 ] Training Acc: 0.9276190476190476\n", + "Selection Epoch 6 Training epoch [ 19 ] Training Acc: 0.9561904761904761\n", + "Selection Epoch 6 Training epoch [ 20 ] Training Acc: 0.8628571428571429\n", + 
"Selection Epoch 6 Training epoch [ 21 ] Training Acc: 0.8609523809523809\n", + "Selection Epoch 6 Training epoch [ 22 ] Training Acc: 0.9180952380952381\n", + "Selection Epoch 6 Training epoch [ 23 ] Training Acc: 0.979047619047619\n", + "Selection Epoch 6 Training epoch [ 24 ] Training Acc: 0.9485714285714286\n", + "Selection Epoch 6 Training epoch [ 25 ] Training Acc: 0.9238095238095239\n", + "Selection Epoch 6 Training epoch [ 26 ] Training Acc: 0.9904761904761905\n", + "Epoch: 7 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 1.1539905744139105 0.9904761904761905 0.10048390924930573 0.9769820971867008 7.810051679611206 0.46938775510204084 92.48000264167786\n", + "AL epoch: 7\n", + "val, test error% for class 0 : 0.0 42.86\n", + "val, test error% for class 1 : 1.67 51.43\n", + "val, test error% for class 2 : 2.9 68.57\n", + "val, test error% for class 3 : 2.5 65.71\n", + "val, test error% for class 4 : 4.88 62.86\n", + "val, test error% for class 5 : 1.23 28.57\n", + "val, test error% for class 6 : 9.52 51.43\n", + "69 / 70 idc points.\n", + "selEpoch: 7, Selection Ended at: 2022-01-30 19:38:45.388319\n", + "69 5581 5651\n", + "After augmentation, size of train_set: 595 lake set: 5581 val set: 460\n", + "Selection Epoch 7 Training epoch [ 1 ] Training Acc: 0.21512605042016808\n", + "Selection Epoch 7 Training epoch [ 2 ] Training Acc: 0.2739495798319328\n", + "Selection Epoch 7 Training epoch [ 3 ] Training Acc: 0.47058823529411764\n", + "Selection Epoch 7 Training epoch [ 4 ] Training Acc: 0.346218487394958\n", + "Selection Epoch 7 Training epoch [ 5 ] Training Acc: 0.5042016806722689\n", + "Selection Epoch 7 Training epoch [ 6 ] Training Acc: 0.5126050420168067\n", + "Selection Epoch 7 Training epoch [ 7 ] Training Acc: 0.5260504201680672\n", + "Selection Epoch 7 Training epoch [ 8 ] Training Acc: 0.5579831932773109\n", + "Selection Epoch 7 Training epoch [ 9 ] Training Acc: 0.5529411764705883\n", + "Selection Epoch 7 Training epoch [ 10 ] Training Acc: 0.5983193277310924\n", + "Selection Epoch 7 Training epoch [ 11 ] Training Acc: 0.6873949579831933\n", + "Selection Epoch 7 Training epoch [ 12 ] Training Acc: 0.6890756302521008\n", + "Selection Epoch 7 Training epoch [ 13 ] Training Acc: 0.6823529411764706\n", + "Selection Epoch 7 Training epoch [ 14 ] Training Acc: 0.7495798319327731\n", + "Selection Epoch 7 Training epoch [ 15 ] Training Acc: 0.7781512605042017\n", + "Selection Epoch 7 Training epoch [ 16 ] Training Acc: 0.8470588235294118\n", + "Selection Epoch 7 Training epoch [ 17 ] Training Acc: 0.865546218487395\n", + "Selection Epoch 7 Training epoch [ 18 ] Training Acc: 0.8050420168067227\n", + "Selection Epoch 7 Training epoch [ 19 ] Training Acc: 0.8991596638655462\n", + "Selection Epoch 7 Training epoch [ 20 ] Training Acc: 0.9243697478991597\n", + "Selection Epoch 7 Training epoch [ 21 ] Training Acc: 0.9092436974789916\n", + "Selection Epoch 7 Training epoch [ 22 ] Training Acc: 0.8756302521008403\n", + "Selection Epoch 7 Training epoch [ 23 ] Training Acc: 0.8168067226890756\n", + "Selection Epoch 7 Training epoch [ 24 ] Training Acc: 0.8907563025210085\n", + "Selection Epoch 7 Training epoch [ 25 ] Training Acc: 0.9563025210084034\n", + "Selection Epoch 7 Training epoch [ 26 ] Training Acc: 0.9798319327731092\n", + "Selection Epoch 7 Training epoch [ 27 ] Training Acc: 0.9697478991596639\n", + "Selection Epoch 7 Training epoch [ 28 ] Training Acc: 0.9915966386554622\n", + "Epoch: 8 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.8081282736966386 
0.9915966386554622 0.0887976735830307 0.9760869565217392 6.270045280456543 0.5714285714285714 113.10239100456238\n", + "AL epoch: 8\n", + "val, test error% for class 0 : 1.05 22.86\n", + "val, test error% for class 1 : 2.82 60.0\n", + "val, test error% for class 2 : 5.06 77.14\n", + "val, test error% for class 3 : 0.0 31.43\n", + "val, test error% for class 4 : 4.0 48.57\n", + "val, test error% for class 5 : 0.0 34.29\n", + "val, test error% for class 6 : 8.0 25.71\n", + "69 / 70 idc points.\n", + "selEpoch: 8, Selection Ended at: 2022-01-30 19:41:04.975484\n", + "69 5511 5581\n", + "After augmentation, size of train_set: 665 lake set: 5511 val set: 529\n", + "Selection Epoch 8 Training epoch [ 1 ] Training Acc: 0.26165413533834586\n", + "Selection Epoch 8 Training epoch [ 2 ] Training Acc: 0.42105263157894735\n", + "Selection Epoch 8 Training epoch [ 3 ] Training Acc: 0.47969924812030074\n", + "Selection Epoch 8 Training epoch [ 4 ] Training Acc: 0.3849624060150376\n", + "Selection Epoch 8 Training epoch [ 5 ] Training Acc: 0.5203007518796993\n", + "Selection Epoch 8 Training epoch [ 6 ] Training Acc: 0.512781954887218\n", + "Selection Epoch 8 Training epoch [ 7 ] Training Acc: 0.5834586466165413\n", + "Selection Epoch 8 Training epoch [ 8 ] Training Acc: 0.5894736842105263\n", + "Selection Epoch 8 Training epoch [ 9 ] Training Acc: 0.6421052631578947\n", + "Selection Epoch 8 Training epoch [ 10 ] Training Acc: 0.6571428571428571\n", + "Selection Epoch 8 Training epoch [ 11 ] Training Acc: 0.7142857142857143\n", + "Selection Epoch 8 Training epoch [ 12 ] Training Acc: 0.6270676691729323\n", + "Selection Epoch 8 Training epoch [ 13 ] Training Acc: 0.7669172932330827\n", + "Selection Epoch 8 Training epoch [ 14 ] Training Acc: 0.5428571428571428\n", + "Selection Epoch 8 Training epoch [ 15 ] Training Acc: 0.849624060150376\n", + "Selection Epoch 8 Training epoch [ 16 ] Training Acc: 0.825563909774436\n", + "Selection Epoch 8 Training epoch [ 17 ] Training Acc: 0.7368421052631579\n", + "Selection Epoch 8 Training epoch [ 18 ] Training Acc: 0.9353383458646617\n", + "Selection Epoch 8 Training epoch [ 19 ] Training Acc: 0.8706766917293233\n", + "Selection Epoch 8 Training epoch [ 20 ] Training Acc: 0.9609022556390977\n", + "Selection Epoch 8 Training epoch [ 21 ] Training Acc: 0.9278195488721804\n", + "Selection Epoch 8 Training epoch [ 22 ] Training Acc: 0.9323308270676691\n", + "Selection Epoch 8 Training epoch [ 23 ] Training Acc: 0.9834586466165414\n", + "Selection Epoch 8 Training epoch [ 24 ] Training Acc: 0.9593984962406015\n", + "Selection Epoch 8 Training epoch [ 25 ] Training Acc: 0.825563909774436\n", + "Selection Epoch 8 Training epoch [ 26 ] Training Acc: 0.9578947368421052\n", + "Selection Epoch 8 Training epoch [ 27 ] Training Acc: 0.9954887218045113\n", + "Epoch: 9 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.8665523931849748 0.9954887218045113 0.07644832879304886 0.9829867674858223 6.26385235786438 0.5591836734693878 121.08070039749146\n", + "AL epoch: 9\n", + "val, test error% for class 0 : 0.88 34.29\n", + "val, test error% for class 1 : 1.19 31.43\n", + "val, test error% for class 2 : 1.19 68.57\n", + "val, test error% for class 3 : 1.79 40.0\n", + "val, test error% for class 4 : 3.33 57.14\n", + "val, test error% for class 5 : 1.96 40.0\n", + "val, test error% for class 6 : 3.33 37.14\n", + "61 / 70 idc points.\n", + "selEpoch: 9, Selection Ended at: 2022-01-30 19:43:32.245282\n", + "61 5441 5511\n", + "After augmentation, size of train_set: 735 lake set: 5441 
val set: 590\n", + "Selection Epoch 9 Training epoch [ 1 ] Training Acc: 0.24897959183673468\n", + "Selection Epoch 9 Training epoch [ 2 ] Training Acc: 0.37551020408163266\n", + "Selection Epoch 9 Training epoch [ 3 ] Training Acc: 0.41768707482993195\n", + "Selection Epoch 9 Training epoch [ 4 ] Training Acc: 0.4013605442176871\n", + "Selection Epoch 9 Training epoch [ 5 ] Training Acc: 0.5360544217687074\n", + "Selection Epoch 9 Training epoch [ 6 ] Training Acc: 0.4312925170068027\n", + "Selection Epoch 9 Training epoch [ 7 ] Training Acc: 0.5183673469387755\n", + "Selection Epoch 9 Training epoch [ 8 ] Training Acc: 0.5795918367346938\n", + "Selection Epoch 9 Training epoch [ 9 ] Training Acc: 0.5795918367346938\n", + "Selection Epoch 9 Training epoch [ 10 ] Training Acc: 0.5591836734693878\n", + "Selection Epoch 9 Training epoch [ 11 ] Training Acc: 0.6204081632653061\n", + "Selection Epoch 9 Training epoch [ 12 ] Training Acc: 0.6884353741496598\n", + "Selection Epoch 9 Training epoch [ 13 ] Training Acc: 0.7224489795918367\n", + "Selection Epoch 9 Training epoch [ 14 ] Training Acc: 0.7605442176870748\n", + "Selection Epoch 9 Training epoch [ 15 ] Training Acc: 0.7020408163265306\n", + "Selection Epoch 9 Training epoch [ 16 ] Training Acc: 0.7659863945578231\n", + "Selection Epoch 9 Training epoch [ 17 ] Training Acc: 0.8666666666666667\n", + "Selection Epoch 9 Training epoch [ 18 ] Training Acc: 0.8857142857142857\n", + "Selection Epoch 9 Training epoch [ 19 ] Training Acc: 0.8312925170068027\n", + "Selection Epoch 9 Training epoch [ 20 ] Training Acc: 0.8367346938775511\n", + "Selection Epoch 9 Training epoch [ 21 ] Training Acc: 0.9170068027210885\n", + "Selection Epoch 9 Training epoch [ 22 ] Training Acc: 0.9469387755102041\n", + "Selection Epoch 9 Training epoch [ 23 ] Training Acc: 0.926530612244898\n", + "Selection Epoch 9 Training epoch [ 24 ] Training Acc: 0.9455782312925171\n", + "Selection Epoch 9 Training epoch [ 25 ] Training Acc: 0.9292517006802721\n", + "Selection Epoch 9 Training epoch [ 26 ] Training Acc: 0.9455782312925171\n", + "Selection Epoch 9 Training epoch [ 27 ] Training Acc: 0.9863945578231292\n", + "Selection Epoch 9 Training epoch [ 28 ] Training Acc: 0.9972789115646259\n", + "Epoch: 10 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.5676570564974099 0.9972789115646259 0.07341747730970383 0.9830508474576272 6.155284881591797 0.6081632653061224 140.13217616081238\n", + "val, test error% for class 0 : 0.82 42.86\n", + "val, test error% for class 1 : 2.13 37.14\n", + "val, test error% for class 2 : 2.06 65.71\n", + "val, test error% for class 3 : 1.67 37.14\n", + "val, test error% for class 4 : 2.7 28.57\n", + "val, test error% for class 5 : 0.92 28.57\n", + "val, test error% for class 6 : 2.94 34.29\n", + "[[57.14, 68.57, 94.29, 62.86, 40.0, 71.43, 65.71, 65.71428571428571], [65.71, 54.29, 71.43, 68.57, 71.43, 42.86, 65.71, 62.857142857142854], [42.86, 48.57, 82.86, 65.71, 60.0, 60.0, 65.71, 60.815714285714286], [37.14, 62.86, 71.43, 88.57, 65.71, 54.29, 62.86, 63.26571428571429], [68.57, 68.57, 80.0, 57.14, 74.29, 22.86, 45.71, 59.59142857142857], [48.57, 51.43, 65.71, 65.71, 60.0, 40.0, 37.14, 52.65142857142856], [42.86, 51.43, 68.57, 65.71, 62.86, 28.57, 51.43, 53.06142857142857], [22.86, 60.0, 77.14, 31.43, 48.57, 34.29, 25.71, 42.857142857142854], [34.29, 31.43, 68.57, 40.0, 57.14, 40.0, 37.14, 44.08142857142857], [42.86, 37.14, 65.71, 37.14, 28.57, 28.57, 34.29, 39.18285714285714]]\n", + "\n" + ] + } + ], + "source": [ + 
"train_model_al(datkbuildPath, exePath, num_epochs, data_name, datadir, feature, model_name, budget, split_cfg, learning_rate, run, device, computeClassErrorLog, \"SIM\",'fl2mi')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### FL1MI" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "SIM fl1mi\n", + "num ood samples: 5000\n", + "Custom dataset stats: Train size: 105 Val size: 14 Lake size: 6071 Test set: 245\n", + "selected classes are: [0 1 2 3 4 5 6]\n", + "Saving results to: ./SMI_active_learning_results/dermamnist/ood/fl1mi/70/fkna_3\n", + "dermamnist_ood_SIM_7_fl1mi_budget:70_epochs:10_linear:True_runsfkna_3\n", + "AL epoch: 0\n", + "initial training epoch\n", + "Init model loaded from disk, skipping init training: /mnt/data2/akshit/Derma/weights/dermamnist_ood_ResNet18_0.01_15_2_7\n", + "AL epoch: 1\n", + "val, test error% for class 0 : 0.0 57.14\n", + "val, test error% for class 1 : 100.0 68.57\n", + "val, test error% for class 2 : 100.0 94.29\n", + "val, test error% for class 3 : 0.0 62.86\n", + "val, test error% for class 4 : 100.0 40.0\n", + "val, test error% for class 5 : 100.0 71.43\n", + "val, test error% for class 6 : 100.0 65.71\n", + "32 / 70 idc points.\n", + "selEpoch: 1, Selection Ended at: 2022-01-30 19:46:42.803918\n", + "32 6001 6071\n", + "After augmentation, size of train_set: 175 lake set: 6001 val set: 46\n", + "Selection Epoch 1 Training epoch [ 1 ] Training Acc: 0.21714285714285714\n", + "Selection Epoch 1 Training epoch [ 2 ] Training Acc: 0.2571428571428571\n", + "Selection Epoch 1 Training epoch [ 3 ] Training Acc: 0.38285714285714284\n", + "Selection Epoch 1 Training epoch [ 4 ] Training Acc: 0.44\n", + "Selection Epoch 1 Training epoch [ 5 ] Training Acc: 0.49714285714285716\n", + "Selection Epoch 1 Training epoch [ 6 ] Training Acc: 0.4514285714285714\n", + "Selection Epoch 1 Training epoch [ 7 ] Training Acc: 0.6342857142857142\n", + "Selection Epoch 1 Training epoch [ 8 ] Training Acc: 0.5885714285714285\n", + "Selection Epoch 1 Training epoch [ 9 ] Training Acc: 0.7257142857142858\n", + "Selection Epoch 1 Training epoch [ 10 ] Training Acc: 0.5028571428571429\n", + "Selection Epoch 1 Training epoch [ 11 ] Training Acc: 0.6857142857142857\n", + "Selection Epoch 1 Training epoch [ 12 ] Training Acc: 0.7028571428571428\n", + "Selection Epoch 1 Training epoch [ 13 ] Training Acc: 0.7542857142857143\n", + "Selection Epoch 1 Training epoch [ 14 ] Training Acc: 0.8057142857142857\n", + "Selection Epoch 1 Training epoch [ 15 ] Training Acc: 0.8742857142857143\n", + "Selection Epoch 1 Training epoch [ 16 ] Training Acc: 0.8742857142857143\n", + "Selection Epoch 1 Training epoch [ 17 ] Training Acc: 0.8857142857142857\n", + "Selection Epoch 1 Training epoch [ 18 ] Training Acc: 0.9085714285714286\n", + "Selection Epoch 1 Training epoch [ 19 ] Training Acc: 0.9771428571428571\n", + "Selection Epoch 1 Training epoch [ 20 ] Training Acc: 0.9314285714285714\n", + "Selection Epoch 1 Training epoch [ 21 ] Training Acc: 0.9028571428571428\n", + "Selection Epoch 1 Training epoch [ 22 ] Training Acc: 0.8171428571428572\n", + "Selection Epoch 1 Training epoch [ 23 ] Training Acc: 0.8914285714285715\n", + "Selection Epoch 1 Training epoch [ 24 ] Training Acc: 0.8857142857142857\n", + "Selection Epoch 1 Training epoch [ 25 ] Training Acc: 0.9885714285714285\n", + "Selection Epoch 1 Training epoch [ 26 ] Training Acc: 
0.9828571428571429\n", + "Selection Epoch 1 Training epoch [ 27 ] Training Acc: 1.0\n", + "Epoch: 2 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.20072524342685938 1.0 0.8597127795219421 0.8043478260869565 9.483999490737915 0.35918367346938773 32.24341368675232\n", + "AL epoch: 2\n", + "val, test error% for class 0 : 9.09 60.0\n", + "val, test error% for class 1 : 66.67 60.0\n", + "val, test error% for class 2 : 0.0 77.14\n", + "val, test error% for class 3 : 0.0 48.57\n", + "val, test error% for class 4 : 25.0 45.71\n", + "val, test error% for class 5 : 28.57 80.0\n", + "val, test error% for class 6 : 100.0 77.14\n", + "45 / 70 idc points.\n", + "selEpoch: 2, Selection Ended at: 2022-01-30 19:48:02.904854\n", + "45 5931 6001\n", + "After augmentation, size of train_set: 245 lake set: 5931 val set: 91\n", + "Selection Epoch 2 Training epoch [ 1 ] Training Acc: 0.3469387755102041\n", + "Selection Epoch 2 Training epoch [ 2 ] Training Acc: 0.363265306122449\n", + "Selection Epoch 2 Training epoch [ 3 ] Training Acc: 0.4\n", + "Selection Epoch 2 Training epoch [ 4 ] Training Acc: 0.39591836734693875\n", + "Selection Epoch 2 Training epoch [ 5 ] Training Acc: 0.44081632653061226\n", + "Selection Epoch 2 Training epoch [ 6 ] Training Acc: 0.47346938775510206\n", + "Selection Epoch 2 Training epoch [ 7 ] Training Acc: 0.49387755102040815\n", + "Selection Epoch 2 Training epoch [ 8 ] Training Acc: 0.5061224489795918\n", + "Selection Epoch 2 Training epoch [ 9 ] Training Acc: 0.4448979591836735\n", + "Selection Epoch 2 Training epoch [ 10 ] Training Acc: 0.5551020408163265\n", + "Selection Epoch 2 Training epoch [ 11 ] Training Acc: 0.5591836734693878\n", + "Selection Epoch 2 Training epoch [ 12 ] Training Acc: 0.5755102040816327\n", + "Selection Epoch 2 Training epoch [ 13 ] Training Acc: 0.5755102040816327\n", + "Selection Epoch 2 Training epoch [ 14 ] Training Acc: 0.5714285714285714\n", + "Selection Epoch 2 Training epoch [ 15 ] Training Acc: 0.6204081632653061\n", + "Selection Epoch 2 Training epoch [ 16 ] Training Acc: 0.6040816326530613\n", + "Selection Epoch 2 Training epoch [ 17 ] Training Acc: 0.6244897959183674\n", + "Selection Epoch 2 Training epoch [ 18 ] Training Acc: 0.6081632653061224\n", + "Selection Epoch 2 Training epoch [ 19 ] Training Acc: 0.5959183673469388\n", + "Selection Epoch 2 Training epoch [ 20 ] Training Acc: 0.6653061224489796\n", + "Selection Epoch 2 Training epoch [ 21 ] Training Acc: 0.5795918367346938\n", + "Selection Epoch 2 Training epoch [ 22 ] Training Acc: 0.7387755102040816\n", + "Selection Epoch 2 Training epoch [ 23 ] Training Acc: 0.7428571428571429\n", + "Selection Epoch 2 Training epoch [ 24 ] Training Acc: 0.7836734693877551\n", + "Selection Epoch 2 Training epoch [ 25 ] Training Acc: 0.763265306122449\n", + "Selection Epoch 2 Training epoch [ 26 ] Training Acc: 0.7959183673469388\n", + "Selection Epoch 2 Training epoch [ 27 ] Training Acc: 0.6979591836734694\n", + "Selection Epoch 2 Training epoch [ 28 ] Training Acc: 0.8489795918367347\n", + "Selection Epoch 2 Training epoch [ 29 ] Training Acc: 0.8122448979591836\n", + "Selection Epoch 2 Training epoch [ 30 ] Training Acc: 0.8979591836734694\n", + "Selection Epoch 2 Training epoch [ 31 ] Training Acc: 0.9387755102040817\n", + "Selection Epoch 2 Training epoch [ 32 ] Training Acc: 0.889795918367347\n", + "Selection Epoch 2 Training epoch [ 33 ] Training Acc: 0.9224489795918367\n", + "Selection Epoch 2 Training epoch [ 34 ] Training Acc: 0.8938775510204081\n", + "Selection Epoch 2 Training 
epoch [ 35 ] Training Acc: 0.8938775510204081\n", + "Selection Epoch 2 Training epoch [ 36 ] Training Acc: 0.9183673469387755\n", + "Selection Epoch 2 Training epoch [ 37 ] Training Acc: 0.7836734693877551\n", + "Selection Epoch 2 Training epoch [ 38 ] Training Acc: 0.7673469387755102\n", + "Selection Epoch 2 Training epoch [ 39 ] Training Acc: 0.9387755102040817\n", + "Selection Epoch 2 Training epoch [ 40 ] Training Acc: 0.9306122448979591\n", + "Selection Epoch 2 Training epoch [ 41 ] Training Acc: 0.9306122448979591\n", + "Selection Epoch 2 Training epoch [ 42 ] Training Acc: 0.9224489795918367\n", + "Selection Epoch 2 Training epoch [ 43 ] Training Acc: 0.9673469387755103\n", + "Selection Epoch 2 Training epoch [ 44 ] Training Acc: 0.9918367346938776\n", + "Epoch: 3 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.980945535004139 0.9918367346938776 0.41221916675567627 0.9010989010989011 7.374663829803467 0.40408163265306124 73.41077280044556\n", + "AL epoch: 3\n", + "val, test error% for class 0 : 6.67 57.14\n", + "val, test error% for class 1 : 14.29 48.57\n", + "val, test error% for class 2 : 5.0 77.14\n", + "val, test error% for class 3 : 9.09 57.14\n", + "val, test error% for class 4 : 25.0 71.43\n", + "val, test error% for class 5 : 0.0 48.57\n", + "val, test error% for class 6 : 28.57 57.14\n", + "46 / 70 idc points.\n", + "selEpoch: 3, Selection Ended at: 2022-01-30 19:50:02.278661\n", + "46 5861 5931\n", + "After augmentation, size of train_set: 315 lake set: 5861 val set: 137\n", + "Selection Epoch 3 Training epoch [ 1 ] Training Acc: 0.3746031746031746\n", + "Selection Epoch 3 Training epoch [ 2 ] Training Acc: 0.473015873015873\n", + "Selection Epoch 3 Training epoch [ 3 ] Training Acc: 0.5079365079365079\n", + "Selection Epoch 3 Training epoch [ 4 ] Training Acc: 0.5238095238095238\n", + "Selection Epoch 3 Training epoch [ 5 ] Training Acc: 0.5428571428571428\n", + "Selection Epoch 3 Training epoch [ 6 ] Training Acc: 0.6444444444444445\n", + "Selection Epoch 3 Training epoch [ 7 ] Training Acc: 0.5936507936507937\n", + "Selection Epoch 3 Training epoch [ 8 ] Training Acc: 0.6825396825396826\n", + "Selection Epoch 3 Training epoch [ 9 ] Training Acc: 0.6126984126984127\n", + "Selection Epoch 3 Training epoch [ 10 ] Training Acc: 0.653968253968254\n", + "Selection Epoch 3 Training epoch [ 11 ] Training Acc: 0.6730158730158731\n", + "Selection Epoch 3 Training epoch [ 12 ] Training Acc: 0.7492063492063492\n", + "Selection Epoch 3 Training epoch [ 13 ] Training Acc: 0.7301587301587301\n", + "Selection Epoch 3 Training epoch [ 14 ] Training Acc: 0.746031746031746\n", + "Selection Epoch 3 Training epoch [ 15 ] Training Acc: 0.7301587301587301\n", + "Selection Epoch 3 Training epoch [ 16 ] Training Acc: 0.6253968253968254\n", + "Selection Epoch 3 Training epoch [ 17 ] Training Acc: 0.8380952380952381\n", + "Selection Epoch 3 Training epoch [ 18 ] Training Acc: 0.8158730158730159\n", + "Selection Epoch 3 Training epoch [ 19 ] Training Acc: 0.8380952380952381\n", + "Selection Epoch 3 Training epoch [ 20 ] Training Acc: 0.7682539682539683\n", + "Selection Epoch 3 Training epoch [ 21 ] Training Acc: 0.8603174603174604\n", + "Selection Epoch 3 Training epoch [ 22 ] Training Acc: 0.8444444444444444\n", + "Selection Epoch 3 Training epoch [ 23 ] Training Acc: 0.946031746031746\n", + "Selection Epoch 3 Training epoch [ 24 ] Training Acc: 0.9396825396825397\n", + "Selection Epoch 3 Training epoch [ 25 ] Training Acc: 0.9619047619047619\n", + "Selection Epoch 3 Training epoch [ 26 ] 
Training Acc: 0.9746031746031746\n", + "Selection Epoch 3 Training epoch [ 27 ] Training Acc: 0.9650793650793651\n", + "Selection Epoch 3 Training epoch [ 28 ] Training Acc: 0.9396825396825397\n", + "Selection Epoch 3 Training epoch [ 29 ] Training Acc: 0.9079365079365079\n", + "Selection Epoch 3 Training epoch [ 30 ] Training Acc: 0.8857142857142857\n", + "Selection Epoch 3 Training epoch [ 31 ] Training Acc: 0.9492063492063492\n", + "Selection Epoch 3 Training epoch [ 32 ] Training Acc: 0.9873015873015873\n", + "Selection Epoch 3 Training epoch [ 33 ] Training Acc: 0.9904761904761905\n", + "Epoch: 4 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.7322989469394088 0.9904761904761905 0.313280314207077 0.927007299270073 9.698065042495728 0.47346938775510206 71.7606942653656\n", + "AL epoch: 4\n", + "val, test error% for class 0 : 0.0 40.0\n", + "val, test error% for class 1 : 16.67 51.43\n", + "val, test error% for class 2 : 8.0 77.14\n", + "val, test error% for class 3 : 23.08 80.0\n", + "val, test error% for class 4 : 0.0 45.71\n", + "val, test error% for class 5 : 3.23 25.71\n", + "val, test error% for class 6 : 15.38 48.57\n", + "39 / 70 idc points.\n", + "selEpoch: 4, Selection Ended at: 2022-01-30 19:52:01.283006\n", + "39 5791 5861\n", + "After augmentation, size of train_set: 385 lake set: 5791 val set: 176\n", + "Selection Epoch 4 Training epoch [ 1 ] Training Acc: 0.43116883116883115\n", + "Selection Epoch 4 Training epoch [ 2 ] Training Acc: 0.535064935064935\n", + "Selection Epoch 4 Training epoch [ 3 ] Training Acc: 0.36883116883116884\n", + "Selection Epoch 4 Training epoch [ 4 ] Training Acc: 0.535064935064935\n", + "Selection Epoch 4 Training epoch [ 5 ] Training Acc: 0.5688311688311688\n", + "Selection Epoch 4 Training epoch [ 6 ] Training Acc: 0.5818181818181818\n", + "Selection Epoch 4 Training epoch [ 7 ] Training Acc: 0.5974025974025974\n", + "Selection Epoch 4 Training epoch [ 8 ] Training Acc: 0.5662337662337662\n", + "Selection Epoch 4 Training epoch [ 9 ] Training Acc: 0.5506493506493506\n", + "Selection Epoch 4 Training epoch [ 10 ] Training Acc: 0.5116883116883116\n", + "Selection Epoch 4 Training epoch [ 11 ] Training Acc: 0.5896103896103896\n", + "Selection Epoch 4 Training epoch [ 12 ] Training Acc: 0.5688311688311688\n", + "Selection Epoch 4 Training epoch [ 13 ] Training Acc: 0.625974025974026\n", + "Selection Epoch 4 Training epoch [ 14 ] Training Acc: 0.6441558441558441\n", + "Selection Epoch 4 Training epoch [ 15 ] Training Acc: 0.6415584415584416\n", + "Selection Epoch 4 Training epoch [ 16 ] Training Acc: 0.6753246753246753\n", + "Selection Epoch 4 Training epoch [ 17 ] Training Acc: 0.6961038961038961\n", + "Selection Epoch 4 Training epoch [ 18 ] Training Acc: 0.7116883116883117\n", + "Selection Epoch 4 Training epoch [ 19 ] Training Acc: 0.7090909090909091\n", + "Selection Epoch 4 Training epoch [ 20 ] Training Acc: 0.6649350649350649\n", + "Selection Epoch 4 Training epoch [ 21 ] Training Acc: 0.7064935064935065\n", + "Selection Epoch 4 Training epoch [ 22 ] Training Acc: 0.7454545454545455\n", + "Selection Epoch 4 Training epoch [ 23 ] Training Acc: 0.7740259740259741\n", + "Selection Epoch 4 Training epoch [ 24 ] Training Acc: 0.7792207792207793\n", + "Selection Epoch 4 Training epoch [ 25 ] Training Acc: 0.8207792207792208\n", + "Selection Epoch 4 Training epoch [ 26 ] Training Acc: 0.8181818181818182\n", + "Selection Epoch 4 Training epoch [ 27 ] Training Acc: 0.8337662337662337\n", + "Selection Epoch 4 Training epoch [ 28 ] Training Acc: 
0.8311688311688312\n", + "Selection Epoch 4 Training epoch [ 29 ] Training Acc: 0.8545454545454545\n", + "Selection Epoch 4 Training epoch [ 30 ] Training Acc: 0.8441558441558441\n", + "Selection Epoch 4 Training epoch [ 31 ] Training Acc: 0.8285714285714286\n", + "Selection Epoch 4 Training epoch [ 32 ] Training Acc: 0.8077922077922078\n", + "Selection Epoch 4 Training epoch [ 33 ] Training Acc: 0.8831168831168831\n", + "Selection Epoch 4 Training epoch [ 34 ] Training Acc: 0.9376623376623376\n", + "Selection Epoch 4 Training epoch [ 35 ] Training Acc: 0.9792207792207792\n", + "Selection Epoch 4 Training epoch [ 36 ] Training Acc: 0.9636363636363636\n", + "Selection Epoch 4 Training epoch [ 37 ] Training Acc: 0.8857142857142857\n", + "Selection Epoch 4 Training epoch [ 38 ] Training Acc: 0.812987012987013\n", + "Selection Epoch 4 Training epoch [ 39 ] Training Acc: 0.8935064935064935\n", + "Selection Epoch 4 Training epoch [ 40 ] Training Acc: 0.9506493506493506\n", + "Selection Epoch 4 Training epoch [ 41 ] Training Acc: 0.9922077922077922\n", + "Epoch: 5 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 1.5869989711791277 0.9922077922077922 0.21238836646080017 0.9602272727272727 5.676886439323425 0.4775510204081633 106.60622644424438\n", + "AL epoch: 5\n", + "val, test error% for class 0 : 0.0 51.43\n", + "val, test error% for class 1 : 15.0 68.57\n", + "val, test error% for class 2 : 3.45 74.29\n", + "val, test error% for class 3 : 7.14 62.86\n", + "val, test error% for class 4 : 3.85 28.57\n", + "val, test error% for class 5 : 0.0 37.14\n", + "val, test error% for class 6 : 6.67 42.86\n", + "40 / 70 idc points.\n", + "selEpoch: 5, Selection Ended at: 2022-01-30 19:54:34.791421\n", + "40 5721 5791\n", + "After augmentation, size of train_set: 455 lake set: 5721 val set: 216\n", + "Selection Epoch 5 Training epoch [ 1 ] Training Acc: 0.3824175824175824\n", + "Selection Epoch 5 Training epoch [ 2 ] Training Acc: 0.46373626373626375\n", + "Selection Epoch 5 Training epoch [ 3 ] Training Acc: 0.5230769230769231\n", + "Selection Epoch 5 Training epoch [ 4 ] Training Acc: 0.6065934065934065\n", + "Selection Epoch 5 Training epoch [ 5 ] Training Acc: 0.6087912087912087\n", + "Selection Epoch 5 Training epoch [ 6 ] Training Acc: 0.6065934065934065\n", + "Selection Epoch 5 Training epoch [ 7 ] Training Acc: 0.6307692307692307\n", + "Selection Epoch 5 Training epoch [ 8 ] Training Acc: 0.6505494505494506\n", + "Selection Epoch 5 Training epoch [ 9 ] Training Acc: 0.6197802197802198\n", + "Selection Epoch 5 Training epoch [ 10 ] Training Acc: 0.643956043956044\n", + "Selection Epoch 5 Training epoch [ 11 ] Training Acc: 0.6615384615384615\n", + "Selection Epoch 5 Training epoch [ 12 ] Training Acc: 0.7054945054945055\n", + "Selection Epoch 5 Training epoch [ 13 ] Training Acc: 0.6857142857142857\n", + "Selection Epoch 5 Training epoch [ 14 ] Training Acc: 0.7758241758241758\n", + "Selection Epoch 5 Training epoch [ 15 ] Training Acc: 0.7494505494505495\n", + "Selection Epoch 5 Training epoch [ 16 ] Training Acc: 0.7802197802197802\n", + "Selection Epoch 5 Training epoch [ 17 ] Training Acc: 0.8263736263736263\n", + "Selection Epoch 5 Training epoch [ 18 ] Training Acc: 0.8153846153846154\n", + "Selection Epoch 5 Training epoch [ 19 ] Training Acc: 0.8285714285714286\n", + "Selection Epoch 5 Training epoch [ 20 ] Training Acc: 0.9296703296703297\n", + "Selection Epoch 5 Training epoch [ 21 ] Training Acc: 0.9494505494505494\n", + "Selection Epoch 5 Training epoch [ 22 ] Training Acc: 
0.9296703296703297\n", + "Selection Epoch 5 Training epoch [ 23 ] Training Acc: 0.9494505494505494\n", + "Selection Epoch 5 Training epoch [ 24 ] Training Acc: 0.9626373626373627\n", + "Selection Epoch 5 Training epoch [ 25 ] Training Acc: 0.9318681318681319\n", + "Selection Epoch 5 Training epoch [ 26 ] Training Acc: 0.9802197802197802\n", + "Selection Epoch 5 Training epoch [ 27 ] Training Acc: 0.9824175824175824\n", + "Selection Epoch 5 Training epoch [ 28 ] Training Acc: 0.978021978021978\n", + "Selection Epoch 5 Training epoch [ 29 ] Training Acc: 0.9868131868131869\n", + "Selection Epoch 5 Training epoch [ 30 ] Training Acc: 0.9692307692307692\n", + "Selection Epoch 5 Training epoch [ 31 ] Training Acc: 0.9868131868131869\n", + "Selection Epoch 5 Training epoch [ 32 ] Training Acc: 0.9956043956043956\n", + "Epoch: 6 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.47249227232532576 0.9956043956043956 0.1736769676208496 0.9629629629629629 8.632215023040771 0.4163265306122449 100.9219708442688\n", + "AL epoch: 6\n", + "val, test error% for class 0 : 0.0 51.43\n", + "val, test error% for class 1 : 7.41 54.29\n", + "val, test error% for class 2 : 2.78 74.29\n", + "val, test error% for class 3 : 5.56 65.71\n", + "val, test error% for class 4 : 6.25 51.43\n", + "val, test error% for class 5 : 0.0 31.43\n", + "val, test error% for class 6 : 12.5 80.0\n", + "51 / 70 idc points.\n", + "selEpoch: 6, Selection Ended at: 2022-01-30 19:57:03.944071\n", + "51 5651 5721\n", + "After augmentation, size of train_set: 525 lake set: 5651 val set: 267\n", + "Selection Epoch 6 Training epoch [ 1 ] Training Acc: 0.4095238095238095\n", + "Selection Epoch 6 Training epoch [ 2 ] Training Acc: 0.518095238095238\n", + "Selection Epoch 6 Training epoch [ 3 ] Training Acc: 0.5504761904761905\n", + "Selection Epoch 6 Training epoch [ 4 ] Training Acc: 0.5066666666666667\n", + "Selection Epoch 6 Training epoch [ 5 ] Training Acc: 0.5314285714285715\n", + "Selection Epoch 6 Training epoch [ 6 ] Training Acc: 0.5904761904761905\n", + "Selection Epoch 6 Training epoch [ 7 ] Training Acc: 0.5847619047619048\n", + "Selection Epoch 6 Training epoch [ 8 ] Training Acc: 0.6285714285714286\n", + "Selection Epoch 6 Training epoch [ 9 ] Training Acc: 0.6304761904761905\n", + "Selection Epoch 6 Training epoch [ 10 ] Training Acc: 0.6419047619047619\n", + "Selection Epoch 6 Training epoch [ 11 ] Training Acc: 0.6266666666666667\n", + "Selection Epoch 6 Training epoch [ 12 ] Training Acc: 0.6190476190476191\n", + "Selection Epoch 6 Training epoch [ 13 ] Training Acc: 0.6647619047619048\n", + "Selection Epoch 6 Training epoch [ 14 ] Training Acc: 0.6914285714285714\n", + "Selection Epoch 6 Training epoch [ 15 ] Training Acc: 0.6095238095238096\n", + "Selection Epoch 6 Training epoch [ 16 ] Training Acc: 0.6666666666666666\n", + "Selection Epoch 6 Training epoch [ 17 ] Training Acc: 0.6933333333333334\n", + "Selection Epoch 6 Training epoch [ 18 ] Training Acc: 0.6704761904761904\n", + "Selection Epoch 6 Training epoch [ 19 ] Training Acc: 0.6819047619047619\n", + "Selection Epoch 6 Training epoch [ 20 ] Training Acc: 0.7104761904761905\n", + "Selection Epoch 6 Training epoch [ 21 ] Training Acc: 0.7238095238095238\n", + "Selection Epoch 6 Training epoch [ 22 ] Training Acc: 0.7657142857142857\n", + "Selection Epoch 6 Training epoch [ 23 ] Training Acc: 0.7885714285714286\n", + "Selection Epoch 6 Training epoch [ 24 ] Training Acc: 0.8\n", + "Selection Epoch 6 Training epoch [ 25 ] Training Acc: 0.7619047619047619\n", + 
"Selection Epoch 6 Training epoch [ 26 ] Training Acc: 0.7828571428571428\n", + "Selection Epoch 6 Training epoch [ 27 ] Training Acc: 0.8666666666666667\n", + "Selection Epoch 6 Training epoch [ 28 ] Training Acc: 0.8723809523809524\n", + "Selection Epoch 6 Training epoch [ 29 ] Training Acc: 0.8438095238095238\n", + "Selection Epoch 6 Training epoch [ 30 ] Training Acc: 0.8742857142857143\n", + "Selection Epoch 6 Training epoch [ 31 ] Training Acc: 0.8895238095238095\n", + "Selection Epoch 6 Training epoch [ 32 ] Training Acc: 0.9047619047619048\n", + "Selection Epoch 6 Training epoch [ 33 ] Training Acc: 0.9161904761904762\n", + "Selection Epoch 6 Training epoch [ 34 ] Training Acc: 0.8571428571428571\n", + "Selection Epoch 6 Training epoch [ 35 ] Training Acc: 0.9371428571428572\n", + "Selection Epoch 6 Training epoch [ 36 ] Training Acc: 0.9257142857142857\n", + "Selection Epoch 6 Training epoch [ 37 ] Training Acc: 0.9847619047619047\n", + "Selection Epoch 6 Training epoch [ 38 ] Training Acc: 0.9809523809523809\n", + "Selection Epoch 6 Training epoch [ 39 ] Training Acc: 0.9104761904761904\n", + "Selection Epoch 6 Training epoch [ 40 ] Training Acc: 0.9714285714285714\n", + "Selection Epoch 6 Training epoch [ 41 ] Training Acc: 0.9771428571428571\n", + "Selection Epoch 6 Training epoch [ 42 ] Training Acc: 0.9542857142857143\n", + "Selection Epoch 6 Training epoch [ 43 ] Training Acc: 0.9847619047619047\n", + "Selection Epoch 6 Training epoch [ 44 ] Training Acc: 0.9733333333333334\n", + "Selection Epoch 6 Training epoch [ 45 ] Training Acc: 0.9885714285714285\n", + "Selection Epoch 6 Training epoch [ 46 ] Training Acc: 0.9904761904761905\n", + "Epoch: 7 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.8446809083689004 0.9904761904761905 0.18282820284366608 0.9625468164794008 6.165082812309265 0.5510204081632653 166.28034925460815\n", + "AL epoch: 7\n", + "val, test error% for class 0 : 3.85 45.71\n", + "val, test error% for class 1 : 9.09 57.14\n", + "val, test error% for class 2 : 2.38 71.43\n", + "val, test error% for class 3 : 0.0 45.71\n", + "val, test error% for class 4 : 2.86 34.29\n", + "val, test error% for class 5 : 3.51 34.29\n", + "val, test error% for class 6 : 4.0 25.71\n", + "41 / 70 idc points.\n", + "selEpoch: 7, Selection Ended at: 2022-01-30 20:00:35.348632\n", + "41 5581 5651\n", + "After augmentation, size of train_set: 595 lake set: 5581 val set: 308\n", + "Selection Epoch 7 Training epoch [ 1 ] Training Acc: 0.492436974789916\n", + "Selection Epoch 7 Training epoch [ 2 ] Training Acc: 0.5109243697478991\n", + "Selection Epoch 7 Training epoch [ 3 ] Training Acc: 0.5294117647058824\n", + "Selection Epoch 7 Training epoch [ 4 ] Training Acc: 0.584873949579832\n", + "Selection Epoch 7 Training epoch [ 5 ] Training Acc: 0.5478991596638656\n", + "Selection Epoch 7 Training epoch [ 6 ] Training Acc: 0.6201680672268908\n", + "Selection Epoch 7 Training epoch [ 7 ] Training Acc: 0.6436974789915967\n", + "Selection Epoch 7 Training epoch [ 8 ] Training Acc: 0.6016806722689075\n", + "Selection Epoch 7 Training epoch [ 9 ] Training Acc: 0.6352941176470588\n", + "Selection Epoch 7 Training epoch [ 10 ] Training Acc: 0.6252100840336134\n", + "Selection Epoch 7 Training epoch [ 11 ] Training Acc: 0.6941176470588235\n", + "Selection Epoch 7 Training epoch [ 12 ] Training Acc: 0.7025210084033613\n", + "Selection Epoch 7 Training epoch [ 13 ] Training Acc: 0.6621848739495798\n", + "Selection Epoch 7 Training epoch [ 14 ] Training Acc: 0.6100840336134454\n", + "Selection 
Epoch 7 Training epoch [ 15 ] Training Acc: 0.6873949579831933\n", + "Selection Epoch 7 Training epoch [ 16 ] Training Acc: 0.7126050420168067\n", + "Selection Epoch 7 Training epoch [ 17 ] Training Acc: 0.7243697478991596\n", + "Selection Epoch 7 Training epoch [ 18 ] Training Acc: 0.7865546218487395\n", + "Selection Epoch 7 Training epoch [ 19 ] Training Acc: 0.7747899159663866\n", + "Selection Epoch 7 Training epoch [ 20 ] Training Acc: 0.7697478991596639\n", + "Selection Epoch 7 Training epoch [ 21 ] Training Acc: 0.8252100840336134\n", + "Selection Epoch 7 Training epoch [ 22 ] Training Acc: 0.8436974789915966\n", + "Selection Epoch 7 Training epoch [ 23 ] Training Acc: 0.826890756302521\n", + "Selection Epoch 7 Training epoch [ 24 ] Training Acc: 0.8\n", + "Selection Epoch 7 Training epoch [ 25 ] Training Acc: 0.8823529411764706\n", + "Selection Epoch 7 Training epoch [ 26 ] Training Acc: 0.8789915966386554\n", + "Selection Epoch 7 Training epoch [ 27 ] Training Acc: 0.9260504201680673\n", + "Selection Epoch 7 Training epoch [ 28 ] Training Acc: 0.9109243697478991\n", + "Selection Epoch 7 Training epoch [ 29 ] Training Acc: 0.9563025210084034\n", + "Selection Epoch 7 Training epoch [ 30 ] Training Acc: 0.9109243697478991\n", + "Selection Epoch 7 Training epoch [ 31 ] Training Acc: 0.9159663865546218\n", + "Selection Epoch 7 Training epoch [ 32 ] Training Acc: 0.9831932773109243\n", + "Selection Epoch 7 Training epoch [ 33 ] Training Acc: 0.9680672268907563\n", + "Selection Epoch 7 Training epoch [ 34 ] Training Acc: 0.9697478991596639\n", + "Selection Epoch 7 Training epoch [ 35 ] Training Acc: 0.9714285714285714\n", + "Selection Epoch 7 Training epoch [ 36 ] Training Acc: 0.9815126050420168\n", + "Selection Epoch 7 Training epoch [ 37 ] Training Acc: 0.9798319327731092\n", + "Selection Epoch 7 Training epoch [ 38 ] Training Acc: 0.9815126050420168\n", + "Selection Epoch 7 Training epoch [ 39 ] Training Acc: 0.9915966386554622\n", + "Epoch: 8 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 1.0943625147920102 0.9915966386554622 0.15737999975681305 0.974025974025974 6.74507474899292 0.46122448979591835 164.87032008171082\n", + "AL epoch: 8\n", + "val, test error% for class 0 : 0.0 48.57\n", + "val, test error% for class 1 : 2.56 51.43\n", + "val, test error% for class 2 : 0.0 60.0\n", + "val, test error% for class 3 : 7.41 74.29\n", + "val, test error% for class 4 : 7.32 91.43\n", + "val, test error% for class 5 : 0.0 22.86\n", + "val, test error% for class 6 : 6.25 28.57\n", + "49 / 70 idc points.\n", + "selEpoch: 8, Selection Ended at: 2022-01-30 20:04:08.607132\n", + "49 5511 5581\n", + "After augmentation, size of train_set: 665 lake set: 5511 val set: 357\n", + "Selection Epoch 8 Training epoch [ 1 ] Training Acc: 0.48120300751879697\n", + "Selection Epoch 8 Training epoch [ 2 ] Training Acc: 0.5533834586466165\n", + "Selection Epoch 8 Training epoch [ 3 ] Training Acc: 0.6210526315789474\n", + "Selection Epoch 8 Training epoch [ 4 ] Training Acc: 0.606015037593985\n", + "Selection Epoch 8 Training epoch [ 5 ] Training Acc: 0.6421052631578947\n", + "Selection Epoch 8 Training epoch [ 6 ] Training Acc: 0.6526315789473685\n", + "Selection Epoch 8 Training epoch [ 7 ] Training Acc: 0.6796992481203008\n", + "Selection Epoch 8 Training epoch [ 8 ] Training Acc: 0.7127819548872181\n", + "Selection Epoch 8 Training epoch [ 9 ] Training Acc: 0.6827067669172933\n", + "Selection Epoch 8 Training epoch [ 10 ] Training Acc: 0.7593984962406015\n", + "Selection Epoch 8 Training epoch [ 11 
] Training Acc: 0.7112781954887218\n", + "Selection Epoch 8 Training epoch [ 12 ] Training Acc: 0.7954887218045112\n", + "Selection Epoch 8 Training epoch [ 13 ] Training Acc: 0.7879699248120301\n", + "Selection Epoch 8 Training epoch [ 14 ] Training Acc: 0.8631578947368421\n", + "Selection Epoch 8 Training epoch [ 15 ] Training Acc: 0.8195488721804511\n", + "Selection Epoch 8 Training epoch [ 16 ] Training Acc: 0.8842105263157894\n", + "Selection Epoch 8 Training epoch [ 17 ] Training Acc: 0.8601503759398497\n", + "Selection Epoch 8 Training epoch [ 18 ] Training Acc: 0.8\n", + "Selection Epoch 8 Training epoch [ 19 ] Training Acc: 0.8631578947368421\n", + "Selection Epoch 8 Training epoch [ 20 ] Training Acc: 0.9142857142857143\n", + "Selection Epoch 8 Training epoch [ 21 ] Training Acc: 0.9473684210526315\n", + "Selection Epoch 8 Training epoch [ 22 ] Training Acc: 0.9849624060150376\n", + "Selection Epoch 8 Training epoch [ 23 ] Training Acc: 0.9503759398496241\n", + "Selection Epoch 8 Training epoch [ 24 ] Training Acc: 0.9909774436090225\n", + "Epoch: 9 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 1.4118697177618742 0.9909774436090225 0.14683537185192108 0.9635854341736695 5.9241719245910645 0.5183673469387755 115.37063694000244\n", + "AL epoch: 9\n", + "val, test error% for class 0 : 0.0 31.43\n", + "val, test error% for class 1 : 6.67 60.0\n", + "val, test error% for class 2 : 3.57 71.43\n", + "val, test error% for class 3 : 9.38 62.86\n", + "val, test error% for class 4 : 4.35 57.14\n", + "val, test error% for class 5 : 1.32 31.43\n", + "val, test error% for class 6 : 5.56 22.86\n", + "45 / 70 idc points.\n", + "selEpoch: 9, Selection Ended at: 2022-01-30 20:06:49.782869\n", + "45 5441 5511\n", + "After augmentation, size of train_set: 735 lake set: 5441 val set: 402\n", + "Selection Epoch 9 Training epoch [ 1 ] Training Acc: 0.5374149659863946\n", + "Selection Epoch 9 Training epoch [ 2 ] Training Acc: 0.563265306122449\n", + "Selection Epoch 9 Training epoch [ 3 ] Training Acc: 0.4775510204081633\n", + "Selection Epoch 9 Training epoch [ 4 ] Training Acc: 0.5986394557823129\n", + "Selection Epoch 9 Training epoch [ 5 ] Training Acc: 0.6149659863945578\n", + "Selection Epoch 9 Training epoch [ 6 ] Training Acc: 0.6312925170068027\n", + "Selection Epoch 9 Training epoch [ 7 ] Training Acc: 0.636734693877551\n", + "Selection Epoch 9 Training epoch [ 8 ] Training Acc: 0.6394557823129252\n", + "Selection Epoch 9 Training epoch [ 9 ] Training Acc: 0.601360544217687\n", + "Selection Epoch 9 Training epoch [ 10 ] Training Acc: 0.6557823129251701\n", + "Selection Epoch 9 Training epoch [ 11 ] Training Acc: 0.6639455782312925\n", + "Selection Epoch 9 Training epoch [ 12 ] Training Acc: 0.673469387755102\n", + "Selection Epoch 9 Training epoch [ 13 ] Training Acc: 0.6435374149659864\n", + "Selection Epoch 9 Training epoch [ 14 ] Training Acc: 0.7047619047619048\n", + "Selection Epoch 9 Training epoch [ 15 ] Training Acc: 0.691156462585034\n", + "Selection Epoch 9 Training epoch [ 16 ] Training Acc: 0.7428571428571429\n", + "Selection Epoch 9 Training epoch [ 17 ] Training Acc: 0.7877551020408163\n", + "Selection Epoch 9 Training epoch [ 18 ] Training Acc: 0.7714285714285715\n", + "Selection Epoch 9 Training epoch [ 19 ] Training Acc: 0.7278911564625851\n", + "Selection Epoch 9 Training epoch [ 20 ] Training Acc: 0.8394557823129252\n", + "Selection Epoch 9 Training epoch [ 21 ] Training Acc: 0.8108843537414966\n", + "Selection Epoch 9 Training epoch [ 22 ] Training Acc: 
0.8503401360544217\n", + "Selection Epoch 9 Training epoch [ 23 ] Training Acc: 0.8598639455782313\n", + "Selection Epoch 9 Training epoch [ 24 ] Training Acc: 0.8707482993197279\n", + "Selection Epoch 9 Training epoch [ 25 ] Training Acc: 0.8870748299319728\n", + "Selection Epoch 9 Training epoch [ 26 ] Training Acc: 0.9442176870748299\n", + "Selection Epoch 9 Training epoch [ 27 ] Training Acc: 0.8231292517006803\n", + "Selection Epoch 9 Training epoch [ 28 ] Training Acc: 0.9251700680272109\n", + "Selection Epoch 9 Training epoch [ 29 ] Training Acc: 0.9510204081632653\n", + "Selection Epoch 9 Training epoch [ 30 ] Training Acc: 0.9074829931972789\n", + "Selection Epoch 9 Training epoch [ 31 ] Training Acc: 0.9755102040816327\n", + "Selection Epoch 9 Training epoch [ 32 ] Training Acc: 0.9795918367346939\n", + "Selection Epoch 9 Training epoch [ 33 ] Training Acc: 0.9741496598639455\n", + "Selection Epoch 9 Training epoch [ 34 ] Training Acc: 0.9931972789115646\n", + "Epoch: 10 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 1.2808888105209917 0.9931972789115646 0.10089294612407684 0.9776119402985075 6.141527593135834 0.5591836734693878 180.90921258926392\n", + "val, test error% for class 0 : 1.41 34.29\n", + "val, test error% for class 1 : 0.0 40.0\n", + "val, test error% for class 2 : 1.59 71.43\n", + "val, test error% for class 3 : 5.41 42.86\n", + "val, test error% for class 4 : 5.88 74.29\n", + "val, test error% for class 5 : 0.0 22.86\n", + "val, test error% for class 6 : 5.13 22.86\n", + "[[57.14, 68.57, 94.29, 62.86, 40.0, 71.43, 65.71, 65.71428571428571], [60.0, 60.0, 77.14, 48.57, 45.71, 80.0, 77.14, 64.08], [57.14, 48.57, 77.14, 57.14, 71.43, 48.57, 57.14, 59.589999999999996], [40.0, 51.43, 77.14, 80.0, 45.71, 25.71, 48.57, 52.65142857142856], [51.43, 68.57, 74.29, 62.86, 28.57, 37.14, 42.86, 52.24571428571429], [51.43, 54.29, 74.29, 65.71, 51.43, 31.43, 80.0, 58.36857142857143], [45.71, 57.14, 71.43, 45.71, 34.29, 34.29, 25.71, 44.89714285714285], [48.57, 51.43, 60.0, 74.29, 91.43, 22.86, 28.57, 53.87857142857143], [31.43, 60.0, 71.43, 62.86, 57.14, 31.43, 22.86, 48.16428571428572], [34.29, 40.0, 71.43, 42.86, 74.29, 22.86, 22.86, 44.08428571428572]]\n", + "\n", + "\n" + ] + } + ], + "source": [ + "train_model_al(datkbuildPath, exePath, num_epochs, data_name, datadir, feature, model_name, budget, split_cfg, learning_rate, run, device, computeClassErrorLog, \"SIM\",'fl1mi')\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### AL BADGE" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "AL badge\n", + "num ood samples: 5000\n", + "Custom dataset stats: Train size: 105 Val size: 14 Lake size: 6071 Test set: 245\n", + "selected classes are: [0 1 2 3 4 5 6]\n", + "Saving results to: ./SMI_active_learning_results/dermamnist/ood/badge/70/fkna_3\n", + "dermamnist_ood_AL_7_badge_budget:70_epochs:10_linear:True_runsfkna_3\n", + "AL epoch: 0\n", + "initial training epoch\n", + "Init model loaded from disk, skipping init training: /mnt/data2/akshit/Derma/weights/dermamnist_ood_ResNet18_0.01_15_2_7\n", + "AL epoch: 1\n", + "val, test error% for class 0 : 0.0 57.14\n", + "val, test error% for class 1 : 100.0 68.57\n", + "val, test error% for class 2 : 100.0 94.29\n", + "val, test error% for class 3 : 0.0 62.86\n", + "val, test error% for class 4 : 100.0 40.0\n", + "val, test error% for class 5 : 100.0 71.43\n", + "val, test error% for class 
6 : 100.0 65.71\n", + "22 / 70 idc points.\n", + "selEpoch: 1, Selection Ended at: 2022-01-30 20:10:24.980089\n", + "22 6001 6071\n", + "After augmentation, size of train_set: 175 lake set: 6001 val set: 36\n", + "Selection Epoch 1 Training epoch [ 1 ] Training Acc: 0.2742857142857143\n", + "Selection Epoch 1 Training epoch [ 2 ] Training Acc: 0.3942857142857143\n", + "Selection Epoch 1 Training epoch [ 3 ] Training Acc: 0.3142857142857143\n", + "Selection Epoch 1 Training epoch [ 4 ] Training Acc: 0.38285714285714284\n", + "Selection Epoch 1 Training epoch [ 5 ] Training Acc: 0.5028571428571429\n", + "Selection Epoch 1 Training epoch [ 6 ] Training Acc: 0.6057142857142858\n", + "Selection Epoch 1 Training epoch [ 7 ] Training Acc: 0.56\n", + "Selection Epoch 1 Training epoch [ 8 ] Training Acc: 0.68\n", + "Selection Epoch 1 Training epoch [ 9 ] Training Acc: 0.8114285714285714\n", + "Selection Epoch 1 Training epoch [ 10 ] Training Acc: 0.8857142857142857\n", + "Selection Epoch 1 Training epoch [ 11 ] Training Acc: 0.92\n", + "Selection Epoch 1 Training epoch [ 12 ] Training Acc: 0.92\n", + "Selection Epoch 1 Training epoch [ 13 ] Training Acc: 0.7428571428571429\n", + "Selection Epoch 1 Training epoch [ 14 ] Training Acc: 0.8914285714285715\n", + "Selection Epoch 1 Training epoch [ 15 ] Training Acc: 0.9714285714285714\n", + "Selection Epoch 1 Training epoch [ 16 ] Training Acc: 0.9942857142857143\n", + "Epoch: 2 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.4240907598286867 0.9942857142857143 1.0728578567504883 0.75 9.416767835617065 0.2897959183673469 20.903384685516357\n", + "AL epoch: 2\n", + "val, test error% for class 0 : 22.22 77.14\n", + "val, test error% for class 1 : 25.0 62.86\n", + "val, test error% for class 2 : 0.0 62.86\n", + "val, test error% for class 3 : 20.0 57.14\n", + "val, test error% for class 4 : 100.0 82.86\n", + "val, test error% for class 5 : 14.29 80.0\n", + "val, test error% for class 6 : 50.0 74.29\n", + "42 / 70 idc points.\n", + "selEpoch: 2, Selection Ended at: 2022-01-30 20:11:16.924406\n", + "42 5931 6001\n", + "After augmentation, size of train_set: 245 lake set: 5931 val set: 78\n", + "Selection Epoch 2 Training epoch [ 1 ] Training Acc: 0.39591836734693875\n", + "Selection Epoch 2 Training epoch [ 2 ] Training Acc: 0.3183673469387755\n", + "Selection Epoch 2 Training epoch [ 3 ] Training Acc: 0.4489795918367347\n", + "Selection Epoch 2 Training epoch [ 4 ] Training Acc: 0.46938775510204084\n", + "Selection Epoch 2 Training epoch [ 5 ] Training Acc: 0.4897959183673469\n", + "Selection Epoch 2 Training epoch [ 6 ] Training Acc: 0.5428571428571428\n", + "Selection Epoch 2 Training epoch [ 7 ] Training Acc: 0.5836734693877551\n", + "Selection Epoch 2 Training epoch [ 8 ] Training Acc: 0.6285714285714286\n", + "Selection Epoch 2 Training epoch [ 9 ] Training Acc: 0.5387755102040817\n", + "Selection Epoch 2 Training epoch [ 10 ] Training Acc: 0.6\n", + "Selection Epoch 2 Training epoch [ 11 ] Training Acc: 0.4857142857142857\n", + "Selection Epoch 2 Training epoch [ 12 ] Training Acc: 0.636734693877551\n", + "Selection Epoch 2 Training epoch [ 13 ] Training Acc: 0.6489795918367347\n", + "Selection Epoch 2 Training epoch [ 14 ] Training Acc: 0.7020408163265306\n", + "Selection Epoch 2 Training epoch [ 15 ] Training Acc: 0.5795918367346938\n", + "Selection Epoch 2 Training epoch [ 16 ] Training Acc: 0.6204081632653061\n", + "Selection Epoch 2 Training epoch [ 17 ] Training Acc: 0.7346938775510204\n", + "Selection Epoch 2 Training epoch [ 18 ] 
Training Acc: 0.7877551020408163\n", + "Selection Epoch 2 Training epoch [ 19 ] Training Acc: 0.763265306122449\n", + "Selection Epoch 2 Training epoch [ 20 ] Training Acc: 0.8122448979591836\n", + "Selection Epoch 2 Training epoch [ 21 ] Training Acc: 0.8204081632653061\n", + "Selection Epoch 2 Training epoch [ 22 ] Training Acc: 0.889795918367347\n", + "Selection Epoch 2 Training epoch [ 23 ] Training Acc: 0.8979591836734694\n", + "Selection Epoch 2 Training epoch [ 24 ] Training Acc: 0.6857142857142857\n", + "Selection Epoch 2 Training epoch [ 25 ] Training Acc: 0.7510204081632653\n", + "Selection Epoch 2 Training epoch [ 26 ] Training Acc: 0.8\n", + "Selection Epoch 2 Training epoch [ 27 ] Training Acc: 0.8979591836734694\n", + "Selection Epoch 2 Training epoch [ 28 ] Training Acc: 0.7918367346938775\n", + "Selection Epoch 2 Training epoch [ 29 ] Training Acc: 0.8734693877551021\n", + "Selection Epoch 2 Training epoch [ 30 ] Training Acc: 0.8204081632653061\n", + "Selection Epoch 2 Training epoch [ 31 ] Training Acc: 0.9428571428571428\n", + "Selection Epoch 2 Training epoch [ 32 ] Training Acc: 0.9346938775510204\n", + "Selection Epoch 2 Training epoch [ 33 ] Training Acc: 0.9755102040816327\n", + "Selection Epoch 2 Training epoch [ 34 ] Training Acc: 0.9510204081632653\n", + "Selection Epoch 2 Training epoch [ 35 ] Training Acc: 0.9714285714285714\n", + "Selection Epoch 2 Training epoch [ 36 ] Training Acc: 0.9836734693877551\n", + "Selection Epoch 2 Training epoch [ 37 ] Training Acc: 0.9959183673469387\n", + "Epoch: 3 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.36594894155859947 0.9959183673469387 0.7012478113174438 0.8717948717948718 10.333462238311768 0.34285714285714286 64.63306999206543\n", + "AL epoch: 3\n", + "val, test error% for class 0 : 14.29 71.43\n", + "val, test error% for class 1 : 22.22 51.43\n", + "val, test error% for class 2 : 14.29 85.71\n", + "val, test error% for class 3 : 0.0 68.57\n", + "val, test error% for class 4 : 20.0 62.86\n", + "val, test error% for class 5 : 4.76 42.86\n", + "val, test error% for class 6 : 20.0 77.14\n", + "60 / 70 idc points.\n", + "selEpoch: 3, Selection Ended at: 2022-01-30 20:12:51.455065\n", + "60 5861 5931\n", + "After augmentation, size of train_set: 315 lake set: 5861 val set: 138\n", + "Selection Epoch 3 Training epoch [ 1 ] Training Acc: 0.4634920634920635\n", + "Selection Epoch 3 Training epoch [ 2 ] Training Acc: 0.43174603174603177\n", + "Selection Epoch 3 Training epoch [ 3 ] Training Acc: 0.42857142857142855\n", + "Selection Epoch 3 Training epoch [ 4 ] Training Acc: 0.47619047619047616\n", + "Selection Epoch 3 Training epoch [ 5 ] Training Acc: 0.4793650793650794\n", + "Selection Epoch 3 Training epoch [ 6 ] Training Acc: 0.6222222222222222\n", + "Selection Epoch 3 Training epoch [ 7 ] Training Acc: 0.6063492063492063\n", + "Selection Epoch 3 Training epoch [ 8 ] Training Acc: 0.6730158730158731\n", + "Selection Epoch 3 Training epoch [ 9 ] Training Acc: 0.7238095238095238\n", + "Selection Epoch 3 Training epoch [ 10 ] Training Acc: 0.6825396825396826\n", + "Selection Epoch 3 Training epoch [ 11 ] Training Acc: 0.8253968253968254\n", + "Selection Epoch 3 Training epoch [ 12 ] Training Acc: 0.780952380952381\n", + "Selection Epoch 3 Training epoch [ 13 ] Training Acc: 0.8253968253968254\n", + "Selection Epoch 3 Training epoch [ 14 ] Training Acc: 0.707936507936508\n", + "Selection Epoch 3 Training epoch [ 15 ] Training Acc: 0.8222222222222222\n", + "Selection Epoch 3 Training epoch [ 16 ] Training Acc: 
0.9333333333333333\n", + "Selection Epoch 3 Training epoch [ 17 ] Training Acc: 0.9746031746031746\n", + "Selection Epoch 3 Training epoch [ 18 ] Training Acc: 0.9841269841269841\n", + "Selection Epoch 3 Training epoch [ 19 ] Training Acc: 0.9809523809523809\n", + "Selection Epoch 3 Training epoch [ 20 ] Training Acc: 0.9809523809523809\n", + "Selection Epoch 3 Training epoch [ 21 ] Training Acc: 0.9746031746031746\n", + "Selection Epoch 3 Training epoch [ 22 ] Training Acc: 0.9650793650793651\n", + "Selection Epoch 3 Training epoch [ 23 ] Training Acc: 0.9365079365079365\n", + "Selection Epoch 3 Training epoch [ 24 ] Training Acc: 0.9523809523809523\n", + "Selection Epoch 3 Training epoch [ 25 ] Training Acc: 0.9238095238095239\n", + "Selection Epoch 3 Training epoch [ 26 ] Training Acc: 0.9555555555555556\n", + "Selection Epoch 3 Training epoch [ 27 ] Training Acc: 0.9841269841269841\n", + "Selection Epoch 3 Training epoch [ 28 ] Training Acc: 0.9650793650793651\n", + "Selection Epoch 3 Training epoch [ 29 ] Training Acc: 0.9873015873015873\n", + "Selection Epoch 3 Training epoch [ 30 ] Training Acc: 0.9904761904761905\n", + "Epoch: 4 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.4349481950048357 0.9904761904761905 0.39447537064552307 0.9347826086956522 8.325363636016846 0.3673469387755102 68.78965926170349\n", + "AL epoch: 4\n", + "val, test error% for class 0 : 3.7 60.0\n", + "val, test error% for class 1 : 5.0 54.29\n", + "val, test error% for class 2 : 5.88 71.43\n", + "val, test error% for class 3 : 22.22 74.29\n", + "val, test error% for class 4 : 9.09 71.43\n", + "val, test error% for class 5 : 3.45 51.43\n", + "val, test error% for class 6 : 7.14 60.0\n", + "57 / 70 idc points.\n", + "selEpoch: 4, Selection Ended at: 2022-01-30 20:14:31.943259\n", + "57 5791 5861\n", + "After augmentation, size of train_set: 385 lake set: 5791 val set: 195\n", + "Selection Epoch 4 Training epoch [ 1 ] Training Acc: 0.2883116883116883\n", + "Selection Epoch 4 Training epoch [ 2 ] Training Acc: 0.44415584415584414\n", + "Selection Epoch 4 Training epoch [ 3 ] Training Acc: 0.4805194805194805\n", + "Selection Epoch 4 Training epoch [ 4 ] Training Acc: 0.5038961038961038\n", + "Selection Epoch 4 Training epoch [ 5 ] Training Acc: 0.5454545454545454\n", + "Selection Epoch 4 Training epoch [ 6 ] Training Acc: 0.5246753246753246\n", + "Selection Epoch 4 Training epoch [ 7 ] Training Acc: 0.5298701298701298\n", + "Selection Epoch 4 Training epoch [ 8 ] Training Acc: 0.5974025974025974\n", + "Selection Epoch 4 Training epoch [ 9 ] Training Acc: 0.5688311688311688\n", + "Selection Epoch 4 Training epoch [ 10 ] Training Acc: 0.6441558441558441\n", + "Selection Epoch 4 Training epoch [ 11 ] Training Acc: 0.5558441558441558\n", + "Selection Epoch 4 Training epoch [ 12 ] Training Acc: 0.6181818181818182\n", + "Selection Epoch 4 Training epoch [ 13 ] Training Acc: 0.6961038961038961\n", + "Selection Epoch 4 Training epoch [ 14 ] Training Acc: 0.6961038961038961\n", + "Selection Epoch 4 Training epoch [ 15 ] Training Acc: 0.5246753246753246\n", + "Selection Epoch 4 Training epoch [ 16 ] Training Acc: 0.7350649350649351\n", + "Selection Epoch 4 Training epoch [ 17 ] Training Acc: 0.787012987012987\n", + "Selection Epoch 4 Training epoch [ 18 ] Training Acc: 0.7454545454545455\n", + "Selection Epoch 4 Training epoch [ 19 ] Training Acc: 0.7584415584415585\n", + "Selection Epoch 4 Training epoch [ 20 ] Training Acc: 0.8181818181818182\n", + "Selection Epoch 4 Training epoch [ 21 ] Training Acc: 
0.7506493506493507\n", + "Selection Epoch 4 Training epoch [ 22 ] Training Acc: 0.7844155844155845\n", + "Selection Epoch 4 Training epoch [ 23 ] Training Acc: 0.812987012987013\n", + "Selection Epoch 4 Training epoch [ 24 ] Training Acc: 0.8181818181818182\n", + "Selection Epoch 4 Training epoch [ 25 ] Training Acc: 0.8805194805194805\n", + "Selection Epoch 4 Training epoch [ 26 ] Training Acc: 0.9012987012987013\n", + "Selection Epoch 4 Training epoch [ 27 ] Training Acc: 0.9454545454545454\n", + "Selection Epoch 4 Training epoch [ 28 ] Training Acc: 0.9792207792207792\n", + "Selection Epoch 4 Training epoch [ 29 ] Training Acc: 0.9766233766233766\n", + "Selection Epoch 4 Training epoch [ 30 ] Training Acc: 0.8831168831168831\n", + "Selection Epoch 4 Training epoch [ 31 ] Training Acc: 0.8935064935064935\n", + "Selection Epoch 4 Training epoch [ 32 ] Training Acc: 0.974025974025974\n", + "Selection Epoch 4 Training epoch [ 33 ] Training Acc: 0.9818181818181818\n", + "Selection Epoch 4 Training epoch [ 34 ] Training Acc: 0.9766233766233766\n", + "Selection Epoch 4 Training epoch [ 35 ] Training Acc: 0.9792207792207792\n", + "Selection Epoch 4 Training epoch [ 36 ] Training Acc: 0.9584415584415584\n", + "Selection Epoch 4 Training epoch [ 37 ] Training Acc: 0.9792207792207792\n", + "Selection Epoch 4 Training epoch [ 38 ] Training Acc: 0.9506493506493506\n", + "Selection Epoch 4 Training epoch [ 39 ] Training Acc: 0.9948051948051948\n", + "Epoch: 5 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.46714835055172443 0.9948051948051948 0.18999356031417847 0.9641025641025641 9.024426937103271 0.45714285714285713 106.73501777648926\n", + "AL epoch: 5\n", + "val, test error% for class 0 : 0.0 40.0\n", + "val, test error% for class 1 : 3.33 51.43\n", + "val, test error% for class 2 : 7.69 57.14\n", + "val, test error% for class 3 : 25.0 97.14\n", + "val, test error% for class 4 : 3.45 62.86\n", + "val, test error% for class 5 : 0.0 28.57\n", + "val, test error% for class 6 : 0.0 42.86\n", + "62 / 70 idc points.\n", + "selEpoch: 5, Selection Ended at: 2022-01-30 20:16:48.642851\n", + "62 5721 5791\n", + "After augmentation, size of train_set: 455 lake set: 5721 val set: 257\n", + "Selection Epoch 5 Training epoch [ 1 ] Training Acc: 0.3076923076923077\n", + "Selection Epoch 5 Training epoch [ 2 ] Training Acc: 0.3252747252747253\n", + "Selection Epoch 5 Training epoch [ 3 ] Training Acc: 0.421978021978022\n", + "Selection Epoch 5 Training epoch [ 4 ] Training Acc: 0.4967032967032967\n", + "Selection Epoch 5 Training epoch [ 5 ] Training Acc: 0.5472527472527473\n", + "Selection Epoch 5 Training epoch [ 6 ] Training Acc: 0.4901098901098901\n", + "Selection Epoch 5 Training epoch [ 7 ] Training Acc: 0.5692307692307692\n", + "Selection Epoch 5 Training epoch [ 8 ] Training Acc: 0.6043956043956044\n", + "Selection Epoch 5 Training epoch [ 9 ] Training Acc: 0.6263736263736264\n", + "Selection Epoch 5 Training epoch [ 10 ] Training Acc: 0.6835164835164835\n", + "Selection Epoch 5 Training epoch [ 11 ] Training Acc: 0.589010989010989\n", + "Selection Epoch 5 Training epoch [ 12 ] Training Acc: 0.7054945054945055\n", + "Selection Epoch 5 Training epoch [ 13 ] Training Acc: 0.7318681318681318\n", + "Selection Epoch 5 Training epoch [ 14 ] Training Acc: 0.7868131868131868\n", + "Selection Epoch 5 Training epoch [ 15 ] Training Acc: 0.7670329670329671\n", + "Selection Epoch 5 Training epoch [ 16 ] Training Acc: 0.7494505494505495\n", + "Selection Epoch 5 Training epoch [ 17 ] Training Acc: 
0.46813186813186813\n", + "Selection Epoch 5 Training epoch [ 18 ] Training Acc: 0.621978021978022\n", + "Selection Epoch 5 Training epoch [ 19 ] Training Acc: 0.7406593406593407\n", + "Selection Epoch 5 Training epoch [ 20 ] Training Acc: 0.8417582417582418\n", + "Selection Epoch 5 Training epoch [ 21 ] Training Acc: 0.8505494505494505\n", + "Selection Epoch 5 Training epoch [ 22 ] Training Acc: 0.8747252747252747\n", + "Selection Epoch 5 Training epoch [ 23 ] Training Acc: 0.9296703296703297\n", + "Selection Epoch 5 Training epoch [ 24 ] Training Acc: 0.9560439560439561\n", + "Selection Epoch 5 Training epoch [ 25 ] Training Acc: 0.9582417582417583\n", + "Selection Epoch 5 Training epoch [ 26 ] Training Acc: 0.8725274725274725\n", + "Selection Epoch 5 Training epoch [ 27 ] Training Acc: 0.9472527472527472\n", + "Selection Epoch 5 Training epoch [ 28 ] Training Acc: 0.8505494505494505\n", + "Selection Epoch 5 Training epoch [ 29 ] Training Acc: 0.9758241758241758\n", + "Selection Epoch 5 Training epoch [ 30 ] Training Acc: 0.9318681318681319\n", + "Selection Epoch 5 Training epoch [ 31 ] Training Acc: 0.9736263736263736\n", + "Selection Epoch 5 Training epoch [ 32 ] Training Acc: 0.9824175824175824\n", + "Selection Epoch 5 Training epoch [ 33 ] Training Acc: 0.9868131868131869\n", + "Selection Epoch 5 Training epoch [ 34 ] Training Acc: 0.9934065934065934\n", + "Epoch: 6 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.5745432096300647 0.9934065934065934 0.16478925943374634 0.9571984435797666 7.188232898712158 0.47346938775510206 113.22043752670288\n", + "AL epoch: 6\n", + "val, test error% for class 0 : 2.08 45.71\n", + "val, test error% for class 1 : 8.33 65.71\n", + "val, test error% for class 2 : 2.44 65.71\n", + "val, test error% for class 3 : 13.33 62.86\n", + "val, test error% for class 4 : 2.5 54.29\n", + "val, test error% for class 5 : 2.04 37.14\n", + "val, test error% for class 6 : 7.14 37.14\n", + "51 / 70 idc points.\n", + "selEpoch: 6, Selection Ended at: 2022-01-30 20:19:11.176896\n", + "51 5651 5721\n", + "After augmentation, size of train_set: 525 lake set: 5651 val set: 308\n", + "Selection Epoch 6 Training epoch [ 1 ] Training Acc: 0.09142857142857143\n", + "Selection Epoch 6 Training epoch [ 2 ] Training Acc: 0.5066666666666667\n", + "Selection Epoch 6 Training epoch [ 3 ] Training Acc: 0.45904761904761904\n", + "Selection Epoch 6 Training epoch [ 4 ] Training Acc: 0.4380952380952381\n", + "Selection Epoch 6 Training epoch [ 5 ] Training Acc: 0.4666666666666667\n", + "Selection Epoch 6 Training epoch [ 6 ] Training Acc: 0.5485714285714286\n", + "Selection Epoch 6 Training epoch [ 7 ] Training Acc: 0.5295238095238095\n", + "Selection Epoch 6 Training epoch [ 8 ] Training Acc: 0.539047619047619\n", + "Selection Epoch 6 Training epoch [ 9 ] Training Acc: 0.5295238095238095\n", + "Selection Epoch 6 Training epoch [ 10 ] Training Acc: 0.56\n", + "Selection Epoch 6 Training epoch [ 11 ] Training Acc: 0.5866666666666667\n", + "Selection Epoch 6 Training epoch [ 12 ] Training Acc: 0.6361904761904762\n", + "Selection Epoch 6 Training epoch [ 13 ] Training Acc: 0.5771428571428572\n", + "Selection Epoch 6 Training epoch [ 14 ] Training Acc: 0.638095238095238\n", + "Selection Epoch 6 Training epoch [ 15 ] Training Acc: 0.5828571428571429\n", + "Selection Epoch 6 Training epoch [ 16 ] Training Acc: 0.6419047619047619\n", + "Selection Epoch 6 Training epoch [ 17 ] Training Acc: 0.7485714285714286\n", + "Selection Epoch 6 Training epoch [ 18 ] Training Acc: 
0.7085714285714285\n", + "Selection Epoch 6 Training epoch [ 19 ] Training Acc: 0.7542857142857143\n", + "Selection Epoch 6 Training epoch [ 20 ] Training Acc: 0.7657142857142857\n", + "Selection Epoch 6 Training epoch [ 21 ] Training Acc: 0.8323809523809523\n", + "Selection Epoch 6 Training epoch [ 22 ] Training Acc: 0.8838095238095238\n", + "Selection Epoch 6 Training epoch [ 23 ] Training Acc: 0.7180952380952381\n", + "Selection Epoch 6 Training epoch [ 24 ] Training Acc: 0.7504761904761905\n", + "Selection Epoch 6 Training epoch [ 25 ] Training Acc: 0.8704761904761905\n", + "Selection Epoch 6 Training epoch [ 26 ] Training Acc: 0.8914285714285715\n", + "Selection Epoch 6 Training epoch [ 27 ] Training Acc: 0.9085714285714286\n", + "Selection Epoch 6 Training epoch [ 28 ] Training Acc: 0.8971428571428571\n", + "Selection Epoch 6 Training epoch [ 29 ] Training Acc: 0.9219047619047619\n", + "Selection Epoch 6 Training epoch [ 30 ] Training Acc: 0.9180952380952381\n", + "Selection Epoch 6 Training epoch [ 31 ] Training Acc: 0.9466666666666667\n", + "Selection Epoch 6 Training epoch [ 32 ] Training Acc: 0.9314285714285714\n", + "Selection Epoch 6 Training epoch [ 33 ] Training Acc: 0.9657142857142857\n", + "Selection Epoch 6 Training epoch [ 34 ] Training Acc: 0.92\n", + "Selection Epoch 6 Training epoch [ 35 ] Training Acc: 0.9638095238095238\n", + "Selection Epoch 6 Training epoch [ 36 ] Training Acc: 0.9238095238095239\n", + "Selection Epoch 6 Training epoch [ 37 ] Training Acc: 0.9752380952380952\n", + "Selection Epoch 6 Training epoch [ 38 ] Training Acc: 0.9657142857142857\n", + "Selection Epoch 6 Training epoch [ 39 ] Training Acc: 0.9523809523809523\n", + "Selection Epoch 6 Training epoch [ 40 ] Training Acc: 0.9676190476190476\n", + "Selection Epoch 6 Training epoch [ 41 ] Training Acc: 0.9752380952380952\n", + "Selection Epoch 6 Training epoch [ 42 ] Training Acc: 0.9980952380952381\n", + "Epoch: 7 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.3374885182129219 0.9980952380952381 0.10341165959835052 0.974025974025974 6.324185013771057 0.5224489795918368 159.01512026786804\n", + "AL epoch: 7\n", + "val, test error% for class 0 : 0.0 28.57\n", + "val, test error% for class 1 : 2.38 54.29\n", + "val, test error% for class 2 : 2.17 62.86\n", + "val, test error% for class 3 : 5.26 57.14\n", + "val, test error% for class 4 : 4.0 51.43\n", + "val, test error% for class 5 : 1.72 40.0\n", + "val, test error% for class 6 : 6.25 40.0\n", + "63 / 70 idc points.\n", + "selEpoch: 7, Selection Ended at: 2022-01-30 20:22:19.180366\n", + "63 5581 5651\n", + "After augmentation, size of train_set: 595 lake set: 5581 val set: 371\n", + "Selection Epoch 7 Training epoch [ 1 ] Training Acc: 0.292436974789916\n", + "Selection Epoch 7 Training epoch [ 2 ] Training Acc: 0.4050420168067227\n", + "Selection Epoch 7 Training epoch [ 3 ] Training Acc: 0.4756302521008403\n", + "Selection Epoch 7 Training epoch [ 4 ] Training Acc: 0.5478991596638656\n", + "Selection Epoch 7 Training epoch [ 5 ] Training Acc: 0.4773109243697479\n", + "Selection Epoch 7 Training epoch [ 6 ] Training Acc: 0.5949579831932773\n", + "Selection Epoch 7 Training epoch [ 7 ] Training Acc: 0.626890756302521\n", + "Selection Epoch 7 Training epoch [ 8 ] Training Acc: 0.6184873949579832\n", + "Selection Epoch 7 Training epoch [ 9 ] Training Acc: 0.6672268907563025\n", + "Selection Epoch 7 Training epoch [ 10 ] Training Acc: 0.6957983193277311\n", + "Selection Epoch 7 Training epoch [ 11 ] Training Acc: 0.7596638655462185\n", + 
"Selection Epoch 7 Training epoch [ 12 ] Training Acc: 0.7109243697478992\n", + "Selection Epoch 7 Training epoch [ 13 ] Training Acc: 0.7277310924369748\n", + "Selection Epoch 7 Training epoch [ 14 ] Training Acc: 0.7899159663865546\n", + "Selection Epoch 7 Training epoch [ 15 ] Training Acc: 0.7966386554621848\n", + "Selection Epoch 7 Training epoch [ 16 ] Training Acc: 0.9025210084033614\n", + "Selection Epoch 7 Training epoch [ 17 ] Training Acc: 0.9310924369747899\n", + "Selection Epoch 7 Training epoch [ 18 ] Training Acc: 0.7865546218487395\n", + "Selection Epoch 7 Training epoch [ 19 ] Training Acc: 0.761344537815126\n", + "Selection Epoch 7 Training epoch [ 20 ] Training Acc: 0.9613445378151261\n", + "Selection Epoch 7 Training epoch [ 21 ] Training Acc: 0.9378151260504202\n", + "Selection Epoch 7 Training epoch [ 22 ] Training Acc: 0.9764705882352941\n", + "Selection Epoch 7 Training epoch [ 23 ] Training Acc: 0.9915966386554622\n", + "Epoch: 8 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.8138716445537284 0.9915966386554622 0.14173361659049988 0.967654986522911 6.149673223495483 0.49795918367346936 98.56460404396057\n", + "AL epoch: 8\n", + "val, test error% for class 0 : 1.45 37.14\n", + "val, test error% for class 1 : 1.85 57.14\n", + "val, test error% for class 2 : 3.45 68.57\n", + "val, test error% for class 3 : 14.81 82.86\n", + "val, test error% for class 4 : 3.17 57.14\n", + "val, test error% for class 5 : 1.54 37.14\n", + "val, test error% for class 6 : 2.86 11.43\n", + "57 / 70 idc points.\n", + "selEpoch: 8, Selection Ended at: 2022-01-30 20:24:26.357928\n", + "57 5511 5581\n", + "After augmentation, size of train_set: 665 lake set: 5511 val set: 428\n", + "Selection Epoch 8 Training epoch [ 1 ] Training Acc: 0.22255639097744362\n", + "Selection Epoch 8 Training epoch [ 2 ] Training Acc: 0.45112781954887216\n", + "Selection Epoch 8 Training epoch [ 3 ] Training Acc: 0.46466165413533833\n", + "Selection Epoch 8 Training epoch [ 4 ] Training Acc: 0.5022556390977444\n", + "Selection Epoch 8 Training epoch [ 5 ] Training Acc: 0.5037593984962406\n", + "Selection Epoch 8 Training epoch [ 6 ] Training Acc: 0.5428571428571428\n", + "Selection Epoch 8 Training epoch [ 7 ] Training Acc: 0.4932330827067669\n", + "Selection Epoch 8 Training epoch [ 8 ] Training Acc: 0.37593984962406013\n", + "Selection Epoch 8 Training epoch [ 9 ] Training Acc: 0.5067669172932331\n", + "Selection Epoch 8 Training epoch [ 10 ] Training Acc: 0.5578947368421052\n", + "Selection Epoch 8 Training epoch [ 11 ] Training Acc: 0.5759398496240602\n", + "Selection Epoch 8 Training epoch [ 12 ] Training Acc: 0.5338345864661654\n", + "Selection Epoch 8 Training epoch [ 13 ] Training Acc: 0.6721804511278195\n", + "Selection Epoch 8 Training epoch [ 14 ] Training Acc: 0.6827067669172933\n", + "Selection Epoch 8 Training epoch [ 15 ] Training Acc: 0.7654135338345864\n", + "Selection Epoch 8 Training epoch [ 16 ] Training Acc: 0.7338345864661654\n", + "Selection Epoch 8 Training epoch [ 17 ] Training Acc: 0.7819548872180451\n", + "Selection Epoch 8 Training epoch [ 18 ] Training Acc: 0.8030075187969925\n", + "Selection Epoch 8 Training epoch [ 19 ] Training Acc: 0.806015037593985\n", + "Selection Epoch 8 Training epoch [ 20 ] Training Acc: 0.8751879699248121\n", + "Selection Epoch 8 Training epoch [ 21 ] Training Acc: 0.8616541353383459\n", + "Selection Epoch 8 Training epoch [ 22 ] Training Acc: 0.825563909774436\n", + "Selection Epoch 8 Training epoch [ 23 ] Training Acc: 0.8842105263157894\n", + 
"Selection Epoch 8 Training epoch [ 24 ] Training Acc: 0.8466165413533835\n", + "Selection Epoch 8 Training epoch [ 25 ] Training Acc: 0.9233082706766917\n", + "Selection Epoch 8 Training epoch [ 26 ] Training Acc: 0.8962406015037594\n", + "Selection Epoch 8 Training epoch [ 27 ] Training Acc: 0.9007518796992481\n", + "Selection Epoch 8 Training epoch [ 28 ] Training Acc: 0.956390977443609\n", + "Selection Epoch 8 Training epoch [ 29 ] Training Acc: 0.9593984962406015\n", + "Selection Epoch 8 Training epoch [ 30 ] Training Acc: 0.9864661654135338\n", + "Selection Epoch 8 Training epoch [ 31 ] Training Acc: 0.9849624060150376\n", + "Selection Epoch 8 Training epoch [ 32 ] Training Acc: 0.9398496240601504\n", + "Selection Epoch 8 Training epoch [ 33 ] Training Acc: 0.9654135338345865\n", + "Selection Epoch 8 Training epoch [ 34 ] Training Acc: 0.912781954887218\n", + "Selection Epoch 8 Training epoch [ 35 ] Training Acc: 0.9789473684210527\n", + "Selection Epoch 8 Training epoch [ 36 ] Training Acc: 0.9097744360902256\n", + "Selection Epoch 8 Training epoch [ 37 ] Training Acc: 0.9759398496240601\n", + "Selection Epoch 8 Training epoch [ 38 ] Training Acc: 0.98796992481203\n", + "Selection Epoch 8 Training epoch [ 39 ] Training Acc: 0.9924812030075187\n", + "Epoch: 9 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 1.547302422579378 0.9924812030075187 0.15514717996120453 0.9766355140186916 7.214998006820679 0.4326530612244898 184.95212149620056\n", + "AL epoch: 9\n", + "val, test error% for class 0 : 1.32 48.57\n", + "val, test error% for class 1 : 1.54 54.29\n", + "val, test error% for class 2 : 1.37 80.0\n", + "val, test error% for class 3 : 3.03 65.71\n", + "val, test error% for class 4 : 1.33 57.14\n", + "val, test error% for class 5 : 0.0 40.0\n", + "val, test error% for class 6 : 13.51 51.43\n", + "68 / 70 idc points.\n", + "selEpoch: 9, Selection Ended at: 2022-01-30 20:27:58.722328\n", + "68 5441 5511\n", + "After augmentation, size of train_set: 735 lake set: 5441 val set: 496\n", + "Selection Epoch 9 Training epoch [ 1 ] Training Acc: 0.2979591836734694\n", + "Selection Epoch 9 Training epoch [ 2 ] Training Acc: 0.3727891156462585\n", + "Selection Epoch 9 Training epoch [ 3 ] Training Acc: 0.41904761904761906\n", + "Selection Epoch 9 Training epoch [ 4 ] Training Acc: 0.4448979591836735\n", + "Selection Epoch 9 Training epoch [ 5 ] Training Acc: 0.49523809523809526\n", + "Selection Epoch 9 Training epoch [ 6 ] Training Acc: 0.4448979591836735\n", + "Selection Epoch 9 Training epoch [ 7 ] Training Acc: 0.5170068027210885\n", + "Selection Epoch 9 Training epoch [ 8 ] Training Acc: 0.5877551020408164\n", + "Selection Epoch 9 Training epoch [ 9 ] Training Acc: 0.49387755102040815\n", + "Selection Epoch 9 Training epoch [ 10 ] Training Acc: 0.5904761904761905\n", + "Selection Epoch 9 Training epoch [ 11 ] Training Acc: 0.6163265306122448\n", + "Selection Epoch 9 Training epoch [ 12 ] Training Acc: 0.617687074829932\n", + "Selection Epoch 9 Training epoch [ 13 ] Training Acc: 0.5918367346938775\n", + "Selection Epoch 9 Training epoch [ 14 ] Training Acc: 0.6761904761904762\n", + "Selection Epoch 9 Training epoch [ 15 ] Training Acc: 0.5659863945578232\n", + "Selection Epoch 9 Training epoch [ 16 ] Training Acc: 0.6952380952380952\n", + "Selection Epoch 9 Training epoch [ 17 ] Training Acc: 0.708843537414966\n", + "Selection Epoch 9 Training epoch [ 18 ] Training Acc: 0.7918367346938775\n", + "Selection Epoch 9 Training epoch [ 19 ] Training Acc: 0.7183673469387755\n", + "Selection 
Epoch 9 Training epoch [ 20 ] Training Acc: 0.7564625850340136\n", + "Selection Epoch 9 Training epoch [ 21 ] Training Acc: 0.8979591836734694\n", + "Selection Epoch 9 Training epoch [ 22 ] Training Acc: 0.8122448979591836\n", + "Selection Epoch 9 Training epoch [ 23 ] Training Acc: 0.8993197278911564\n", + "Selection Epoch 9 Training epoch [ 24 ] Training Acc: 0.927891156462585\n", + "Selection Epoch 9 Training epoch [ 25 ] Training Acc: 0.9360544217687075\n", + "Selection Epoch 9 Training epoch [ 26 ] Training Acc: 0.8612244897959184\n", + "Selection Epoch 9 Training epoch [ 27 ] Training Acc: 0.9319727891156463\n", + "Selection Epoch 9 Training epoch [ 28 ] Training Acc: 0.9034013605442177\n", + "Selection Epoch 9 Training epoch [ 29 ] Training Acc: 0.9768707482993197\n", + "Selection Epoch 9 Training epoch [ 30 ] Training Acc: 0.9823129251700681\n", + "Selection Epoch 9 Training epoch [ 31 ] Training Acc: 0.8829931972789116\n", + "Selection Epoch 9 Training epoch [ 32 ] Training Acc: 0.9414965986394558\n", + "Selection Epoch 9 Training epoch [ 33 ] Training Acc: 0.9523809523809523\n", + "Selection Epoch 9 Training epoch [ 34 ] Training Acc: 0.9904761904761905\n", + "Epoch: 10 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 1.3548264253186062 0.9904761904761905 0.12653622031211853 0.9717741935483871 6.614524483680725 0.5428571428571428 182.25668573379517\n", + "val, test error% for class 0 : 1.19 22.86\n", + "val, test error% for class 1 : 7.69 57.14\n", + "val, test error% for class 2 : 1.14 57.14\n", + "val, test error% for class 3 : 2.7 54.29\n", + "val, test error% for class 4 : 2.3 54.29\n", + "val, test error% for class 5 : 2.53 45.71\n", + "val, test error% for class 6 : 2.33 28.57\n", + "[[57.14, 68.57, 94.29, 62.86, 40.0, 71.43, 65.71, 65.71428571428571], [77.14, 62.86, 62.86, 57.14, 82.86, 80.0, 74.29, 71.02142857142857], [71.43, 51.43, 85.71, 68.57, 62.86, 42.86, 77.14, 65.71428571428571], [60.0, 54.29, 71.43, 74.29, 71.43, 51.43, 60.0, 63.26714285714286], [40.0, 51.43, 57.14, 97.14, 62.86, 28.57, 42.86, 54.285714285714285], [45.71, 65.71, 65.71, 62.86, 54.29, 37.14, 37.14, 52.651428571428575], [28.57, 54.29, 62.86, 57.14, 51.43, 40.0, 40.0, 47.75571428571429], [37.14, 57.14, 68.57, 82.86, 57.14, 37.14, 11.43, 50.202857142857134], [48.57, 54.29, 80.0, 65.71, 57.14, 40.0, 51.43, 56.73428571428571], [22.86, 57.14, 57.14, 54.29, 54.29, 45.71, 28.57, 45.71428571428571]]\n", + "\n", + "\n" + ] + } + ], + "source": [ + "train_model_al(datkbuildPath, exePath, num_epochs, data_name, datadir, feature, model_name, budget, split_cfg, learning_rate, run, device, computeClassErrorLog, \"AL\",'badge')\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Random" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "random random\n", + "num ood samples: 5000\n", + "Custom dataset stats: Train size: 105 Val size: 14 Lake size: 6071 Test set: 245\n", + "selected classes are: [0 1 2 3 4 5 6]\n", + "Saving results to: ./SMI_active_learning_results/dermamnist/ood/random/70/fkna_3\n", + "dermamnist_ood_random_7_random_budget:70_epochs:10_linear:True_runsfkna_3\n", + "AL epoch: 0\n", + "initial training epoch\n", + "Init model loaded from disk, skipping init training: /mnt/data2/akshit/Derma/weights/dermamnist_ood_ResNet18_0.01_15_2_7\n", + "AL epoch: 1\n", + "val, test error% for class 0 : 0.0 57.14\n", + "val, test error% for class 1 : 100.0 68.57\n", 
+ "val, test error% for class 2 : 100.0 94.29\n", + "val, test error% for class 3 : 0.0 62.86\n", + "val, test error% for class 4 : 100.0 40.0\n", + "val, test error% for class 5 : 100.0 71.43\n", + "val, test error% for class 6 : 100.0 65.71\n", + "6 / 70 idc points.\n", + "selEpoch: 1, Selection Ended at: 2022-01-30 20:31:03.720899\n", + "6 6001 6071\n", + "After augmentation, size of train_set: 175 lake set: 6001 val set: 20\n", + "Selection Epoch 1 Training epoch [ 1 ] Training Acc: 0.4057142857142857\n", + "Selection Epoch 1 Training epoch [ 2 ] Training Acc: 0.4228571428571429\n", + "Selection Epoch 1 Training epoch [ 3 ] Training Acc: 0.45714285714285713\n", + "Selection Epoch 1 Training epoch [ 4 ] Training Acc: 0.49714285714285716\n", + "Selection Epoch 1 Training epoch [ 5 ] Training Acc: 0.5885714285714285\n", + "Selection Epoch 1 Training epoch [ 6 ] Training Acc: 0.5542857142857143\n", + "Selection Epoch 1 Training epoch [ 7 ] Training Acc: 0.6\n", + "Selection Epoch 1 Training epoch [ 8 ] Training Acc: 0.6285714285714286\n", + "Selection Epoch 1 Training epoch [ 9 ] Training Acc: 0.6514285714285715\n", + "Selection Epoch 1 Training epoch [ 10 ] Training Acc: 0.72\n", + "Selection Epoch 1 Training epoch [ 11 ] Training Acc: 0.7257142857142858\n", + "Selection Epoch 1 Training epoch [ 12 ] Training Acc: 0.6971428571428572\n", + "Selection Epoch 1 Training epoch [ 13 ] Training Acc: 0.7085714285714285\n", + "Selection Epoch 1 Training epoch [ 14 ] Training Acc: 0.7485714285714286\n", + "Selection Epoch 1 Training epoch [ 15 ] Training Acc: 0.7942857142857143\n", + "Selection Epoch 1 Training epoch [ 16 ] Training Acc: 0.8114285714285714\n", + "Selection Epoch 1 Training epoch [ 17 ] Training Acc: 0.84\n", + "Selection Epoch 1 Training epoch [ 18 ] Training Acc: 0.8628571428571429\n", + "Selection Epoch 1 Training epoch [ 19 ] Training Acc: 0.8571428571428571\n", + "Selection Epoch 1 Training epoch [ 20 ] Training Acc: 0.9371428571428572\n", + "Selection Epoch 1 Training epoch [ 21 ] Training Acc: 0.8857142857142857\n", + "Selection Epoch 1 Training epoch [ 22 ] Training Acc: 0.8742857142857143\n", + "Selection Epoch 1 Training epoch [ 23 ] Training Acc: 0.88\n", + "Selection Epoch 1 Training epoch [ 24 ] Training Acc: 0.9257142857142857\n", + "Selection Epoch 1 Training epoch [ 25 ] Training Acc: 0.9485714285714286\n", + "Selection Epoch 1 Training epoch [ 26 ] Training Acc: 0.9085714285714286\n", + "Selection Epoch 1 Training epoch [ 27 ] Training Acc: 0.9428571428571428\n", + "Selection Epoch 1 Training epoch [ 28 ] Training Acc: 0.92\n", + "Selection Epoch 1 Training epoch [ 29 ] Training Acc: 0.9657142857142857\n", + "Selection Epoch 1 Training epoch [ 30 ] Training Acc: 0.9771428571428571\n", + "Selection Epoch 1 Training epoch [ 31 ] Training Acc: 0.96\n", + "Selection Epoch 1 Training epoch [ 32 ] Training Acc: 0.9771428571428571\n", + "Selection Epoch 1 Training epoch [ 33 ] Training Acc: 0.9828571428571429\n", + "Selection Epoch 1 Training epoch [ 34 ] Training Acc: 0.9942857142857143\n", + "Epoch: 2 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.5551553089171648 0.9942857142857143 1.8635917901992798 0.55 10.714311599731445 0.2897959183673469 44.20124816894531\n", + "AL epoch: 2\n", + "val, test error% for class 0 : 0.0 71.43\n", + "val, test error% for class 1 : 66.67 71.43\n", + "val, test error% for class 2 : 66.67 88.57\n", + "val, test error% for class 3 : 0.0 45.71\n", + "val, test error% for class 4 : 100.0 68.57\n", + "val, test error% for class 5 : 
33.33 62.86\n", + "val, test error% for class 6 : 100.0 88.57\n", + "10 / 70 idc points.\n", + "selEpoch: 2, Selection Ended at: 2022-01-30 20:31:48.360002\n", + "10 5931 6001\n", + "After augmentation, size of train_set: 245 lake set: 5931 val set: 30\n", + "Selection Epoch 2 Training epoch [ 1 ] Training Acc: 0.563265306122449\n", + "Selection Epoch 2 Training epoch [ 2 ] Training Acc: 0.5714285714285714\n", + "Selection Epoch 2 Training epoch [ 3 ] Training Acc: 0.6571428571428571\n", + "Selection Epoch 2 Training epoch [ 4 ] Training Acc: 0.6571428571428571\n", + "Selection Epoch 2 Training epoch [ 5 ] Training Acc: 0.6244897959183674\n", + "Selection Epoch 2 Training epoch [ 6 ] Training Acc: 0.636734693877551\n", + "Selection Epoch 2 Training epoch [ 7 ] Training Acc: 0.6326530612244898\n", + "Selection Epoch 2 Training epoch [ 8 ] Training Acc: 0.6979591836734694\n", + "Selection Epoch 2 Training epoch [ 9 ] Training Acc: 0.6693877551020408\n", + "Selection Epoch 2 Training epoch [ 10 ] Training Acc: 0.7142857142857143\n", + "Selection Epoch 2 Training epoch [ 11 ] Training Acc: 0.726530612244898\n", + "Selection Epoch 2 Training epoch [ 12 ] Training Acc: 0.7346938775510204\n", + "Selection Epoch 2 Training epoch [ 13 ] Training Acc: 0.7510204081632653\n", + "Selection Epoch 2 Training epoch [ 14 ] Training Acc: 0.7551020408163265\n", + "Selection Epoch 2 Training epoch [ 15 ] Training Acc: 0.7673469387755102\n", + "Selection Epoch 2 Training epoch [ 16 ] Training Acc: 0.7877551020408163\n", + "Selection Epoch 2 Training epoch [ 17 ] Training Acc: 0.8\n", + "Selection Epoch 2 Training epoch [ 18 ] Training Acc: 0.8285714285714286\n", + "Selection Epoch 2 Training epoch [ 19 ] Training Acc: 0.8367346938775511\n", + "Selection Epoch 2 Training epoch [ 20 ] Training Acc: 0.710204081632653\n", + "Selection Epoch 2 Training epoch [ 21 ] Training Acc: 0.8244897959183674\n", + "Selection Epoch 2 Training epoch [ 22 ] Training Acc: 0.8244897959183674\n", + "Selection Epoch 2 Training epoch [ 23 ] Training Acc: 0.8653061224489796\n", + "Selection Epoch 2 Training epoch [ 24 ] Training Acc: 0.7877551020408163\n", + "Selection Epoch 2 Training epoch [ 25 ] Training Acc: 0.8571428571428571\n", + "Selection Epoch 2 Training epoch [ 26 ] Training Acc: 0.9142857142857143\n", + "Selection Epoch 2 Training epoch [ 27 ] Training Acc: 0.8938775510204081\n", + "Selection Epoch 2 Training epoch [ 28 ] Training Acc: 0.9142857142857143\n", + "Selection Epoch 2 Training epoch [ 29 ] Training Acc: 0.9306122448979591\n", + "Selection Epoch 2 Training epoch [ 30 ] Training Acc: 0.9306122448979591\n", + "Selection Epoch 2 Training epoch [ 31 ] Training Acc: 0.9183673469387755\n", + "Selection Epoch 2 Training epoch [ 32 ] Training Acc: 0.8489795918367347\n", + "Selection Epoch 2 Training epoch [ 33 ] Training Acc: 0.8489795918367347\n", + "Selection Epoch 2 Training epoch [ 34 ] Training Acc: 0.8612244897959184\n", + "Selection Epoch 2 Training epoch [ 35 ] Training Acc: 0.8775510204081632\n", + "Selection Epoch 2 Training epoch [ 36 ] Training Acc: 0.9510204081632653\n", + "Selection Epoch 2 Training epoch [ 37 ] Training Acc: 0.963265306122449\n", + "Selection Epoch 2 Training epoch [ 38 ] Training Acc: 0.9673469387755103\n", + "Selection Epoch 2 Training epoch [ 39 ] Training Acc: 0.9836734693877551\n", + "Selection Epoch 2 Training epoch [ 40 ] Training Acc: 0.9795918367346939\n", + "Selection Epoch 2 Training epoch [ 41 ] Training Acc: 0.9959183673469387\n", + "Epoch: 3 
FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.4597263210453093 0.9959183673469387 1.3642923831939697 0.6333333333333333 8.862774133682251 0.34285714285714286 72.26628851890564\n", + "AL epoch: 3\n", + "val, test error% for class 0 : 33.33 62.86\n", + "val, test error% for class 1 : 33.33 74.29\n", + "val, test error% for class 2 : 50.0 82.86\n", + "val, test error% for class 3 : 0.0 68.57\n", + "val, test error% for class 4 : 100.0 74.29\n", + "val, test error% for class 5 : 50.0 57.14\n", + "val, test error% for class 6 : 25.0 40.0\n", + "13 / 70 idc points.\n", + "selEpoch: 3, Selection Ended at: 2022-01-30 20:33:01.103703\n", + "13 5861 5931\n", + "After augmentation, size of train_set: 315 lake set: 5861 val set: 43\n", + "Selection Epoch 3 Training epoch [ 1 ] Training Acc: 0.6253968253968254\n", + "Selection Epoch 3 Training epoch [ 2 ] Training Acc: 0.6857142857142857\n", + "Selection Epoch 3 Training epoch [ 3 ] Training Acc: 0.7047619047619048\n", + "Selection Epoch 3 Training epoch [ 4 ] Training Acc: 0.726984126984127\n", + "Selection Epoch 3 Training epoch [ 5 ] Training Acc: 0.746031746031746\n", + "Selection Epoch 3 Training epoch [ 6 ] Training Acc: 0.4793650793650794\n", + "Selection Epoch 3 Training epoch [ 7 ] Training Acc: 0.7206349206349206\n", + "Selection Epoch 3 Training epoch [ 8 ] Training Acc: 0.7111111111111111\n", + "Selection Epoch 3 Training epoch [ 9 ] Training Acc: 0.8031746031746032\n", + "Selection Epoch 3 Training epoch [ 10 ] Training Acc: 0.7206349206349206\n", + "Selection Epoch 3 Training epoch [ 11 ] Training Acc: 0.7777777777777778\n", + "Selection Epoch 3 Training epoch [ 12 ] Training Acc: 0.7873015873015873\n", + "Selection Epoch 3 Training epoch [ 13 ] Training Acc: 0.8444444444444444\n", + "Selection Epoch 3 Training epoch [ 14 ] Training Acc: 0.8825396825396825\n", + "Selection Epoch 3 Training epoch [ 15 ] Training Acc: 0.8825396825396825\n", + "Selection Epoch 3 Training epoch [ 16 ] Training Acc: 0.8380952380952381\n", + "Selection Epoch 3 Training epoch [ 17 ] Training Acc: 0.8317460317460318\n", + "Selection Epoch 3 Training epoch [ 18 ] Training Acc: 0.8380952380952381\n", + "Selection Epoch 3 Training epoch [ 19 ] Training Acc: 0.8888888888888888\n", + "Selection Epoch 3 Training epoch [ 20 ] Training Acc: 0.9238095238095239\n", + "Selection Epoch 3 Training epoch [ 21 ] Training Acc: 0.9047619047619048\n", + "Selection Epoch 3 Training epoch [ 22 ] Training Acc: 0.8507936507936508\n", + "Selection Epoch 3 Training epoch [ 23 ] Training Acc: 0.946031746031746\n", + "Selection Epoch 3 Training epoch [ 24 ] Training Acc: 0.9873015873015873\n", + "Selection Epoch 3 Training epoch [ 25 ] Training Acc: 0.9746031746031746\n", + "Selection Epoch 3 Training epoch [ 26 ] Training Acc: 0.9936507936507937\n", + "Epoch: 4 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.9151976378634572 0.9936507936507937 0.7830438017845154 0.7906976744186046 7.652293920516968 0.39183673469387753 61.684980630874634\n", + "AL epoch: 4\n", + "val, test error% for class 0 : 25.0 57.14\n", + "val, test error% for class 1 : 22.22 77.14\n", + "val, test error% for class 2 : 14.29 77.14\n", + "val, test error% for class 3 : 25.0 71.43\n", + "val, test error% for class 4 : 33.33 62.86\n", + "val, test error% for class 5 : 0.0 22.86\n", + "val, test error% for class 6 : 40.0 57.14\n", + "16 / 70 idc points.\n", + "selEpoch: 4, Selection Ended at: 2022-01-30 20:34:03.423704\n", + "16 5791 5861\n", + "After augmentation, size of train_set: 385 lake set: 5791 
val set: 59\n", + "Selection Epoch 4 Training epoch [ 1 ] Training Acc: 0.625974025974026\n", + "Selection Epoch 4 Training epoch [ 2 ] Training Acc: 0.7038961038961039\n", + "Selection Epoch 4 Training epoch [ 3 ] Training Acc: 0.6363636363636364\n", + "Selection Epoch 4 Training epoch [ 4 ] Training Acc: 0.6831168831168831\n", + "Selection Epoch 4 Training epoch [ 5 ] Training Acc: 0.6935064935064935\n", + "Selection Epoch 4 Training epoch [ 6 ] Training Acc: 0.7220779220779221\n", + "Selection Epoch 4 Training epoch [ 7 ] Training Acc: 0.7272727272727273\n", + "Selection Epoch 4 Training epoch [ 8 ] Training Acc: 0.7324675324675325\n", + "Selection Epoch 4 Training epoch [ 9 ] Training Acc: 0.7220779220779221\n", + "Selection Epoch 4 Training epoch [ 10 ] Training Acc: 0.7142857142857143\n", + "Selection Epoch 4 Training epoch [ 11 ] Training Acc: 0.7350649350649351\n", + "Selection Epoch 4 Training epoch [ 12 ] Training Acc: 0.7376623376623377\n", + "Selection Epoch 4 Training epoch [ 13 ] Training Acc: 0.7662337662337663\n", + "Selection Epoch 4 Training epoch [ 14 ] Training Acc: 0.7792207792207793\n", + "Selection Epoch 4 Training epoch [ 15 ] Training Acc: 0.7792207792207793\n", + "Selection Epoch 4 Training epoch [ 16 ] Training Acc: 0.787012987012987\n", + "Selection Epoch 4 Training epoch [ 17 ] Training Acc: 0.7662337662337663\n", + "Selection Epoch 4 Training epoch [ 18 ] Training Acc: 0.7740259740259741\n", + "Selection Epoch 4 Training epoch [ 19 ] Training Acc: 0.7792207792207793\n", + "Selection Epoch 4 Training epoch [ 20 ] Training Acc: 0.8233766233766234\n", + "Selection Epoch 4 Training epoch [ 21 ] Training Acc: 0.7922077922077922\n", + "Selection Epoch 4 Training epoch [ 22 ] Training Acc: 0.8285714285714286\n", + "Selection Epoch 4 Training epoch [ 23 ] Training Acc: 0.8155844155844156\n", + "Selection Epoch 4 Training epoch [ 24 ] Training Acc: 0.8103896103896104\n", + "Selection Epoch 4 Training epoch [ 25 ] Training Acc: 0.8285714285714286\n", + "Selection Epoch 4 Training epoch [ 26 ] Training Acc: 0.7324675324675325\n", + "Selection Epoch 4 Training epoch [ 27 ] Training Acc: 0.8285714285714286\n", + "Selection Epoch 4 Training epoch [ 28 ] Training Acc: 0.8441558441558441\n", + "Selection Epoch 4 Training epoch [ 29 ] Training Acc: 0.8545454545454545\n", + "Selection Epoch 4 Training epoch [ 30 ] Training Acc: 0.8857142857142857\n", + "Selection Epoch 4 Training epoch [ 31 ] Training Acc: 0.8961038961038961\n", + "Selection Epoch 4 Training epoch [ 32 ] Training Acc: 0.8727272727272727\n", + "Selection Epoch 4 Training epoch [ 33 ] Training Acc: 0.8987012987012987\n", + "Selection Epoch 4 Training epoch [ 34 ] Training Acc: 0.9506493506493506\n", + "Selection Epoch 4 Training epoch [ 35 ] Training Acc: 0.9142857142857143\n", + "Selection Epoch 4 Training epoch [ 36 ] Training Acc: 0.9402597402597402\n", + "Selection Epoch 4 Training epoch [ 37 ] Training Acc: 0.961038961038961\n", + "Selection Epoch 4 Training epoch [ 38 ] Training Acc: 0.9428571428571428\n", + "Selection Epoch 4 Training epoch [ 39 ] Training Acc: 0.9506493506493506\n", + "Selection Epoch 4 Training epoch [ 40 ] Training Acc: 0.9532467532467532\n", + "Selection Epoch 4 Training epoch [ 41 ] Training Acc: 0.9636363636363636\n", + "Selection Epoch 4 Training epoch [ 42 ] Training Acc: 0.9766233766233766\n", + "Selection Epoch 4 Training epoch [ 43 ] Training Acc: 0.9506493506493506\n", + "Selection Epoch 4 Training epoch [ 44 ] Training Acc: 0.8935064935064935\n", + "Selection Epoch 4 Training 
epoch [ 45 ] Training Acc: 0.9168831168831169\n", + "Selection Epoch 4 Training epoch [ 46 ] Training Acc: 0.9922077922077922\n", + "Epoch: 5 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 2.939487539231777 0.9922077922077922 0.6734365224838257 0.8813559322033898 5.66205370426178 0.4 128.92210602760315\n", + "AL epoch: 5\n", + "val, test error% for class 0 : 18.18 82.86\n", + "val, test error% for class 1 : 0.0 34.29\n", + "val, test error% for class 2 : 22.22 88.57\n", + "val, test error% for class 3 : 0.0 62.86\n", + "val, test error% for class 4 : 25.0 65.71\n", + "val, test error% for class 5 : 9.09 34.29\n", + "val, test error% for class 6 : 16.67 51.43\n", + "6 / 70 idc points.\n", + "selEpoch: 5, Selection Ended at: 2022-01-30 20:36:12.792810\n", + "6 5721 5791\n", + "After augmentation, size of train_set: 455 lake set: 5721 val set: 65\n", + "Selection Epoch 5 Training epoch [ 1 ] Training Acc: 0.6835164835164835\n", + "Selection Epoch 5 Training epoch [ 2 ] Training Acc: 0.7296703296703296\n", + "Selection Epoch 5 Training epoch [ 3 ] Training Acc: 0.7692307692307693\n", + "Selection Epoch 5 Training epoch [ 4 ] Training Acc: 0.7758241758241758\n", + "Selection Epoch 5 Training epoch [ 5 ] Training Acc: 0.7384615384615385\n", + "Selection Epoch 5 Training epoch [ 6 ] Training Acc: 0.8043956043956044\n", + "Selection Epoch 5 Training epoch [ 7 ] Training Acc: 0.7538461538461538\n", + "Selection Epoch 5 Training epoch [ 8 ] Training Acc: 0.7824175824175824\n", + "Selection Epoch 5 Training epoch [ 9 ] Training Acc: 0.778021978021978\n", + "Selection Epoch 5 Training epoch [ 10 ] Training Acc: 0.7648351648351648\n", + "Selection Epoch 5 Training epoch [ 11 ] Training Acc: 0.7956043956043956\n", + "Selection Epoch 5 Training epoch [ 12 ] Training Acc: 0.7032967032967034\n", + "Selection Epoch 5 Training epoch [ 13 ] Training Acc: 0.7758241758241758\n", + "Selection Epoch 5 Training epoch [ 14 ] Training Acc: 0.7802197802197802\n", + "Selection Epoch 5 Training epoch [ 15 ] Training Acc: 0.7956043956043956\n", + "Selection Epoch 5 Training epoch [ 16 ] Training Acc: 0.8175824175824176\n", + "Selection Epoch 5 Training epoch [ 17 ] Training Acc: 0.8307692307692308\n", + "Selection Epoch 5 Training epoch [ 18 ] Training Acc: 0.832967032967033\n", + "Selection Epoch 5 Training epoch [ 19 ] Training Acc: 0.8549450549450549\n", + "Selection Epoch 5 Training epoch [ 20 ] Training Acc: 0.8549450549450549\n", + "Selection Epoch 5 Training epoch [ 21 ] Training Acc: 0.8549450549450549\n", + "Selection Epoch 5 Training epoch [ 22 ] Training Acc: 0.9010989010989011\n", + "Selection Epoch 5 Training epoch [ 23 ] Training Acc: 0.9054945054945055\n", + "Selection Epoch 5 Training epoch [ 24 ] Training Acc: 0.9010989010989011\n", + "Selection Epoch 5 Training epoch [ 25 ] Training Acc: 0.8901098901098901\n", + "Selection Epoch 5 Training epoch [ 26 ] Training Acc: 0.9208791208791208\n", + "Selection Epoch 5 Training epoch [ 27 ] Training Acc: 0.9362637362637363\n", + "Selection Epoch 5 Training epoch [ 28 ] Training Acc: 0.9142857142857143\n", + "Selection Epoch 5 Training epoch [ 29 ] Training Acc: 0.945054945054945\n", + "Selection Epoch 5 Training epoch [ 30 ] Training Acc: 0.9538461538461539\n", + "Selection Epoch 5 Training epoch [ 31 ] Training Acc: 0.9384615384615385\n", + "Selection Epoch 5 Training epoch [ 32 ] Training Acc: 0.9692307692307692\n", + "Selection Epoch 5 Training epoch [ 33 ] Training Acc: 0.978021978021978\n", + "Selection Epoch 5 Training epoch [ 34 ] Training Acc: 
0.978021978021978\n", + "Selection Epoch 5 Training epoch [ 35 ] Training Acc: 0.967032967032967\n", + "Selection Epoch 5 Training epoch [ 36 ] Training Acc: 0.9934065934065934\n", + "Epoch: 6 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 1.357813410460949 0.9934065934065934 0.48478493094444275 0.8923076923076924 6.389882206916809 0.3673469387755102 121.99211311340332\n", + "AL epoch: 6\n", + "val, test error% for class 0 : 15.38 68.57\n", + "val, test error% for class 1 : 0.0 54.29\n", + "val, test error% for class 2 : 11.11 74.29\n", + "val, test error% for class 3 : 28.57 74.29\n", + "val, test error% for class 4 : 16.67 77.14\n", + "val, test error% for class 5 : 0.0 48.57\n", + "val, test error% for class 6 : 14.29 45.71\n", + "11 / 70 idc points.\n", + "selEpoch: 6, Selection Ended at: 2022-01-30 20:38:15.228773\n", + "11 5651 5721\n", + "After augmentation, size of train_set: 525 lake set: 5651 val set: 76\n", + "Selection Epoch 6 Training epoch [ 1 ] Training Acc: 0.72\n", + "Selection Epoch 6 Training epoch [ 2 ] Training Acc: 0.7657142857142857\n", + "Selection Epoch 6 Training epoch [ 3 ] Training Acc: 0.7904761904761904\n", + "Selection Epoch 6 Training epoch [ 4 ] Training Acc: 0.7771428571428571\n", + "Selection Epoch 6 Training epoch [ 5 ] Training Acc: 0.780952380952381\n", + "Selection Epoch 6 Training epoch [ 6 ] Training Acc: 0.6971428571428572\n", + "Selection Epoch 6 Training epoch [ 7 ] Training Acc: 0.7523809523809524\n", + "Selection Epoch 6 Training epoch [ 8 ] Training Acc: 0.7504761904761905\n", + "Selection Epoch 6 Training epoch [ 9 ] Training Acc: 0.7276190476190476\n", + "Selection Epoch 6 Training epoch [ 10 ] Training Acc: 0.7638095238095238\n", + "Selection Epoch 6 Training epoch [ 11 ] Training Acc: 0.780952380952381\n", + "Selection Epoch 6 Training epoch [ 12 ] Training Acc: 0.7638095238095238\n", + "Selection Epoch 6 Training epoch [ 13 ] Training Acc: 0.7676190476190476\n", + "Selection Epoch 6 Training epoch [ 14 ] Training Acc: 0.7942857142857143\n", + "Selection Epoch 6 Training epoch [ 15 ] Training Acc: 0.8171428571428572\n", + "Selection Epoch 6 Training epoch [ 16 ] Training Acc: 0.7866666666666666\n", + "Selection Epoch 6 Training epoch [ 17 ] Training Acc: 0.8152380952380952\n", + "Selection Epoch 6 Training epoch [ 18 ] Training Acc: 0.7885714285714286\n", + "Selection Epoch 6 Training epoch [ 19 ] Training Acc: 0.8133333333333334\n", + "Selection Epoch 6 Training epoch [ 20 ] Training Acc: 0.7961904761904762\n", + "Selection Epoch 6 Training epoch [ 21 ] Training Acc: 0.8228571428571428\n", + "Selection Epoch 6 Training epoch [ 22 ] Training Acc: 0.820952380952381\n", + "Selection Epoch 6 Training epoch [ 23 ] Training Acc: 0.8228571428571428\n", + "Selection Epoch 6 Training epoch [ 24 ] Training Acc: 0.8380952380952381\n", + "Selection Epoch 6 Training epoch [ 25 ] Training Acc: 0.8076190476190476\n", + "Selection Epoch 6 Training epoch [ 26 ] Training Acc: 0.8380952380952381\n", + "Selection Epoch 6 Training epoch [ 27 ] Training Acc: 0.8495238095238096\n", + "Selection Epoch 6 Training epoch [ 28 ] Training Acc: 0.8666666666666667\n", + "Selection Epoch 6 Training epoch [ 29 ] Training Acc: 0.878095238095238\n", + "Selection Epoch 6 Training epoch [ 30 ] Training Acc: 0.8723809523809524\n", + "Selection Epoch 6 Training epoch [ 31 ] Training Acc: 0.8647619047619047\n", + "Selection Epoch 6 Training epoch [ 32 ] Training Acc: 0.8952380952380953\n", + "Selection Epoch 6 Training epoch [ 33 ] Training Acc: 0.9104761904761904\n", + 
"Selection Epoch 6 Training epoch [ 34 ] Training Acc: 0.8933333333333333\n", + "Selection Epoch 6 Training epoch [ 35 ] Training Acc: 0.88\n", + "Selection Epoch 6 Training epoch [ 36 ] Training Acc: 0.9142857142857143\n", + "Selection Epoch 6 Training epoch [ 37 ] Training Acc: 0.8952380952380953\n", + "Selection Epoch 6 Training epoch [ 38 ] Training Acc: 0.84\n", + "Selection Epoch 6 Training epoch [ 39 ] Training Acc: 0.8952380952380953\n", + "Selection Epoch 6 Training epoch [ 40 ] Training Acc: 0.9085714285714286\n", + "Selection Epoch 6 Training epoch [ 41 ] Training Acc: 0.8990476190476191\n", + "Selection Epoch 6 Training epoch [ 42 ] Training Acc: 0.9104761904761904\n", + "Selection Epoch 6 Training epoch [ 43 ] Training Acc: 0.9428571428571428\n", + "Selection Epoch 6 Training epoch [ 44 ] Training Acc: 0.9428571428571428\n", + "Selection Epoch 6 Training epoch [ 45 ] Training Acc: 0.9447619047619048\n", + "Selection Epoch 6 Training epoch [ 46 ] Training Acc: 0.9542857142857143\n", + "Selection Epoch 6 Training epoch [ 47 ] Training Acc: 0.9371428571428572\n", + "Selection Epoch 6 Training epoch [ 48 ] Training Acc: 0.9447619047619048\n", + "Selection Epoch 6 Training epoch [ 49 ] Training Acc: 0.92\n", + "Selection Epoch 6 Training epoch [ 50 ] Training Acc: 0.9428571428571428\n", + "Selection Epoch 6 Training epoch [ 51 ] Training Acc: 0.979047619047619\n", + "Selection Epoch 6 Training epoch [ 52 ] Training Acc: 0.9657142857142857\n", + "Selection Epoch 6 Training epoch [ 53 ] Training Acc: 0.9809523809523809\n", + "Selection Epoch 6 Training epoch [ 54 ] Training Acc: 0.9504761904761905\n", + "Selection Epoch 6 Training epoch [ 55 ] Training Acc: 0.9542857142857143\n", + "Selection Epoch 6 Training epoch [ 56 ] Training Acc: 0.9828571428571429\n", + "Selection Epoch 6 Training epoch [ 57 ] Training Acc: 0.9885714285714285\n", + "Selection Epoch 6 Training epoch [ 58 ] Training Acc: 0.9961904761904762\n", + "Epoch: 7 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 1.2202326526748948 0.9961904761904762 0.3604626953601837 0.9473684210526315 6.473782896995544 0.42448979591836733 226.50392627716064\n", + "AL epoch: 7\n", + "val, test error% for class 0 : 6.67 65.71\n", + "val, test error% for class 1 : 0.0 40.0\n", + "val, test error% for class 2 : 10.0 85.71\n", + "val, test error% for class 3 : 0.0 74.29\n", + "val, test error% for class 4 : 12.5 68.57\n", + "val, test error% for class 5 : 0.0 31.43\n", + "val, test error% for class 6 : 14.29 37.14\n", + "15 / 70 idc points.\n", + "selEpoch: 7, Selection Ended at: 2022-01-30 20:42:02.229076\n", + "15 5581 5651\n", + "After augmentation, size of train_set: 595 lake set: 5581 val set: 91\n", + "Selection Epoch 7 Training epoch [ 1 ] Training Acc: 0.7411764705882353\n", + "Selection Epoch 7 Training epoch [ 2 ] Training Acc: 0.7411764705882353\n", + "Selection Epoch 7 Training epoch [ 3 ] Training Acc: 0.761344537815126\n", + "Selection Epoch 7 Training epoch [ 4 ] Training Acc: 0.7966386554621848\n", + "Selection Epoch 7 Training epoch [ 5 ] Training Acc: 0.788235294117647\n", + "Selection Epoch 7 Training epoch [ 6 ] Training Acc: 0.788235294117647\n", + "Selection Epoch 7 Training epoch [ 7 ] Training Acc: 0.8016806722689076\n", + "Selection Epoch 7 Training epoch [ 8 ] Training Acc: 0.788235294117647\n", + "Selection Epoch 7 Training epoch [ 9 ] Training Acc: 0.8016806722689076\n", + "Selection Epoch 7 Training epoch [ 10 ] Training Acc: 0.8050420168067227\n", + "Selection Epoch 7 Training epoch [ 11 ] Training Acc: 
0.8084033613445378\n", + "Selection Epoch 7 Training epoch [ 12 ] Training Acc: 0.8302521008403362\n", + "Selection Epoch 7 Training epoch [ 13 ] Training Acc: 0.8336134453781513\n", + "Selection Epoch 7 Training epoch [ 14 ] Training Acc: 0.8218487394957983\n", + "Selection Epoch 7 Training epoch [ 15 ] Training Acc: 0.8369747899159664\n", + "Selection Epoch 7 Training epoch [ 16 ] Training Acc: 0.8689075630252101\n", + "Selection Epoch 7 Training epoch [ 17 ] Training Acc: 0.8554621848739495\n", + "Selection Epoch 7 Training epoch [ 18 ] Training Acc: 0.8285714285714286\n", + "Selection Epoch 7 Training epoch [ 19 ] Training Acc: 0.8705882352941177\n", + "Selection Epoch 7 Training epoch [ 20 ] Training Acc: 0.8605042016806723\n", + "Selection Epoch 7 Training epoch [ 21 ] Training Acc: 0.8957983193277311\n", + "Selection Epoch 7 Training epoch [ 22 ] Training Acc: 0.8941176470588236\n", + "Selection Epoch 7 Training epoch [ 23 ] Training Acc: 0.9042016806722689\n", + "Selection Epoch 7 Training epoch [ 24 ] Training Acc: 0.9277310924369748\n", + "Selection Epoch 7 Training epoch [ 25 ] Training Acc: 0.9243697478991597\n", + "Selection Epoch 7 Training epoch [ 26 ] Training Acc: 0.9428571428571428\n", + "Selection Epoch 7 Training epoch [ 27 ] Training Acc: 0.9647058823529412\n", + "Selection Epoch 7 Training epoch [ 28 ] Training Acc: 0.9294117647058824\n", + "Selection Epoch 7 Training epoch [ 29 ] Training Acc: 0.9394957983193277\n", + "Selection Epoch 7 Training epoch [ 30 ] Training Acc: 0.9478991596638655\n", + "Selection Epoch 7 Training epoch [ 31 ] Training Acc: 0.9680672268907563\n", + "Selection Epoch 7 Training epoch [ 32 ] Training Acc: 0.9915966386554622\n", + "Epoch: 8 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 1.6454224521294236 0.9915966386554622 0.5068990588188171 0.9010989010989011 6.119324803352356 0.39591836734693875 140.63802099227905\n", + "AL epoch: 8\n", + "val, test error% for class 0 : 6.25 68.57\n", + "val, test error% for class 1 : 0.0 42.86\n", + "val, test error% for class 2 : 25.0 82.86\n", + "val, test error% for class 3 : 11.11 74.29\n", + "val, test error% for class 4 : 20.0 57.14\n", + "val, test error% for class 5 : 6.67 40.0\n", + "val, test error% for class 6 : 10.0 57.14\n", + "10 / 70 idc points.\n", + "selEpoch: 8, Selection Ended at: 2022-01-30 20:44:23.274715\n", + "10 5511 5581\n", + "After augmentation, size of train_set: 665 lake set: 5511 val set: 101\n", + "Selection Epoch 8 Training epoch [ 1 ] Training Acc: 0.706766917293233\n", + "Selection Epoch 8 Training epoch [ 2 ] Training Acc: 0.7563909774436091\n", + "Selection Epoch 8 Training epoch [ 3 ] Training Acc: 0.7759398496240602\n", + "Selection Epoch 8 Training epoch [ 4 ] Training Acc: 0.806015037593985\n", + "Selection Epoch 8 Training epoch [ 5 ] Training Acc: 0.7924812030075188\n", + "Selection Epoch 8 Training epoch [ 6 ] Training Acc: 0.8120300751879699\n", + "Selection Epoch 8 Training epoch [ 7 ] Training Acc: 0.8285714285714286\n", + "Selection Epoch 8 Training epoch [ 8 ] Training Acc: 0.8240601503759398\n", + "Selection Epoch 8 Training epoch [ 9 ] Training Acc: 0.7669172932330827\n", + "Selection Epoch 8 Training epoch [ 10 ] Training Acc: 0.7909774436090226\n", + "Selection Epoch 8 Training epoch [ 11 ] Training Acc: 0.8105263157894737\n", + "Selection Epoch 8 Training epoch [ 12 ] Training Acc: 0.8315789473684211\n", + "Selection Epoch 8 Training epoch [ 13 ] Training Acc: 0.8511278195488722\n", + "Selection Epoch 8 Training epoch [ 14 ] Training Acc: 
0.8360902255639098\n", + "Selection Epoch 8 Training epoch [ 15 ] Training Acc: 0.8180451127819549\n", + "Selection Epoch 8 Training epoch [ 16 ] Training Acc: 0.8406015037593985\n", + "Selection Epoch 8 Training epoch [ 17 ] Training Acc: 0.8616541353383459\n", + "Selection Epoch 8 Training epoch [ 18 ] Training Acc: 0.8796992481203008\n", + "Selection Epoch 8 Training epoch [ 19 ] Training Acc: 0.8827067669172932\n", + "Selection Epoch 8 Training epoch [ 20 ] Training Acc: 0.8781954887218045\n", + "Selection Epoch 8 Training epoch [ 21 ] Training Acc: 0.8781954887218045\n", + "Selection Epoch 8 Training epoch [ 22 ] Training Acc: 0.8736842105263158\n", + "Selection Epoch 8 Training epoch [ 23 ] Training Acc: 0.8992481203007519\n", + "Selection Epoch 8 Training epoch [ 24 ] Training Acc: 0.9052631578947369\n", + "Selection Epoch 8 Training epoch [ 25 ] Training Acc: 0.8992481203007519\n", + "Selection Epoch 8 Training epoch [ 26 ] Training Acc: 0.9112781954887218\n", + "Selection Epoch 8 Training epoch [ 27 ] Training Acc: 0.9007518796992481\n", + "Selection Epoch 8 Training epoch [ 28 ] Training Acc: 0.9383458646616541\n", + "Selection Epoch 8 Training epoch [ 29 ] Training Acc: 0.956390977443609\n", + "Selection Epoch 8 Training epoch [ 30 ] Training Acc: 0.9624060150375939\n", + "Selection Epoch 8 Training epoch [ 31 ] Training Acc: 0.9548872180451128\n", + "Selection Epoch 8 Training epoch [ 32 ] Training Acc: 0.9759398496240601\n", + "Selection Epoch 8 Training epoch [ 33 ] Training Acc: 0.968421052631579\n", + "Selection Epoch 8 Training epoch [ 34 ] Training Acc: 0.9533834586466166\n", + "Selection Epoch 8 Training epoch [ 35 ] Training Acc: 0.9729323308270676\n", + "Selection Epoch 8 Training epoch [ 36 ] Training Acc: 0.9413533834586466\n", + "Selection Epoch 8 Training epoch [ 37 ] Training Acc: 0.9699248120300752\n", + "Selection Epoch 8 Training epoch [ 38 ] Training Acc: 0.9819548872180451\n", + "Selection Epoch 8 Training epoch [ 39 ] Training Acc: 0.9654135338345865\n", + "Selection Epoch 8 Training epoch [ 40 ] Training Acc: 0.9969924812030075\n", + "Epoch: 9 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.748473773231126 0.9969924812030075 0.29504939913749695 0.9306930693069307 6.987715721130371 0.42857142857142855 192.7541892528534\n", + "AL epoch: 9\n", + "val, test error% for class 0 : 5.88 62.86\n", + "val, test error% for class 1 : 5.0 31.43\n", + "val, test error% for class 2 : 12.5 71.43\n", + "val, test error% for class 3 : 0.0 65.71\n", + "val, test error% for class 4 : 20.0 80.0\n", + "val, test error% for class 5 : 0.0 42.86\n", + "val, test error% for class 6 : 9.09 45.71\n", + "11 / 70 idc points.\n", + "selEpoch: 9, Selection Ended at: 2022-01-30 20:47:36.352776\n", + "11 5441 5511\n", + "After augmentation, size of train_set: 735 lake set: 5441 val set: 112\n", + "Selection Epoch 9 Training epoch [ 1 ] Training Acc: 0.7278911564625851\n", + "Selection Epoch 9 Training epoch [ 2 ] Training Acc: 0.7591836734693878\n", + "Selection Epoch 9 Training epoch [ 3 ] Training Acc: 0.7795918367346939\n", + "Selection Epoch 9 Training epoch [ 4 ] Training Acc: 0.7863945578231293\n", + "Selection Epoch 9 Training epoch [ 5 ] Training Acc: 0.8108843537414966\n", + "Selection Epoch 9 Training epoch [ 6 ] Training Acc: 0.8108843537414966\n", + "Selection Epoch 9 Training epoch [ 7 ] Training Acc: 0.8231292517006803\n", + "Selection Epoch 9 Training epoch [ 8 ] Training Acc: 0.8231292517006803\n", + "Selection Epoch 9 Training epoch [ 9 ] Training Acc: 
0.8163265306122449\n", + "Selection Epoch 9 Training epoch [ 10 ] Training Acc: 0.8136054421768707\n", + "Selection Epoch 9 Training epoch [ 11 ] Training Acc: 0.8462585034013606\n", + "Selection Epoch 9 Training epoch [ 12 ] Training Acc: 0.8340136054421768\n", + "Selection Epoch 9 Training epoch [ 13 ] Training Acc: 0.8721088435374149\n", + "Selection Epoch 9 Training epoch [ 14 ] Training Acc: 0.8598639455782313\n", + "Selection Epoch 9 Training epoch [ 15 ] Training Acc: 0.8816326530612245\n", + "Selection Epoch 9 Training epoch [ 16 ] Training Acc: 0.8653061224489796\n", + "Selection Epoch 9 Training epoch [ 17 ] Training Acc: 0.9006802721088435\n", + "Selection Epoch 9 Training epoch [ 18 ] Training Acc: 0.8938775510204081\n", + "Selection Epoch 9 Training epoch [ 19 ] Training Acc: 0.8204081632653061\n", + "Selection Epoch 9 Training epoch [ 20 ] Training Acc: 0.8857142857142857\n", + "Selection Epoch 9 Training epoch [ 21 ] Training Acc: 0.9047619047619048\n", + "Selection Epoch 9 Training epoch [ 22 ] Training Acc: 0.9306122448979591\n", + "Selection Epoch 9 Training epoch [ 23 ] Training Acc: 0.9360544217687075\n", + "Selection Epoch 9 Training epoch [ 24 ] Training Acc: 0.9333333333333333\n", + "Selection Epoch 9 Training epoch [ 25 ] Training Acc: 0.9238095238095239\n", + "Selection Epoch 9 Training epoch [ 26 ] Training Acc: 0.9306122448979591\n", + "Selection Epoch 9 Training epoch [ 27 ] Training Acc: 0.9387755102040817\n", + "Selection Epoch 9 Training epoch [ 28 ] Training Acc: 0.9591836734693877\n", + "Selection Epoch 9 Training epoch [ 29 ] Training Acc: 0.9455782312925171\n", + "Selection Epoch 9 Training epoch [ 30 ] Training Acc: 0.9823129251700681\n", + "Selection Epoch 9 Training epoch [ 31 ] Training Acc: 0.9482993197278912\n", + "Selection Epoch 9 Training epoch [ 32 ] Training Acc: 0.9768707482993197\n", + "Selection Epoch 9 Training epoch [ 33 ] Training Acc: 0.9945578231292517\n", + "Epoch: 10 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 1.165637472178787 0.9945578231292517 0.28866174817085266 0.9464285714285714 6.673982858657837 0.4489795918367347 176.31855177879333\n", + "val, test error% for class 0 : 4.76 40.0\n", + "val, test error% for class 1 : 9.52 71.43\n", + "val, test error% for class 2 : 0.0 77.14\n", + "val, test error% for class 3 : 0.0 45.71\n", + "val, test error% for class 4 : 10.0 68.57\n", + "val, test error% for class 5 : 5.26 48.57\n", + "val, test error% for class 6 : 7.69 34.29\n", + "[[57.14, 68.57, 94.29, 62.86, 40.0, 71.43, 65.71, 65.71428571428571], [71.43, 71.43, 88.57, 45.71, 68.57, 62.86, 88.57, 71.02], [62.86, 74.29, 82.86, 68.57, 74.29, 57.14, 40.0, 65.71571428571428], [57.14, 77.14, 77.14, 71.43, 62.86, 22.86, 57.14, 60.81571428571429], [82.86, 34.29, 88.57, 62.86, 65.71, 34.29, 51.43, 60.00142857142857], [68.57, 54.29, 74.29, 74.29, 77.14, 48.57, 45.71, 63.26571428571428], [65.71, 40.0, 85.71, 74.29, 68.57, 31.43, 37.14, 57.55], [68.57, 42.86, 82.86, 74.29, 57.14, 40.0, 57.14, 60.40857142857142], [62.86, 31.43, 71.43, 65.71, 80.0, 42.86, 45.71, 57.142857142857146], [40.0, 71.43, 77.14, 45.71, 68.57, 48.57, 34.29, 55.10142857142858]]\n", + "\n", + "\n" + ] + } + ], + "source": [ + "train_model_al(datkbuildPath, exePath, num_epochs, data_name, datadir, feature, model_name, budget, split_cfg, learning_rate, run, device, computeClassErrorLog, \"random\",'random')\n" + ] + } + ], + "metadata": { + "interpreter": { + "hash": "916dbcbb3f70747c44a77c7bcd40155683ae19c65e1c03b4aa3499c5328201f1" + }, + "kernelspec": { + 
"display_name": "Python 3.8.10 64-bit", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/tutorials/trust_ood_demo_organmnist.ipynb b/tutorials/trust_ood_demo_organmnist.ipynb new file mode 100644 index 0000000..eb5a272 --- /dev/null +++ b/tutorials/trust_ood_demo_organmnist.ipynb @@ -0,0 +1,1796 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Avoiding Out-of-Distribution Incorrectly Acquired Images" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Libraries" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import h5py\n", + "import time\n", + "import random\n", + "import datetime\n", + "import copy\n", + "import numpy as np\n", + "import os\n", + "import csv\n", + "import json\n", + "import subprocess\n", + "import sys\n", + "import PIL.Image as Image\n", + "import torch\n", + "import torch.backends.cudnn as cudnn\n", + "import torch.nn as nn\n", + "import torch.nn.functional as F\n", + "import torch.optim as optim\n", + "import torchvision\n", + "import torchvision.models as models\n", + "from torchvision.datasets import cifar\n", + "from matplotlib import pyplot as plt\n", + "sys.path.append('/mnt/data2/akshit/distil/')\n", + "sys.path.append('/mnt/data2/akshit/trust/')\n", + "from distil.utils.models.resnet import ResNet18\n", + "from trust.utils.organmnist import load_dataset_custom as load_dataset_custom\n", + "from torch.utils.data import Dataset, Subset, ConcatDataset, DataLoader\n", + "from torch.utils.data import Subset\n", + "from torch.autograd import Variable\n", + "import tqdm\n", + "from math import floor\n", + "from sklearn.metrics.pairwise import cosine_similarity, pairwise_distances\n", + "from distil.active_learning_strategies.scmi import SCMI\n", + "from distil.active_learning_strategies.smi import SMI\n", + "from distil.active_learning_strategies.badge import BADGE\n", + "from distil.active_learning_strategies.entropy_sampling import EntropySampling\n", + "from distil.active_learning_strategies.gradmatch_active import GradMatchActive\n", + "from distil.active_learning_strategies.glister import GLISTER\n", + "from trust.strategies.random_sampling import RandomSampling\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Defining Parameters" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "seed=42\n", + "torch.manual_seed(seed)\n", + "np.random.seed(seed)\n", + "random.seed(seed)\n", + "from distil.utils.utils import *\n", + "\n", + "feature = \"ood\"\n", + "run=\"fkna_3\"\n", + "datadir = 'data/'\n", + "data_name = 'organmnist'\n", + "model_name = 'ResNet18'\n", + "num_rep = 10\n", + "learning_rate = 0.01\n", + "num_runs = 1 # number of random runs\n", + "computeClassErrorLog = True\n", + "\n", + "device_id = 0\n", + "magnification = 1\n", + "device = \"cuda:\"+str(device_id) if torch.cuda.is_available() else \"cpu\"\n", + "datkbuildPath = \"./datk/build\"\n", + "exePath = \"cifarSubsetSelector\"\n", + "print(\"Using Device:\", device)\n", + "doublePrecision = True\n", + "linearLayer = True\n", + "miscls = True\n", 
+ "# handler = DataHandler_CIFAR10\n", + "augTarget = True\n", + "embedding_type = \"gradients\"\n", + "\n", + "num_cls=11\n", + "budget=30\n", + "num_epochs = int(10)\n", + "split_cfg = {'num_cls_idc':11, 'per_idc_train':10, 'per_idc_val':10, 'per_idc_lake':50, 'per_ood_train':0, 'per_ood_val':0, 'per_ood_lake':8000}\n", + "\n", + "initModelPath = \"/mnt/data2/akshit/Organ/weights/\" + data_name + \"_\" + feature + \"_\" + model_name + \"_\" + str(learning_rate) + \"_\" + str(split_cfg[\"per_idc_train\"]) + \"_\" + str(split_cfg[\"per_idc_val\"]) + \"_\" + str(split_cfg[\"num_cls_idc\"])\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Helper Functions" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def model_eval_loss(data_loader, model, criterion):\n", + " total_loss = 0\n", + " with torch.no_grad():\n", + " for batch_idx, (inputs, targets) in enumerate(data_loader):\n", + " inputs, targets = inputs.to(device), targets.to(device, non_blocking=True)\n", + " outputs = model(inputs)\n", + " loss = criterion(outputs, targets)\n", + " total_loss += loss.item()\n", + " return total_loss\n", + "\n", + "def init_weights(m):\n", + "# torch.manual_seed(35)\n", + " if isinstance(m, nn.Conv2d):\n", + " torch.nn.init.xavier_uniform_(m.weight)\n", + " elif isinstance(m, nn.Linear):\n", + " torch.nn.init.xavier_uniform_(m.weight)\n", + " m.bias.data.fill_(0.01)\n", + "\n", + "def weight_reset(m):\n", + " if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n", + " m.reset_parameters()\n", + " \n", + "def create_model(name, num_cls, device, embedding_type):\n", + " if name == 'ResNet18':\n", + " if embedding_type == \"gradients\":\n", + " model = ResNet18(num_cls)\n", + " else:\n", + " model = models.resnet18()\n", + " elif name == 'MnistNet':\n", + " model = MnistNet()\n", + " elif name == 'ResNet164':\n", + " model = ResNet164(num_cls)\n", + " model.apply(init_weights)\n", + " model = model.to(device)\n", + " return model\n", + "\n", + "def loss_function():\n", + " criterion = nn.CrossEntropyLoss()\n", + " criterion_nored = nn.CrossEntropyLoss(reduction='none')\n", + " return criterion, criterion_nored\n", + "\n", + "def optimizer_with_scheduler(model, num_epochs, learning_rate, m=0.9, wd=5e-4):\n", + " optimizer = optim.SGD(model.parameters(), lr=learning_rate,\n", + " momentum=m, weight_decay=wd)\n", + " scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)\n", + " return optimizer, scheduler\n", + "\n", + "def optimizer_without_scheduler(model, learning_rate, m=0.9, wd=5e-4):\n", + "# optimizer = optim.Adam(model.parameters(),weight_decay=wd)\n", + " optimizer = optim.SGD(model.parameters(), lr=learning_rate,\n", + " momentum=m, weight_decay=wd)\n", + " return optimizer\n", + "\n", + "def generate_cumulative_timing(mod_timing):\n", + " tmp = 0\n", + " mod_cum_timing = np.zeros(len(mod_timing))\n", + " for i in range(len(mod_timing)):\n", + " tmp += mod_timing[i]\n", + " mod_cum_timing[i] = tmp\n", + " return mod_cum_timing/3600\n", + "\n", + "def find_err_per_class(test_set, val_set, final_val_classifications, final_val_predictions, final_tst_classifications, \n", + " final_tst_predictions, saveDir, prefix):\n", + " #find queries from the validation set that are erroneous\n", + "# saveDir = os.path.join(saveDir, prefix)\n", + "# if(not(os.path.exists(saveDir))):\n", + "# os.mkdir(saveDir)\n", + " val_err_idx = 
list(np.where(np.array(final_val_classifications) == False)[0])\n", + " tst_err_idx = list(np.where(np.array(final_tst_classifications) == False)[0])\n", + " val_class_err_idxs = []\n", + " tst_err_log = []\n", + " val_err_log = []\n", + " for i in range(num_cls):\n", + " if(feature==\"ood\"): tst_class_idxs = list(torch.where(torch.Tensor(test_set.targets.float()) == i)[0].cpu().numpy())\n", + " if(feature==\"classimb\"): tst_class_idxs = list(torch.where(torch.Tensor(test_set.targets) == i)[0].cpu().numpy())\n", + " val_class_idxs = list(torch.where(torch.Tensor(val_set.targets.float()) == i)[0].cpu().numpy())\n", + " #err classifications per class\n", + " val_err_class_idx = set(val_err_idx).intersection(set(val_class_idxs))\n", + " tst_err_class_idx = set(tst_err_idx).intersection(set(tst_class_idxs))\n", + " if(len(val_class_idxs)>0):\n", + " val_error_perc = round((len(val_err_class_idx)/len(val_class_idxs))*100,2)\n", + " else:\n", + " val_error_perc = 0\n", + " \n", + " tst_error_perc = round((len(tst_err_class_idx)/len(tst_class_idxs))*100,2)\n", + " print(\"val, test error% for class \", i, \" : \", val_error_perc, tst_error_perc)\n", + " val_class_err_idxs.append(val_err_class_idx)\n", + " tst_err_log.append(tst_error_perc)\n", + " val_err_log.append(val_error_perc)\n", + " tst_err_log.append(sum(tst_err_log)/len(tst_err_log))\n", + " val_err_log.append(sum(val_err_log)/len(val_err_log))\n", + " return tst_err_log, val_err_log, val_class_err_idxs\n", + "\n", + "def aug_train_subset(train_set, lake_set, true_lake_set, subset, lake_subset_idxs, budget, augrandom=False):\n", + " all_lake_idx = list(range(len(lake_set)))\n", + " if(not(len(subset)==budget) and augrandom):\n", + " print(\"Budget not filled, adding \", str(int(budget) - len(subset)), \" randomly.\")\n", + " remain_budget = int(budget) - len(subset)\n", + " remain_lake_idx = list(set(all_lake_idx) - set(subset))\n", + " random_subset_idx = list(np.random.choice(np.array(remain_lake_idx), size=int(remain_budget), replace=False))\n", + " subset += random_subset_idx\n", + " lake_ss = SubsetWithTargets(true_lake_set, subset, torch.Tensor(true_lake_set.targets.float())[subset])\n", + " if(feature==\"ood\"): \n", + " ood_lake_idx = list(set(lake_subset_idxs)-set(subset))\n", + " private_set = SubsetWithTargets(true_lake_set, ood_lake_idx, torch.Tensor(np.array([split_cfg['num_cls_idc']]*len(ood_lake_idx))).float())\n", + " remain_lake_idx = list(set(all_lake_idx) - set(lake_subset_idxs))\n", + " remain_lake_set = SubsetWithTargets(lake_set, remain_lake_idx, torch.Tensor(lake_set.targets.float())[remain_lake_idx])\n", + " remain_true_lake_set = SubsetWithTargets(true_lake_set, remain_lake_idx, torch.Tensor(true_lake_set.targets.float())[remain_lake_idx])\n", + " print(len(lake_ss),len(remain_lake_set),len(lake_set))\n", + " if(feature!=\"ood\"): assert((len(lake_ss)+len(remain_lake_set))==len(lake_set))\n", + " aug_train_set = torch.utils.data.ConcatDataset([train_set, lake_ss])\n", + " if(feature==\"ood\"): \n", + " return aug_train_set, remain_lake_set, remain_true_lake_set, private_set, lake_ss\n", + " else:\n", + " return aug_train_set, remain_lake_set, remain_true_lake_set, lake_ss\n", + " \n", + "def getQuerySet(val_set, val_class_err_idxs, imb_cls_idx, miscls):\n", + " miscls_idx = []\n", + " if(miscls):\n", + " for i in range(len(val_class_err_idxs)):\n", + " if i in imb_cls_idx:\n", + " miscls_idx += val_class_err_idxs[i]\n", + " print(\"total misclassified ex from imb classes: \", len(miscls_idx))\n", + " else:\n", 
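+ "        # miscls is False: fall back to using every validation sample belonging to the target classes as queries\n",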
+ " for i in imb_cls_idx:\n", + " imb_cls_samples = list(torch.where(torch.Tensor(val_set.targets.float()) == i)[0].cpu().numpy())\n", + " miscls_idx += imb_cls_samples\n", + " print(\"total samples from imb classes as targets: \", len(miscls_idx))\n", + " return Subset(val_set, miscls_idx)\n", + "\n", + "def getPrivateSet(lake_set, subset, private_set):\n", + " #augment prev private set and current subset\n", + " new_private_set = SubsetWithTargets(lake_set, subset, torch.Tensor(lake_set.targets.float())[subset])\n", + "# new_private_set = Subset(lake_set, subset)\n", + " total_private_set = torch.utils.data.ConcatDataset([private_set, new_private_set])\n", + " return total_private_set\n", + "\n", + "def remove_ood_points(lake_set, subset, idc_idx):\n", + " idx_subset = []\n", + " subset_cls = torch.Tensor(lake_set.targets.float())[subset]\n", + " for i in idc_idx:\n", + " idc_subset_idx = list(torch.where(subset_cls == i)[0].cpu().numpy())\n", + " idx_subset += list(np.array(subset)[idc_subset_idx])\n", + " print(len(idx_subset),\"/\",len(subset), \" idc points.\")\n", + " return idx_subset\n", + "\n", + "def getPerClassSel(lake_set, subset, num_cls):\n", + " perClsSel = []\n", + " subset_cls = torch.Tensor(lake_set.targets.float())[subset]\n", + " for i in range(num_cls):\n", + " cls_subset_idx = list(torch.where(subset_cls == i)[0].cpu().numpy())\n", + " perClsSel.append(len(cls_subset_idx))\n", + " return perClsSel\n", + "\n", + "\n", + "def train_model_al(datkbuildPath, exePath, num_epochs, dataset_name, datadir, feature, model_name, budget, split_cfg, learning_rate, run,\n", + " device, computeErrorLog, strategy=\"SIM\", sf=\"\"):\n", + "# torch.manual_seed(42)\n", + "# np.random.seed(42)\n", + " print(strategy, sf)\n", + " #load the dataset based on type of feature\n", + " train_set, val_set, test_set, lake_set, sel_cls_idx, num_cls = load_dataset_custom(datadir, feature, split_cfg, False, True)\n", + " print(\"selected classes are: \", sel_cls_idx)\n", + "\n", + " if(feature==\"ood\"): num_cls+=1 #Add one class for OOD class\n", + " N = len(train_set)\n", + " trn_batch_size = 20\n", + " val_batch_size = 10\n", + " tst_batch_size = 100\n", + "\n", + " trainloader = torch.utils.data.DataLoader(train_set, batch_size=trn_batch_size,\n", + " shuffle=True, pin_memory=True)\n", + "\n", + " valloader = torch.utils.data.DataLoader(val_set, batch_size=val_batch_size, \n", + " shuffle=False, pin_memory=True)\n", + "\n", + " tstloader = torch.utils.data.DataLoader(test_set, batch_size=tst_batch_size,\n", + " shuffle=False, pin_memory=True)\n", + " \n", + " lakeloader = torch.utils.data.DataLoader(lake_set, batch_size=tst_batch_size,\n", + " shuffle=False, pin_memory=True)\n", + " true_lake_set = copy.deepcopy(lake_set)\n", + " # Budget for subset selection\n", + " bud = budget\n", + " \n", + " # Variables to store accuracies\n", + " fulltrn_losses = np.zeros(num_epochs)\n", + " val_losses = np.zeros(num_epochs)\n", + " tst_losses = np.zeros(num_epochs)\n", + " timing = np.zeros(num_epochs)\n", + " val_acc = np.zeros(num_epochs)\n", + " full_trn_acc = np.zeros(num_epochs)\n", + " tst_acc = np.zeros(num_epochs)\n", + " final_tst_predictions = []\n", + " final_tst_classifications = []\n", + " best_val_acc = -1\n", + " csvlog = []\n", + " val_csvlog = []\n", + " # Results logging file\n", + " print_every = 3\n", + "# all_logs_dir = '/content/drive/MyDrive/research/tdss/SMI_active_learning_results_woVal/' + dataset_name + '/' + feature + '/'+ sf + '/' + str(bud) + '/' + str(run)\n", + " 
all_logs_dir = './SMI_active_learning_results/' + dataset_name + '/' + feature + '/'+ sf + '/' + str(bud) + '/' + str(run)\n", + " print(\"Saving results to: \", all_logs_dir)\n", + " subprocess.run([\"mkdir\", \"-p\", all_logs_dir])\n", + " exp_name = dataset_name + \"_\" + feature + \"_\" + strategy + \"_\" + str(len(sel_cls_idx)) +\"_\" + sf + '_budget:' + str(bud) + '_epochs:' + str(num_epochs) + '_linear:' + str(linearLayer) + '_runs' + str(run)\n", + " print(exp_name)\n", + " res_dict = {\"dataset\":data_name, \n", + " \"feature\":feature, \n", + " \"sel_func\":sf,\n", + " \"sel_budget\":budget, \n", + " \"num_selections\":num_epochs, \n", + " \"model\":model_name, \n", + " \"learning_rate\":learning_rate, \n", + " \"setting\":split_cfg, \n", + " \"all_class_acc\":None, \n", + " \"test_acc\":[],\n", + " \"sel_per_cls\":[], \n", + " \"sel_cls_idx\":sel_cls_idx.tolist()}\n", + " # Model Creation\n", + " model = create_model(model_name, num_cls, device, embedding_type)\n", + " model1 = create_model(model_name, num_cls, device, embedding_type)\n", + " \n", + " # Loss Functions\n", + " criterion, criterion_nored = loss_function()\n", + " \n", + " strategy_args = {'batch_size': 20, 'device':'cuda', 'num_partitions':1, 'wrapped_strategy_class': None, \n", + " 'embedding_type':'gradients', 'keep_embedding':False, 'budget':'budget'}\n", + " unlabeled_lake_set = LabeledToUnlabeledDataset(lake_set)\n", + " if(strategy == \"AL\"):\n", + " if(sf==\"badge\"):\n", + " strategy_sel = BADGE(train_set, unlabeled_lake_set, model, num_cls, strategy_args)\n", + " elif(sf==\"us\"):\n", + " strategy_sel = EntropySampling(train_set, unlabeled_lake_set, model, num_cls, strategy_args)\n", + " elif(sf==\"glister\" or sf==\"glister-tss\"):\n", + " strategy_sel = GLISTER(train_set, unlabeled_lake_set, model, num_cls, strategy_args, val_set, typeOf='rand', lam=0.1)\n", + " elif(sf==\"gradmatch-tss\"):\n", + " strategy_sel = GradMatchActive(train_set, unlabeled_lake_set, model, num_cls, strategy_args, val_set)\n", + " elif(sf==\"coreset\"):\n", + " strategy_sel = CoreSet(train_set, unlabeled_lake_set, model, num_cls, strategy_args)\n", + " elif(sf==\"leastconf\"):\n", + " strategy_sel = LeastConfidence(train_set, unlabeled_lake_set, model, num_cls, strategy_args)\n", + " elif(sf==\"margin\"):\n", + " strategy_sel = MarginSampling(train_set, unlabeled_lake_set, model, num_cls, strategy_args)\n", + " if(strategy == \"SIM\"):\n", + " if(sf.endswith(\"mic\")):\n", + " strategy_args['scmi_function'] = sf.split(\"mic\")[0] + \"cmi\"\n", + " strategy_sel = SCMI(train_set, unlabeled_lake_set, val_set, val_set, model, num_cls, strategy_args)\n", + " elif(sf.endswith(\"mi\")):\n", + " strategy_args['smi_function'] = sf\n", + " strategy_sel = SMI(train_set, unlabeled_lake_set, val_set, model, num_cls, strategy_args)\n", + " if(strategy == \"random\"):\n", + " strategy_sel = RandomSampling(train_set, unlabeled_lake_set, model, num_cls, strategy_args)\n", + " \n", + " strategy_args['verbose'] = False\n", + " strategy_args['optimizer'] = \"LazyGreedy\"\n", + "\n", + " # Getting the optimizer and scheduler\n", + "# optimizer, scheduler = optimizer_with_scheduler(model, num_epochs, learning_rate)\n", + " optimizer = optimizer_without_scheduler(model, learning_rate)\n", + " private_set = []\n", + "\n", + " for i in range(num_epochs):\n", + " print(\"AL epoch: \", i)\n", + " tst_loss = 0\n", + " tst_correct = 0\n", + " tst_total = 0\n", + " val_loss = 0\n", + " val_correct = 0\n", + " val_total = 0\n", + " \n", + " if(i==0):\n", + 
" print(\"initial training epoch\")\n", + " if(os.path.exists(initModelPath)):\n", + " model.load_state_dict(torch.load(initModelPath, map_location=device))\n", + " print(\"Init model loaded from disk, skipping init training: \", initModelPath)\n", + " model.eval()\n", + " with torch.no_grad():\n", + " final_val_predictions = []\n", + " final_val_classifications = []\n", + " for batch_idx, (inputs, targets) in enumerate(valloader):\n", + " inputs, targets = inputs.to(device), targets.to(device, non_blocking=True)\n", + " outputs = model(inputs)\n", + " loss = criterion(outputs, targets)\n", + " val_loss += loss.item()\n", + " if(feature==\"ood\"): \n", + " _, predicted = outputs[...,:-1].max(1)\n", + " else:\n", + " _, predicted = outputs.max(1)\n", + " val_total += targets.size(0)\n", + " val_correct += predicted.eq(targets).sum().item()\n", + " final_val_predictions += list(predicted.cpu().numpy())\n", + " final_val_classifications += list(predicted.eq(targets).cpu().numpy())\n", + " \n", + " final_tst_predictions = []\n", + " final_tst_classifications = []\n", + " for batch_idx, (inputs, targets) in enumerate(tstloader):\n", + " inputs, targets = inputs.to(device), targets.to(device, non_blocking=True)\n", + " outputs = model(inputs)\n", + " loss = criterion(outputs, targets)\n", + " tst_loss += loss.item()\n", + " if(feature==\"ood\"): \n", + " _, predicted = outputs[...,:-1].max(1)\n", + " else:\n", + " _, predicted = outputs.max(1)\n", + " tst_total += targets.size(0)\n", + " tst_correct += predicted.eq(targets).sum().item()\n", + " final_tst_predictions += list(predicted.cpu().numpy())\n", + " final_tst_classifications += list(predicted.eq(targets).cpu().numpy()) \n", + " best_val_acc = (val_correct/val_total)\n", + " val_acc[i] = val_correct / val_total\n", + " tst_acc[i] = tst_correct / tst_total\n", + " val_losses[i] = val_loss\n", + " tst_losses[i] = tst_loss\n", + " res_dict[\"test_acc\"].append(tst_acc[i])\n", + " continue\n", + " else:\n", + " unlabeled_lake_set = LabeledToUnlabeledDataset(lake_set)\n", + " strategy_sel.update_data(train_set, unlabeled_lake_set)\n", + " #compute the error log before every selection\n", + " if(computeErrorLog):\n", + " tst_err_log, val_err_log, val_class_err_idxs = find_err_per_class(test_set, val_set, final_val_classifications, final_val_predictions, final_tst_classifications, final_tst_predictions, all_logs_dir, sf+\"_\"+str(bud))\n", + " csvlog.append(tst_err_log)\n", + " val_csvlog.append(val_err_log)\n", + " ####SIM####\n", + " if(strategy==\"SIM\" or strategy==\"SF\"):\n", + " if(sf.endswith(\"mi\")):\n", + " if(feature==\"classimb\"):\n", + " #make a dataloader for the misclassifications - only for experiments with targets\n", + " miscls_set = getQuerySet(val_set, val_class_err_idxs, sel_cls_idx, miscls)\n", + " strategy_sel.update_queries(miscls_set)\n", + " elif(sf.endswith(\"mic\")): #configured for the OOD setting\n", + " print(\"val set targets: \", val_set.targets)\n", + " strategy_sel.update_queries(val_set) #In-dist samples are in Val \n", + " if(len(private_set)!=0):\n", + " print(\"private set targets: \", private_set.targets)\n", + " strategy_sel.update_privates(private_set)\n", + "\n", + " ###AL###\n", + " elif(strategy==\"AL\"):\n", + " if(sf==\"glister-tss\" or sf==\"gradmatch-tss\"):\n", + " miscls_set = getQuerySet(val_set, val_class_err_idxs, sel_cls_idx, miscls)\n", + " strategy_sel.update_queries(miscls_set)\n", + " print(\"reinit AL with targeted miscls samples\")\n", + " \n", + " elif(strategy==\"random\"):\n", + " 
subset = np.random.choice(np.array(list(range(len(lake_set)))), size=budget, replace=False)\n", + " \n", + " strategy_sel.update_model(model)\n", + " subset = strategy_sel.select(budget)\n", + "# print(\"True targets of subset: \", torch.Tensor(true_lake_set.targets.float())[subset])\n", + "# hypothesized_targets = strategy_sel.predict(unlabeled_lake_set)\n", + "# print(\"Hypothesized targets of subset: \", hypothesized_targets)\n", + " lake_subset_idxs = subset #indices wrt the lake that need to be removed from the lake\n", + " if(feature==\"ood\"): #remove ood points from the subset\n", + " subset = remove_ood_points(true_lake_set, subset, sel_cls_idx)\n", + " \n", + " print(\"selEpoch: %d, Selection Ended at:\" % (i), str(datetime.datetime.now()))\n", + " perClsSel = getPerClassSel(true_lake_set, lake_subset_idxs, num_cls)\n", + " res_dict['sel_per_cls'].append(perClsSel)\n", + " \n", + " #augment the train_set with selected indices from the lake\n", + " if(feature==\"classimb\"):\n", + " train_set, lake_set, true_lake_set, add_val_set = aug_train_subset(train_set, lake_set, true_lake_set, subset, lake_subset_idxs, budget, True) #aug train with random if budget is not filled\n", + " if(augTarget): val_set = ConcatWithTargets(val_set, add_val_set)\n", + " elif(feature==\"ood\"):\n", + " train_set, lake_set, true_lake_set, new_private_set, add_val_set = aug_train_subset(train_set, lake_set, true_lake_set, subset, lake_subset_idxs, budget)\n", + " train_set = torch.utils.data.ConcatDataset([train_set, new_private_set]) #Add the OOD samples with a common OOD class\n", + " val_set = ConcatWithTargets(val_set, add_val_set)\n", + " if(len(private_set)!=0):\n", + " private_set = ConcatWithTargets(private_set, new_private_set)\n", + " else:\n", + " private_set = new_private_set\n", + " else:\n", + " train_set, lake_set, true_lake_set = aug_train_subset(train_set, lake_set, true_lake_set, subset, lake_subset_idxs, budget)\n", + " print(\"After augmentation, size of train_set: \", len(train_set), \" lake set: \", len(lake_set), \" val set: \", len(val_set))\n", + " \n", + "# Reinit train and lake loaders with new splits and reinit the model\n", + " trainloader = torch.utils.data.DataLoader(train_set, batch_size=trn_batch_size, shuffle=True, pin_memory=True)\n", + " lakeloader = torch.utils.data.DataLoader(lake_set, batch_size=tst_batch_size, shuffle=False, pin_memory=True)\n", + "\n", + " if(augTarget):\n", + " valloader = torch.utils.data.DataLoader(val_set, batch_size=len(val_set), shuffle=False, pin_memory=True)\n", + " model = create_model(model_name, num_cls, device, strategy_args['embedding_type'])\n", + " optimizer = optimizer_without_scheduler(model, learning_rate)\n", + " \n", + " #Start training\n", + " start_time = time.time()\n", + " num_ep=1\n", + " while(full_trn_acc[i]<0.99 and num_ep<300):\n", + " model.train()\n", + " for batch_idx, (inputs, targets) in enumerate(trainloader):\n", + " inputs, targets = inputs.to(device), targets.to(device, non_blocking=True)\n", + " # Wrap tensors in Variable (a no-op in modern PyTorch, where tensors track gradients directly).\n", + " inputs, targets = Variable(inputs), Variable(targets)\n", + " # This will zero out the gradients for this batch.\n", + " optimizer.zero_grad()\n", + " outputs = model(inputs)\n", + " loss = criterion(outputs, targets)\n", + " loss.backward()\n", + " optimizer.step()\n", + "# scheduler.step()\n", + " \n", + " full_trn_loss = 0\n", + " full_trn_correct = 0\n", + " full_trn_total = 0\n", + " model.eval()\n", + " with torch.no_grad():\n", + " for batch_idx, (inputs, targets) 
in enumerate(trainloader):\n", + " inputs, targets = inputs.to(device), targets.to(device, non_blocking=True)\n", + " outputs = model(inputs)\n", + " loss = criterion(outputs, targets)\n", + " full_trn_loss += loss.item()\n", + " _, predicted = outputs.max(1)\n", + " full_trn_total += targets.size(0)\n", + " full_trn_correct += predicted.eq(targets).sum().item()\n", + " full_trn_acc[i] = full_trn_correct / full_trn_total\n", + " print(\"Selection Epoch \", i, \" Training epoch [\" , num_ep, \"]\" , \" Training Acc: \", full_trn_acc[i], end=\"\\r\")\n", + " num_ep+=1\n", + " timing[i] = time.time() - start_time\n", + " with torch.no_grad():\n", + " final_val_predictions = []\n", + " final_val_classifications = []\n", + " for batch_idx, (inputs, targets) in enumerate(valloader): #Compute Val accuracy\n", + " inputs, targets = inputs.to(device), targets.to(device, non_blocking=True)\n", + " outputs = model(inputs)\n", + " loss = criterion(outputs, targets)\n", + " val_loss += loss.item()\n", + " if(feature==\"ood\"): \n", + " _, predicted = outputs[...,:-1].max(1)\n", + " else:\n", + " _, predicted = outputs.max(1)\n", + " val_total += targets.size(0)\n", + " val_correct += predicted.eq(targets).sum().item()\n", + " final_val_predictions += list(predicted.cpu().numpy())\n", + " final_val_classifications += list(predicted.eq(targets).cpu().numpy())\n", + "\n", + " final_tst_predictions = []\n", + " final_tst_classifications = []\n", + " for batch_idx, (inputs, targets) in enumerate(tstloader): #Compute test accuracy\n", + " inputs, targets = inputs.to(device), targets.to(device, non_blocking=True)\n", + " outputs = model(inputs)\n", + " loss = criterion(outputs, targets)\n", + " tst_loss += loss.item()\n", + " if(feature==\"ood\"): \n", + " _, predicted = outputs[...,:-1].max(1)\n", + " else:\n", + " _, predicted = outputs.max(1)\n", + " tst_total += targets.size(0)\n", + " tst_correct += predicted.eq(targets).sum().item()\n", + " final_tst_predictions += list(predicted.cpu().numpy())\n", + " final_tst_classifications += list(predicted.eq(targets).cpu().numpy()) \n", + " val_acc[i] = val_correct / val_total\n", + " tst_acc[i] = tst_correct / tst_total\n", + " val_losses[i] = val_loss\n", + " fulltrn_losses[i] = full_trn_loss\n", + " tst_losses[i] = tst_loss\n", + " full_val_acc = list(np.array(val_acc))\n", + " full_timing = list(np.array(timing))\n", + " res_dict[\"test_acc\"].append(tst_acc[i])\n", + " print('Epoch:', i + 1, 'FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time:', full_trn_loss, full_trn_acc[i], val_loss, val_acc[i], tst_loss, tst_acc[i], timing[i])\n", + " if(i==0):\n", + " torch.save(model.state_dict(), initModelPath) #save initial train model if not present\n", + " if(computeErrorLog):\n", + " tst_err_log, val_err_log, val_class_err_idxs = find_err_per_class(test_set, val_set, final_val_classifications, final_val_predictions, final_tst_classifications, final_tst_predictions, all_logs_dir, sf+\"_\"+str(bud))\n", + " csvlog.append(tst_err_log)\n", + " val_csvlog.append(val_err_log)\n", + " print(csvlog)\n", + " res_dict[\"all_class_acc\"] = csvlog\n", + " res_dict[\"all_val_class_acc\"] = val_csvlog\n", + " with open(os.path.join(all_logs_dir, exp_name+\".csv\"), \"w\") as f:\n", + " writer = csv.writer(f)\n", + " writer.writerows(csvlog)\n", + " #save results dir with test acc and per class selections\n", + " with open(os.path.join(all_logs_dir, exp_name+\".json\"), 'w') as fp:\n", + " json.dump(res_dict, fp)\n", + " # plt.xlabel('AL epochs')\n", + " # plt.ylabel('Test 
Accuracy')\n", + " # plt.plot(tst_acc, label=f'{strategy}-{sf}')\n", + " # plt.title('Budget:'+str(budget)+' Trainset:'+ str(split_cfg['per_idc_train']))\n", + " \n", + " return tst_acc\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### FL2MI" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "SIM fl2mi\n", + "num ood samples: 8000\n", + "CIFAR-10 Custom dataset stats: Train size: 110 Val size: 110 Lake size: 9100 Test set: 3300\n", + "selected classes are: [ 0 1 2 3 4 5 6 7 8 9 10]\n", + "Saving results to: ./SMI_active_learning_results/organmnist/ood/fl2mi/30/fkna_3\n", + "organmnist_ood_SIM_11_fl2mi_budget:30_epochs:10_linear:True_runsfkna_3\n", + "AL epoch: 0\n", + "initial training epoch\n", + "Selection Epoch 0 Training epoch [ 1 ] Training Acc: 0.32727272727272727\n", + "Selection Epoch 0 Training epoch [ 2 ] Training Acc: 0.18181818181818182\n", + "Selection Epoch 0 Training epoch [ 3 ] Training Acc: 0.4\n", + "Selection Epoch 0 Training epoch [ 4 ] Training Acc: 0.6090909090909091\n", + "Selection Epoch 0 Training epoch [ 5 ] Training Acc: 0.4909090909090909\n", + "Selection Epoch 0 Training epoch [ 6 ] Training Acc: 0.8272727272727273\n", + "Selection Epoch 0 Training epoch [ 7 ] Training Acc: 0.9272727272727272\n", + "Selection Epoch 0 Training epoch [ 8 ] Training Acc: 0.8545454545454545\n", + "Selection Epoch 0 Training epoch [ 9 ] Training Acc: 0.9818181818181818\n", + "Selection Epoch 0 Training epoch [ 10 ] Training Acc: 0.990909090909091\n", + "Epoch: 1 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.12345767440274358 0.990909090909091 18.41137532144785 0.6727272727272727 71.45803889632225 0.5463636363636364 3.6022884845733643\n", + "AL epoch: 1\n", + "val, test error% for class 0 : 10.0 20.0\n", + "val, test error% for class 1 : 0.0 30.67\n", + "val, test error% for class 2 : 50.0 47.0\n", + "val, test error% for class 3 : 10.0 18.0\n", + "val, test error% for class 4 : 90.0 93.0\n", + "val, test error% for class 5 : 50.0 60.67\n", + "val, test error% for class 6 : 30.0 52.33\n", + "val, test error% for class 7 : 0.0 14.67\n", + "val, test error% for class 8 : 0.0 17.33\n", + "val, test error% for class 9 : 70.0 62.0\n", + "val, test error% for class 10 : 50.0 83.33\n", + "22 / 30 idc points.\n", + "selEpoch: 1, Selection Ended at: 2022-02-03 03:15:41.977614\n", + "22 9070 9100\n", + "After augmentation, size of train_set: 140 lake set: 9070 val set: 132\n", + "Selection Epoch 1 Training epoch [ 1 ] Training Acc: 0.2785714285714286\n", + "Selection Epoch 1 Training epoch [ 2 ] Training Acc: 0.17857142857142858\n", + "Selection Epoch 1 Training epoch [ 3 ] Training Acc: 0.2642857142857143\n", + "Selection Epoch 1 Training epoch [ 4 ] Training Acc: 0.45\n", + "Selection Epoch 1 Training epoch [ 5 ] Training Acc: 0.6571428571428571\n", + "Selection Epoch 1 Training epoch [ 6 ] Training Acc: 0.9571428571428572\n", + "Selection Epoch 1 Training epoch [ 7 ] Training Acc: 0.7285714285714285\n", + "Selection Epoch 1 Training epoch [ 8 ] Training Acc: 0.9857142857142858\n", + "Selection Epoch 1 Training epoch [ 9 ] Training Acc: 0.9928571428571429\n", + "Epoch: 2 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.2550799734890461 0.9928571428571429 1.230104923248291 0.6666666666666666 55.428039722144604 0.563939393939394 4.259060382843018\n", + "AL epoch: 2\n", + "val, test error% for class 0 : 33.33 45.0\n", + "val, test 
error% for class 1 : 7.69 31.0\n", + "val, test error% for class 2 : 63.64 40.67\n", + "val, test error% for class 3 : 14.29 24.67\n", + "val, test error% for class 4 : 72.73 82.33\n", + "val, test error% for class 5 : 61.54 81.33\n", + "val, test error% for class 6 : 36.36 38.67\n", + "val, test error% for class 7 : 23.08 30.33\n", + "val, test error% for class 8 : 0.0 4.33\n", + "val, test error% for class 9 : 50.0 75.0\n", + "val, test error% for class 10 : 9.09 26.33\n", + "27 / 30 idc points.\n", + "selEpoch: 2, Selection Ended at: 2022-02-03 03:16:06.901020\n", + "27 9040 9070\n", + "After augmentation, size of train_set: 170 lake set: 9040 val set: 159\n", + "Selection Epoch 2 Training epoch [ 1 ] Training Acc: 0.16470588235294117\n", + "Selection Epoch 2 Training epoch [ 2 ] Training Acc: 0.12352941176470589\n", + "Selection Epoch 2 Training epoch [ 3 ] Training Acc: 0.31176470588235294\n", + "Selection Epoch 2 Training epoch [ 4 ] Training Acc: 0.6529411764705882\n", + "Selection Epoch 2 Training epoch [ 5 ] Training Acc: 0.8529411764705882\n", + "Selection Epoch 2 Training epoch [ 6 ] Training Acc: 0.9470588235294117\n", + "Selection Epoch 2 Training epoch [ 7 ] Training Acc: 0.9941176470588236\n", + "Epoch: 3 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.502000579610467 0.9941176470588236 0.7200183272361755 0.8364779874213837 45.822599947452545 0.6336363636363637 3.957005262374878\n", + "AL epoch: 3\n", + "val, test error% for class 0 : 6.25 20.0\n", + "val, test error% for class 1 : 0.0 42.0\n", + "val, test error% for class 2 : 57.14 41.33\n", + "val, test error% for class 3 : 5.88 18.0\n", + "val, test error% for class 4 : 33.33 43.33\n", + "val, test error% for class 5 : 35.29 70.33\n", + "val, test error% for class 6 : 20.0 40.0\n", + "val, test error% for class 7 : 0.0 17.33\n", + "val, test error% for class 8 : 0.0 11.67\n", + "val, test error% for class 9 : 7.14 42.0\n", + "val, test error% for class 10 : 16.67 57.0\n", + "27 / 30 idc points.\n", + "selEpoch: 3, Selection Ended at: 2022-02-03 03:16:32.569928\n", + "27 9010 9040\n", + "After augmentation, size of train_set: 200 lake set: 9010 val set: 186\n", + "Selection Epoch 3 Training epoch [ 1 ] Training Acc: 0.19\n", + "Selection Epoch 3 Training epoch [ 2 ] Training Acc: 0.295\n", + "Selection Epoch 3 Training epoch [ 3 ] Training Acc: 0.36\n", + "Selection Epoch 3 Training epoch [ 4 ] Training Acc: 0.66\n", + "Selection Epoch 3 Training epoch [ 5 ] Training Acc: 0.83\n", + "Selection Epoch 3 Training epoch [ 6 ] Training Acc: 0.875\n", + "Selection Epoch 3 Training epoch [ 7 ] Training Acc: 0.96\n", + "Selection Epoch 3 Training epoch [ 8 ] Training Acc: 0.985\n", + "Selection Epoch 3 Training epoch [ 9 ] Training Acc: 0.985\n", + "Selection Epoch 3 Training epoch [ 10 ] Training Acc: 1.0\n", + "Epoch: 4 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.09003472269978374 1.0 0.6791873574256897 0.8548387096774194 40.57865971326828 0.6845454545454546 6.911073684692383\n", + "AL epoch: 4\n", + "val, test error% for class 0 : 11.76 15.33\n", + "val, test error% for class 1 : 0.0 21.67\n", + "val, test error% for class 2 : 25.0 22.0\n", + "val, test error% for class 3 : 5.0 13.33\n", + "val, test error% for class 4 : 37.5 57.33\n", + "val, test error% for class 5 : 26.32 50.0\n", + "val, test error% for class 6 : 22.22 42.0\n", + "val, test error% for class 7 : 0.0 12.0\n", + "val, test error% for class 8 : 0.0 17.67\n", + "val, test error% for class 9 : 13.33 44.33\n", + "val, test error% for class 10 : 
18.75 51.33\n", + "28 / 30 idc points.\n", + "selEpoch: 4, Selection Ended at: 2022-02-03 03:17:02.394633\n", + "28 8980 9010\n", + "After augmentation, size of train_set: 230 lake set: 8980 val set: 214\n", + "Selection Epoch 4 Training epoch [ 1 ] Training Acc: 0.3695652173913043\n", + "Selection Epoch 4 Training epoch [ 2 ] Training Acc: 0.1956521739130435\n", + "Selection Epoch 4 Training epoch [ 3 ] Training Acc: 0.34347826086956523\n", + "Selection Epoch 4 Training epoch [ 4 ] Training Acc: 0.7695652173913043\n", + "Selection Epoch 4 Training epoch [ 5 ] Training Acc: 0.8478260869565217\n", + "Selection Epoch 4 Training epoch [ 6 ] Training Acc: 0.9347826086956522\n", + "Selection Epoch 4 Training epoch [ 7 ] Training Acc: 0.9391304347826087\n", + "Selection Epoch 4 Training epoch [ 8 ] Training Acc: 0.9478260869565217\n", + "Selection Epoch 4 Training epoch [ 9 ] Training Acc: 0.9826086956521739\n", + "Selection Epoch 4 Training epoch [ 10 ] Training Acc: 0.9826086956521739\n", + "Selection Epoch 4 Training epoch [ 11 ] Training Acc: 0.991304347826087\n", + "Epoch: 5 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.27562923450022936 0.991304347826087 0.5597469806671143 0.8738317757009346 44.42860025167465 0.6718181818181819 8.488568305969238\n", + "AL epoch: 5\n", + "val, test error% for class 0 : 11.76 24.67\n", + "val, test error% for class 1 : 0.0 18.67\n", + "val, test error% for class 2 : 44.44 31.0\n", + "val, test error% for class 3 : 4.35 17.67\n", + "val, test error% for class 4 : 33.33 73.0\n", + "val, test error% for class 5 : 8.33 33.67\n", + "val, test error% for class 6 : 13.64 38.33\n", + "val, test error% for class 7 : 5.0 22.67\n", + "val, test error% for class 8 : 0.0 12.33\n", + "val, test error% for class 9 : 5.56 33.33\n", + "val, test error% for class 10 : 16.67 55.67\n", + "26 / 30 idc points.\n", + "selEpoch: 5, Selection Ended at: 2022-02-03 03:17:30.713933\n", + "26 8950 8980\n", + "After augmentation, size of train_set: 260 lake set: 8950 val set: 240\n", + "Selection Epoch 5 Training epoch [ 1 ] Training Acc: 0.1423076923076923\n", + "Selection Epoch 5 Training epoch [ 2 ] Training Acc: 0.3230769230769231\n", + "Selection Epoch 5 Training epoch [ 3 ] Training Acc: 0.6\n", + "Selection Epoch 5 Training epoch [ 4 ] Training Acc: 0.7192307692307692\n", + "Selection Epoch 5 Training epoch [ 5 ] Training Acc: 0.85\n", + "Selection Epoch 5 Training epoch [ 6 ] Training Acc: 0.9192307692307692\n", + "Selection Epoch 5 Training epoch [ 7 ] Training Acc: 0.9615384615384616\n", + "Selection Epoch 5 Training epoch [ 8 ] Training Acc: 0.9807692307692307\n", + "Selection Epoch 5 Training epoch [ 9 ] Training Acc: 0.9923076923076923\n", + "Epoch: 6 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.5525787975639105 0.9923076923076923 0.36838218569755554 0.9041666666666667 40.3967704474926 0.7045454545454546 8.122658729553223\n", + "AL epoch: 6\n", + "val, test error% for class 0 : 5.0 15.33\n", + "val, test error% for class 1 : 0.0 15.67\n", + "val, test error% for class 2 : 21.05 18.0\n", + "val, test error% for class 3 : 3.85 28.33\n", + "val, test error% for class 4 : 15.79 58.67\n", + "val, test error% for class 5 : 23.08 60.33\n", + "val, test error% for class 6 : 7.69 24.33\n", + "val, test error% for class 7 : 4.35 21.0\n", + "val, test error% for class 8 : 0.0 12.0\n", + "val, test error% for class 9 : 9.52 22.67\n", + "val, test error% for class 10 : 13.64 48.67\n", + "29 / 30 idc points.\n", + "selEpoch: 6, Selection Ended at: 2022-02-03 
03:18:01.388089\n", + "29 8920 8950\n", + "After augmentation, size of train_set: 290 lake set: 8920 val set: 269\n", + "Selection Epoch 6 Training epoch [ 1 ] Training Acc: 0.11724137931034483\n", + "Selection Epoch 6 Training epoch [ 2 ] Training Acc: 0.3758620689655172\n", + "Selection Epoch 6 Training epoch [ 3 ] Training Acc: 0.7965517241379311\n", + "Selection Epoch 6 Training epoch [ 4 ] Training Acc: 0.7482758620689656\n", + "Selection Epoch 6 Training epoch [ 5 ] Training Acc: 0.7413793103448276\n", + "Selection Epoch 6 Training epoch [ 6 ] Training Acc: 0.903448275862069\n", + "Selection Epoch 6 Training epoch [ 7 ] Training Acc: 0.9620689655172414\n", + "Selection Epoch 6 Training epoch [ 8 ] Training Acc: 0.9517241379310345\n", + "Selection Epoch 6 Training epoch [ 9 ] Training Acc: 0.996551724137931\n", + "Epoch: 7 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.3615231600124389 0.996551724137931 0.5454710125923157 0.9033457249070632 42.919586434960365 0.6827272727272727 8.828480958938599\n", + "AL epoch: 7\n", + "val, test error% for class 0 : 8.7 19.0\n", + "val, test error% for class 1 : 0.0 29.0\n", + "val, test error% for class 2 : 14.29 16.0\n", + "val, test error% for class 3 : 6.67 21.33\n", + "val, test error% for class 4 : 33.33 67.33\n", + "val, test error% for class 5 : 17.86 41.0\n", + "val, test error% for class 6 : 0.0 10.33\n", + "val, test error% for class 7 : 0.0 12.33\n", + "val, test error% for class 8 : 0.0 8.0\n", + "val, test error% for class 9 : 13.64 51.33\n", + "val, test error% for class 10 : 16.0 73.33\n", + "28 / 30 idc points.\n", + "selEpoch: 7, Selection Ended at: 2022-02-03 03:18:32.729092\n", + "28 8890 8920\n", + "After augmentation, size of train_set: 320 lake set: 8890 val set: 297\n", + "Selection Epoch 7 Training epoch [ 1 ] Training Acc: 0.125\n", + "Selection Epoch 7 Training epoch [ 2 ] Training Acc: 0.59375\n", + "Selection Epoch 7 Training epoch [ 3 ] Training Acc: 0.55\n", + "Selection Epoch 7 Training epoch [ 4 ] Training Acc: 0.7375\n", + "Selection Epoch 7 Training epoch [ 5 ] Training Acc: 0.9375\n", + "Selection Epoch 7 Training epoch [ 6 ] Training Acc: 0.95\n", + "Selection Epoch 7 Training epoch [ 7 ] Training Acc: 0.99375\n", + "Epoch: 8 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 1.2979037165641785 0.99375 0.3649521768093109 0.9124579124579124 46.49285668134689 0.6245454545454545 7.720529556274414\n", + "AL epoch: 8\n", + "val, test error% for class 0 : 8.0 16.33\n", + "val, test error% for class 1 : 0.0 19.0\n", + "val, test error% for class 2 : 17.39 11.0\n", + "val, test error% for class 3 : 3.12 25.67\n", + "val, test error% for class 4 : 18.18 42.33\n", + "val, test error% for class 5 : 16.67 58.67\n", + "val, test error% for class 6 : 6.06 35.33\n", + "val, test error% for class 7 : 3.57 37.0\n", + "val, test error% for class 8 : 13.04 57.0\n", + "val, test error% for class 9 : 7.69 35.67\n", + "val, test error% for class 10 : 6.9 75.0\n", + "30 / 30 idc points.\n", + "selEpoch: 8, Selection Ended at: 2022-02-03 03:19:03.140939\n", + "30 8860 8890\n", + "After augmentation, size of train_set: 350 lake set: 8860 val set: 327\n", + "Selection Epoch 8 Training epoch [ 1 ] Training Acc: 0.16285714285714287\n", + "Selection Epoch 8 Training epoch [ 2 ] Training Acc: 0.4228571428571429\n", + "Selection Epoch 8 Training epoch [ 3 ] Training Acc: 0.6485714285714286\n", + "Selection Epoch 8 Training epoch [ 4 ] Training Acc: 0.7914285714285715\n", + "Selection Epoch 8 Training epoch [ 5 ] Training Acc: 
0.9028571428571428\n", + "Selection Epoch 8 Training epoch [ 6 ] Training Acc: 0.9571428571428572\n", + "Selection Epoch 8 Training epoch [ 7 ] Training Acc: 0.9685714285714285\n", + "Selection Epoch 8 Training epoch [ 8 ] Training Acc: 0.9914285714285714\n", + "Epoch: 9 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.7335402709431946 0.9914285714285714 0.2643401026725769 0.9480122324159022 36.337587773799896 0.7315151515151516 9.457635879516602\n", + "AL epoch: 9\n", + "val, test error% for class 0 : 3.7 13.0\n", + "val, test error% for class 1 : 3.45 20.33\n", + "val, test error% for class 2 : 11.54 16.67\n", + "val, test error% for class 3 : 2.94 14.0\n", + "val, test error% for class 4 : 18.18 70.33\n", + "val, test error% for class 5 : 11.76 47.33\n", + "val, test error% for class 6 : 2.7 17.33\n", + "val, test error% for class 7 : 0.0 9.67\n", + "val, test error% for class 8 : 0.0 3.0\n", + "val, test error% for class 9 : 3.57 42.33\n", + "val, test error% for class 10 : 3.03 41.33\n", + "29 / 30 idc points.\n", + "selEpoch: 9, Selection Ended at: 2022-02-03 03:19:32.342779\n", + "29 8830 8860\n", + "After augmentation, size of train_set: 380 lake set: 8830 val set: 356\n", + "Selection Epoch 9 Training epoch [ 1 ] Training Acc: 0.10526315789473684\n", + "Selection Epoch 9 Training epoch [ 2 ] Training Acc: 0.32105263157894737\n", + "Selection Epoch 9 Training epoch [ 3 ] Training Acc: 0.8447368421052631\n", + "Selection Epoch 9 Training epoch [ 4 ] Training Acc: 0.8921052631578947\n", + "Selection Epoch 9 Training epoch [ 5 ] Training Acc: 0.9552631578947368\n", + "Selection Epoch 9 Training epoch [ 6 ] Training Acc: 0.9736842105263158\n", + "Selection Epoch 9 Training epoch [ 7 ] Training Acc: 0.9973684210526316\n", + "Epoch: 10 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.5029678102582693 0.9973684210526316 0.2728564739227295 0.9438202247191011 37.8411568403244 0.7127272727272728 9.134510278701782\n", + "val, test error% for class 0 : 6.9 18.67\n", + "val, test error% for class 1 : 0.0 16.0\n", + "val, test error% for class 2 : 10.34 16.0\n", + "val, test error% for class 3 : 2.7 22.33\n", + "val, test error% for class 4 : 14.81 51.67\n", + "val, test error% for class 5 : 11.43 50.67\n", + "val, test error% for class 6 : 0.0 9.67\n", + "val, test error% for class 7 : 0.0 5.33\n", + "val, test error% for class 8 : 0.0 8.67\n", + "val, test error% for class 9 : 9.68 51.67\n", + "val, test error% for class 10 : 8.57 65.33\n", + "[[20.0, 30.67, 47.0, 18.0, 93.0, 60.67, 52.33, 14.67, 17.33, 62.0, 83.33, 45.36363636363637], [45.0, 31.0, 40.67, 24.67, 82.33, 81.33, 38.67, 30.33, 4.33, 75.0, 26.33, 43.60545454545454], [20.0, 42.0, 41.33, 18.0, 43.33, 70.33, 40.0, 17.33, 11.67, 42.0, 57.0, 36.63545454545454], [15.33, 21.67, 22.0, 13.33, 57.33, 50.0, 42.0, 12.0, 17.67, 44.33, 51.33, 31.54454545454545], [24.67, 18.67, 31.0, 17.67, 73.0, 33.67, 38.33, 22.67, 12.33, 33.33, 55.67, 32.81909090909091], [15.33, 15.67, 18.0, 28.33, 58.67, 60.33, 24.33, 21.0, 12.0, 22.67, 48.67, 29.545454545454547], [19.0, 29.0, 16.0, 21.33, 67.33, 41.0, 10.33, 12.33, 8.0, 51.33, 73.33, 31.725454545454546], [16.33, 19.0, 11.0, 25.67, 42.33, 58.67, 35.33, 37.0, 57.0, 35.67, 75.0, 37.54545454545455], [13.0, 20.33, 16.67, 14.0, 70.33, 47.33, 17.33, 9.67, 3.0, 42.33, 41.33, 26.84727272727272], [18.67, 16.0, 16.0, 22.33, 51.67, 50.67, 9.67, 5.33, 8.67, 51.67, 65.33, 28.728181818181817]]\n", + "\n", + "\n" + ] + } + ], + "source": [ + "train_model_al(datkbuildPath, exePath, num_epochs, data_name, datadir, 
feature, model_name, budget, split_cfg, learning_rate, run, device, computeClassErrorLog, \"SIM\",'fl2mi')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### FL1MI" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "SIM fl1mi\n", + "num ood samples: 8000\n", + "CIFAR-10 Custom dataset stats: Train size: 110 Val size: 110 Lake size: 9100 Test set: 3300\n", + "selected classes are: [ 0 1 2 3 4 5 6 7 8 9 10]\n", + "Saving results to: ./SMI_active_learning_results/organmnist/ood/fl1mi/30/fkna_3\n", + "organmnist_ood_SIM_11_fl1mi_budget:30_epochs:10_linear:True_runsfkna_3\n", + "AL epoch: 0\n", + "initial training epoch\n", + "Init model loaded from disk, skipping init training: /mnt/data2/akshit/Organ/weights/organmnist_ood_ResNet18_0.01_10_10_11\n", + "AL epoch: 1\n", + "val, test error% for class 0 : 10.0 20.0\n", + "val, test error% for class 1 : 0.0 30.67\n", + "val, test error% for class 2 : 50.0 47.0\n", + "val, test error% for class 3 : 10.0 18.0\n", + "val, test error% for class 4 : 90.0 93.0\n", + "val, test error% for class 5 : 50.0 60.67\n", + "val, test error% for class 6 : 30.0 52.33\n", + "val, test error% for class 7 : 0.0 14.67\n", + "val, test error% for class 8 : 0.0 17.33\n", + "val, test error% for class 9 : 70.0 62.0\n", + "val, test error% for class 10 : 50.0 83.33\n", + "8 / 30 idc points.\n", + "selEpoch: 1, Selection Ended at: 2022-02-03 03:20:34.385887\n", + "8 9070 9100\n", + "After augmentation, size of train_set: 140 lake set: 9070 val set: 118\n", + "Selection Epoch 1 Training epoch [ 1 ] Training Acc: 0.2642857142857143\n", + "Selection Epoch 1 Training epoch [ 2 ] Training Acc: 0.2\n", + "Selection Epoch 1 Training epoch [ 3 ] Training Acc: 0.2714285714285714\n", + "Selection Epoch 1 Training epoch [ 4 ] Training Acc: 0.15\n", + "Selection Epoch 1 Training epoch [ 5 ] Training Acc: 0.5571428571428572\n", + "Selection Epoch 1 Training epoch [ 6 ] Training Acc: 0.4\n", + "Selection Epoch 1 Training epoch [ 7 ] Training Acc: 0.36428571428571427\n", + "Selection Epoch 1 Training epoch [ 8 ] Training Acc: 0.9428571428571428\n", + "Selection Epoch 1 Training epoch [ 9 ] Training Acc: 0.9142857142857143\n", + "Selection Epoch 1 Training epoch [ 10 ] Training Acc: 1.0\n", + "Epoch: 2 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.059122280683368444 1.0 1.6999752521514893 0.652542372881356 67.28636400401592 0.5415151515151515 4.637464761734009\n", + "AL epoch: 2\n", + "val, test error% for class 0 : 45.45 28.0\n", + "val, test error% for class 1 : 30.0 49.0\n", + "val, test error% for class 2 : 27.27 18.0\n", + "val, test error% for class 3 : 18.18 33.0\n", + "val, test error% for class 4 : 63.64 69.33\n", + "val, test error% for class 5 : 60.0 83.0\n", + "val, test error% for class 6 : 18.18 31.0\n", + "val, test error% for class 7 : 18.18 36.33\n", + "val, test error% for class 8 : 0.0 6.33\n", + "val, test error% for class 9 : 63.64 88.33\n", + "val, test error% for class 10 : 40.0 62.0\n", + "27 / 30 idc points.\n", + "selEpoch: 2, Selection Ended at: 2022-02-03 03:21:26.364822\n", + "27 9040 9070\n", + "After augmentation, size of train_set: 170 lake set: 9040 val set: 145\n", + "Selection Epoch 2 Training epoch [ 1 ] Training Acc: 0.21764705882352942\n", + "Selection Epoch 2 Training epoch [ 2 ] Training Acc: 0.17647058823529413\n", + "Selection Epoch 2 Training epoch [ 3 ] Training Acc: 0.2\n", + "Selection Epoch 
2 Training epoch [ 4 ] Training Acc: 0.6823529411764706\n", + "Selection Epoch 2 Training epoch [ 5 ] Training Acc: 0.9352941176470588\n", + "Selection Epoch 2 Training epoch [ 6 ] Training Acc: 0.9529411764705882\n", + "Selection Epoch 2 Training epoch [ 7 ] Training Acc: 0.9411764705882353\n", + "Selection Epoch 2 Training epoch [ 8 ] Training Acc: 0.9588235294117647\n", + "Selection Epoch 2 Training epoch [ 9 ] Training Acc: 0.9470588235294117\n", + "Selection Epoch 2 Training epoch [ 10 ] Training Acc: 0.7764705882352941\n", + "Selection Epoch 2 Training epoch [ 11 ] Training Acc: 0.9588235294117647\n", + "Selection Epoch 2 Training epoch [ 12 ] Training Acc: 1.0\n", + "Epoch: 3 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.07227501692250371 1.0 1.1204864978790283 0.7655172413793103 61.256064385175705 0.6215151515151515 6.592379570007324\n", + "AL epoch: 3\n", + "val, test error% for class 0 : 7.69 13.67\n", + "val, test error% for class 1 : 15.38 23.0\n", + "val, test error% for class 2 : 30.77 20.0\n", + "val, test error% for class 3 : 7.14 25.67\n", + "val, test error% for class 4 : 61.54 57.67\n", + "val, test error% for class 5 : 63.64 77.33\n", + "val, test error% for class 6 : 25.0 53.33\n", + "val, test error% for class 7 : 0.0 13.0\n", + "val, test error% for class 8 : 7.69 18.67\n", + "val, test error% for class 9 : 14.29 36.0\n", + "val, test error% for class 10 : 33.33 78.0\n", + "28 / 30 idc points.\n", + "selEpoch: 3, Selection Ended at: 2022-02-03 03:22:21.278732\n", + "28 9010 9040\n", + "After augmentation, size of train_set: 200 lake set: 9010 val set: 173\n", + "Selection Epoch 3 Training epoch [ 1 ] Training Acc: 0.14\n", + "Selection Epoch 3 Training epoch [ 2 ] Training Acc: 0.185\n", + "Selection Epoch 3 Training epoch [ 3 ] Training Acc: 0.19\n", + "Selection Epoch 3 Training epoch [ 4 ] Training Acc: 0.295\n", + "Selection Epoch 3 Training epoch [ 5 ] Training Acc: 0.85\n", + "Selection Epoch 3 Training epoch [ 6 ] Training Acc: 0.92\n", + "Selection Epoch 3 Training epoch [ 7 ] Training Acc: 0.965\n", + "Selection Epoch 3 Training epoch [ 8 ] Training Acc: 0.975\n", + "Selection Epoch 3 Training epoch [ 9 ] Training Acc: 0.98\n", + "Selection Epoch 3 Training epoch [ 10 ] Training Acc: 1.0\n", + "Epoch: 4 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.11824328685179353 1.0 0.7211366891860962 0.8323699421965318 46.510882541537285 0.6578787878787878 6.602997779846191\n", + "AL epoch: 4\n", + "val, test error% for class 0 : 14.29 23.67\n", + "val, test error% for class 1 : 6.25 37.33\n", + "val, test error% for class 2 : 11.76 15.0\n", + "val, test error% for class 3 : 6.25 12.0\n", + "val, test error% for class 4 : 41.18 59.0\n", + "val, test error% for class 5 : 46.15 66.0\n", + "val, test error% for class 6 : 15.79 38.33\n", + "val, test error% for class 7 : 5.88 11.67\n", + "val, test error% for class 8 : 0.0 11.0\n", + "val, test error% for class 9 : 13.33 30.67\n", + "val, test error% for class 10 : 28.57 71.67\n", + "28 / 30 idc points.\n", + "selEpoch: 4, Selection Ended at: 2022-02-03 03:23:17.172136\n", + "28 8980 9010\n", + "After augmentation, size of train_set: 230 lake set: 8980 val set: 201\n", + "Selection Epoch 4 Training epoch [ 1 ] Training Acc: 0.09565217391304348\n", + "Selection Epoch 4 Training epoch [ 2 ] Training Acc: 0.30869565217391304\n", + "Selection Epoch 4 Training epoch [ 3 ] Training Acc: 0.6391304347826087\n", + "Selection Epoch 4 Training epoch [ 4 ] Training Acc: 0.8217391304347826\n", + "Selection Epoch 4 
Training epoch [ 5 ] Training Acc: 0.8478260869565217\n", + "Selection Epoch 4 Training epoch [ 6 ] Training Acc: 0.9043478260869565\n", + "Selection Epoch 4 Training epoch [ 7 ] Training Acc: 0.9478260869565217\n", + "Selection Epoch 4 Training epoch [ 8 ] Training Acc: 0.782608695652174\n", + "Selection Epoch 4 Training epoch [ 9 ] Training Acc: 0.9478260869565217\n", + "Selection Epoch 4 Training epoch [ 10 ] Training Acc: 0.9434782608695652\n", + "Selection Epoch 4 Training epoch [ 11 ] Training Acc: 0.9956521739130435\n", + "Epoch: 5 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.548540704883635 0.9956521739130435 0.7385290265083313 0.835820895522388 60.311096236109734 0.6275757575757576 8.211057424545288\n", + "AL epoch: 5\n", + "val, test error% for class 0 : 16.67 30.33\n", + "val, test error% for class 1 : 11.76 41.0\n", + "val, test error% for class 2 : 10.53 16.33\n", + "val, test error% for class 3 : 5.56 12.67\n", + "val, test error% for class 4 : 31.58 71.33\n", + "val, test error% for class 5 : 40.0 57.67\n", + "val, test error% for class 6 : 4.55 16.33\n", + "val, test error% for class 7 : 10.0 26.67\n", + "val, test error% for class 8 : 0.0 8.67\n", + "val, test error% for class 9 : 33.33 73.67\n", + "val, test error% for class 10 : 23.53 55.0\n", + "28 / 30 idc points.\n", + "selEpoch: 5, Selection Ended at: 2022-02-03 03:24:09.556374\n", + "28 8950 8980\n", + "After augmentation, size of train_set: 260 lake set: 8950 val set: 229\n", + "Selection Epoch 5 Training epoch [ 1 ] Training Acc: 0.12692307692307692\n", + "Selection Epoch 5 Training epoch [ 2 ] Training Acc: 0.1423076923076923\n", + "Selection Epoch 5 Training epoch [ 3 ] Training Acc: 0.6192307692307693\n", + "Selection Epoch 5 Training epoch [ 4 ] Training Acc: 0.6153846153846154\n", + "Selection Epoch 5 Training epoch [ 5 ] Training Acc: 0.7653846153846153\n", + "Selection Epoch 5 Training epoch [ 6 ] Training Acc: 0.9192307692307692\n", + "Selection Epoch 5 Training epoch [ 7 ] Training Acc: 0.8846153846153846\n", + "Selection Epoch 5 Training epoch [ 8 ] Training Acc: 0.9769230769230769\n", + "Selection Epoch 5 Training epoch [ 9 ] Training Acc: 0.9653846153846154\n", + "Selection Epoch 5 Training epoch [ 10 ] Training Acc: 0.9961538461538462\n", + "Epoch: 6 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.1957303408998996 0.9961538461538462 0.3985424339771271 0.8951965065502183 45.270287573337555 0.6906060606060606 8.56587815284729\n", + "AL epoch: 6\n", + "val, test error% for class 0 : 10.0 31.0\n", + "val, test error% for class 1 : 0.0 25.33\n", + "val, test error% for class 2 : 14.29 25.0\n", + "val, test error% for class 3 : 0.0 24.33\n", + "val, test error% for class 4 : 40.0 39.0\n", + "val, test error% for class 5 : 29.41 51.0\n", + "val, test error% for class 6 : 3.85 23.0\n", + "val, test error% for class 7 : 0.0 16.33\n", + "val, test error% for class 8 : 0.0 15.33\n", + "val, test error% for class 9 : 5.26 27.0\n", + "val, test error% for class 10 : 16.67 63.0\n", + "29 / 30 idc points.\n", + "selEpoch: 6, Selection Ended at: 2022-02-03 03:25:02.321201\n", + "29 8920 8950\n", + "After augmentation, size of train_set: 290 lake set: 8920 val set: 258\n", + "Selection Epoch 6 Training epoch [ 1 ] Training Acc: 0.11379310344827587\n", + "Selection Epoch 6 Training epoch [ 2 ] Training Acc: 0.2206896551724138\n", + "Selection Epoch 6 Training epoch [ 3 ] Training Acc: 0.5448275862068965\n", + "Selection Epoch 6 Training epoch [ 4 ] Training Acc: 0.6827586206896552\n", + "Selection 
Epoch 6 Training epoch [ 5 ] Training Acc: 0.7965517241379311\n", + "Selection Epoch 6 Training epoch [ 6 ] Training Acc: 0.8896551724137931\n", + "Selection Epoch 6 Training epoch [ 7 ] Training Acc: 0.9827586206896551\n", + "Selection Epoch 6 Training epoch [ 8 ] Training Acc: 0.9724137931034482\n", + "Selection Epoch 6 Training epoch [ 9 ] Training Acc: 0.9310344827586207\n", + "Selection Epoch 6 Training epoch [ 10 ] Training Acc: 0.9793103448275862\n", + "Selection Epoch 6 Training epoch [ 11 ] Training Acc: 0.9896551724137931\n", + "Selection Epoch 6 Training epoch [ 12 ] Training Acc: 1.0\n", + "Epoch: 7 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.14299100707285106 1.0 0.3706114590167999 0.9186046511627907 36.33944997191429 0.7093939393939394 11.394667148590088\n", + "AL epoch: 7\n", + "val, test error% for class 0 : 9.09 23.33\n", + "val, test error% for class 1 : 0.0 9.0\n", + "val, test error% for class 2 : 8.7 20.0\n", + "val, test error% for class 3 : 4.55 24.33\n", + "val, test error% for class 4 : 31.82 61.67\n", + "val, test error% for class 5 : 21.05 55.67\n", + "val, test error% for class 6 : 3.33 15.33\n", + "val, test error% for class 7 : 0.0 14.67\n", + "val, test error% for class 8 : 0.0 21.33\n", + "val, test error% for class 9 : 4.55 31.67\n", + "val, test error% for class 10 : 11.11 42.67\n", + "28 / 30 idc points.\n", + "selEpoch: 7, Selection Ended at: 2022-02-03 03:26:02.783568\n", + "28 8890 8920\n", + "After augmentation, size of train_set: 320 lake set: 8890 val set: 286\n", + "Selection Epoch 7 Training epoch [ 1 ] Training Acc: 0.246875\n", + "Selection Epoch 7 Training epoch [ 2 ] Training Acc: 0.375\n", + "Selection Epoch 7 Training epoch [ 3 ] Training Acc: 0.459375\n", + "Selection Epoch 7 Training epoch [ 4 ] Training Acc: 0.63125\n", + "Selection Epoch 7 Training epoch [ 5 ] Training Acc: 0.6125\n", + "Selection Epoch 7 Training epoch [ 6 ] Training Acc: 0.75\n", + "Selection Epoch 7 Training epoch [ 7 ] Training Acc: 0.978125\n", + "Selection Epoch 7 Training epoch [ 8 ] Training Acc: 0.990625\n", + "Epoch: 8 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 1.0916571037378162 0.990625 0.4464597702026367 0.9055944055944056 44.45199039578438 0.6878787878787879 8.446425914764404\n", + "AL epoch: 8\n", + "val, test error% for class 0 : 7.69 17.67\n", + "val, test error% for class 1 : 0.0 23.67\n", + "val, test error% for class 2 : 16.0 23.33\n", + "val, test error% for class 3 : 4.17 20.67\n", + "val, test error% for class 4 : 32.0 41.33\n", + "val, test error% for class 5 : 23.81 73.67\n", + "val, test error% for class 6 : 3.03 14.33\n", + "val, test error% for class 7 : 0.0 19.33\n", + "val, test error% for class 8 : 0.0 10.0\n", + "val, test error% for class 9 : 19.23 62.67\n", + "val, test error% for class 10 : 3.45 36.67\n", + "29 / 30 idc points.\n", + "selEpoch: 8, Selection Ended at: 2022-02-03 03:26:57.988285\n", + "29 8860 8890\n", + "After augmentation, size of train_set: 350 lake set: 8860 val set: 315\n", + "Selection Epoch 8 Training epoch [ 1 ] Training Acc: 0.18571428571428572\n", + "Selection Epoch 8 Training epoch [ 2 ] Training Acc: 0.3628571428571429\n", + "Selection Epoch 8 Training epoch [ 3 ] Training Acc: 0.5285714285714286\n", + "Selection Epoch 8 Training epoch [ 4 ] Training Acc: 0.8028571428571428\n", + "Selection Epoch 8 Training epoch [ 5 ] Training Acc: 0.8028571428571428\n", + "Selection Epoch 8 Training epoch [ 6 ] Training Acc: 0.9657142857142857\n", + "Selection Epoch 8 Training epoch [ 7 ] Training Acc: 
0.9714285714285714\n", + "Selection Epoch 8 Training epoch [ 8 ] Training Acc: 0.9914285714285714\n", + "Epoch: 9 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.6714746626093984 0.9914285714285714 0.3877151310443878 0.926984126984127 41.091063141822815 0.6924242424242424 9.334815502166748\n", + "AL epoch: 9\n", + "val, test error% for class 0 : 3.45 17.67\n", + "val, test error% for class 1 : 0.0 33.33\n", + "val, test error% for class 2 : 14.81 20.67\n", + "val, test error% for class 3 : 3.57 20.0\n", + "val, test error% for class 4 : 17.86 42.67\n", + "val, test error% for class 5 : 21.74 59.0\n", + "val, test error% for class 6 : 2.78 23.33\n", + "val, test error% for class 7 : 0.0 18.33\n", + "val, test error% for class 8 : 0.0 2.0\n", + "val, test error% for class 9 : 14.29 71.67\n", + "val, test error% for class 10 : 6.25 29.67\n", + "28 / 30 idc points.\n", + "selEpoch: 9, Selection Ended at: 2022-02-03 03:27:54.618432\n", + "28 8830 8860\n", + "After augmentation, size of train_set: 380 lake set: 8830 val set: 343\n", + "Selection Epoch 9 Training epoch [ 1 ] Training Acc: 0.16578947368421051\n", + "Selection Epoch 9 Training epoch [ 2 ] Training Acc: 0.4789473684210526\n", + "Selection Epoch 9 Training epoch [ 3 ] Training Acc: 0.46578947368421053\n", + "Selection Epoch 9 Training epoch [ 4 ] Training Acc: 0.8131578947368421\n", + "Selection Epoch 9 Training epoch [ 5 ] Training Acc: 0.7868421052631579\n", + "Selection Epoch 9 Training epoch [ 6 ] Training Acc: 0.9184210526315789\n", + "Selection Epoch 9 Training epoch [ 7 ] Training Acc: 0.9\n", + "Selection Epoch 9 Training epoch [ 8 ] Training Acc: 0.9421052631578948\n", + "Selection Epoch 9 Training epoch [ 9 ] Training Acc: 0.9605263157894737\n", + "Selection Epoch 9 Training epoch [ 10 ] Training Acc: 0.9605263157894737\n", + "Selection Epoch 9 Training epoch [ 11 ] Training Acc: 0.9657894736842105\n", + "Selection Epoch 9 Training epoch [ 12 ] Training Acc: 1.0\n", + "Epoch: 10 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.18239361292216927 1.0 0.2883251905441284 0.9416909620991254 37.932950884103775 0.7154545454545455 15.078807353973389\n", + "val, test error% for class 0 : 0.0 19.67\n", + "val, test error% for class 1 : 0.0 20.0\n", + "val, test error% for class 2 : 6.67 24.67\n", + "val, test error% for class 3 : 6.67 27.0\n", + "val, test error% for class 4 : 25.81 58.0\n", + "val, test error% for class 5 : 8.0 23.0\n", + "val, test error% for class 6 : 2.5 16.0\n", + "val, test error% for class 7 : 0.0 10.33\n", + "val, test error% for class 8 : 0.0 12.33\n", + "val, test error% for class 9 : 10.34 54.33\n", + "val, test error% for class 10 : 6.06 47.67\n", + "[[20.0, 30.67, 47.0, 18.0, 93.0, 60.67, 52.33, 14.67, 17.33, 62.0, 83.33, 45.36363636363637], [28.0, 49.0, 18.0, 33.0, 69.33, 83.0, 31.0, 36.33, 6.33, 88.33, 62.0, 45.847272727272724], [13.67, 23.0, 20.0, 25.67, 57.67, 77.33, 53.33, 13.0, 18.67, 36.0, 78.0, 37.849090909090904], [23.67, 37.33, 15.0, 12.0, 59.0, 66.0, 38.33, 11.67, 11.0, 30.67, 71.67, 34.21272727272728], [30.33, 41.0, 16.33, 12.67, 71.33, 57.67, 16.33, 26.67, 8.67, 73.67, 55.0, 37.24272727272727], [31.0, 25.33, 25.0, 24.33, 39.0, 51.0, 23.0, 16.33, 15.33, 27.0, 63.0, 30.93818181818182], [23.33, 9.0, 20.0, 24.33, 61.67, 55.67, 15.33, 14.67, 21.33, 31.67, 42.67, 29.060909090909092], [17.67, 23.67, 23.33, 20.67, 41.33, 73.67, 14.33, 19.33, 10.0, 62.67, 36.67, 31.212727272727275], [17.67, 33.33, 20.67, 20.0, 42.67, 59.0, 23.33, 18.33, 2.0, 71.67, 29.67, 30.75818181818182], [19.67, 
20.0, 24.67, 27.0, 58.0, 23.0, 16.0, 10.33, 12.33, 54.33, 47.67, 28.45454545454546]]\n", + "\n", + "\n" + ] + } + ], + "source": [ + "train_model_al(datkbuildPath, exePath, num_epochs, data_name, datadir, feature, model_name, budget, split_cfg, learning_rate, run, device, computeClassErrorLog, \"SIM\",'fl1mi')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### AL BADGE" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "AL badge\n", + "num ood samples: 8000\n", + "CIFAR-10 Custom dataset stats: Train size: 110 Val size: 110 Lake size: 9100 Test set: 3300\n", + "selected classes are: [ 0 1 2 3 4 5 6 7 8 9 10]\n", + "Saving results to: ./SMI_active_learning_results/organmnist/ood/badge/30/fkna_3\n", + "organmnist_ood_AL_11_badge_budget:30_epochs:10_linear:True_runsfkna_3\n", + "AL epoch: 0\n", + "initial training epoch\n", + "Init model loaded from disk, skipping init training: /mnt/data2/akshit/Organ/weights/organmnist_ood_ResNet18_0.01_10_10_11\n", + "AL epoch: 1\n", + "val, test error% for class 0 : 10.0 20.0\n", + "val, test error% for class 1 : 0.0 30.67\n", + "val, test error% for class 2 : 50.0 47.0\n", + "val, test error% for class 3 : 10.0 18.0\n", + "val, test error% for class 4 : 90.0 93.0\n", + "val, test error% for class 5 : 50.0 60.67\n", + "val, test error% for class 6 : 30.0 52.33\n", + "val, test error% for class 7 : 0.0 14.67\n", + "val, test error% for class 8 : 0.0 17.33\n", + "val, test error% for class 9 : 70.0 62.0\n", + "val, test error% for class 10 : 50.0 83.33\n", + "8 / 30 idc points.\n", + "selEpoch: 1, Selection Ended at: 2022-02-03 03:28:37.820532\n", + "8 9070 9100\n", + "After augmentation, size of train_set: 140 lake set: 9070 val set: 118\n", + "Selection Epoch 1 Training epoch [ 1 ] Training Acc: 0.20714285714285716\n", + "Selection Epoch 1 Training epoch [ 2 ] Training Acc: 0.24285714285714285\n", + "Selection Epoch 1 Training epoch [ 3 ] Training Acc: 0.32857142857142857\n", + "Selection Epoch 1 Training epoch [ 4 ] Training Acc: 0.25\n", + "Selection Epoch 1 Training epoch [ 5 ] Training Acc: 0.7071428571428572\n", + "Selection Epoch 1 Training epoch [ 6 ] Training Acc: 0.6142857142857143\n", + "Selection Epoch 1 Training epoch [ 7 ] Training Acc: 0.7714285714285715\n", + "Selection Epoch 1 Training epoch [ 8 ] Training Acc: 1.0\n", + "Epoch: 2 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.42635762318968773 1.0 1.103916883468628 0.6949152542372882 45.58243507146835 0.6142424242424243 3.7027690410614014\n", + "AL epoch: 2\n", + "val, test error% for class 0 : 9.09 9.0\n", + "val, test error% for class 1 : 10.0 28.0\n", + "val, test error% for class 2 : 50.0 35.33\n", + "val, test error% for class 3 : 10.0 22.67\n", + "val, test error% for class 4 : 80.0 52.0\n", + "val, test error% for class 5 : 60.0 67.33\n", + "val, test error% for class 6 : 33.33 52.67\n", + "val, test error% for class 7 : 27.27 36.0\n", + "val, test error% for class 8 : 0.0 11.67\n", + "val, test error% for class 9 : 50.0 55.67\n", + "val, test error% for class 10 : 16.67 54.0\n", + "8 / 30 idc points.\n", + "selEpoch: 2, Selection Ended at: 2022-02-03 03:29:03.774695\n", + "8 9040 9070\n", + "After augmentation, size of train_set: 170 lake set: 9040 val set: 126\n", + "Selection Epoch 2 Training epoch [ 1 ] Training Acc: 0.21176470588235294\n", + "Selection Epoch 2 Training epoch [ 2 ] Training Acc: 0.3176470588235294\n", + 
"Selection Epoch 2 Training epoch [ 3 ] Training Acc: 0.25882352941176473\n", + "Selection Epoch 2 Training epoch [ 4 ] Training Acc: 0.45294117647058824\n", + "Selection Epoch 2 Training epoch [ 5 ] Training Acc: 0.7411764705882353\n", + "Selection Epoch 2 Training epoch [ 6 ] Training Acc: 0.788235294117647\n", + "Selection Epoch 2 Training epoch [ 7 ] Training Acc: 0.8176470588235294\n", + "Selection Epoch 2 Training epoch [ 8 ] Training Acc: 0.8823529411764706\n", + "Selection Epoch 2 Training epoch [ 9 ] Training Acc: 0.9529411764705882\n", + "Selection Epoch 2 Training epoch [ 10 ] Training Acc: 0.9411764705882353\n", + "Selection Epoch 2 Training epoch [ 11 ] Training Acc: 0.9705882352941176\n", + "Selection Epoch 2 Training epoch [ 12 ] Training Acc: 1.0\n", + "Epoch: 3 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.05312637076713145 1.0 1.606369137763977 0.6587301587301587 66.19305050373077 0.5618181818181818 6.6427648067474365\n", + "AL epoch: 3\n", + "val, test error% for class 0 : 36.36 25.67\n", + "val, test error% for class 1 : 0.0 25.0\n", + "val, test error% for class 2 : 60.0 31.33\n", + "val, test error% for class 3 : 9.09 18.67\n", + "val, test error% for class 4 : 72.73 91.0\n", + "val, test error% for class 5 : 80.0 79.33\n", + "val, test error% for class 6 : 23.08 42.67\n", + "val, test error% for class 7 : 36.36 41.33\n", + "val, test error% for class 8 : 0.0 26.67\n", + "val, test error% for class 9 : 50.0 59.0\n", + "val, test error% for class 10 : 23.08 41.33\n", + "29 / 30 idc points.\n", + "selEpoch: 3, Selection Ended at: 2022-02-03 03:29:32.487784\n", + "29 9010 9040\n", + "After augmentation, size of train_set: 200 lake set: 9010 val set: 155\n", + "Selection Epoch 3 Training epoch [ 1 ] Training Acc: 0.125\n", + "Selection Epoch 3 Training epoch [ 2 ] Training Acc: 0.145\n", + "Selection Epoch 3 Training epoch [ 3 ] Training Acc: 0.345\n", + "Selection Epoch 3 Training epoch [ 4 ] Training Acc: 0.405\n", + "Selection Epoch 3 Training epoch [ 5 ] Training Acc: 0.805\n", + "Selection Epoch 3 Training epoch [ 6 ] Training Acc: 0.985\n", + "Selection Epoch 3 Training epoch [ 7 ] Training Acc: 0.82\n", + "Selection Epoch 3 Training epoch [ 8 ] Training Acc: 0.94\n", + "Selection Epoch 3 Training epoch [ 9 ] Training Acc: 0.975\n", + "Selection Epoch 3 Training epoch [ 10 ] Training Acc: 0.975\n", + "Selection Epoch 3 Training epoch [ 11 ] Training Acc: 0.995\n", + "Epoch: 4 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.29857200535479933 0.995 0.9999412298202515 0.7548387096774194 59.562572956085205 0.5727272727272728 7.299763441085815\n", + "AL epoch: 4\n", + "val, test error% for class 0 : 18.18 41.0\n", + "val, test error% for class 1 : 25.0 54.0\n", + "val, test error% for class 2 : 23.08 21.0\n", + "val, test error% for class 3 : 16.67 37.33\n", + "val, test error% for class 4 : 75.0 50.0\n", + "val, test error% for class 5 : 29.41 52.67\n", + "val, test error% for class 6 : 25.0 44.67\n", + "val, test error% for class 7 : 6.67 19.67\n", + "val, test error% for class 8 : 0.0 32.0\n", + "val, test error% for class 9 : 15.79 34.67\n", + "val, test error% for class 10 : 40.0 83.0\n", + "23 / 30 idc points.\n", + "selEpoch: 4, Selection Ended at: 2022-02-03 03:30:01.826622\n", + "23 8980 9010\n", + "After augmentation, size of train_set: 230 lake set: 8980 val set: 178\n", + "Selection Epoch 4 Training epoch [ 1 ] Training Acc: 0.24347826086956523\n", + "Selection Epoch 4 Training epoch [ 2 ] Training Acc: 0.1782608695652174\n", + "Selection 
Epoch 4 Training epoch [ 3 ] Training Acc: 0.34782608695652173\n", + "Selection Epoch 4 Training epoch [ 4 ] Training Acc: 0.4782608695652174\n", + "Selection Epoch 4 Training epoch [ 5 ] Training Acc: 0.8695652173913043\n", + "Selection Epoch 4 Training epoch [ 6 ] Training Acc: 0.9434782608695652\n", + "Selection Epoch 4 Training epoch [ 7 ] Training Acc: 0.9695652173913043\n", + "Selection Epoch 4 Training epoch [ 8 ] Training Acc: 0.9695652173913043\n", + "Selection Epoch 4 Training epoch [ 9 ] Training Acc: 0.9956521739130435\n", + "Epoch: 5 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.25316934287548065 0.9956521739130435 0.6955233812332153 0.8595505617977528 51.87502561509609 0.67 6.787353277206421\n", + "AL epoch: 5\n", + "val, test error% for class 0 : 7.14 30.67\n", + "val, test error% for class 1 : 6.67 31.0\n", + "val, test error% for class 2 : 30.77 22.0\n", + "val, test error% for class 3 : 6.67 12.33\n", + "val, test error% for class 4 : 26.67 53.33\n", + "val, test error% for class 5 : 20.0 53.67\n", + "val, test error% for class 6 : 17.65 33.67\n", + "val, test error% for class 7 : 5.56 17.67\n", + "val, test error% for class 8 : 0.0 5.67\n", + "val, test error% for class 9 : 9.09 32.33\n", + "val, test error% for class 10 : 25.0 70.67\n", + "23 / 30 idc points.\n", + "selEpoch: 5, Selection Ended at: 2022-02-03 03:30:28.067272\n", + "23 8950 8980\n", + "After augmentation, size of train_set: 260 lake set: 8950 val set: 201\n", + "Selection Epoch 5 Training epoch [ 1 ] Training Acc: 0.1576923076923077\n", + "Selection Epoch 5 Training epoch [ 2 ] Training Acc: 0.1\n", + "Selection Epoch 5 Training epoch [ 3 ] Training Acc: 0.23076923076923078\n", + "Selection Epoch 5 Training epoch [ 4 ] Training Acc: 0.7384615384615385\n", + "Selection Epoch 5 Training epoch [ 5 ] Training Acc: 0.8384615384615385\n", + "Selection Epoch 5 Training epoch [ 6 ] Training Acc: 0.8192307692307692\n", + "Selection Epoch 5 Training epoch [ 7 ] Training Acc: 0.9538461538461539\n", + "Selection Epoch 5 Training epoch [ 8 ] Training Acc: 0.9807692307692307\n", + "Selection Epoch 5 Training epoch [ 9 ] Training Acc: 0.9923076923076923\n", + "Epoch: 6 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.44172421656548977 0.9923076923076923 0.6698916554450989 0.8507462686567164 45.18770349025726 0.6472727272727272 7.783966541290283\n", + "AL epoch: 6\n", + "val, test error% for class 0 : 6.25 12.67\n", + "val, test error% for class 1 : 5.88 39.33\n", + "val, test error% for class 2 : 26.67 24.0\n", + "val, test error% for class 3 : 6.67 22.67\n", + "val, test error% for class 4 : 22.73 56.0\n", + "val, test error% for class 5 : 21.74 53.67\n", + "val, test error% for class 6 : 23.81 57.33\n", + "val, test error% for class 7 : 5.56 17.0\n", + "val, test error% for class 8 : 0.0 6.33\n", + "val, test error% for class 9 : 4.17 19.67\n", + "val, test error% for class 10 : 35.29 79.33\n", + "22 / 30 idc points.\n", + "selEpoch: 6, Selection Ended at: 2022-02-03 03:30:58.134853\n", + "22 8920 8950\n", + "After augmentation, size of train_set: 290 lake set: 8920 val set: 223\n", + "Selection Epoch 6 Training epoch [ 1 ] Training Acc: 0.08275862068965517\n", + "Selection Epoch 6 Training epoch [ 2 ] Training Acc: 0.1103448275862069\n", + "Selection Epoch 6 Training epoch [ 3 ] Training Acc: 0.6448275862068965\n", + "Selection Epoch 6 Training epoch [ 4 ] Training Acc: 0.7379310344827587\n", + "Selection Epoch 6 Training epoch [ 5 ] Training Acc: 0.7344827586206897\n", + "Selection Epoch 6 Training 
epoch [ 6 ] Training Acc: 0.9379310344827586\n", + "Selection Epoch 6 Training epoch [ 7 ] Training Acc: 0.8206896551724138\n", + "Selection Epoch 6 Training epoch [ 8 ] Training Acc: 0.9862068965517241\n", + "Selection Epoch 6 Training epoch [ 9 ] Training Acc: 0.7551724137931034\n", + "Selection Epoch 6 Training epoch [ 10 ] Training Acc: 0.9310344827586207\n", + "Selection Epoch 6 Training epoch [ 11 ] Training Acc: 0.996551724137931\n", + "Epoch: 7 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.31972095812670887 0.996551724137931 0.5932512283325195 0.874439461883408 48.60236635059118 0.666060606060606 10.522771120071411\n", + "AL epoch: 7\n", + "val, test error% for class 0 : 12.5 42.0\n", + "val, test error% for class 1 : 0.0 23.67\n", + "val, test error% for class 2 : 50.0 65.0\n", + "val, test error% for class 3 : 0.0 14.0\n", + "val, test error% for class 4 : 16.0 64.0\n", + "val, test error% for class 5 : 21.74 45.67\n", + "val, test error% for class 6 : 21.74 41.67\n", + "val, test error% for class 7 : 5.56 14.0\n", + "val, test error% for class 8 : 0.0 0.67\n", + "val, test error% for class 9 : 0.0 21.0\n", + "val, test error% for class 10 : 14.29 35.67\n", + "29 / 30 idc points.\n", + "selEpoch: 7, Selection Ended at: 2022-02-03 03:31:31.103789\n", + "29 8890 8920\n", + "After augmentation, size of train_set: 320 lake set: 8890 val set: 252\n", + "Selection Epoch 7 Training epoch [ 1 ] Training Acc: 0.175\n", + "Selection Epoch 7 Training epoch [ 2 ] Training Acc: 0.415625\n", + "Selection Epoch 7 Training epoch [ 3 ] Training Acc: 0.43125\n", + "Selection Epoch 7 Training epoch [ 4 ] Training Acc: 0.5625\n", + "Selection Epoch 7 Training epoch [ 5 ] Training Acc: 0.871875\n", + "Selection Epoch 7 Training epoch [ 6 ] Training Acc: 0.925\n", + "Selection Epoch 7 Training epoch [ 7 ] Training Acc: 0.9875\n", + "Selection Epoch 7 Training epoch [ 8 ] Training Acc: 0.98125\n", + "Selection Epoch 7 Training epoch [ 9 ] Training Acc: 0.84375\n", + "Selection Epoch 7 Training epoch [ 10 ] Training Acc: 0.99375\n", + "Epoch: 8 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.5359387202188373 0.99375 0.47803544998168945 0.8888888888888888 44.27353589236736 0.6851515151515152 10.66312026977539\n", + "AL epoch: 8\n", + "val, test error% for class 0 : 10.53 12.33\n", + "val, test error% for class 1 : 4.35 27.67\n", + "val, test error% for class 2 : 16.67 17.67\n", + "val, test error% for class 3 : 0.0 30.0\n", + "val, test error% for class 4 : 24.14 60.33\n", + "val, test error% for class 5 : 28.0 72.0\n", + "val, test error% for class 6 : 3.57 21.67\n", + "val, test error% for class 7 : 19.05 38.67\n", + "val, test error% for class 8 : 0.0 3.33\n", + "val, test error% for class 9 : 3.23 10.33\n", + "val, test error% for class 10 : 8.33 52.33\n", + "17 / 30 idc points.\n", + "selEpoch: 8, Selection Ended at: 2022-02-03 03:32:04.442769\n", + "17 8860 8890\n", + "After augmentation, size of train_set: 350 lake set: 8860 val set: 269\n", + "Selection Epoch 8 Training epoch [ 1 ] Training Acc: 0.10571428571428572\n", + "Selection Epoch 8 Training epoch [ 2 ] Training Acc: 0.4114285714285714\n", + "Selection Epoch 8 Training epoch [ 3 ] Training Acc: 0.6314285714285715\n", + "Selection Epoch 8 Training epoch [ 4 ] Training Acc: 0.8285714285714286\n", + "Selection Epoch 8 Training epoch [ 5 ] Training Acc: 0.8942857142857142\n", + "Selection Epoch 8 Training epoch [ 6 ] Training Acc: 0.96\n", + "Selection Epoch 8 Training epoch [ 7 ] Training Acc: 0.94\n", + "Selection Epoch 8 
Training epoch [ 8 ] Training Acc: 0.96\n", + "Selection Epoch 8 Training epoch [ 9 ] Training Acc: 0.9828571428571429\n", + "Selection Epoch 8 Training epoch [ 10 ] Training Acc: 0.9971428571428571\n", + "Epoch: 9 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.49581311107613146 0.9971428571428571 0.3372265100479126 0.9182156133828996 35.31700824201107 0.7084848484848485 11.594537496566772\n", + "AL epoch: 9\n", + "val, test error% for class 0 : 5.0 29.67\n", + "val, test error% for class 1 : 8.0 27.33\n", + "val, test error% for class 2 : 10.0 21.0\n", + "val, test error% for class 3 : 5.26 22.33\n", + "val, test error% for class 4 : 13.33 47.33\n", + "val, test error% for class 5 : 29.63 63.67\n", + "val, test error% for class 6 : 3.33 19.67\n", + "val, test error% for class 7 : 0.0 7.0\n", + "val, test error% for class 8 : 0.0 6.33\n", + "val, test error% for class 9 : 3.12 24.0\n", + "val, test error% for class 10 : 7.69 52.33\n", + "16 / 30 idc points.\n", + "selEpoch: 9, Selection Ended at: 2022-02-03 03:32:38.434238\n", + "16 8830 8860\n", + "After augmentation, size of train_set: 380 lake set: 8830 val set: 285\n", + "Selection Epoch 9 Training epoch [ 1 ] Training Acc: 0.16842105263157894\n", + "Selection Epoch 9 Training epoch [ 2 ] Training Acc: 0.12631578947368421\n", + "Selection Epoch 9 Training epoch [ 3 ] Training Acc: 0.618421052631579\n", + "Selection Epoch 9 Training epoch [ 4 ] Training Acc: 0.5947368421052631\n", + "Selection Epoch 9 Training epoch [ 5 ] Training Acc: 0.9157894736842105\n", + "Selection Epoch 9 Training epoch [ 6 ] Training Acc: 0.9026315789473685\n", + "Selection Epoch 9 Training epoch [ 7 ] Training Acc: 0.9526315789473684\n", + "Selection Epoch 9 Training epoch [ 8 ] Training Acc: 0.9184210526315789\n", + "Selection Epoch 9 Training epoch [ 9 ] Training Acc: 0.9657894736842105\n", + "Selection Epoch 9 Training epoch [ 10 ] Training Acc: 0.9657894736842105\n", + "Selection Epoch 9 Training epoch [ 11 ] Training Acc: 0.9789473684210527\n", + "Selection Epoch 9 Training epoch [ 12 ] Training Acc: 0.9973684210526316\n", + "Epoch: 10 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.2166772201890126 0.9973684210526316 0.3834996521472931 0.9298245614035088 44.22295039333403 0.723030303030303 15.603134155273438\n", + "val, test error% for class 0 : 4.55 14.33\n", + "val, test error% for class 1 : 0.0 24.67\n", + "val, test error% for class 2 : 15.0 34.0\n", + "val, test error% for class 3 : 5.26 14.0\n", + "val, test error% for class 4 : 19.35 62.67\n", + "val, test error% for class 5 : 19.35 59.0\n", + "val, test error% for class 6 : 0.0 30.0\n", + "val, test error% for class 7 : 0.0 7.33\n", + "val, test error% for class 8 : 0.0 0.67\n", + "val, test error% for class 9 : 2.94 5.67\n", + "val, test error% for class 10 : 7.41 52.33\n", + "[[20.0, 30.67, 47.0, 18.0, 93.0, 60.67, 52.33, 14.67, 17.33, 62.0, 83.33, 45.36363636363637], [9.0, 28.0, 35.33, 22.67, 52.0, 67.33, 52.67, 36.0, 11.67, 55.67, 54.0, 38.57636363636364], [25.67, 25.0, 31.33, 18.67, 91.0, 79.33, 42.67, 41.33, 26.67, 59.0, 41.33, 43.81818181818182], [41.0, 54.0, 21.0, 37.33, 50.0, 52.67, 44.67, 19.67, 32.0, 34.67, 83.0, 42.728181818181824], [30.67, 31.0, 22.0, 12.33, 53.33, 53.67, 33.67, 17.67, 5.67, 32.33, 70.67, 33.0009090909091], [12.67, 39.33, 24.0, 22.67, 56.0, 53.67, 57.33, 17.0, 6.33, 19.67, 79.33, 35.27272727272727], [42.0, 23.67, 65.0, 14.0, 64.0, 45.67, 41.67, 14.0, 0.67, 21.0, 35.67, 33.395454545454555], [12.33, 27.67, 17.67, 30.0, 60.33, 72.0, 21.67, 38.67, 3.33, 
10.33, 52.33, 31.484545454545454], [29.67, 27.33, 21.0, 22.33, 47.33, 63.67, 19.67, 7.0, 6.33, 24.0, 52.33, 29.150909090909092], [14.33, 24.67, 34.0, 14.0, 62.67, 59.0, 30.0, 7.33, 0.67, 5.67, 52.33, 27.69727272727273]]\n", + "\n", + "\n" + ] + } + ], + "source": [ + "train_model_al(datkbuildPath, exePath, num_epochs, data_name, datadir, feature, model_name, budget, split_cfg, learning_rate, run, device, computeClassErrorLog, \"AL\",'badge')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Random" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "random random\n", + "num ood samples: 8000\n", + "CIFAR-10 Custom dataset stats: Train size: 110 Val size: 110 Lake size: 9100 Test set: 3300\n", + "selected classes are: [ 0 1 2 3 4 5 6 7 8 9 10]\n", + "Saving results to: ./SMI_active_learning_results/organmnist/ood/random/30/fkna_3\n", + "organmnist_ood_random_11_random_budget:30_epochs:10_linear:True_runsfkna_3\n", + "AL epoch: 0\n", + "initial training epoch\n", + "Init model loaded from disk, skipping init training: /mnt/data2/akshit/Organ/weights/organmnist_ood_ResNet18_0.01_10_10_11\n", + "AL epoch: 1\n", + "val, test error% for class 0 : 10.0 20.0\n", + "val, test error% for class 1 : 0.0 30.67\n", + "val, test error% for class 2 : 50.0 47.0\n", + "val, test error% for class 3 : 10.0 18.0\n", + "val, test error% for class 4 : 90.0 93.0\n", + "val, test error% for class 5 : 50.0 60.67\n", + "val, test error% for class 6 : 30.0 52.33\n", + "val, test error% for class 7 : 0.0 14.67\n", + "val, test error% for class 8 : 0.0 17.33\n", + "val, test error% for class 9 : 70.0 62.0\n", + "val, test error% for class 10 : 50.0 83.33\n", + "4 / 30 idc points.\n", + "selEpoch: 1, Selection Ended at: 2022-02-03 03:32:57.837804\n", + "4 9070 9100\n", + "After augmentation, size of train_set: 140 lake set: 9070 val set: 114\n", + "Selection Epoch 1 Training epoch [ 1 ] Training Acc: 0.19285714285714287\n", + "Selection Epoch 1 Training epoch [ 2 ] Training Acc: 0.2\n", + "Selection Epoch 1 Training epoch [ 3 ] Training Acc: 0.2857142857142857\n", + "Selection Epoch 1 Training epoch [ 4 ] Training Acc: 0.22142857142857142\n", + "Selection Epoch 1 Training epoch [ 5 ] Training Acc: 0.39285714285714285\n", + "Selection Epoch 1 Training epoch [ 6 ] Training Acc: 0.8571428571428571\n", + "Selection Epoch 1 Training epoch [ 7 ] Training Acc: 0.9357142857142857\n", + "Selection Epoch 1 Training epoch [ 8 ] Training Acc: 0.7428571428571429\n", + "Selection Epoch 1 Training epoch [ 9 ] Training Acc: 0.8928571428571429\n", + "Selection Epoch 1 Training epoch [ 10 ] Training Acc: 1.0\n", + "Epoch: 2 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.2823886200785637 1.0 1.8169406652450562 0.6228070175438597 75.65299433469772 0.5281818181818182 4.645708322525024\n", + "AL epoch: 2\n", + "val, test error% for class 0 : 10.0 20.67\n", + "val, test error% for class 1 : 40.0 61.0\n", + "val, test error% for class 2 : 80.0 80.0\n", + "val, test error% for class 3 : 0.0 16.0\n", + "val, test error% for class 4 : 100.0 93.0\n", + "val, test error% for class 5 : 41.67 69.33\n", + "val, test error% for class 6 : 30.0 50.67\n", + "val, test error% for class 7 : 18.18 19.33\n", + "val, test error% for class 8 : 0.0 6.0\n", + "val, test error% for class 9 : 30.0 40.33\n", + "val, test error% for class 10 : 70.0 62.67\n", + "3 / 30 idc points.\n", + "selEpoch: 2, Selection Ended 
at: 2022-02-03 03:33:03.503099\n", + "3 9040 9070\n", + "After augmentation, size of train_set: 170 lake set: 9040 val set: 117\n", + "Selection Epoch 2 Training epoch [ 1 ] Training Acc: 0.3176470588235294\n", + "Selection Epoch 2 Training epoch [ 2 ] Training Acc: 0.2235294117647059\n", + "Selection Epoch 2 Training epoch [ 3 ] Training Acc: 0.25882352941176473\n", + "Selection Epoch 2 Training epoch [ 4 ] Training Acc: 0.34705882352941175\n", + "Selection Epoch 2 Training epoch [ 5 ] Training Acc: 0.7470588235294118\n", + "Selection Epoch 2 Training epoch [ 6 ] Training Acc: 0.9176470588235294\n", + "Selection Epoch 2 Training epoch [ 7 ] Training Acc: 0.9705882352941176\n", + "Selection Epoch 2 Training epoch [ 8 ] Training Acc: 0.9705882352941176\n", + "Selection Epoch 2 Training epoch [ 9 ] Training Acc: 0.7823529411764706\n", + "Selection Epoch 2 Training epoch [ 10 ] Training Acc: 1.0\n", + "Epoch: 3 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.11527949525043368 1.0 1.701460361480713 0.6153846153846154 72.40866848826408 0.5021212121212121 5.558394908905029\n", + "AL epoch: 3\n", + "val, test error% for class 0 : 20.0 21.33\n", + "val, test error% for class 1 : 0.0 32.67\n", + "val, test error% for class 2 : 40.0 34.0\n", + "val, test error% for class 3 : 50.0 52.67\n", + "val, test error% for class 4 : 60.0 88.67\n", + "val, test error% for class 5 : 23.08 36.0\n", + "val, test error% for class 6 : 100.0 88.67\n", + "val, test error% for class 7 : 25.0 43.0\n", + "val, test error% for class 8 : 0.0 11.0\n", + "val, test error% for class 9 : 50.0 60.67\n", + "val, test error% for class 10 : 70.0 79.0\n", + "2 / 30 idc points.\n", + "selEpoch: 3, Selection Ended at: 2022-02-03 03:33:10.092687\n", + "2 9010 9040\n", + "After augmentation, size of train_set: 200 lake set: 9010 val set: 119\n", + "Selection Epoch 3 Training epoch [ 1 ] Training Acc: 0.245\n", + "Selection Epoch 3 Training epoch [ 2 ] Training Acc: 0.145\n", + "Selection Epoch 3 Training epoch [ 3 ] Training Acc: 0.155\n", + "Selection Epoch 3 Training epoch [ 4 ] Training Acc: 0.86\n", + "Selection Epoch 3 Training epoch [ 5 ] Training Acc: 0.615\n", + "Selection Epoch 3 Training epoch [ 6 ] Training Acc: 0.84\n", + "Selection Epoch 3 Training epoch [ 7 ] Training Acc: 0.975\n", + "Selection Epoch 3 Training epoch [ 8 ] Training Acc: 0.89\n", + "Selection Epoch 3 Training epoch [ 9 ] Training Acc: 1.0\n", + "Epoch: 4 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.08777270140126348 1.0 1.6405795812606812 0.6722689075630253 61.9202715754509 0.5836363636363636 6.072718143463135\n", + "AL epoch: 4\n", + "val, test error% for class 0 : 20.0 24.67\n", + "val, test error% for class 1 : 0.0 39.33\n", + "val, test error% for class 2 : 40.0 23.0\n", + "val, test error% for class 3 : 10.0 15.0\n", + "val, test error% for class 4 : 80.0 65.67\n", + "val, test error% for class 5 : 40.0 77.0\n", + "val, test error% for class 6 : 60.0 64.33\n", + "val, test error% for class 7 : 0.0 7.33\n", + "val, test error% for class 8 : 9.09 11.33\n", + "val, test error% for class 9 : 70.0 82.33\n", + "val, test error% for class 10 : 40.0 48.0\n", + "5 / 30 idc points.\n", + "selEpoch: 4, Selection Ended at: 2022-02-03 03:33:17.194355\n", + "5 8980 9010\n", + "After augmentation, size of train_set: 230 lake set: 8980 val set: 124\n", + "Selection Epoch 4 Training epoch [ 1 ] Training Acc: 0.3217391304347826\n", + "Selection Epoch 4 Training epoch [ 2 ] Training Acc: 0.24347826086956523\n", + "Selection Epoch 4 Training epoch [ 3 
] Training Acc: 0.5695652173913044\n", + "Selection Epoch 4 Training epoch [ 4 ] Training Acc: 0.45652173913043476\n", + "Selection Epoch 4 Training epoch [ 5 ] Training Acc: 0.7956521739130434\n", + "Selection Epoch 4 Training epoch [ 6 ] Training Acc: 0.9\n", + "Selection Epoch 4 Training epoch [ 7 ] Training Acc: 0.7391304347826086\n", + "Selection Epoch 4 Training epoch [ 8 ] Training Acc: 0.9521739130434783\n", + "Selection Epoch 4 Training epoch [ 9 ] Training Acc: 0.8695652173913043\n", + "Selection Epoch 4 Training epoch [ 10 ] Training Acc: 0.9521739130434783\n", + "Selection Epoch 4 Training epoch [ 11 ] Training Acc: 0.9652173913043478\n", + "Selection Epoch 4 Training epoch [ 12 ] Training Acc: 0.991304347826087\n", + "Epoch: 5 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.5379656704608351 0.991304347826087 1.728601336479187 0.6370967741935484 73.9620152413845 0.5533333333333333 9.124643087387085\n", + "AL epoch: 5\n", + "val, test error% for class 0 : 20.0 12.33\n", + "val, test error% for class 1 : 0.0 24.33\n", + "val, test error% for class 2 : 50.0 44.0\n", + "val, test error% for class 3 : 10.0 18.33\n", + "val, test error% for class 4 : 70.0 78.33\n", + "val, test error% for class 5 : 60.0 86.33\n", + "val, test error% for class 6 : 36.36 63.33\n", + "val, test error% for class 7 : 16.67 12.67\n", + "val, test error% for class 8 : 8.33 14.67\n", + "val, test error% for class 9 : 36.36 39.33\n", + "val, test error% for class 10 : 90.0 97.67\n", + "5 / 30 idc points.\n", + "selEpoch: 5, Selection Ended at: 2022-02-03 03:33:27.356518\n", + "5 8950 8980\n", + "After augmentation, size of train_set: 260 lake set: 8950 val set: 129\n", + "Selection Epoch 5 Training epoch [ 1 ] Training Acc: 0.4807692307692308\n", + "Selection Epoch 5 Training epoch [ 2 ] Training Acc: 0.11923076923076924\n", + "Selection Epoch 5 Training epoch [ 3 ] Training Acc: 0.3769230769230769\n", + "Selection Epoch 5 Training epoch [ 4 ] Training Acc: 0.9230769230769231\n", + "Selection Epoch 5 Training epoch [ 5 ] Training Acc: 0.6307692307692307\n", + "Selection Epoch 5 Training epoch [ 6 ] Training Acc: 0.4461538461538462\n", + "Selection Epoch 5 Training epoch [ 7 ] Training Acc: 0.9461538461538461\n", + "Selection Epoch 5 Training epoch [ 8 ] Training Acc: 0.9115384615384615\n", + "Selection Epoch 5 Training epoch [ 9 ] Training Acc: 0.9961538461538462\n", + "Epoch: 6 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.27840808196924627 0.9961538461538462 1.7526054382324219 0.6124031007751938 86.61560316383839 0.4672727272727273 7.9583353996276855\n", + "AL epoch: 6\n", + "val, test error% for class 0 : 60.0 62.0\n", + "val, test error% for class 1 : 8.33 59.0\n", + "val, test error% for class 2 : 41.67 25.33\n", + "val, test error% for class 3 : 50.0 59.67\n", + "val, test error% for class 4 : 40.0 40.67\n", + "val, test error% for class 5 : 29.41 56.33\n", + "val, test error% for class 6 : 66.67 77.0\n", + "val, test error% for class 7 : 8.33 16.67\n", + "val, test error% for class 8 : 0.0 6.33\n", + "val, test error% for class 9 : 90.91 97.0\n", + "val, test error% for class 10 : 50.0 86.0\n", + "5 / 30 idc points.\n", + "selEpoch: 6, Selection Ended at: 2022-02-03 03:33:36.340551\n", + "5 8920 8950\n", + "After augmentation, size of train_set: 290 lake set: 8920 val set: 134\n", + "Selection Epoch 6 Training epoch [ 1 ] Training Acc: 0.21379310344827587\n", + "Selection Epoch 6 Training epoch [ 2 ] Training Acc: 0.45517241379310347\n", + "Selection Epoch 6 Training epoch [ 3 ] 
Training Acc: 0.6103448275862069\n", + "Selection Epoch 6 Training epoch [ 4 ] Training Acc: 0.8241379310344827\n", + "Selection Epoch 6 Training epoch [ 5 ] Training Acc: 0.9275862068965517\n", + "Selection Epoch 6 Training epoch [ 6 ] Training Acc: 0.8896551724137931\n", + "Selection Epoch 6 Training epoch [ 7 ] Training Acc: 0.993103448275862\n", + "Epoch: 7 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.8904009722173214 0.993103448275862 1.7156916856765747 0.6044776119402985 77.57510636001825 0.45 6.686241626739502\n", + "AL epoch: 7\n", + "val, test error% for class 0 : 33.33 55.67\n", + "val, test error% for class 1 : 46.15 72.67\n", + "val, test error% for class 2 : 33.33 20.33\n", + "val, test error% for class 3 : 50.0 53.33\n", + "val, test error% for class 4 : 80.0 72.0\n", + "val, test error% for class 5 : 35.29 79.33\n", + "val, test error% for class 6 : 7.69 24.67\n", + "val, test error% for class 7 : 58.33 74.0\n", + "val, test error% for class 8 : 0.0 6.33\n", + "val, test error% for class 9 : 63.64 72.33\n", + "val, test error% for class 10 : 50.0 74.33\n", + "4 / 30 idc points.\n", + "selEpoch: 7, Selection Ended at: 2022-02-03 03:33:44.075883\n", + "4 8890 8920\n", + "After augmentation, size of train_set: 320 lake set: 8890 val set: 138\n", + "Selection Epoch 7 Training epoch [ 1 ] Training Acc: 0.225\n", + "Selection Epoch 7 Training epoch [ 2 ] Training Acc: 0.14375\n", + "Selection Epoch 7 Training epoch [ 3 ] Training Acc: 0.528125\n", + "Selection Epoch 7 Training epoch [ 4 ] Training Acc: 0.91875\n", + "Selection Epoch 7 Training epoch [ 5 ] Training Acc: 0.9\n", + "Selection Epoch 7 Training epoch [ 6 ] Training Acc: 0.81875\n", + "Selection Epoch 7 Training epoch [ 7 ] Training Acc: 0.965625\n", + "Selection Epoch 7 Training epoch [ 8 ] Training Acc: 0.9875\n", + "Selection Epoch 7 Training epoch [ 9 ] Training Acc: 0.946875\n", + "Selection Epoch 7 Training epoch [ 10 ] Training Acc: 0.996875\n", + "Epoch: 8 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.510515873786062 0.996875 0.8186928033828735 0.7681159420289855 47.45541213452816 0.6166666666666667 10.8457772731781\n", + "AL epoch: 8\n", + "val, test error% for class 0 : 16.67 21.67\n", + "val, test error% for class 1 : 0.0 31.33\n", + "val, test error% for class 2 : 58.33 36.33\n", + "val, test error% for class 3 : 9.09 24.67\n", + "val, test error% for class 4 : 80.0 87.0\n", + "val, test error% for class 5 : 27.78 54.0\n", + "val, test error% for class 6 : 38.46 52.0\n", + "val, test error% for class 7 : 0.0 8.33\n", + "val, test error% for class 8 : 0.0 7.0\n", + "val, test error% for class 9 : 8.33 37.33\n", + "val, test error% for class 10 : 30.0 62.0\n", + "4 / 30 idc points.\n", + "selEpoch: 8, Selection Ended at: 2022-02-03 03:33:55.961236\n", + "4 8860 8890\n", + "After augmentation, size of train_set: 350 lake set: 8860 val set: 142\n", + "Selection Epoch 8 Training epoch [ 1 ] Training Acc: 0.2742857142857143\n", + "Selection Epoch 8 Training epoch [ 2 ] Training Acc: 0.11428571428571428\n", + "Selection Epoch 8 Training epoch [ 3 ] Training Acc: 0.5971428571428572\n", + "Selection Epoch 8 Training epoch [ 4 ] Training Acc: 0.7685714285714286\n", + "Selection Epoch 8 Training epoch [ 5 ] Training Acc: 0.8685714285714285\n", + "Selection Epoch 8 Training epoch [ 6 ] Training Acc: 0.6914285714285714\n", + "Selection Epoch 8 Training epoch [ 7 ] Training Acc: 0.9428571428571428\n", + "Selection Epoch 8 Training epoch [ 8 ] Training Acc: 0.9771428571428571\n", + "Selection Epoch 8 
Training epoch [ 9 ] Training Acc: 1.0\n", + "Epoch: 9 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.4426519423723221 1.0 0.9385123252868652 0.795774647887324 51.3927758038044 0.6315151515151515 10.487995147705078\n", + "AL epoch: 9\n", + "val, test error% for class 0 : 50.0 59.67\n", + "val, test error% for class 1 : 0.0 39.67\n", + "val, test error% for class 2 : 16.67 21.33\n", + "val, test error% for class 3 : 18.18 29.33\n", + "val, test error% for class 4 : 60.0 72.67\n", + "val, test error% for class 5 : 22.22 49.0\n", + "val, test error% for class 6 : 13.33 36.33\n", + "val, test error% for class 7 : 16.67 20.33\n", + "val, test error% for class 8 : 0.0 4.33\n", + "val, test error% for class 9 : 15.38 30.0\n", + "val, test error% for class 10 : 27.27 42.67\n", + "5 / 30 idc points.\n", + "selEpoch: 9, Selection Ended at: 2022-02-03 03:34:07.488266\n", + "5 8830 8860\n", + "After augmentation, size of train_set: 380 lake set: 8830 val set: 147\n", + "Selection Epoch 9 Training epoch [ 1 ] Training Acc: 0.15526315789473685\n", + "Selection Epoch 9 Training epoch [ 2 ] Training Acc: 0.31842105263157894\n", + "Selection Epoch 9 Training epoch [ 3 ] Training Acc: 0.7526315789473684\n", + "Selection Epoch 9 Training epoch [ 4 ] Training Acc: 0.8131578947368421\n", + "Selection Epoch 9 Training epoch [ 5 ] Training Acc: 0.9131578947368421\n", + "Selection Epoch 9 Training epoch [ 6 ] Training Acc: 0.95\n", + "Selection Epoch 9 Training epoch [ 7 ] Training Acc: 0.9342105263157895\n", + "Selection Epoch 9 Training epoch [ 8 ] Training Acc: 0.9421052631578948\n", + "Selection Epoch 9 Training epoch [ 9 ] Training Acc: 0.9552631578947368\n", + "Selection Epoch 9 Training epoch [ 10 ] Training Acc: 0.9894736842105263\n", + "Selection Epoch 9 Training epoch [ 11 ] Training Acc: 0.9894736842105263\n", + "Selection Epoch 9 Training epoch [ 12 ] Training Acc: 1.0\n", + "Epoch: 10 FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time: 0.10701807384612039 1.0 0.7929274439811707 0.8163265306122449 52.15802972018719 0.6339393939393939 15.465835571289062\n", + "val, test error% for class 0 : 8.33 25.33\n", + "val, test error% for class 1 : 0.0 40.0\n", + "val, test error% for class 2 : 33.33 28.33\n", + "val, test error% for class 3 : 7.14 9.0\n", + "val, test error% for class 4 : 40.0 59.0\n", + "val, test error% for class 5 : 21.05 65.0\n", + "val, test error% for class 6 : 13.33 18.33\n", + "val, test error% for class 7 : 8.33 23.67\n", + "val, test error% for class 8 : 0.0 9.0\n", + "val, test error% for class 9 : 53.85 82.33\n", + "val, test error% for class 10 : 27.27 42.67\n", + "[[20.0, 30.67, 47.0, 18.0, 93.0, 60.67, 52.33, 14.67, 17.33, 62.0, 83.33, 45.36363636363637], [20.67, 61.0, 80.0, 16.0, 93.0, 69.33, 50.67, 19.33, 6.0, 40.33, 62.67, 47.18181818181818], [21.33, 32.67, 34.0, 52.67, 88.67, 36.0, 88.67, 43.0, 11.0, 60.67, 79.0, 49.789090909090916], [24.67, 39.33, 23.0, 15.0, 65.67, 77.0, 64.33, 7.33, 11.33, 82.33, 48.0, 41.63545454545454], [12.33, 24.33, 44.0, 18.33, 78.33, 86.33, 63.33, 12.67, 14.67, 39.33, 97.67, 44.665454545454544], [62.0, 59.0, 25.33, 59.67, 40.67, 56.33, 77.0, 16.67, 6.33, 97.0, 86.0, 53.27272727272727], [55.67, 72.67, 20.33, 53.33, 72.0, 79.33, 24.67, 74.0, 6.33, 72.33, 74.33, 54.99909090909091], [21.67, 31.33, 36.33, 24.67, 87.0, 54.0, 52.0, 8.33, 7.0, 37.33, 62.0, 38.33272727272727], [59.67, 39.67, 21.33, 29.33, 72.67, 49.0, 36.33, 20.33, 4.33, 30.0, 42.67, 36.848181818181814], [25.33, 40.0, 28.33, 9.0, 59.0, 65.0, 18.33, 23.67, 9.0, 82.33, 42.67, 
36.60545454545455]]\n", + "\n", + "\n" + ] + } + ], + "source": [ + "train_model_al(datkbuildPath, exePath, num_epochs, data_name, datadir, feature, model_name, budget, split_cfg, learning_rate, run, device, computeClassErrorLog, \"random\",'random')" + ] + } + ], + "metadata": { + "interpreter": { + "hash": "916dbcbb3f70747c44a77c7bcd40155683ae19c65e1c03b4aa3499c5328201f1" + }, + "kernelspec": { + "display_name": "Python 3.8.10 64-bit", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +}
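
The two notebook cells above invoke the same driver, train_model_al, and differ only in the last two arguments: the selection setting ("AL" vs. "random") and the strategy name ('badge' vs. 'random'). Below is a minimal sketch of that comparison loop, reusing the variables already defined in earlier cells of the notebook (datkbuildPath, exePath, split_cfg, and so on); it is a sketch, not a standalone script, and does not reproduce the exact settings of these runs.

# Sketch only: the configuration variables referenced here are defined in
# earlier cells of the notebook (paths, split_cfg, model/dataset settings).
strategies = [
    ("AL", "badge"),       # active-learning run with the 'badge' strategy
    ("random", "random"),  # random-sampling baseline
]

for setting, strategy in strategies:
    train_model_al(datkbuildPath, exePath, num_epochs, data_name, datadir,
                   feature, model_name, budget, split_cfg, learning_rate,
                   run, device, computeClassErrorLog, setting, strategy)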
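
Each selection round above also logs "val, test error% for class c" for the 11 classes in this OrganMNIST OOD setup. The helper below is an illustrative, self-contained reimplementation of that per-class error computation, not the repository's own logging code; the variable names in the usage comment are hypothetical.

import numpy as np

def per_class_error_percent(y_true, y_pred, num_classes):
    # Illustrative sketch: percentage of misclassified samples per class,
    # rounded to two decimals like the log lines above.
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    errors = []
    for c in range(num_classes):
        mask = (y_true == c)
        if mask.sum() == 0:
            errors.append(float("nan"))  # class absent from this split
        else:
            errors.append(round(100.0 * float((y_pred[mask] != c).mean()), 2))
    return errors

# Usage (hypothetical arrays of true labels and model predictions):
# print(per_class_error_percent(y_val_true, y_val_pred, num_classes=11))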