From 952c5af50375fed675a2526519b39b95db1b1fb3 Mon Sep 17 00:00:00 2001 From: tjddus9597 Date: Wed, 8 Apr 2020 00:15:44 +0900 Subject: [PATCH] upload code --- README.md | 143 ++++++++++ code/dataset/Inshop.py | 72 +++++ code/dataset/SOP.py | 20 ++ code/dataset/__init__.py | 16 ++ code/dataset/base.py | 45 +++ code/dataset/cars.py | 25 ++ code/dataset/cub.py | 25 ++ code/dataset/sampler.py | 31 ++ code/dataset/utils.py | 100 +++++++ code/evaluate.py | 156 +++++++++++ code/losses.py | 120 ++++++++ code/net/bn_inception.py | 530 +++++++++++++++++++++++++++++++++++ code/net/googlenet.py | 256 +++++++++++++++++ code/net/resnet.py | 254 +++++++++++++++++ code/train.py | 355 +++++++++++++++++++++++ code/utils.py | 166 +++++++++++ misc/Recall_Trainingtime.jpg | Bin 0 -> 80327 bytes 17 files changed, 2314 insertions(+) create mode 100644 README.md create mode 100644 code/dataset/Inshop.py create mode 100644 code/dataset/SOP.py create mode 100644 code/dataset/__init__.py create mode 100644 code/dataset/base.py create mode 100644 code/dataset/cars.py create mode 100644 code/dataset/cub.py create mode 100644 code/dataset/sampler.py create mode 100644 code/dataset/utils.py create mode 100644 code/evaluate.py create mode 100644 code/losses.py create mode 100644 code/net/bn_inception.py create mode 100644 code/net/googlenet.py create mode 100644 code/net/resnet.py create mode 100644 code/train.py create mode 100644 code/utils.py create mode 100644 misc/Recall_Trainingtime.jpg diff --git a/README.md b/README.md new file mode 100644 index 00000000..0245c859 --- /dev/null +++ b/README.md @@ -0,0 +1,143 @@ + +# Proxy Anchor Loss for Deep Metric Learning + +Official PyTorch implementation of CVPR 2020 paper [**Proxy Anchor Loss for Deep Metric Learning**](https://arxiv.org/abs/2003.13911). + +A standard embedding network trained with **Proxy-Anchor Loss** achieves state-of-the-art performance and most quickly converges . 
+ +This repository provides source code of experiments on four datasets (CUB-200-2011, Cars-196, Stanford Online Products and In-shop) and pretrained models. + +#### Accuracy in Recall@1 versus training time on the Cars-196 + +

graph

+ + + +## Requirements + +- Python3 +- PyTorch (> 1.0) +- NumPy +- tqdm +- wandb +- [Pytorch-Metric-Learning](https://github.com/KevinMusgrave/pytorch-metric-learning) + + + +## Datasets + +1. Download four public benchmarks for deep metric learning + - [CUB-200-2011](http://www.vision.caltech.edu/visipedia-data/CUB-200/images.tgz) + - Cars-196 ([Img](http://imagenet.stanford.edu/internal/car196/car_ims.tgz), [Annotation](http://imagenet.stanford.edu/internal/car196/cars_annos.mat)) + - [Stanford Online Products](ftp://cs.stanford.edu/cs/cvgl/Stanford_Online_Products.zip) + - In-shop Clothes Retrieval ([Link](http://mmlab.ie.cuhk.edu.hk/projects/DeepFashion.html)) + +2. Extract the tgz or zip file into `./data/` (Exceptionally, for Cars-196, put the files in a `./data/cars196`) + + + +## Training Embedding Network + +Note that a sufficiently large batch size and good parameters resulted in better overall performance than the performance described in the paper. You can download the trained model through the hyperlink in the table. 
+ +### CUB-200-2011 + +- Train an embedding network of Inception-BN (d=512) using **Proxy-Anchor loss** + +```bash +python train.py --gpu-id 0 --loss Proxy_Anchor --model bn_inception --embedding-size 512 --batch-size 180 --lr 1e-4 --dataset cub --warm 1 --bn-freeze 1 --lr-decay-step 10 +``` + +- Train an embedding network of ResNet-50 (d=512) using **Proxy-Anchor loss** + +```bash +python train.py --gpu-id 0 --loss Proxy_Anchor --model resnet50 --embedding-size 512 --batch-size 120 --lr 1e-4 --dataset cub --warm 5 --bn-freeze 1 --lr-decay-step 5 +``` + +| Method | Backbone | R@1 | R@2 | R@4 | R@8 | +|:-:|:-:|:-:|:-:|:-:|:-:| +| [Proxy-Anchor512](https://drive.google.com/file/d/1twaY6S2QIR8eanjDB6PoVPlCTsn-6ZJW/view?usp=sharing) | Inception-BN | 69.1 | 78.9 | 86.1 | 91.2 | +| [Proxy-Anchor512](https://drive.google.com/file/d/1s-cRSEL2PhPFL9S7bavkrD_c59bJXL_u/view?usp=sharing) | ResNet-50 | 69.9 | 79.6 | 86.6 | 91.4 | + +### Cars-196 + +- Train an embedding network of Inception-BN (d=512) using **Proxy-Anchor loss** + +```bash +python train.py --gpu-id 0 --loss Proxy_Anchor --model bn_inception --embedding-size 512 --batch-size 180 --lr 1e-4 --dataset cars --warm 1 --bn-freeze 1 --lr-decay-step 20 +``` + +- Train an embedding network of ResNet-50 (d=512) using **Proxy-Anchor loss** + +```bash +python train.py --gpu-id 0 --loss Proxy_Anchor --model resnet50 --embedding-size 512 --batch-size 120 --lr 1e-4 --dataset cars --warm 5 --bn-freeze 1 --lr-decay-step 10 +``` + +| Method | Backbone | R@1 | R@2 | R@4 | R@8 | +|:-:|:-:|:-:|:-:|:-:|:-:| +| [Proxy-Anchor512](https://drive.google.com/file/d/1wwN4ojmOCEAOaSYQHArzJbNdJQNvo4E1/view?usp=sharing) | Inception-BN | 86.4 | 91.9 | 95.0 | 97.0 | +| [Proxy-Anchor512](https://drive.google.com/file/d/1_4P90jZcDr0xolRduNpgJ9tX9HZ1Ih7n/view?usp=sharing) | ResNet-50 | 87.7 | 92.7 | 95.5 | 97.3 | + +### Stanford Online Products + +- Train an embedding network of Inception-BN (d=512) using **Proxy-Anchor loss** + +```bash +python train.py 
--gpu-id 0 --loss Proxy_Anchor --model bn_inception --embedding-size 512 --batch-size 180 --lr 6e-4 --dataset SOP --warm 1 --bn-freeze 0 --l2-norm 1 --lr-decay-step 20 --lr-decay-gamma 0.25 +``` + +| Method | Backbone | R@1 | R@10 | R@100 | R@1000 | +|:-:|:-:|:-:|:-:|:-:|:-:| +|[Proxy-Anchor512](https://drive.google.com/file/d/1hBdWhLP2J83JlOMRgZ4LLZY45L-9Gj2X/view?usp=sharing) | Inception-BN | 79.2 | 90.7 | 96.2 | 98.6 | + +### In-Shop Clothes Retrieval + +- Train a embedding network of Inception-BN (d=512) using **Proxy-Anchor loss** + +```bash +python train.py --gpu-id 0 --loss Proxy_Anchor --model bn_inception --embedding-size 512 --batch-size 180 --lr 6e-4 --dataset Inshop --warm 1 --bn-freeze 0 --l2-norm 1 --lr-decay-step 20 --lr-decay-gamma 0.25 +``` + +| Method | Backbone | R@1 | R@10 | R@20 | R@30 | R@40 | +|:-:|:-:|:-:|:-:|:-:|:-:|:-:| +| [Proxy-Anchor512](https://drive.google.com/file/d/1VE7psay7dblDyod8di72Sv7Z2xGtUGra/view?usp=sharing) | Inception-BN | 91.9 | 98.1 | 98.7 | 99.0 | 99.1 | + + + +## Evaluating Image Retrieval + +Follow the steps below to evaluate the provided pretrained model or your trained model. Trained best model will be saved in the `./logs/folder_name`. + +```bash +# The parameters should be changed according to the model to be evaluated. 
+python evaluate.py --gpu-id 0 --batch-size 120 --model bn_inception --embedding-size 512 --dataset cub --resume /set/your/model/path/best_model.pth +``` + + + +## Acknowledgements + +Our code is modified and adapted on these great repositories: + +- [No Fuss Distance Metric Learning using Proxies](https://github.com/dichotomies/proxy-nca) +- [PyTorch Metric learning](https://github.com/KevinMusgrave/pytorch-metric-learning) + + + +## Other Implementations + +- [Pytorch, Tensorflow and Mxnet implementations](https://github.com/geonm/proxy-anchor-loss) (Thank you for Geonmo Gu :D) + + + +## Citation + +If you use this method or this code in your research, please cite as: + + @inproceedings{kim2020proxy, + title={Proxy Anchor Loss for Deep Metric Learning}, + author={Kim, Sungyeon and Kim, Dongwon and Cho, Minsu and Kwak, Suha}, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, + year={2020} + } + diff --git a/code/dataset/Inshop.py b/code/dataset/Inshop.py new file mode 100644 index 00000000..bfa6c902 --- /dev/null +++ b/code/dataset/Inshop.py @@ -0,0 +1,72 @@ +from .base import * + +import numpy as np, os, sys, pandas as pd, csv, copy +import torch +import torchvision +import PIL.Image + + +class Inshop_Dataset(torch.utils.data.Dataset): + def __init__(self, root, mode, transform = None): + self.root = root + '/Inshop_Clothes' + self.mode = mode + self.transform = transform + self.train_ys, self.train_im_paths = [], [] + self.query_ys, self.query_im_paths = [], [] + self.gallery_ys, self.gallery_im_paths = [], [] + + data_info = np.array(pd.read_table(self.root +'/Eval/list_eval_partition.txt', header=1, delim_whitespace=True))[:,:] + #Separate into training dataset and query/gallery dataset for testing. 
+ train, query, gallery = data_info[data_info[:,2]=='train'][:,:2], data_info[data_info[:,2]=='query'][:,:2], data_info[data_info[:,2]=='gallery'][:,:2] + + #Generate conversions + lab_conv = {x:i for i,x in enumerate(np.unique(np.array([int(x.split('_')[-1]) for x in train[:,1]])))} + train[:,1] = np.array([lab_conv[int(x.split('_')[-1])] for x in train[:,1]]) + + lab_conv = {x:i for i,x in enumerate(np.unique(np.array([int(x.split('_')[-1]) for x in np.concatenate([query[:,1], gallery[:,1]])])))} + query[:,1] = np.array([lab_conv[int(x.split('_')[-1])] for x in query[:,1]]) + gallery[:,1] = np.array([lab_conv[int(x.split('_')[-1])] for x in gallery[:,1]]) + + #Generate Image-Dicts for training, query and gallery of shape {class_idx:[list of paths to images belong to this class] ...} + for img_path, key in train: + self.train_im_paths.append(os.path.join(self.root, 'Img', img_path)) + self.train_ys += [int(key)] + + for img_path, key in query: + self.query_im_paths.append(os.path.join(self.root, 'Img', img_path)) + self.query_ys += [int(key)] + + for img_path, key in gallery: + self.gallery_im_paths.append(os.path.join(self.root, 'Img', img_path)) + self.gallery_ys += [int(key)] + + if self.mode == 'train': + self.im_paths = self.train_im_paths + self.ys = self.train_ys + elif self.mode == 'query': + self.im_paths = self.query_im_paths + self.ys = self.query_ys + elif self.mode == 'gallery': + self.im_paths = self.gallery_im_paths + self.ys = self.gallery_ys + + def nb_classes(self): + return len(set(self.ys)) + + def __len__(self): + return len(self.ys) + + def __getitem__(self, index): + + def img_load(index): + im = PIL.Image.open(self.im_paths[index]) + # convert gray to rgb + if len(list(im.split())) == 1 : im = im.convert('RGB') + if self.transform is not None: + im = self.transform(im) + return im + + im = img_load(index) + target = self.ys[index] + + return im, target diff --git a/code/dataset/SOP.py b/code/dataset/SOP.py new file mode 100644 index 
00000000..be81919b --- /dev/null +++ b/code/dataset/SOP.py @@ -0,0 +1,20 @@ +from .base import * + +class SOP(BaseDataset): + def __init__(self, root, mode, transform = None): + self.root = root + '/Stanford_Online_Products' + self.mode = mode + self.transform = transform + if self.mode == 'train': + self.classes = range(0,11318) + elif self.mode == 'eval': + self.classes = range(11318,22634) + + BaseDataset.__init__(self, self.root, self.mode, self.transform) + metadata = open(os.path.join(self.root, 'Ebay_train.txt' if self.classes == range(0, 11318) else 'Ebay_test.txt')) + for i, (image_id, class_id, _, path) in enumerate(map(str.split, metadata)): + if i > 0: + if int(class_id)-1 in self.classes: + self.ys += [int(class_id)-1] + self.I += [int(image_id)-1] + self.im_paths.append(os.path.join(self.root, path)) \ No newline at end of file diff --git a/code/dataset/__init__.py b/code/dataset/__init__.py new file mode 100644 index 00000000..ee74a180 --- /dev/null +++ b/code/dataset/__init__.py @@ -0,0 +1,16 @@ +from .cars import Cars +from .cub import CUBirds +from .SOP import SOP +from .import utils +from .base import BaseDataset + + +_type = { + 'cars': Cars, + 'cub': CUBirds, + 'SOP': SOP +} + +def load(name, root, mode, transform = None): + return _type[name](root = root, mode = mode, transform = transform) + diff --git a/code/dataset/base.py b/code/dataset/base.py new file mode 100644 index 00000000..aaf450e2 --- /dev/null +++ b/code/dataset/base.py @@ -0,0 +1,45 @@ + +from __future__ import print_function +from __future__ import division + +import os +import torch +import torchvision +import numpy as np +import PIL.Image + +class BaseDataset(torch.utils.data.Dataset): + def __init__(self, root, mode, transform = None): + self.root = root + self.mode = mode + self.transform = transform + self.ys, self.im_paths, self.I = [], [], [] + + def nb_classes(self): + assert set(self.ys) == set(self.classes) + return len(self.classes) + + def __len__(self): + return 
len(self.ys) + + def __getitem__(self, index): + def img_load(index): + im = PIL.Image.open(self.im_paths[index]) + # convert gray to rgb + if len(list(im.split())) == 1 : im = im.convert('RGB') + if self.transform is not None: + im = self.transform(im) + return im + + im = img_load(index) + target = self.ys[index] + + return im, target + + def get_label(self, index): + return self.ys[index] + + def set_subset(self, I): + self.ys = [self.ys[i] for i in I] + self.I = [self.I[i] for i in I] + self.im_paths = [self.im_paths[i] for i in I] \ No newline at end of file diff --git a/code/dataset/cars.py b/code/dataset/cars.py new file mode 100644 index 00000000..98e8f112 --- /dev/null +++ b/code/dataset/cars.py @@ -0,0 +1,25 @@ +from .base import * +import scipy.io + +class Cars(BaseDataset): + def __init__(self, root, mode, transform = None): + self.root = root + '/cars196' + self.mode = mode + self.transform = transform + if self.mode == 'train': + self.classes = range(0,98) + elif self.mode == 'eval': + self.classes = range(98,196) + + BaseDataset.__init__(self, self.root, self.mode, self.transform) + annos_fn = 'cars_annos.mat' + cars = scipy.io.loadmat(os.path.join(self.root, annos_fn)) + ys = [int(a[5][0] - 1) for a in cars['annotations'][0]] + im_paths = [a[0][0] for a in cars['annotations'][0]] + index = 0 + for im_path, y in zip(im_paths, ys): + if y in self.classes: # choose only specified classes + self.im_paths.append(os.path.join(self.root, im_path)) + self.ys.append(y) + self.I += [index] + index += 1 \ No newline at end of file diff --git a/code/dataset/cub.py b/code/dataset/cub.py new file mode 100644 index 00000000..c6460bf0 --- /dev/null +++ b/code/dataset/cub.py @@ -0,0 +1,25 @@ +from .base import * + +class CUBirds(BaseDataset): + def __init__(self, root, mode, transform = None): + self.root = root + '/CUB_200_2011' + self.mode = mode + self.transform = transform + if self.mode == 'train': + self.classes = range(0,100) + elif self.mode == 'eval': + 
self.classes = range(100,200) + + BaseDataset.__init__(self, self.root, self.mode, self.transform) + index = 0 + for i in torchvision.datasets.ImageFolder(root = + os.path.join(self.root, 'images')).imgs: + # i[1]: label, i[0]: root + y = i[1] + # fn needed for removing non-images starting with `._` + fn = os.path.split(i[0])[1] + if y in self.classes and fn[:2] != '._': + self.ys += [y] + self.I += [index] + self.im_paths.append(os.path.join(self.root, i[0])) + index += 1 \ No newline at end of file diff --git a/code/dataset/sampler.py b/code/dataset/sampler.py new file mode 100644 index 00000000..6b291b55 --- /dev/null +++ b/code/dataset/sampler.py @@ -0,0 +1,31 @@ +import numpy as np +import torch +import torch.nn.functional as F +from torch.utils.data.sampler import Sampler +from tqdm import * + +class BalancedSampler(Sampler): + def __init__(self, data_source, batch_size, images_per_class=3): + self.data_source = data_source + self.ys = data_source.ys + self.num_groups = batch_size // images_per_class + self.batch_size = batch_size + self.num_instances = images_per_class + self.num_samples = len(self.ys) + self.num_classes = len(set(self.ys)) + + def __len__(self): + return self.num_samples + + def __iter__(self): + num_batches = len(self.data_source) // self.batch_size + ret = [] + while num_batches > 0: + sampled_classes = np.random.choice(self.num_classes, self.num_groups, replace=False) + for i in range(len(sampled_classes)): + ith_class_idxs = np.nonzero(np.array(self.ys) == sampled_classes[i])[0] + class_sel = np.random.choice(ith_class_idxs, size=self.num_instances, replace=True) + ret.extend(np.random.permutation(class_sel)) + num_batches -= 1 + return iter(ret) + \ No newline at end of file diff --git a/code/dataset/utils.py b/code/dataset/utils.py new file mode 100644 index 00000000..0d410f52 --- /dev/null +++ b/code/dataset/utils.py @@ -0,0 +1,100 @@ +from __future__ import print_function +from __future__ import division + +import torchvision +from 
torchvision import transforms +import PIL.Image +import torch +import random + +def std_per_channel(images): + images = torch.stack(images, dim = 0) + return images.view(3, -1).std(dim = 1) + + +def mean_per_channel(images): + images = torch.stack(images, dim = 0) + return images.view(3, -1).mean(dim = 1) + + +class Identity(): # used for skipping transforms + def __call__(self, im): + return im + +class print_shape(): + def __call__(self, im): + print(im.size) + return im + +class RGBToBGR(): + def __call__(self, im): + assert im.mode == 'RGB' + r, g, b = [im.getchannel(i) for i in range(3)] + # RGB mode also for BGR, `3x8-bit pixels, true color`, see PIL doc + im = PIL.Image.merge('RGB', [b, g, r]) + return im + +class pad_shorter(): + def __call__(self, im): + h,w = im.size[-2:] + s = max(h, w) + new_im = PIL.Image.new("RGB", (s, s)) + new_im.paste(im, ((s-h)//2, (s-w)//2)) + return new_im + + +class ScaleIntensities(): + def __init__(self, in_range, out_range): + """ Scales intensities. 
For example [-1, 1] -> [0, 255].""" + self.in_range = in_range + self.out_range = out_range + + def __oldcall__(self, tensor): + tensor.mul_(255) + return tensor + + def __call__(self, tensor): + tensor = ( + tensor - self.in_range[0] + ) / ( + self.in_range[1] - self.in_range[0] + ) * ( + self.out_range[1] - self.out_range[0] + ) + self.out_range[0] + return tensor + + +def make_transform(is_train = True, is_inception = False): + # Resolution Resize List : 256, 292, 361, 512 + # Resolution Crop List: 224, 256, 324, 448 + + resnet_sz_resize = 256 + resnet_sz_crop = 224 + resnet_mean = [0.485, 0.456, 0.406] + resnet_std = [0.229, 0.224, 0.225] + resnet_transform = transforms.Compose([ + transforms.RandomResizedCrop(resnet_sz_crop) if is_train else Identity(), + transforms.RandomHorizontalFlip() if is_train else Identity(), + transforms.Resize(resnet_sz_resize) if not is_train else Identity(), + transforms.CenterCrop(resnet_sz_crop) if not is_train else Identity(), + transforms.ToTensor(), + transforms.Normalize(mean=resnet_mean, std=resnet_std) + ]) + + inception_sz_resize = 256 + inception_sz_crop = 224 + inception_mean = [104, 117, 128] + inception_std = [1, 1, 1] + inception_transform = transforms.Compose( + [ + RGBToBGR(), + transforms.RandomResizedCrop(inception_sz_crop) if is_train else Identity(), + transforms.RandomHorizontalFlip() if is_train else Identity(), + transforms.Resize(inception_sz_resize) if not is_train else Identity(), + transforms.CenterCrop(inception_sz_crop) if not is_train else Identity(), + transforms.ToTensor(), + ScaleIntensities([0, 1], [0, 255]), + transforms.Normalize(mean=inception_mean, std=inception_std) + ]) + + return inception_transform if is_inception else resnet_transform \ No newline at end of file diff --git a/code/evaluate.py b/code/evaluate.py new file mode 100644 index 00000000..8a67c9fc --- /dev/null +++ b/code/evaluate.py @@ -0,0 +1,156 @@ +import torch, math, time, argparse, json, os, sys +import random, dataset, 
utils, losses, net +import numpy as np +import matplotlib.pyplot as plt + +from dataset.Inshop import Inshop_Dataset +from net.resnet import * +from net.googlenet import * +from net.bn_inception import * +from dataset import sampler +from torch.utils.data.sampler import BatchSampler +from torch.utils.data.dataloader import default_collate + +from tqdm import * +import wandb + +seed = 1 +random.seed(seed) +np.random.seed(seed) +torch.manual_seed(seed) +torch.cuda.manual_seed_all(seed) # set random seed for all gpus + +parser = argparse.ArgumentParser(description= + 'Official implementation of `Proxy Anchor Loss for Deep Metric Learning`' + + 'Our code is modified from `https://github.com/dichotomies/proxy-nca`' +) +parser.add_argument('--dataset', + default='cub', + help = 'Training dataset, e.g. cub, cars, SOP, Inshop' +) +parser.add_argument('--embedding-size', default = 512, type = int, + dest = 'sz_embedding', + help = 'Size of embedding that is appended to backbone model.' +) +parser.add_argument('--batch-size', default = 150, type = int, + dest = 'sz_batch', + help = 'Number of samples per batch.' +) +parser.add_argument('--gpu-id', default = 0, type = int, + help = 'ID of GPU that is used for training.' +) +parser.add_argument('--workers', default = 4, type = int, + dest = 'nb_workers', + help = 'Number of workers for dataloader.' 
+) +parser.add_argument('--model', default = 'bn_inception', + help = 'Model for training' +) +parser.add_argument('--l2-norm', default = 1, type = int, + help = 'L2 normlization' +) +parser.add_argument('--resume', default = '', + help = 'Path of resuming model' +) +parser.add_argument('--remark', default = '', + help = 'Any reamrk' +) + +args = parser.parse_args() + +if args.gpu_id != -1: + torch.cuda.set_device(args.gpu_id) + +# Data Root Directory +os.chdir('../data/') +data_root = os.getcwd() + +# Dataset Loader and Sampler +if args.dataset != 'Inshop': + ev_dataset = dataset.load( + name = args.dataset, + root = data_root, + mode = 'eval', + transform = dataset.utils.make_transform( + is_train = False, + is_inception = (args.model == 'bn_inception') + )) + + dl_ev = torch.utils.data.DataLoader( + ev_dataset, + batch_size = args.sz_batch, + shuffle = False, + num_workers = args.nb_workers, + pin_memory = True + ) + +else: + query_dataset = Inshop_Dataset( + root = data_root, + mode = 'query', + transform = dataset.utils.make_transform( + is_train = False, + is_inception = (args.model == 'bn_inception') + )) + + dl_query = torch.utils.data.DataLoader( + query_dataset, + batch_size = args.sz_batch, + shuffle = False, + num_workers = args.nb_workers, + pin_memory = True + ) + + gallery_dataset = Inshop_Dataset( + root = data_root, + mode = 'gallery', + transform = dataset.utils.make_transform( + is_train = False, + is_inception = (args.model == 'bn_inception') + )) + + dl_gallery = torch.utils.data.DataLoader( + gallery_dataset, + batch_size = args.sz_batch, + shuffle = False, + num_workers = args.nb_workers, + pin_memory = True + ) + +# Backbone Model +if args.model.find('googlenet')+1: + model = googlenet(embedding_size=args.sz_embedding, pretrained=True, is_norm=args.l2_norm, bn_freeze = 1) +elif args.model.find('bn_inception')+1: + model = bn_inception(embedding_size=args.sz_embedding, pretrained=True, is_norm=args.l2_norm, bn_freeze = 1) +elif 
args.model.find('resnet18')+1: + model = Resnet18(embedding_size=args.sz_embedding, pretrained=True, is_norm=args.l2_norm, bn_freeze = 1) +elif args.model.find('resnet50')+1: + model = Resnet50(embedding_size=args.sz_embedding, pretrained=True, is_norm=args.l2_norm, bn_freeze = 1) +elif args.model.find('resnet101')+1: + model = Resnet101(embedding_size=args.sz_embedding, pretrained=True, is_norm=args.l2_norm, bn_freeze = 1) +model = model.cuda() + +if args.gpu_id == -1: + model = nn.DataParallel(model) + +if os.path.isfile(args.resume): + print('=> loading checkpoint {}'.format(args.resume)) + checkpoint = torch.load(args.resume) + model.load_state_dict(checkpoint['model_state_dict']) +else: + print('=> No checkpoint found at {}'.format(args.resume)) + sys.exit(0) + +with torch.no_grad(): + print("**Evaluating...**") + if args.dataset == 'Inshop': + NMI = 0 + Recalls = utils.evaluate_cos_Inshop(model, dl_query, dl_gallery) + + elif args.dataset != 'SOP': + Recalls = utils.evaluate_cos(model, dl_ev) + + else: + Recalls = utils.evaluate_cos_SOP(model, dl_ev) + + \ No newline at end of file diff --git a/code/losses.py b/code/losses.py new file mode 100644 index 00000000..a6296525 --- /dev/null +++ b/code/losses.py @@ -0,0 +1,120 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import math +import random +from pytorch_metric_learning import miners, losses + +def binarize(T, nb_classes): + T = T.cpu().numpy() + import sklearn.preprocessing + T = sklearn.preprocessing.label_binarize( + T, classes = range(0, nb_classes) + ) + T = torch.FloatTensor(T).cuda() + return T + +def l2_norm(input): + input_size = input.size() + buffer = torch.pow(input, 2) + normp = torch.sum(buffer, 1).add_(1e-12) + norm = torch.sqrt(normp) + _output = torch.div(input, norm.view(-1, 1).expand_as(input)) + output = _output.view(input_size) + return output + +class Proxy_Anchor(torch.nn.Module): + def __init__(self, nb_classes, sz_embed, mrg = 0.1, alpha = 32): + 
torch.nn.Module.__init__(self) + # Proxy Anchor Initialization + self.proxies = torch.nn.Parameter(torch.randn(nb_classes, sz_embed).cuda()) + nn.init.kaiming_normal_(self.proxies, mode='fan_out') + + self.nb_classes = nb_classes + self.sz_embed = sz_embed + self.mrg = mrg + self.alpha = alpha + + def forward(self, X, T): + P = self.proxies + + cos = F.linear(l2_norm(X), l2_norm(P)) # Calcluate cosine similarity + P_one_hot = binarize(T = T, nb_classes = self.nb_classes) + N_one_hot = 1 - P_one_hot + + pos_exp = torch.exp(-self.alpha * (cos - self.mrg)) + neg_exp = torch.exp(self.alpha * (cos + self.mrg)) + + with_pos_proxies = torch.nonzero(P_one_hot.sum(dim = 0) != 0).squeeze(dim = 1) # The set of positive proxies of data in the batch + num_valid_proxies = len(with_pos_proxies) # The number of positive proxies + + P_sim_sum = torch.where(P_one_hot == 1, pos_exp, torch.zeros_like(pos_exp)).sum(dim=0) + N_sim_sum = torch.where(N_one_hot == 1, neg_exp, torch.zeros_like(neg_exp)).sum(dim=0) + + pos_term = torch.log(1 + P_sim_sum).sum() / num_valid_proxies + neg_term = torch.log(1 + N_sim_sum).sum() / self.nb_classes + loss = pos_term + neg_term + + return loss + +# We use PyTorch Metric Learning library for the following codes. +# Please refer to "https://github.com/KevinMusgrave/pytorch-metric-learning" for details. 
+class Proxy_NCA(torch.nn.Module): + def __init__(self, nb_classes, sz_embed, scale=32): + super(Proxy_NCA, self).__init__() + self.nb_classes = nb_classes + self.sz_embed = sz_embed + self.scale = scale + self.loss_func = losses.ProxyNCALoss(num_classes = self.nb_classes, embedding_size = self.sz_embed, softmax_scale = self.scale).cuda() + + def forward(self, embeddings, labels): + loss = self.loss_func(embeddings, labels) + return loss + +class MultiSimilarityLoss(torch.nn.Module): + def __init__(self, ): + super(MultiSimilarityLoss, self).__init__() + self.thresh = 0.5 + self.epsilon = 0.1 + self.scale_pos = 2 + self.scale_neg = 50 + + self.miner = miners.MultiSimilarityMiner(epsilon=self.epsilon) + self.loss_func = losses.MultiSimilarityLoss(self.scale_pos, self.scale_neg, self.thresh) + + def forward(self, embeddings, labels): + hard_pairs = self.miner(embeddings, labels) + loss = self.loss_func(embeddings, labels, hard_pairs) + return loss + +class ContrastiveLoss(nn.Module): + def __init__(self, margin=0.5, **kwargs): + super(ContrastiveLoss, self).__init__() + self.margin = margin + self.loss_func = losses.ContrastiveLoss(neg_margin=self.margin) + + def forward(self, embeddings, labels): + loss = self.loss_func(embeddings, labels) + return loss + +class TripletLoss(nn.Module): + def __init__(self, margin=0.1, **kwargs): + super(TripletLoss, self).__init__() + self.margin = margin + self.miner = miners.TripletMarginMiner(margin, type_of_triplets = 'semihard') + self.loss_func = losses.TripletMarginLoss(margin = self.margin) + + def forward(self, embeddings, labels): + hard_pairs = self.miner(embeddings, labels) + loss = self.loss_func(embeddings, labels, hard_pairs) + return loss + +class NPairLoss(nn.Module): + def __init__(self, l2_reg=0): + super(NPairLoss, self).__init__() + self.l2_reg = l2_reg + self.loss_func = losses.NPairsLoss(l2_reg_weight=self.l2_reg, normalize_embeddings = False) + + def forward(self, embeddings, labels): + loss = 
self.loss_func(embeddings, labels) + return loss \ No newline at end of file diff --git a/code/net/bn_inception.py b/code/net/bn_inception.py new file mode 100644 index 00000000..bbbc8306 --- /dev/null +++ b/code/net/bn_inception.py @@ -0,0 +1,530 @@ +import torch +import torch.nn as nn +import torch.nn.init as init +import torch.nn.functional as F +import torch.utils.model_zoo as model_zoo +import random + +__all__ = ['BNInception', 'bn_inception'] + +""" +Inception v2 was ported from Caffee to pytorch 0.2, see +https://github.com/Cadene/pretrained-models.pytorch. I've ported it to +PyTorch 0.4 for the Proxy-NCA implementation, see +https://github.com/dichotomies/proxy-nca. +""" + +class bn_inception(nn.Module): + def __init__(self, embedding_size, pretrained = True, is_norm=True, bn_freeze = True): + super(bn_inception, self).__init__() + self.model = BNInception(embedding_size, pretrained, is_norm) + if pretrained: +# weight = model_zoo.load_url('http://data.lip6.fr/cadene/pretrainedmodels/bn_inception-239d2248.pth') + weight = model_zoo.load_url('http://data.lip6.fr/cadene/pretrainedmodels/bn_inception-52deb4733.pth') + weight = {k: v.squeeze(0) if v.size(0) == 1 else v for k, v in weight.items()} + self.model.load_state_dict(weight) + + self.model.gap = nn.AdaptiveAvgPool2d(1) + self.model.gmp = nn.AdaptiveMaxPool2d(1) + + self.model.embedding = nn.Linear(self.model.num_ftrs, self.model.embedding_size) + init.kaiming_normal_(self.model.embedding.weight, mode='fan_out') + init.constant_(self.model.embedding.bias, 0) + + if bn_freeze: + for m in self.model.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() + m.weight.requires_grad_(False) + m.bias.requires_grad_(False) + + + def forward(self, input): + return self.model.forward(input) + +class BNInception(nn.Module): + + def __init__(self, embedding_size, pretrained = True, is_norm=True): + super(BNInception, self).__init__() + + inplace = True + self.embedding_size = embedding_size + self.num_ftrs = 1024 
+ + self.is_norm = is_norm + + self.conv1_7x7_s2 = nn.Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3)) + self.conv1_7x7_s2_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.9, affine=True) + self.conv1_relu_7x7 = nn.ReLU (inplace) + self.pool1_3x3_s2 = nn.MaxPool2d ((3, 3), stride=(2, 2), dilation=(1, 1), ceil_mode=True) + self.conv2_3x3_reduce = nn.Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1)) + self.conv2_3x3_reduce_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.9, affine=True) + self.conv2_relu_3x3_reduce = nn.ReLU (inplace) + self.conv2_3x3 = nn.Conv2d(64, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.conv2_3x3_bn = nn.BatchNorm2d(192, eps=1e-05, momentum=0.9, affine=True) + self.conv2_relu_3x3 = nn.ReLU (inplace) + self.pool2_3x3_s2 = nn.MaxPool2d ((3, 3), stride=(2, 2), dilation=(1, 1), ceil_mode=True) + self.inception_3a_1x1 = nn.Conv2d(192, 64, kernel_size=(1, 1), stride=(1, 1)) + self.inception_3a_1x1_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.9, affine=True) + self.inception_3a_relu_1x1 = nn.ReLU (inplace) + self.inception_3a_3x3_reduce = nn.Conv2d(192, 64, kernel_size=(1, 1), stride=(1, 1)) + self.inception_3a_3x3_reduce_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.9, affine=True) + self.inception_3a_relu_3x3_reduce = nn.ReLU (inplace) + self.inception_3a_3x3 = nn.Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.inception_3a_3x3_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.9, affine=True) + self.inception_3a_relu_3x3 = nn.ReLU (inplace) + self.inception_3a_double_3x3_reduce = nn.Conv2d(192, 64, kernel_size=(1, 1), stride=(1, 1)) + self.inception_3a_double_3x3_reduce_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.9, affine=True) + self.inception_3a_relu_double_3x3_reduce = nn.ReLU (inplace) + self.inception_3a_double_3x3_1 = nn.Conv2d(64, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.inception_3a_double_3x3_1_bn = nn.BatchNorm2d(96, eps=1e-05, momentum=0.9, affine=True) + 
self.inception_3a_relu_double_3x3_1 = nn.ReLU (inplace) + self.inception_3a_double_3x3_2 = nn.Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.inception_3a_double_3x3_2_bn = nn.BatchNorm2d(96, eps=1e-05, momentum=0.9, affine=True) + self.inception_3a_relu_double_3x3_2 = nn.ReLU (inplace) + self.inception_3a_pool = nn.AvgPool2d (3, stride=1, padding=1, ceil_mode=True, count_include_pad=True) + self.inception_3a_pool_proj = nn.Conv2d(192, 32, kernel_size=(1, 1), stride=(1, 1)) + self.inception_3a_pool_proj_bn = nn.BatchNorm2d(32, eps=1e-05, momentum=0.9, affine=True) + self.inception_3a_relu_pool_proj = nn.ReLU (inplace) + self.inception_3b_1x1 = nn.Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1)) + self.inception_3b_1x1_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.9, affine=True) + self.inception_3b_relu_1x1 = nn.ReLU (inplace) + self.inception_3b_3x3_reduce = nn.Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1)) + self.inception_3b_3x3_reduce_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.9, affine=True) + self.inception_3b_relu_3x3_reduce = nn.ReLU (inplace) + self.inception_3b_3x3 = nn.Conv2d(64, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.inception_3b_3x3_bn = nn.BatchNorm2d(96, eps=1e-05, momentum=0.9, affine=True) + self.inception_3b_relu_3x3 = nn.ReLU (inplace) + self.inception_3b_double_3x3_reduce = nn.Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1)) + self.inception_3b_double_3x3_reduce_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.9, affine=True) + self.inception_3b_relu_double_3x3_reduce = nn.ReLU (inplace) + self.inception_3b_double_3x3_1 = nn.Conv2d(64, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.inception_3b_double_3x3_1_bn = nn.BatchNorm2d(96, eps=1e-05, momentum=0.9, affine=True) + self.inception_3b_relu_double_3x3_1 = nn.ReLU (inplace) + self.inception_3b_double_3x3_2 = nn.Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.inception_3b_double_3x3_2_bn = 
nn.BatchNorm2d(96, eps=1e-05, momentum=0.9, affine=True) + self.inception_3b_relu_double_3x3_2 = nn.ReLU (inplace) + self.inception_3b_pool = nn.AvgPool2d (3, stride=1, padding=1, ceil_mode=True, count_include_pad=True) + self.inception_3b_pool_proj = nn.Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1)) + self.inception_3b_pool_proj_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.9, affine=True) + self.inception_3b_relu_pool_proj = nn.ReLU (inplace) + self.inception_3c_3x3_reduce = nn.Conv2d(320, 128, kernel_size=(1, 1), stride=(1, 1)) + self.inception_3c_3x3_reduce_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True) + self.inception_3c_relu_3x3_reduce = nn.ReLU (inplace) + self.inception_3c_3x3 = nn.Conv2d(128, 160, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) + self.inception_3c_3x3_bn = nn.BatchNorm2d(160, eps=1e-05, momentum=0.9, affine=True) + self.inception_3c_relu_3x3 = nn.ReLU (inplace) + self.inception_3c_double_3x3_reduce = nn.Conv2d(320, 64, kernel_size=(1, 1), stride=(1, 1)) + self.inception_3c_double_3x3_reduce_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.9, affine=True) + self.inception_3c_relu_double_3x3_reduce = nn.ReLU (inplace) + self.inception_3c_double_3x3_1 = nn.Conv2d(64, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.inception_3c_double_3x3_1_bn = nn.BatchNorm2d(96, eps=1e-05, momentum=0.9, affine=True) + self.inception_3c_relu_double_3x3_1 = nn.ReLU (inplace) + self.inception_3c_double_3x3_2 = nn.Conv2d(96, 96, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) + self.inception_3c_double_3x3_2_bn = nn.BatchNorm2d(96, eps=1e-05, momentum=0.9, affine=True) + self.inception_3c_relu_double_3x3_2 = nn.ReLU (inplace) + self.inception_3c_pool = nn.MaxPool2d ((3, 3), stride=(2, 2), dilation=(1, 1), ceil_mode=True) + self.inception_4a_1x1 = nn.Conv2d(576, 224, kernel_size=(1, 1), stride=(1, 1)) + self.inception_4a_1x1_bn = nn.BatchNorm2d(224, eps=1e-05, momentum=0.9, affine=True) + self.inception_4a_relu_1x1 = nn.ReLU 
(inplace) + self.inception_4a_3x3_reduce = nn.Conv2d(576, 64, kernel_size=(1, 1), stride=(1, 1)) + self.inception_4a_3x3_reduce_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.9, affine=True) + self.inception_4a_relu_3x3_reduce = nn.ReLU (inplace) + self.inception_4a_3x3 = nn.Conv2d(64, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.inception_4a_3x3_bn = nn.BatchNorm2d(96, eps=1e-05, momentum=0.9, affine=True) + self.inception_4a_relu_3x3 = nn.ReLU (inplace) + self.inception_4a_double_3x3_reduce = nn.Conv2d(576, 96, kernel_size=(1, 1), stride=(1, 1)) + self.inception_4a_double_3x3_reduce_bn = nn.BatchNorm2d(96, eps=1e-05, momentum=0.9, affine=True) + self.inception_4a_relu_double_3x3_reduce = nn.ReLU (inplace) + self.inception_4a_double_3x3_1 = nn.Conv2d(96, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.inception_4a_double_3x3_1_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True) + self.inception_4a_relu_double_3x3_1 = nn.ReLU (inplace) + self.inception_4a_double_3x3_2 = nn.Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.inception_4a_double_3x3_2_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True) + self.inception_4a_relu_double_3x3_2 = nn.ReLU (inplace) + self.inception_4a_pool = nn.AvgPool2d (3, stride=1, padding=1, ceil_mode=True, count_include_pad=True) + self.inception_4a_pool_proj = nn.Conv2d(576, 128, kernel_size=(1, 1), stride=(1, 1)) + self.inception_4a_pool_proj_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True) + self.inception_4a_relu_pool_proj = nn.ReLU (inplace) + self.inception_4b_1x1 = nn.Conv2d(576, 192, kernel_size=(1, 1), stride=(1, 1)) + self.inception_4b_1x1_bn = nn.BatchNorm2d(192, eps=1e-05, momentum=0.9, affine=True) + self.inception_4b_relu_1x1 = nn.ReLU (inplace) + self.inception_4b_3x3_reduce = nn.Conv2d(576, 96, kernel_size=(1, 1), stride=(1, 1)) + self.inception_4b_3x3_reduce_bn = nn.BatchNorm2d(96, eps=1e-05, momentum=0.9, affine=True) + 
self.inception_4b_relu_3x3_reduce = nn.ReLU (inplace) + self.inception_4b_3x3 = nn.Conv2d(96, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.inception_4b_3x3_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True) + self.inception_4b_relu_3x3 = nn.ReLU (inplace) + self.inception_4b_double_3x3_reduce = nn.Conv2d(576, 96, kernel_size=(1, 1), stride=(1, 1)) + self.inception_4b_double_3x3_reduce_bn = nn.BatchNorm2d(96, eps=1e-05, momentum=0.9, affine=True) + self.inception_4b_relu_double_3x3_reduce = nn.ReLU (inplace) + self.inception_4b_double_3x3_1 = nn.Conv2d(96, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.inception_4b_double_3x3_1_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True) + self.inception_4b_relu_double_3x3_1 = nn.ReLU (inplace) + self.inception_4b_double_3x3_2 = nn.Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.inception_4b_double_3x3_2_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True) + self.inception_4b_relu_double_3x3_2 = nn.ReLU (inplace) + self.inception_4b_pool = nn.AvgPool2d (3, stride=1, padding=1, ceil_mode=True, count_include_pad=True) + self.inception_4b_pool_proj = nn.Conv2d(576, 128, kernel_size=(1, 1), stride=(1, 1)) + self.inception_4b_pool_proj_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True) + self.inception_4b_relu_pool_proj = nn.ReLU (inplace) + self.inception_4c_1x1 = nn.Conv2d(576, 160, kernel_size=(1, 1), stride=(1, 1)) + self.inception_4c_1x1_bn = nn.BatchNorm2d(160, eps=1e-05, momentum=0.9, affine=True) + self.inception_4c_relu_1x1 = nn.ReLU (inplace) + self.inception_4c_3x3_reduce = nn.Conv2d(576, 128, kernel_size=(1, 1), stride=(1, 1)) + self.inception_4c_3x3_reduce_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True) + self.inception_4c_relu_3x3_reduce = nn.ReLU (inplace) + self.inception_4c_3x3 = nn.Conv2d(128, 160, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.inception_4c_3x3_bn = 
nn.BatchNorm2d(160, eps=1e-05, momentum=0.9, affine=True) + self.inception_4c_relu_3x3 = nn.ReLU (inplace) + self.inception_4c_double_3x3_reduce = nn.Conv2d(576, 128, kernel_size=(1, 1), stride=(1, 1)) + self.inception_4c_double_3x3_reduce_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True) + self.inception_4c_relu_double_3x3_reduce = nn.ReLU (inplace) + self.inception_4c_double_3x3_1 = nn.Conv2d(128, 160, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.inception_4c_double_3x3_1_bn = nn.BatchNorm2d(160, eps=1e-05, momentum=0.9, affine=True) + self.inception_4c_relu_double_3x3_1 = nn.ReLU (inplace) + self.inception_4c_double_3x3_2 = nn.Conv2d(160, 160, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.inception_4c_double_3x3_2_bn = nn.BatchNorm2d(160, eps=1e-05, momentum=0.9, affine=True) + self.inception_4c_relu_double_3x3_2 = nn.ReLU (inplace) + self.inception_4c_pool = nn.AvgPool2d (3, stride=1, padding=1, ceil_mode=True, count_include_pad=True) + self.inception_4c_pool_proj = nn.Conv2d(576, 128, kernel_size=(1, 1), stride=(1, 1)) + self.inception_4c_pool_proj_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True) + self.inception_4c_relu_pool_proj = nn.ReLU (inplace) + self.inception_4d_1x1 = nn.Conv2d(608, 96, kernel_size=(1, 1), stride=(1, 1)) + self.inception_4d_1x1_bn = nn.BatchNorm2d(96, eps=1e-05, momentum=0.9, affine=True) + self.inception_4d_relu_1x1 = nn.ReLU (inplace) + self.inception_4d_3x3_reduce = nn.Conv2d(608, 128, kernel_size=(1, 1), stride=(1, 1)) + self.inception_4d_3x3_reduce_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True) + self.inception_4d_relu_3x3_reduce = nn.ReLU (inplace) + self.inception_4d_3x3 = nn.Conv2d(128, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.inception_4d_3x3_bn = nn.BatchNorm2d(192, eps=1e-05, momentum=0.9, affine=True) + self.inception_4d_relu_3x3 = nn.ReLU (inplace) + self.inception_4d_double_3x3_reduce = nn.Conv2d(608, 160, kernel_size=(1, 1), 
stride=(1, 1)) + self.inception_4d_double_3x3_reduce_bn = nn.BatchNorm2d(160, eps=1e-05, momentum=0.9, affine=True) + self.inception_4d_relu_double_3x3_reduce = nn.ReLU (inplace) + self.inception_4d_double_3x3_1 = nn.Conv2d(160, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.inception_4d_double_3x3_1_bn = nn.BatchNorm2d(192, eps=1e-05, momentum=0.9, affine=True) + self.inception_4d_relu_double_3x3_1 = nn.ReLU (inplace) + self.inception_4d_double_3x3_2 = nn.Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.inception_4d_double_3x3_2_bn = nn.BatchNorm2d(192, eps=1e-05, momentum=0.9, affine=True) + self.inception_4d_relu_double_3x3_2 = nn.ReLU (inplace) + self.inception_4d_pool = nn.AvgPool2d (3, stride=1, padding=1, ceil_mode=True, count_include_pad=True) + self.inception_4d_pool_proj = nn.Conv2d(608, 128, kernel_size=(1, 1), stride=(1, 1)) + self.inception_4d_pool_proj_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True) + self.inception_4d_relu_pool_proj = nn.ReLU (inplace) + self.inception_4e_3x3_reduce = nn.Conv2d(608, 128, kernel_size=(1, 1), stride=(1, 1)) + self.inception_4e_3x3_reduce_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True) + self.inception_4e_relu_3x3_reduce = nn.ReLU (inplace) + self.inception_4e_3x3 = nn.Conv2d(128, 192, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) + self.inception_4e_3x3_bn = nn.BatchNorm2d(192, eps=1e-05, momentum=0.9, affine=True) + self.inception_4e_relu_3x3 = nn.ReLU (inplace) + self.inception_4e_double_3x3_reduce = nn.Conv2d(608, 192, kernel_size=(1, 1), stride=(1, 1)) + self.inception_4e_double_3x3_reduce_bn = nn.BatchNorm2d(192, eps=1e-05, momentum=0.9, affine=True) + self.inception_4e_relu_double_3x3_reduce = nn.ReLU (inplace) + self.inception_4e_double_3x3_1 = nn.Conv2d(192, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.inception_4e_double_3x3_1_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.9, affine=True) + 
self.inception_4e_relu_double_3x3_1 = nn.ReLU (inplace) + self.inception_4e_double_3x3_2 = nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) + self.inception_4e_double_3x3_2_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.9, affine=True) + self.inception_4e_relu_double_3x3_2 = nn.ReLU (inplace) + self.inception_4e_pool = nn.MaxPool2d ((3, 3), stride=(2, 2), dilation=(1, 1), ceil_mode=True) + self.inception_5a_1x1 = nn.Conv2d(1056, 352, kernel_size=(1, 1), stride=(1, 1)) + self.inception_5a_1x1_bn = nn.BatchNorm2d(352, eps=1e-05, momentum=0.9, affine=True) + self.inception_5a_relu_1x1 = nn.ReLU (inplace) + self.inception_5a_3x3_reduce = nn.Conv2d(1056, 192, kernel_size=(1, 1), stride=(1, 1)) + self.inception_5a_3x3_reduce_bn = nn.BatchNorm2d(192, eps=1e-05, momentum=0.9, affine=True) + self.inception_5a_relu_3x3_reduce = nn.ReLU (inplace) + self.inception_5a_3x3 = nn.Conv2d(192, 320, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.inception_5a_3x3_bn = nn.BatchNorm2d(320, eps=1e-05, momentum=0.9, affine=True) + self.inception_5a_relu_3x3 = nn.ReLU (inplace) + self.inception_5a_double_3x3_reduce = nn.Conv2d(1056, 160, kernel_size=(1, 1), stride=(1, 1)) + self.inception_5a_double_3x3_reduce_bn = nn.BatchNorm2d(160, eps=1e-05, momentum=0.9, affine=True) + self.inception_5a_relu_double_3x3_reduce = nn.ReLU (inplace) + self.inception_5a_double_3x3_1 = nn.Conv2d(160, 224, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.inception_5a_double_3x3_1_bn = nn.BatchNorm2d(224, eps=1e-05, momentum=0.9, affine=True) + self.inception_5a_relu_double_3x3_1 = nn.ReLU (inplace) + self.inception_5a_double_3x3_2 = nn.Conv2d(224, 224, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.inception_5a_double_3x3_2_bn = nn.BatchNorm2d(224, eps=1e-05, momentum=0.9, affine=True) + self.inception_5a_relu_double_3x3_2 = nn.ReLU (inplace) + self.inception_5a_pool = nn.AvgPool2d (3, stride=1, padding=1, ceil_mode=True, count_include_pad=True) + 
self.inception_5a_pool_proj = nn.Conv2d(1056, 128, kernel_size=(1, 1), stride=(1, 1)) + self.inception_5a_pool_proj_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True) + self.inception_5a_relu_pool_proj = nn.ReLU (inplace) + self.inception_5b_1x1 = nn.Conv2d(1024, 352, kernel_size=(1, 1), stride=(1, 1)) + self.inception_5b_1x1_bn = nn.BatchNorm2d(352, eps=1e-05, momentum=0.9, affine=True) + self.inception_5b_relu_1x1 = nn.ReLU (inplace) + self.inception_5b_3x3_reduce = nn.Conv2d(1024, 192, kernel_size=(1, 1), stride=(1, 1)) + self.inception_5b_3x3_reduce_bn = nn.BatchNorm2d(192, eps=1e-05, momentum=0.9, affine=True) + self.inception_5b_relu_3x3_reduce = nn.ReLU (inplace) + self.inception_5b_3x3 = nn.Conv2d(192, 320, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.inception_5b_3x3_bn = nn.BatchNorm2d(320, eps=1e-05, momentum=0.9, affine=True) + self.inception_5b_relu_3x3 = nn.ReLU (inplace) + self.inception_5b_double_3x3_reduce = nn.Conv2d(1024, 192, kernel_size=(1, 1), stride=(1, 1)) + self.inception_5b_double_3x3_reduce_bn = nn.BatchNorm2d(192, eps=1e-05, momentum=0.9, affine=True) + self.inception_5b_relu_double_3x3_reduce = nn.ReLU (inplace) + self.inception_5b_double_3x3_1 = nn.Conv2d(192, 224, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.inception_5b_double_3x3_1_bn = nn.BatchNorm2d(224, eps=1e-05, momentum=0.9, affine=True) + self.inception_5b_relu_double_3x3_1 = nn.ReLU (inplace) + self.inception_5b_double_3x3_2 = nn.Conv2d(224, 224, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + self.inception_5b_double_3x3_2_bn = nn.BatchNorm2d(224, eps=1e-05, momentum=0.9, affine=True) + self.inception_5b_relu_double_3x3_2 = nn.ReLU (inplace) + self.inception_5b_pool = nn.MaxPool2d ((3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1), ceil_mode=True) + self.inception_5b_pool_proj = nn.Conv2d(1024, 128, kernel_size=(1, 1), stride=(1, 1)) + self.inception_5b_pool_proj_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.9, affine=True) + 
self.inception_5b_relu_pool_proj = nn.ReLU (inplace) + self.global_pool = nn.AvgPool2d(7, stride=1, padding=0, ceil_mode=True, count_include_pad=True) + self.last_linear = nn.Linear(1024, 1000) + + def features(self, input): + conv1_7x7_s2_out = self.conv1_7x7_s2(input) + conv1_7x7_s2_bn_out = self.conv1_7x7_s2_bn(conv1_7x7_s2_out) + conv1_relu_7x7_out = self.conv1_relu_7x7(conv1_7x7_s2_bn_out) + pool1_3x3_s2_out = self.pool1_3x3_s2(conv1_7x7_s2_bn_out) + conv2_3x3_reduce_out = self.conv2_3x3_reduce(pool1_3x3_s2_out) + conv2_3x3_reduce_bn_out = self.conv2_3x3_reduce_bn(conv2_3x3_reduce_out) + conv2_relu_3x3_reduce_out = self.conv2_relu_3x3_reduce(conv2_3x3_reduce_bn_out) + conv2_3x3_out = self.conv2_3x3(conv2_3x3_reduce_bn_out) + conv2_3x3_bn_out = self.conv2_3x3_bn(conv2_3x3_out) + conv2_relu_3x3_out = self.conv2_relu_3x3(conv2_3x3_bn_out) + pool2_3x3_s2_out = self.pool2_3x3_s2(conv2_3x3_bn_out) + inception_3a_1x1_out = self.inception_3a_1x1(pool2_3x3_s2_out) + inception_3a_1x1_bn_out = self.inception_3a_1x1_bn(inception_3a_1x1_out) + inception_3a_relu_1x1_out = self.inception_3a_relu_1x1(inception_3a_1x1_bn_out) + inception_3a_3x3_reduce_out = self.inception_3a_3x3_reduce(pool2_3x3_s2_out) + inception_3a_3x3_reduce_bn_out = self.inception_3a_3x3_reduce_bn(inception_3a_3x3_reduce_out) + inception_3a_relu_3x3_reduce_out = self.inception_3a_relu_3x3_reduce(inception_3a_3x3_reduce_bn_out) + inception_3a_3x3_out = self.inception_3a_3x3(inception_3a_3x3_reduce_bn_out) + inception_3a_3x3_bn_out = self.inception_3a_3x3_bn(inception_3a_3x3_out) + inception_3a_relu_3x3_out = self.inception_3a_relu_3x3(inception_3a_3x3_bn_out) + inception_3a_double_3x3_reduce_out = self.inception_3a_double_3x3_reduce(pool2_3x3_s2_out) + inception_3a_double_3x3_reduce_bn_out = self.inception_3a_double_3x3_reduce_bn(inception_3a_double_3x3_reduce_out) + inception_3a_relu_double_3x3_reduce_out = self.inception_3a_relu_double_3x3_reduce(inception_3a_double_3x3_reduce_bn_out) + 
inception_3a_double_3x3_1_out = self.inception_3a_double_3x3_1(inception_3a_double_3x3_reduce_bn_out) + inception_3a_double_3x3_1_bn_out = self.inception_3a_double_3x3_1_bn(inception_3a_double_3x3_1_out) + inception_3a_relu_double_3x3_1_out = self.inception_3a_relu_double_3x3_1(inception_3a_double_3x3_1_bn_out) + inception_3a_double_3x3_2_out = self.inception_3a_double_3x3_2(inception_3a_double_3x3_1_bn_out) + inception_3a_double_3x3_2_bn_out = self.inception_3a_double_3x3_2_bn(inception_3a_double_3x3_2_out) + inception_3a_relu_double_3x3_2_out = self.inception_3a_relu_double_3x3_2(inception_3a_double_3x3_2_bn_out) + inception_3a_pool_out = self.inception_3a_pool(pool2_3x3_s2_out) + inception_3a_pool_proj_out = self.inception_3a_pool_proj(inception_3a_pool_out) + inception_3a_pool_proj_bn_out = self.inception_3a_pool_proj_bn(inception_3a_pool_proj_out) + inception_3a_relu_pool_proj_out = self.inception_3a_relu_pool_proj(inception_3a_pool_proj_bn_out) + inception_3a_output_out = torch.cat([inception_3a_1x1_bn_out,inception_3a_3x3_bn_out,inception_3a_double_3x3_2_bn_out,inception_3a_pool_proj_bn_out], 1) + inception_3b_1x1_out = self.inception_3b_1x1(inception_3a_output_out) + inception_3b_1x1_bn_out = self.inception_3b_1x1_bn(inception_3b_1x1_out) + inception_3b_relu_1x1_out = self.inception_3b_relu_1x1(inception_3b_1x1_bn_out) + inception_3b_3x3_reduce_out = self.inception_3b_3x3_reduce(inception_3a_output_out) + inception_3b_3x3_reduce_bn_out = self.inception_3b_3x3_reduce_bn(inception_3b_3x3_reduce_out) + inception_3b_relu_3x3_reduce_out = self.inception_3b_relu_3x3_reduce(inception_3b_3x3_reduce_bn_out) + inception_3b_3x3_out = self.inception_3b_3x3(inception_3b_3x3_reduce_bn_out) + inception_3b_3x3_bn_out = self.inception_3b_3x3_bn(inception_3b_3x3_out) + inception_3b_relu_3x3_out = self.inception_3b_relu_3x3(inception_3b_3x3_bn_out) + inception_3b_double_3x3_reduce_out = self.inception_3b_double_3x3_reduce(inception_3a_output_out) + 
inception_3b_double_3x3_reduce_bn_out = self.inception_3b_double_3x3_reduce_bn(inception_3b_double_3x3_reduce_out) + inception_3b_relu_double_3x3_reduce_out = self.inception_3b_relu_double_3x3_reduce(inception_3b_double_3x3_reduce_bn_out) + inception_3b_double_3x3_1_out = self.inception_3b_double_3x3_1(inception_3b_double_3x3_reduce_bn_out) + inception_3b_double_3x3_1_bn_out = self.inception_3b_double_3x3_1_bn(inception_3b_double_3x3_1_out) + inception_3b_relu_double_3x3_1_out = self.inception_3b_relu_double_3x3_1(inception_3b_double_3x3_1_bn_out) + inception_3b_double_3x3_2_out = self.inception_3b_double_3x3_2(inception_3b_double_3x3_1_bn_out) + inception_3b_double_3x3_2_bn_out = self.inception_3b_double_3x3_2_bn(inception_3b_double_3x3_2_out) + inception_3b_relu_double_3x3_2_out = self.inception_3b_relu_double_3x3_2(inception_3b_double_3x3_2_bn_out) + inception_3b_pool_out = self.inception_3b_pool(inception_3a_output_out) + inception_3b_pool_proj_out = self.inception_3b_pool_proj(inception_3b_pool_out) + inception_3b_pool_proj_bn_out = self.inception_3b_pool_proj_bn(inception_3b_pool_proj_out) + inception_3b_relu_pool_proj_out = self.inception_3b_relu_pool_proj(inception_3b_pool_proj_bn_out) + inception_3b_output_out = torch.cat([inception_3b_1x1_bn_out,inception_3b_3x3_bn_out,inception_3b_double_3x3_2_bn_out,inception_3b_pool_proj_bn_out], 1) + inception_3c_3x3_reduce_out = self.inception_3c_3x3_reduce(inception_3b_output_out) + inception_3c_3x3_reduce_bn_out = self.inception_3c_3x3_reduce_bn(inception_3c_3x3_reduce_out) + inception_3c_relu_3x3_reduce_out = self.inception_3c_relu_3x3_reduce(inception_3c_3x3_reduce_bn_out) + inception_3c_3x3_out = self.inception_3c_3x3(inception_3c_3x3_reduce_bn_out) + inception_3c_3x3_bn_out = self.inception_3c_3x3_bn(inception_3c_3x3_out) + inception_3c_relu_3x3_out = self.inception_3c_relu_3x3(inception_3c_3x3_bn_out) + inception_3c_double_3x3_reduce_out = self.inception_3c_double_3x3_reduce(inception_3b_output_out) + 
inception_3c_double_3x3_reduce_bn_out = self.inception_3c_double_3x3_reduce_bn(inception_3c_double_3x3_reduce_out) + inception_3c_relu_double_3x3_reduce_out = self.inception_3c_relu_double_3x3_reduce(inception_3c_double_3x3_reduce_bn_out) + inception_3c_double_3x3_1_out = self.inception_3c_double_3x3_1(inception_3c_double_3x3_reduce_bn_out) + inception_3c_double_3x3_1_bn_out = self.inception_3c_double_3x3_1_bn(inception_3c_double_3x3_1_out) + inception_3c_relu_double_3x3_1_out = self.inception_3c_relu_double_3x3_1(inception_3c_double_3x3_1_bn_out) + inception_3c_double_3x3_2_out = self.inception_3c_double_3x3_2(inception_3c_double_3x3_1_bn_out) + inception_3c_double_3x3_2_bn_out = self.inception_3c_double_3x3_2_bn(inception_3c_double_3x3_2_out) + inception_3c_relu_double_3x3_2_out = self.inception_3c_relu_double_3x3_2(inception_3c_double_3x3_2_bn_out) + inception_3c_pool_out = self.inception_3c_pool(inception_3b_output_out) + inception_3c_output_out = torch.cat([inception_3c_3x3_bn_out,inception_3c_double_3x3_2_bn_out,inception_3c_pool_out], 1) + inception_4a_1x1_out = self.inception_4a_1x1(inception_3c_output_out) + inception_4a_1x1_bn_out = self.inception_4a_1x1_bn(inception_4a_1x1_out) + inception_4a_relu_1x1_out = self.inception_4a_relu_1x1(inception_4a_1x1_bn_out) + inception_4a_3x3_reduce_out = self.inception_4a_3x3_reduce(inception_3c_output_out) + inception_4a_3x3_reduce_bn_out = self.inception_4a_3x3_reduce_bn(inception_4a_3x3_reduce_out) + inception_4a_relu_3x3_reduce_out = self.inception_4a_relu_3x3_reduce(inception_4a_3x3_reduce_bn_out) + inception_4a_3x3_out = self.inception_4a_3x3(inception_4a_3x3_reduce_bn_out) + inception_4a_3x3_bn_out = self.inception_4a_3x3_bn(inception_4a_3x3_out) + inception_4a_relu_3x3_out = self.inception_4a_relu_3x3(inception_4a_3x3_bn_out) + inception_4a_double_3x3_reduce_out = self.inception_4a_double_3x3_reduce(inception_3c_output_out) + inception_4a_double_3x3_reduce_bn_out = 
self.inception_4a_double_3x3_reduce_bn(inception_4a_double_3x3_reduce_out) + inception_4a_relu_double_3x3_reduce_out = self.inception_4a_relu_double_3x3_reduce(inception_4a_double_3x3_reduce_bn_out) + inception_4a_double_3x3_1_out = self.inception_4a_double_3x3_1(inception_4a_double_3x3_reduce_bn_out) + inception_4a_double_3x3_1_bn_out = self.inception_4a_double_3x3_1_bn(inception_4a_double_3x3_1_out) + inception_4a_relu_double_3x3_1_out = self.inception_4a_relu_double_3x3_1(inception_4a_double_3x3_1_bn_out) + inception_4a_double_3x3_2_out = self.inception_4a_double_3x3_2(inception_4a_double_3x3_1_bn_out) + inception_4a_double_3x3_2_bn_out = self.inception_4a_double_3x3_2_bn(inception_4a_double_3x3_2_out) + inception_4a_relu_double_3x3_2_out = self.inception_4a_relu_double_3x3_2(inception_4a_double_3x3_2_bn_out) + inception_4a_pool_out = self.inception_4a_pool(inception_3c_output_out) + inception_4a_pool_proj_out = self.inception_4a_pool_proj(inception_4a_pool_out) + inception_4a_pool_proj_bn_out = self.inception_4a_pool_proj_bn(inception_4a_pool_proj_out) + inception_4a_relu_pool_proj_out = self.inception_4a_relu_pool_proj(inception_4a_pool_proj_bn_out) + inception_4a_output_out = torch.cat([inception_4a_1x1_bn_out,inception_4a_3x3_bn_out,inception_4a_double_3x3_2_bn_out,inception_4a_pool_proj_bn_out], 1) + inception_4b_1x1_out = self.inception_4b_1x1(inception_4a_output_out) + inception_4b_1x1_bn_out = self.inception_4b_1x1_bn(inception_4b_1x1_out) + inception_4b_relu_1x1_out = self.inception_4b_relu_1x1(inception_4b_1x1_bn_out) + inception_4b_3x3_reduce_out = self.inception_4b_3x3_reduce(inception_4a_output_out) + inception_4b_3x3_reduce_bn_out = self.inception_4b_3x3_reduce_bn(inception_4b_3x3_reduce_out) + inception_4b_relu_3x3_reduce_out = self.inception_4b_relu_3x3_reduce(inception_4b_3x3_reduce_bn_out) + inception_4b_3x3_out = self.inception_4b_3x3(inception_4b_3x3_reduce_bn_out) + inception_4b_3x3_bn_out = self.inception_4b_3x3_bn(inception_4b_3x3_out) + 
inception_4b_relu_3x3_out = self.inception_4b_relu_3x3(inception_4b_3x3_bn_out) + inception_4b_double_3x3_reduce_out = self.inception_4b_double_3x3_reduce(inception_4a_output_out) + inception_4b_double_3x3_reduce_bn_out = self.inception_4b_double_3x3_reduce_bn(inception_4b_double_3x3_reduce_out) + inception_4b_relu_double_3x3_reduce_out = self.inception_4b_relu_double_3x3_reduce(inception_4b_double_3x3_reduce_bn_out) + inception_4b_double_3x3_1_out = self.inception_4b_double_3x3_1(inception_4b_double_3x3_reduce_bn_out) + inception_4b_double_3x3_1_bn_out = self.inception_4b_double_3x3_1_bn(inception_4b_double_3x3_1_out) + inception_4b_relu_double_3x3_1_out = self.inception_4b_relu_double_3x3_1(inception_4b_double_3x3_1_bn_out) + inception_4b_double_3x3_2_out = self.inception_4b_double_3x3_2(inception_4b_double_3x3_1_bn_out) + inception_4b_double_3x3_2_bn_out = self.inception_4b_double_3x3_2_bn(inception_4b_double_3x3_2_out) + inception_4b_relu_double_3x3_2_out = self.inception_4b_relu_double_3x3_2(inception_4b_double_3x3_2_bn_out) + inception_4b_pool_out = self.inception_4b_pool(inception_4a_output_out) + inception_4b_pool_proj_out = self.inception_4b_pool_proj(inception_4b_pool_out) + inception_4b_pool_proj_bn_out = self.inception_4b_pool_proj_bn(inception_4b_pool_proj_out) + inception_4b_relu_pool_proj_out = self.inception_4b_relu_pool_proj(inception_4b_pool_proj_bn_out) + inception_4b_output_out = torch.cat([inception_4b_1x1_bn_out,inception_4b_3x3_bn_out,inception_4b_double_3x3_2_bn_out,inception_4b_pool_proj_bn_out], 1) + inception_4c_1x1_out = self.inception_4c_1x1(inception_4b_output_out) + inception_4c_1x1_bn_out = self.inception_4c_1x1_bn(inception_4c_1x1_out) + inception_4c_relu_1x1_out = self.inception_4c_relu_1x1(inception_4c_1x1_bn_out) + inception_4c_3x3_reduce_out = self.inception_4c_3x3_reduce(inception_4b_output_out) + inception_4c_3x3_reduce_bn_out = self.inception_4c_3x3_reduce_bn(inception_4c_3x3_reduce_out) + inception_4c_relu_3x3_reduce_out = 
self.inception_4c_relu_3x3_reduce(inception_4c_3x3_reduce_bn_out) + inception_4c_3x3_out = self.inception_4c_3x3(inception_4c_3x3_reduce_bn_out) + inception_4c_3x3_bn_out = self.inception_4c_3x3_bn(inception_4c_3x3_out) + inception_4c_relu_3x3_out = self.inception_4c_relu_3x3(inception_4c_3x3_bn_out) + inception_4c_double_3x3_reduce_out = self.inception_4c_double_3x3_reduce(inception_4b_output_out) + inception_4c_double_3x3_reduce_bn_out = self.inception_4c_double_3x3_reduce_bn(inception_4c_double_3x3_reduce_out) + inception_4c_relu_double_3x3_reduce_out = self.inception_4c_relu_double_3x3_reduce(inception_4c_double_3x3_reduce_bn_out) + inception_4c_double_3x3_1_out = self.inception_4c_double_3x3_1(inception_4c_double_3x3_reduce_bn_out) + inception_4c_double_3x3_1_bn_out = self.inception_4c_double_3x3_1_bn(inception_4c_double_3x3_1_out) + inception_4c_relu_double_3x3_1_out = self.inception_4c_relu_double_3x3_1(inception_4c_double_3x3_1_bn_out) + inception_4c_double_3x3_2_out = self.inception_4c_double_3x3_2(inception_4c_double_3x3_1_bn_out) + inception_4c_double_3x3_2_bn_out = self.inception_4c_double_3x3_2_bn(inception_4c_double_3x3_2_out) + inception_4c_relu_double_3x3_2_out = self.inception_4c_relu_double_3x3_2(inception_4c_double_3x3_2_bn_out) + inception_4c_pool_out = self.inception_4c_pool(inception_4b_output_out) + inception_4c_pool_proj_out = self.inception_4c_pool_proj(inception_4c_pool_out) + inception_4c_pool_proj_bn_out = self.inception_4c_pool_proj_bn(inception_4c_pool_proj_out) + inception_4c_relu_pool_proj_out = self.inception_4c_relu_pool_proj(inception_4c_pool_proj_bn_out) + inception_4c_output_out = torch.cat([inception_4c_1x1_bn_out,inception_4c_3x3_bn_out,inception_4c_double_3x3_2_bn_out,inception_4c_pool_proj_bn_out], 1) + inception_4d_1x1_out = self.inception_4d_1x1(inception_4c_output_out) + inception_4d_1x1_bn_out = self.inception_4d_1x1_bn(inception_4d_1x1_out) + inception_4d_relu_1x1_out = 
self.inception_4d_relu_1x1(inception_4d_1x1_bn_out) + inception_4d_3x3_reduce_out = self.inception_4d_3x3_reduce(inception_4c_output_out) + inception_4d_3x3_reduce_bn_out = self.inception_4d_3x3_reduce_bn(inception_4d_3x3_reduce_out) + inception_4d_relu_3x3_reduce_out = self.inception_4d_relu_3x3_reduce(inception_4d_3x3_reduce_bn_out) + inception_4d_3x3_out = self.inception_4d_3x3(inception_4d_3x3_reduce_bn_out) + inception_4d_3x3_bn_out = self.inception_4d_3x3_bn(inception_4d_3x3_out) + inception_4d_relu_3x3_out = self.inception_4d_relu_3x3(inception_4d_3x3_bn_out) + inception_4d_double_3x3_reduce_out = self.inception_4d_double_3x3_reduce(inception_4c_output_out) + inception_4d_double_3x3_reduce_bn_out = self.inception_4d_double_3x3_reduce_bn(inception_4d_double_3x3_reduce_out) + inception_4d_relu_double_3x3_reduce_out = self.inception_4d_relu_double_3x3_reduce(inception_4d_double_3x3_reduce_bn_out) + inception_4d_double_3x3_1_out = self.inception_4d_double_3x3_1(inception_4d_double_3x3_reduce_bn_out) + inception_4d_double_3x3_1_bn_out = self.inception_4d_double_3x3_1_bn(inception_4d_double_3x3_1_out) + inception_4d_relu_double_3x3_1_out = self.inception_4d_relu_double_3x3_1(inception_4d_double_3x3_1_bn_out) + inception_4d_double_3x3_2_out = self.inception_4d_double_3x3_2(inception_4d_double_3x3_1_bn_out) + inception_4d_double_3x3_2_bn_out = self.inception_4d_double_3x3_2_bn(inception_4d_double_3x3_2_out) + inception_4d_relu_double_3x3_2_out = self.inception_4d_relu_double_3x3_2(inception_4d_double_3x3_2_bn_out) + inception_4d_pool_out = self.inception_4d_pool(inception_4c_output_out) + inception_4d_pool_proj_out = self.inception_4d_pool_proj(inception_4d_pool_out) + inception_4d_pool_proj_bn_out = self.inception_4d_pool_proj_bn(inception_4d_pool_proj_out) + inception_4d_relu_pool_proj_out = self.inception_4d_relu_pool_proj(inception_4d_pool_proj_bn_out) + inception_4d_output_out = 
torch.cat([inception_4d_1x1_bn_out,inception_4d_3x3_bn_out,inception_4d_double_3x3_2_bn_out,inception_4d_pool_proj_bn_out], 1) + inception_4e_3x3_reduce_out = self.inception_4e_3x3_reduce(inception_4d_output_out) + inception_4e_3x3_reduce_bn_out = self.inception_4e_3x3_reduce_bn(inception_4e_3x3_reduce_out) + inception_4e_relu_3x3_reduce_out = self.inception_4e_relu_3x3_reduce(inception_4e_3x3_reduce_bn_out) + inception_4e_3x3_out = self.inception_4e_3x3(inception_4e_3x3_reduce_bn_out) + inception_4e_3x3_bn_out = self.inception_4e_3x3_bn(inception_4e_3x3_out) + inception_4e_relu_3x3_out = self.inception_4e_relu_3x3(inception_4e_3x3_bn_out) + inception_4e_double_3x3_reduce_out = self.inception_4e_double_3x3_reduce(inception_4d_output_out) + inception_4e_double_3x3_reduce_bn_out = self.inception_4e_double_3x3_reduce_bn(inception_4e_double_3x3_reduce_out) + inception_4e_relu_double_3x3_reduce_out = self.inception_4e_relu_double_3x3_reduce(inception_4e_double_3x3_reduce_bn_out) + inception_4e_double_3x3_1_out = self.inception_4e_double_3x3_1(inception_4e_double_3x3_reduce_bn_out) + inception_4e_double_3x3_1_bn_out = self.inception_4e_double_3x3_1_bn(inception_4e_double_3x3_1_out) + inception_4e_relu_double_3x3_1_out = self.inception_4e_relu_double_3x3_1(inception_4e_double_3x3_1_bn_out) + inception_4e_double_3x3_2_out = self.inception_4e_double_3x3_2(inception_4e_double_3x3_1_bn_out) + inception_4e_double_3x3_2_bn_out = self.inception_4e_double_3x3_2_bn(inception_4e_double_3x3_2_out) + inception_4e_relu_double_3x3_2_out = self.inception_4e_relu_double_3x3_2(inception_4e_double_3x3_2_bn_out) + inception_4e_pool_out = self.inception_4e_pool(inception_4d_output_out) + inception_4e_output_out = torch.cat([inception_4e_3x3_bn_out,inception_4e_double_3x3_2_bn_out,inception_4e_pool_out], 1) + inception_5a_1x1_out = self.inception_5a_1x1(inception_4e_output_out) + inception_5a_1x1_bn_out = self.inception_5a_1x1_bn(inception_5a_1x1_out) + inception_5a_relu_1x1_out = 
self.inception_5a_relu_1x1(inception_5a_1x1_bn_out) + inception_5a_3x3_reduce_out = self.inception_5a_3x3_reduce(inception_4e_output_out) + inception_5a_3x3_reduce_bn_out = self.inception_5a_3x3_reduce_bn(inception_5a_3x3_reduce_out) + inception_5a_relu_3x3_reduce_out = self.inception_5a_relu_3x3_reduce(inception_5a_3x3_reduce_bn_out) + inception_5a_3x3_out = self.inception_5a_3x3(inception_5a_3x3_reduce_bn_out) + inception_5a_3x3_bn_out = self.inception_5a_3x3_bn(inception_5a_3x3_out) + inception_5a_relu_3x3_out = self.inception_5a_relu_3x3(inception_5a_3x3_bn_out) + inception_5a_double_3x3_reduce_out = self.inception_5a_double_3x3_reduce(inception_4e_output_out) + inception_5a_double_3x3_reduce_bn_out = self.inception_5a_double_3x3_reduce_bn(inception_5a_double_3x3_reduce_out) + inception_5a_relu_double_3x3_reduce_out = self.inception_5a_relu_double_3x3_reduce(inception_5a_double_3x3_reduce_bn_out) + inception_5a_double_3x3_1_out = self.inception_5a_double_3x3_1(inception_5a_double_3x3_reduce_bn_out) + inception_5a_double_3x3_1_bn_out = self.inception_5a_double_3x3_1_bn(inception_5a_double_3x3_1_out) + inception_5a_relu_double_3x3_1_out = self.inception_5a_relu_double_3x3_1(inception_5a_double_3x3_1_bn_out) + inception_5a_double_3x3_2_out = self.inception_5a_double_3x3_2(inception_5a_double_3x3_1_bn_out) + inception_5a_double_3x3_2_bn_out = self.inception_5a_double_3x3_2_bn(inception_5a_double_3x3_2_out) + inception_5a_relu_double_3x3_2_out = self.inception_5a_relu_double_3x3_2(inception_5a_double_3x3_2_bn_out) + inception_5a_pool_out = self.inception_5a_pool(inception_4e_output_out) + inception_5a_pool_proj_out = self.inception_5a_pool_proj(inception_5a_pool_out) + inception_5a_pool_proj_bn_out = self.inception_5a_pool_proj_bn(inception_5a_pool_proj_out) + inception_5a_relu_pool_proj_out = self.inception_5a_relu_pool_proj(inception_5a_pool_proj_bn_out) + inception_5a_output_out = 
torch.cat([inception_5a_1x1_bn_out,inception_5a_3x3_bn_out,inception_5a_double_3x3_2_bn_out,inception_5a_pool_proj_bn_out], 1) + inception_5b_1x1_out = self.inception_5b_1x1(inception_5a_output_out) + inception_5b_1x1_bn_out = self.inception_5b_1x1_bn(inception_5b_1x1_out) + inception_5b_relu_1x1_out = self.inception_5b_relu_1x1(inception_5b_1x1_bn_out) + inception_5b_3x3_reduce_out = self.inception_5b_3x3_reduce(inception_5a_output_out) + inception_5b_3x3_reduce_bn_out = self.inception_5b_3x3_reduce_bn(inception_5b_3x3_reduce_out) + inception_5b_relu_3x3_reduce_out = self.inception_5b_relu_3x3_reduce(inception_5b_3x3_reduce_bn_out) + inception_5b_3x3_out = self.inception_5b_3x3(inception_5b_3x3_reduce_bn_out) + inception_5b_3x3_bn_out = self.inception_5b_3x3_bn(inception_5b_3x3_out) + inception_5b_relu_3x3_out = self.inception_5b_relu_3x3(inception_5b_3x3_bn_out) + inception_5b_double_3x3_reduce_out = self.inception_5b_double_3x3_reduce(inception_5a_output_out) + inception_5b_double_3x3_reduce_bn_out = self.inception_5b_double_3x3_reduce_bn(inception_5b_double_3x3_reduce_out) + inception_5b_relu_double_3x3_reduce_out = self.inception_5b_relu_double_3x3_reduce(inception_5b_double_3x3_reduce_bn_out) + inception_5b_double_3x3_1_out = self.inception_5b_double_3x3_1(inception_5b_double_3x3_reduce_bn_out) + inception_5b_double_3x3_1_bn_out = self.inception_5b_double_3x3_1_bn(inception_5b_double_3x3_1_out) + inception_5b_relu_double_3x3_1_out = self.inception_5b_relu_double_3x3_1(inception_5b_double_3x3_1_bn_out) + inception_5b_double_3x3_2_out = self.inception_5b_double_3x3_2(inception_5b_double_3x3_1_bn_out) + inception_5b_double_3x3_2_bn_out = self.inception_5b_double_3x3_2_bn(inception_5b_double_3x3_2_out) + inception_5b_relu_double_3x3_2_out = self.inception_5b_relu_double_3x3_2(inception_5b_double_3x3_2_bn_out) + inception_5b_pool_out = self.inception_5b_pool(inception_5a_output_out) + inception_5b_pool_proj_out = self.inception_5b_pool_proj(inception_5b_pool_out) 
+ inception_5b_pool_proj_bn_out = self.inception_5b_pool_proj_bn(inception_5b_pool_proj_out) + inception_5b_relu_pool_proj_out = self.inception_5b_relu_pool_proj(inception_5b_pool_proj_bn_out) + inception_5b_output_out = torch.cat([inception_5b_1x1_bn_out,inception_5b_3x3_bn_out,inception_5b_double_3x3_2_bn_out,inception_5b_pool_proj_bn_out], 1) + return inception_5b_output_out + + def l2_norm(self,input): + input_size = input.size() + buffer = torch.pow(input, 2) + normp = torch.sum(buffer, 1).add_(1e-12) + norm = torch.sqrt(normp) + _output = torch.div(input, norm.view(-1, 1).expand_as(input)) + output = _output.view(input_size) + return output + + def forward(self, input): + x = self.features(input) + avg_x = self.gap(x) + max_x = self.gmp(x) + + x = avg_x + max_x + x = x.view(x.size(0), -1) + x = self.embedding(x) + + if self.is_norm: + x = self.l2_norm(x) + return x \ No newline at end of file diff --git a/code/net/googlenet.py b/code/net/googlenet.py new file mode 100644 index 00000000..4d9de087 --- /dev/null +++ b/code/net/googlenet.py @@ -0,0 +1,256 @@ +import torch +import torch.nn as nn +import math +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import torch.nn.init as init +import torch.utils.model_zoo as model_zoo + + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils import model_zoo + +__all__ = ['GoogLeNet', 'googlenet'] + +model_urls = { + # GoogLeNet ported from TensorFlow + 'googlenet': 'https://download.pytorch.org/models/googlenet-1378be20.pth', +} + +class GoogLeNet(nn.Module): + + def __init__(self, num_classes=1000, aux_logits=True, transform_input=False, init_weights=True): + super(GoogLeNet, self).__init__() + self.aux_logits = aux_logits + self.transform_input = transform_input + + self.conv1 = BasicConv2d(3, 64, kernel_size=7, stride=2, padding=3) + self.maxpool1 = nn.MaxPool2d(3, stride=2, ceil_mode=True) + self.conv2 = BasicConv2d(64, 64, kernel_size=1) + 
self.conv3 = BasicConv2d(64, 192, kernel_size=3, padding=1) + self.maxpool2 = nn.MaxPool2d(3, stride=2, ceil_mode=True) + + self.inception3a = Inception(192, 64, 96, 128, 16, 32, 32) + self.inception3b = Inception(256, 128, 128, 192, 32, 96, 64) + self.maxpool3 = nn.MaxPool2d(3, stride=2, ceil_mode=True) + + self.inception4a = Inception(480, 192, 96, 208, 16, 48, 64) + self.inception4b = Inception(512, 160, 112, 224, 24, 64, 64) + self.inception4c = Inception(512, 128, 128, 256, 24, 64, 64) + self.inception4d = Inception(512, 112, 144, 288, 32, 64, 64) + self.inception4e = Inception(528, 256, 160, 320, 32, 128, 128) + self.maxpool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.inception5a = Inception(832, 256, 160, 320, 32, 128, 128) + self.inception5b = Inception(832, 384, 192, 384, 48, 128, 128) + if aux_logits: + self.aux1 = InceptionAux(512, num_classes) + self.aux2 = InceptionAux(528, num_classes) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.dropout = nn.Dropout(0.4) + self.fc = nn.Linear(1024, num_classes) + + if init_weights: + self._initialize_weights() + + def _initialize_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.xavier_uniform_(m.weight) + if m.bias is not None: + nn.init.constant_(m.bias, 0.2) + elif isinstance(m, nn.Linear): + nn.init.xavier_uniform_(m.weight) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + def forward(self, x): + if self.transform_input: + x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5 + x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5 + x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5 + x = torch.cat((x_ch0, x_ch1, x_ch2), 1) + + x = self.conv1(x) + x = self.maxpool1(x) + x = self.conv2(x) + x = self.conv3(x) + x = self.maxpool2(x) + + x = self.inception3a(x) + x = self.inception3b(x) + x = self.maxpool3(x) + x = 
self.inception4a(x) + if self.training and self.aux_logits: + aux1 = self.aux1(x) + + x = self.inception4b(x) + x = self.inception4c(x) + x = self.inception4d(x) + if self.training and self.aux_logits: + aux2 = self.aux2(x) + + x = self.inception4e(x) + x = self.maxpool4(x) + x = self.inception5a(x) + x = self.inception5b(x) + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.dropout(x) + x = self.fc(x) + if self.training and self.aux_logits: + return aux1, aux2, x + return x + + +class Inception(nn.Module): + + def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj): + super(Inception, self).__init__() + + self.branch1 = BasicConv2d(in_channels, ch1x1, kernel_size=1) + + self.branch2 = nn.Sequential( + BasicConv2d(in_channels, ch3x3red, kernel_size=1), + BasicConv2d(ch3x3red, ch3x3, kernel_size=3, padding=1) + ) + + self.branch3 = nn.Sequential( + BasicConv2d(in_channels, ch5x5red, kernel_size=1), + BasicConv2d(ch5x5red, ch5x5, kernel_size=3, padding=1) + ) + + self.branch4 = nn.Sequential( + nn.MaxPool2d(kernel_size=3, stride=1, padding=1, ceil_mode=True), + BasicConv2d(in_channels, pool_proj, kernel_size=1) + ) + + def forward(self, x): + branch1 = self.branch1(x) + branch2 = self.branch2(x) + branch3 = self.branch3(x) + branch4 = self.branch4(x) + + outputs = [branch1, branch2, branch3, branch4] + return torch.cat(outputs, 1) + + +class InceptionAux(nn.Module): + + def __init__(self, in_channels, num_classes): + super(InceptionAux, self).__init__() + self.conv = BasicConv2d(in_channels, 128, kernel_size=1) + + self.fc1 = nn.Linear(2048, 1024) + self.fc2 = nn.Linear(1024, num_classes) + + def forward(self, x): + x = F.adaptive_avg_pool2d(x, (4, 4)) + x = self.conv(x) + x = x.view(x.size(0), -1) + x = F.relu(self.fc1(x), inplace=True) + x = F.dropout(x, 0.7, training=self.training) + x = self.fc2(x) + + return x + + +class BasicConv2d(nn.Module): + + def __init__(self, in_channels, out_channels, **kwargs): + 
super(BasicConv2d, self).__init__() + self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs) + self.bn = nn.BatchNorm2d(out_channels, eps=0.001) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return F.relu(x, inplace=True) + + +class googlenet(nn.Module): + def __init__(self,embedding_size, pretrained=True, is_norm=True, bn_freeze = True): + super(googlenet, self).__init__() + + self.model = GoogLeNet() + if pretrained: + self.model.load_state_dict(model_zoo.load_url(model_urls['googlenet']),strict=False) + + self.transform_input=False + self.is_norm = is_norm + self.embedding_size = embedding_size + self.num_ftrs = self.model.fc.in_features + self.model.embedding = nn.Linear(self.num_ftrs, self.embedding_size) + self.model.gap = nn.AdaptiveAvgPool2d(1) + self.model.gmp = nn.AdaptiveMaxPool2d(1) + + self._initialize_weights() + + if bn_freeze: + for m in self.model.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() + m.weight.requires_grad_(False) + m.bias.requires_grad_(False) + + + def l2_norm(self,input): + input_size = input.size() + buffer = torch.pow(input, 2) + normp = torch.sum(buffer, 1).add_(1e-5) + norm = torch.sqrt(normp) + _output = torch.div(input, norm.view(-1, 1).expand_as(input)) + output = _output.view(input_size) + + return output + + def forward(self, x): + if self.transform_input: + x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5 + x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5 + x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5 + x = torch.cat((x_ch0, x_ch1, x_ch2), 1) + + x = self.model.conv1(x) + x = self.model.maxpool1(x) + x = self.model.conv2(x) + x = self.model.conv3(x) + x = self.model.maxpool2(x) + + x = self.model.inception3a(x) + x = self.model.inception3b(x) + x = self.model.maxpool3(x) + x = self.model.inception4a(x) + + x = self.model.inception4b(x) + x = self.model.inception4c(x) + x = self.model.inception4d(x) + + 
x = self.model.inception4e(x) + x = self.model.maxpool4(x) + x = self.model.inception5a(x) + x = self.model.inception5b(x) + + avg_x = self.model.gap(x) + max_x = self.model.gmp(x) + + x = max_x + avg_x + x = x.view(x.size(0), -1) + x = self.model.embedding(x) + + if self.is_norm: + x = self.l2_norm(x) + + self.features = x + + return self.features + + def _initialize_weights(self): + init.kaiming_normal_(self.model.embedding.weight, mode='fan_out') + init.constant_(self.model.embedding.bias, 0) \ No newline at end of file diff --git a/code/net/resnet.py b/code/net/resnet.py new file mode 100644 index 00000000..41a32de3 --- /dev/null +++ b/code/net/resnet.py @@ -0,0 +1,254 @@ +import torch +import torch.nn as nn +import math +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import torch.nn.init as init +from torchvision.models import resnet18 +from torchvision.models import resnet34 +from torchvision.models import resnet50 +from torchvision.models import resnet101 +import torch.utils.model_zoo as model_zoo + +class Resnet18(nn.Module): + def __init__(self,embedding_size, pretrained=True, is_norm=True, bn_freeze = True): + super(Resnet18, self).__init__() + + self.model = resnet18(pretrained) + self.is_norm = is_norm + self.embedding_size = embedding_size + self.num_ftrs = self.model.fc.in_features + self.model.gap = nn.AdaptiveAvgPool2d(1) + self.model.gmp = nn.AdaptiveMaxPool2d(1) + + self.model.embedding = nn.Linear(self.num_ftrs, self.embedding_size) + self._initialize_weights() + + if bn_freeze: + for m in self.model.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() + m.weight.requires_grad_(False) + m.bias.requires_grad_(False) + + def l2_norm(self,input): + input_size = input.size() + buffer = torch.pow(input, 2) + + normp = torch.sum(buffer, 1).add_(1e-12) + norm = torch.sqrt(normp) + + _output = torch.div(input, norm.view(-1, 1).expand_as(input)) + + output = _output.view(input_size) + + return output + + def 
forward(self, x): + x = self.model.conv1(x) + x = self.model.bn1(x) + x = self.model.relu(x) + x = self.model.maxpool(x) + x = self.model.layer1(x) + x = self.model.layer2(x) + x = self.model.layer3(x) + x = self.model.layer4(x) + + avg_x = self.model.gap(x) + max_x = self.model.gmp(x) + + x = max_x + avg_x + + x = x.view(x.size(0), -1) + x = self.model.embedding(x) + + if self.is_norm: + x = self.l2_norm(x) + + return x + + def _initialize_weights(self): + init.kaiming_normal_(self.model.embedding.weight, mode='fan_out') + init.constant_(self.model.embedding.bias, 0) + +class Resnet34(nn.Module): + def __init__(self,embedding_size, pretrained=True, is_norm=True, bn_freeze = True): + super(Resnet34, self).__init__() + + self.model = resnet34(pretrained) + self.is_norm = is_norm + self.embedding_size = embedding_size + self.num_ftrs = self.model.fc.in_features + self.model.gap = nn.AdaptiveAvgPool2d(1) + self.model.gmp = nn.AdaptiveMaxPool2d(1) + + self.model.embedding = nn.Linear(self.num_ftrs, self.embedding_size) + self._initialize_weights() + + if bn_freeze: + for m in self.model.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() + m.weight.requires_grad_(False) + m.bias.requires_grad_(False) + + def l2_norm(self,input): + input_size = input.size() + buffer = torch.pow(input, 2) + + normp = torch.sum(buffer, 1).add_(1e-12) + norm = torch.sqrt(normp) + + _output = torch.div(input, norm.view(-1, 1).expand_as(input)) + + output = _output.view(input_size) + + return output + + def forward(self, x): + x = self.model.conv1(x) + x = self.model.bn1(x) + x = self.model.relu(x) + x = self.model.maxpool(x) + x = self.model.layer1(x) + x = self.model.layer2(x) + x = self.model.layer3(x) + x = self.model.layer4(x) + + avg_x = self.model.gap(x) + max_x = self.model.gmp(x) + + x = avg_x + max_x + + x = x.view(x.size(0), -1) + x = self.model.embedding(x) + + if self.is_norm: + x = self.l2_norm(x) + + return x + + def _initialize_weights(self): + 
init.kaiming_normal_(self.model.embedding.weight, mode='fan_out') + init.constant_(self.model.embedding.bias, 0) + +class Resnet50(nn.Module): + def __init__(self,embedding_size, pretrained=True, is_norm=True, bn_freeze = True): + super(Resnet50, self).__init__() + + self.model = resnet50(pretrained) + self.is_norm = is_norm + self.embedding_size = embedding_size + self.num_ftrs = self.model.fc.in_features + self.model.gap = nn.AdaptiveAvgPool2d(1) + self.model.gmp = nn.AdaptiveMaxPool2d(1) + + self.model.embedding = nn.Linear(self.num_ftrs, self.embedding_size) + self._initialize_weights() + + if bn_freeze: + for m in self.model.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() + m.weight.requires_grad_(False) + m.bias.requires_grad_(False) + + def l2_norm(self,input): + input_size = input.size() + buffer = torch.pow(input, 2) + + normp = torch.sum(buffer, 1).add_(1e-12) + norm = torch.sqrt(normp) + + _output = torch.div(input, norm.view(-1, 1).expand_as(input)) + + output = _output.view(input_size) + + return output + + def forward(self, x): + x = self.model.conv1(x) + x = self.model.bn1(x) + x = self.model.relu(x) + x = self.model.maxpool(x) + x = self.model.layer1(x) + x = self.model.layer2(x) + x = self.model.layer3(x) + x = self.model.layer4(x) + + avg_x = self.model.gap(x) + max_x = self.model.gmp(x) + + x = max_x + avg_x + x = x.view(x.size(0), -1) + x = self.model.embedding(x) + + if self.is_norm: + x = self.l2_norm(x) + + return x + + def _initialize_weights(self): + init.kaiming_normal_(self.model.embedding.weight, mode='fan_out') + init.constant_(self.model.embedding.bias, 0) + +class Resnet101(nn.Module): + def __init__(self,embedding_size, pretrained=True, is_norm=True, bn_freeze = True): + super(Resnet101, self).__init__() + + self.model = resnet101(pretrained) + self.is_norm = is_norm + self.embedding_size = embedding_size + self.num_ftrs = self.model.fc.in_features + self.model.gap = nn.AdaptiveAvgPool2d(1) + self.model.gmp = 
nn.AdaptiveMaxPool2d(1) + + self.model.embedding = nn.Linear(self.num_ftrs, self.embedding_size) + self._initialize_weights() + + if bn_freeze: + for m in self.model.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() + m.weight.requires_grad_(False) + m.bias.requires_grad_(False) + + def l2_norm(self,input): + input_size = input.size() + buffer = torch.pow(input, 2) + + normp = torch.sum(buffer, 1).add_(1e-12) + norm = torch.sqrt(normp) + + _output = torch.div(input, norm.view(-1, 1).expand_as(input)) + + output = _output.view(input_size) + + return output + + def forward(self, x): + x = self.model.conv1(x) + x = self.model.bn1(x) + x = self.model.relu(x) + x = self.model.maxpool(x) + x = self.model.layer1(x) + x = self.model.layer2(x) + x = self.model.layer3(x) + x = self.model.layer4(x) + + avg_x = self.model.gap(x) + max_x = self.model.gmp(x) + + x = max_x + avg_x + x = x.view(x.size(0), -1) + x = self.model.embedding(x) + + if self.is_norm: + x = self.l2_norm(x) + + return x + + def _initialize_weights(self): + init.kaiming_normal_(self.model.embedding.weight, mode='fan_out') + init.constant_(self.model.embedding.bias, 0) \ No newline at end of file diff --git a/code/train.py b/code/train.py new file mode 100644 index 00000000..cafa428b --- /dev/null +++ b/code/train.py @@ -0,0 +1,355 @@ +import torch, math, time, argparse, os +import random, dataset, utils, losses, net +import numpy as np +import matplotlib.pyplot as plt + +from dataset.Inshop import Inshop_Dataset +from net.resnet import * +from net.googlenet import * +from net.bn_inception import * +from dataset import sampler +from torch.utils.data.sampler import BatchSampler +from torch.utils.data.dataloader import default_collate + +from tqdm import * +import wandb + +seed = 1 +random.seed(seed) +np.random.seed(seed) +torch.manual_seed(seed) +torch.cuda.manual_seed_all(seed) # set random seed for all gpus + +parser = argparse.ArgumentParser(description= + 'Official implementation of `Proxy Anchor 
Loss for Deep Metric Learning`' + + 'Our code is modified from `https://github.com/dichotomies/proxy-nca`' +) +# export directory, training and val datasets, test datasets +parser.add_argument('--LOG_DIR', + default='../logs', + help = 'Path to log folder' +) +parser.add_argument('--dataset', + default='cub', + help = 'Training dataset, e.g. cub, cars, SOP, Inshop' +) +parser.add_argument('--embedding-size', default = 512, type = int, + dest = 'sz_embedding', + help = 'Size of embedding that is appended to backbone model.' +) +parser.add_argument('--batch-size', default = 150, type = int, + dest = 'sz_batch', + help = 'Number of samples per batch.' +) +parser.add_argument('--epochs', default = 60, type = int, + dest = 'nb_epochs', + help = 'Number of training epochs.' +) +parser.add_argument('--gpu-id', default = 0, type = int, + help = 'ID of GPU that is used for training.' +) +parser.add_argument('--workers', default = 4, type = int, + dest = 'nb_workers', + help = 'Number of workers for dataloader.' 
+) +parser.add_argument('--model', default = 'bn_inception', + help = 'Model for training' +) +parser.add_argument('--loss', default = 'Proxy_Anchor', + help = 'Criterion for training' +) +parser.add_argument('--optimizer', default = 'adamw', + help = 'Optimizer setting' +) +parser.add_argument('--lr', default = 1e-4, type =float, + help = 'Learning rate setting' +) +parser.add_argument('--weight-decay', default = 1e-4, type =float, + help = 'Weight decay setting' +) +parser.add_argument('--lr-decay-step', default = 10, type =int, + help = 'Learning decay step setting' +) +parser.add_argument('--lr-decay-gamma', default = 0.5, type =float, + help = 'Learning decay gamma setting' +) +parser.add_argument('--alpha', default = 32, type = float, + help = 'Scaling Parameter setting' +) +parser.add_argument('--mrg', default = 0.1, type = float, + help = 'Margin parameter setting' +) +parser.add_argument('--IPC', type = int, + help = 'Balanced sampling, images per class' +) +parser.add_argument('--warm', default = 1, type = int, + help = 'Warmup training epochs' +) +parser.add_argument('--bn-freeze', default = 1, type = int, + help = 'Batch normalization parameter freeze' +) +parser.add_argument('--l2-norm', default = 1, type = int, + help = 'L2 normlization' +) +parser.add_argument('--remark', default = '', + help = 'Any reamrk' +) + +args = parser.parse_args() + +if args.gpu_id != -1: + torch.cuda.set_device(args.gpu_id) + +# Directory for Log +LOG_DIR = args.LOG_DIR + '/logs_{}/{}_{}_embedding{}_alpha{}_mrg{}_{}_lr{}_batch{}{}'.format(args.dataset, args.model, args.loss, args.sz_embedding, args.alpha, + args.mrg, args.optimizer, args.lr, args.sz_batch, args.remark) +# Wandb Initialization +wandb.init(project=args.dataset + '_ProxyAnchor', notes=LOG_DIR) +wandb.config.update(args) + +os.chdir('../data/') +data_root = os.getcwd() +# Dataset Loader and Sampler +if args.dataset != 'Inshop': + trn_dataset = dataset.load( + name = args.dataset, + root = data_root, + mode = 
'train', + transform = dataset.utils.make_transform( + is_train = True, + is_inception = (args.model == 'bn_inception') + )) +else: + trn_dataset = Inshop_Dataset( + root = data_root, + mode = 'train', + transform = dataset.utils.make_transform( + is_train = True, + is_inception = (args.model == 'bn_inception') + )) + +if args.IPC: + balanced_sampler = sampler.BalancedSampler(trn_dataset, batch_size=args.sz_batch, images_per_class = args.IPC) + batch_sampler = BatchSampler(balanced_sampler, batch_size = args.sz_batch, drop_last = True) + dl_tr = torch.utils.data.DataLoader( + trn_dataset, + num_workers = args.nb_workers, + pin_memory = True, + batch_sampler = batch_sampler + ) + print('Balanced Sampling') + +else: + dl_tr = torch.utils.data.DataLoader( + trn_dataset, + batch_size = args.sz_batch, + shuffle = True, + num_workers = args.nb_workers, + drop_last = True, + pin_memory = True + ) + print('Random Sampling') + +if args.dataset != 'Inshop': + ev_dataset = dataset.load( + name = args.dataset, + root = data_root, + mode = 'eval', + transform = dataset.utils.make_transform( + is_train = False, + is_inception = (args.model == 'bn_inception') + )) + + dl_ev = torch.utils.data.DataLoader( + ev_dataset, + batch_size = args.sz_batch, + shuffle = False, + num_workers = args.nb_workers, + pin_memory = True + ) + +else: + query_dataset = Inshop_Dataset( + root = data_root, + mode = 'query', + transform = dataset.utils.make_transform( + is_train = False, + is_inception = (args.model == 'bn_inception') + )) + + dl_query = torch.utils.data.DataLoader( + query_dataset, + batch_size = args.sz_batch, + shuffle = False, + num_workers = args.nb_workers, + pin_memory = True + ) + + gallery_dataset = Inshop_Dataset( + root = data_root, + mode = 'gallery', + transform = dataset.utils.make_transform( + is_train = False, + is_inception = (args.model == 'bn_inception') + )) + + dl_gallery = torch.utils.data.DataLoader( + gallery_dataset, + batch_size = args.sz_batch, + shuffle = 
False, + num_workers = args.nb_workers, + pin_memory = True + ) + +nb_classes = trn_dataset.nb_classes() + +# Backbone Model +if args.model.find('googlenet')+1: + model = googlenet(embedding_size=args.sz_embedding, pretrained=True, is_norm=args.l2_norm, bn_freeze = args.bn_freeze) +elif args.model.find('bn_inception')+1: + model = bn_inception(embedding_size=args.sz_embedding, pretrained=True, is_norm=args.l2_norm, bn_freeze = args.bn_freeze) +elif args.model.find('resnet18')+1: + model = Resnet18(embedding_size=args.sz_embedding, pretrained=True, is_norm=args.l2_norm, bn_freeze = args.bn_freeze) +elif args.model.find('resnet50')+1: + model = Resnet50(embedding_size=args.sz_embedding, pretrained=True, is_norm=args.l2_norm, bn_freeze = args.bn_freeze) +elif args.model.find('resnet101')+1: + model = Resnet101(embedding_size=args.sz_embedding, pretrained=True, is_norm=args.l2_norm, bn_freeze = args.bn_freeze) +model = model.cuda() + +if args.gpu_id == -1: + model = nn.DataParallel(model) + +# DML Losses +if args.loss == 'Proxy_Anchor': + criterion = losses.Proxy_Anchor(nb_classes = nb_classes, sz_embed = args.sz_embedding, mrg = args.mrg, alpha = args.alpha).cuda() +elif args.loss == 'Proxy_NCA': + criterion = losses.Proxy_NCA().cuda() +elif args.loss == 'MS': + criterion = losses.MultiSimilarityLoss().cuda() +elif args.loss == 'Contrastive': + criterion = losses.ContrastiveLoss().cuda() +elif args.loss == 'Triplet': + criterion = losses.TripletLoss().cuda() +elif args.loss == 'NPair': + criterion = losses.NPairLoss().cuda() + +# Train Parameters +param_groups = [ + {'params': list(set(model.parameters()).difference(set(model.model.embedding.parameters()))) if args.gpu_id != -1 else + list(set(model.module.parameters()).difference(set(model.module.model.embedding.parameters())))}, + {'params': model.model.embedding.parameters() if args.gpu_id != -1 else model.module.model.embedding.parameters(), 'lr':float(args.lr) * 1}, +] +if args.loss == 'Proxy_Anchor': + 
param_groups.append({'params': criterion.proxies, 'lr':float(args.lr) * 100}) + +# Optimizer Setting +if args.optimizer == 'sgd': + opt = torch.optim.SGD(param_groups, lr=float(args.lr), weight_decay = args.weight_decay, momentum = 0.9, nesterov=True) +elif args.optimizer == 'adam': + opt = torch.optim.Adam(param_groups, lr=float(args.lr), weight_decay = args.weight_decay) +elif args.optimizer == 'rmsprop': + opt = torch.optim.RMSprop(param_groups, lr=float(args.lr), alpha=0.9, weight_decay = args.weight_decay, momentum = 0.9) +elif args.optimizer == 'adamw': + opt = torch.optim.AdamW(param_groups, lr=float(args.lr), weight_decay = args.weight_decay) + +scheduler = torch.optim.lr_scheduler.StepLR(opt, step_size=args.lr_decay_step, gamma = args.lr_decay_gamma) + +print("Training parameters: {}".format(vars(args))) +print("Training for {} epochs.".format(args.nb_epochs)) +losses_list = [] +best_recall=[0] +best_epoch = 0 + +for epoch in range(0, args.nb_epochs): + model.train() + bn_freeze = args.bn_freeze + if bn_freeze: + modules = model.model.modules() if args.gpu_id != -1 else model.module.model.modules() + for m in modules: + if isinstance(m, nn.BatchNorm2d): + m.eval() + + losses_per_epoch = [] + + # Warmup: Train only new params, helps stabilize learning. 
+ if args.warm > 0: + if args.gpu_id != -1: + unfreeze_model_param = list(model.model.embedding.parameters()) + list(criterion.parameters()) + else: + unfreeze_model_param = list(model.module.model.embedding.parameters()) + list(criterion.parameters()) + + if epoch == 0: + for param in list(set(model.parameters()).difference(set(unfreeze_model_param))): + param.requires_grad = False + if epoch == args.warm: + for param in list(set(model.parameters()).difference(set(unfreeze_model_param))): + param.requires_grad = True + + pbar = tqdm(enumerate(dl_tr)) + + for batch_idx, (x, y) in pbar: + m = model(x.squeeze().cuda()) + loss = criterion(m, y.squeeze().cuda()) + + opt.zero_grad() + loss.backward() + + torch.nn.utils.clip_grad_value_(model.parameters(), 10) + if args.loss == 'Proxy_Anchor': + torch.nn.utils.clip_grad_value_(criterion.parameters(), 10) + + losses_per_epoch.append(loss.data.cpu().numpy()) + opt.step() + + pbar.set_description( + 'Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f}'.format( + epoch, batch_idx + 1, len(dl_tr), + 100. 
* batch_idx / len(dl_tr), + loss.item())) + + losses_list.append(np.mean(losses_per_epoch)) + wandb.log({'loss': losses_list[-1]}, step=epoch) + scheduler.step() + + if(epoch >= 0): + with torch.no_grad(): + print("**Evaluating...**") + if args.dataset == 'Inshop': + NMI = 0 + Recalls = utils.evaluate_cos_Inshop(model, dl_query, dl_gallery) + elif args.dataset != 'SOP': + Recalls = utils.evaluate_cos(model, dl_ev) + else: + Recalls = utils.evaluate_cos_SOP(model, dl_ev) + + # Logging Evaluation Score + if args.dataset == 'Inshop': + for i, K in enumerate([1,10,20,30,40,50]): + wandb.log({"R@{}".format(K): Recalls[i]}, step=epoch) + elif args.dataset != 'SOP': + for i in range(6): + wandb.log({"R@{}".format(2**i): Recalls[i]}, step=epoch) + else: + for i in range(4): + wandb.log({"R@{}".format(10**i): Recalls[i]}, step=epoch) + + # Best model save + if best_recall[0] < Recalls[0]: + best_recall = Recalls + best_epoch = epoch + if not os.path.exists('{}'.format(LOG_DIR)): + os.makedirs('{}'.format(LOG_DIR)) + torch.save({'model_state_dict':model.state_dict()}, '{}/{}_{}_best.pth'.format(LOG_DIR, args.dataset, args.model)) + with open('{}/{}_{}_best_results.txt'.format(LOG_DIR, args.dataset, args.model), 'w') as f: + f.write('Best Epoch: {}\n'.format(best_epoch)) + if args.dataset == 'Inshop': + for i, K in enumerate([1,10,20,30,40,50]): + f.write("Best Recall@{}: {:.4f}\n".format(K, best_recall[i] * 100)) + elif args.dataset != 'SOP': + for i in range(6): + f.write("Best Recall@{}: {:.4f}\n".format(2**i, best_recall[i] * 100)) + else: + for i in range(4): + f.write("Best Recall@{}: {:.4f}\n".format(10**i, best_recall[i] * 100)) + + \ No newline at end of file diff --git a/code/utils.py b/code/utils.py new file mode 100644 index 00000000..0e275f39 --- /dev/null +++ b/code/utils.py @@ -0,0 +1,166 @@ +import numpy as np +import torch +import logging +import losses +import json +from tqdm import tqdm +import torch.nn.functional as F +import math + +def l2_norm(input): + 
input_size = input.size() + buffer = torch.pow(input, 2) + normp = torch.sum(buffer, 1).add_(1e-12) + norm = torch.sqrt(normp) + _output = torch.div(input, norm.view(-1, 1).expand_as(input)) + output = _output.view(input_size) + + return output + +def calc_recall_at_k(T, Y, k): + """ + T : [nb_samples] (target labels) + Y : [nb_samples x k] (k predicted labels/neighbours) + """ + + s = 0 + for t,y in zip(T,Y): + if t in torch.Tensor(y).long()[:k]: + s += 1 + return s / (1. * len(T)) + + +def predict_batchwise(model, dataloader): + device = "cuda" + model_is_training = model.training + model.eval() + + ds = dataloader.dataset + A = [[] for i in range(len(ds[0]))] + with torch.no_grad(): + # extract batches (A becomes list of samples) + for batch in tqdm(dataloader): + for i, J in enumerate(batch): + # i = 0: sz_batch * images + # i = 1: sz_batch * labels + # i = 2: sz_batch * indices + if i == 0: + # move images to device of model (approximate device) + J = model(J.cuda()) + + for j in J: + A[i].append(j) + model.train() + model.train(model_is_training) # revert to previous training state + + return [torch.stack(A[i]) for i in range(len(A))] + +def proxy_init_calc(model, dataloader): + nb_classes = dataloader.dataset.nb_classes() + X, T, *_ = predict_batchwise(model, dataloader) + + proxy_mean = torch.stack([X[T==class_idx].mean(0) for class_idx in range(nb_classes)]) + + return proxy_mean + +def evaluate_cos(model, dataloader): + nb_classes = dataloader.dataset.nb_classes() + + # calculate embeddings with model and get targets + X, T = predict_batchwise(model, dataloader) + X = l2_norm(X) + + # get predictions by assigning nearest 8 neighbors with cosine + K = 32 + Y = [] + xs = [] + + cos_sim = F.linear(X, X) + Y = T[cos_sim.topk(1 + K)[1][:,1:]] + Y = Y.float().cpu() + + recall = [] + for k in [1, 2, 4, 8, 16, 32]: + r_at_k = calc_recall_at_k(T, Y, k) + recall.append(r_at_k) + print("R@{} : {:.3f}".format(k, 100 * r_at_k)) + + return recall + +def 
evaluate_cos_Inshop(model, query_dataloader, gallery_dataloader): + nb_classes = query_dataloader.dataset.nb_classes() + + # calculate embeddings with model and get targets + query_X, query_T = predict_batchwise(model, query_dataloader) + gallery_X, gallery_T = predict_batchwise(model, gallery_dataloader) + + query_X = l2_norm(query_X) + gallery_X = l2_norm(gallery_X) + + # get predictions by assigning nearest 8 neighbors with cosine + K = 50 + Y = [] + xs = [] + + cos_sim = F.linear(query_X, gallery_X) + + def recall_k(cos_sim, query_T, gallery_T, k): + m = len(cos_sim) + match_counter = 0 + + for i in range(m): + pos_sim = cos_sim[i][gallery_T == query_T[i]] + neg_sim = cos_sim[i][gallery_T != query_T[i]] + + thresh = torch.max(pos_sim).item() + + if torch.sum(neg_sim > thresh) < k: + match_counter += 1 + + return match_counter / m + + # calculate recall @ 1, 2, 4, 8 + recall = [] + for k in [1, 10, 20, 30, 40, 50]: + r_at_k = recall_k(cos_sim, query_T, gallery_T, k) + recall.append(r_at_k) + print("R@{} : {:.3f}".format(k, 100 * r_at_k)) + + return recall + +def evaluate_cos_SOP(model, dataloader): + nb_classes = dataloader.dataset.nb_classes() + + # calculate embeddings with model and get targets + X, T = predict_batchwise(model, dataloader) + X = l2_norm(X) + + # get predictions by assigning nearest 8 neighbors with cosine + K = 1000 + Y = [] + xs = [] + for x in X: + if len(xs)<10000: + xs.append(x) + else: + xs.append(x) + xs = torch.stack(xs,dim=0) + cos_sim = F.linear(xs,X) + y = T[cos_sim.topk(1 + K)[1][:,1:]] + Y.append(y.float().cpu()) + xs = [] + + # Last Loop + xs = torch.stack(xs,dim=0) + cos_sim = F.linear(xs,X) + y = T[cos_sim.topk(1 + K)[1][:,1:]] + Y.append(y.float().cpu()) + Y = torch.cat(Y, dim=0) + + # calculate recall @ 1, 2, 4, 8 + recall = [] + for k in [1, 10, 100, 1000]: + r_at_k = calc_recall_at_k(T, Y, k) + recall.append(r_at_k) + print("R@{} : {:.3f}".format(k, 100 * r_at_k)) + return recall diff --git a/misc/Recall_Trainingtime.jpg 
b/misc/Recall_Trainingtime.jpg new file mode 100644 index 0000000000000000000000000000000000000000..afd7cbf864464996886b8e0a3844d50bc6b2bd72 GIT binary patch literal 80327 zcmd422Ut@}w?B+xJ&KA5BJ~iObm;;DD!l}xlK|n6&_h6a2P?e@NGPF5FCjo^krFsc z2^}FJKxj(uNbmUJx$k-J>-&8F@44UeJ@?*kcJ|sczqM!9?6oE{vuCgQG5q5z)h$g` z4OOah=cuU8ogGv^MycLVT{;VwFI~QJMg2s7tWnOclpwl-!5Ogc!~Nd)w%Q37cO3+y?mRF zoycj!5xFt@lYECZM1Q$y?c3T_E019u)JkJqoVKmDLkU8g=PhnAX_ zN|9>m-A^ghT>^;&D=b+AQprO9&5mCKQV;n34(sHbnxu%%ic+@YjX-`hr%*I~i;y&i zze`xyJGUR;KA5lUgne1{LI8tkW97^h#PCfD)pv}7>i}r^YJ+E=12{e29!0&{5qM9# zT%mK48!eB2fd|CPz^vo&WiUz%JheVMdnY^l!_O-GHwCwKw#_7Wa2arc5??H))^eag z_TFT?tZG%&Mok?WN~igT&mt~|EuzIJ2|o~2QPc^lbNxOP?K6U1?Edm`KeLAy`oyM- zC5pPdJz6i(>#p`(MTxQrDNh2My(>T&G^9>{crI_j29!MLYdSF7Cv0j3YEBFSY9aqa}w;CRMVOH+nLVa<+m+IKIvgCQX;P4U>$I?lkS_6hi*wgeGxw`Z}5ROIyV1Cdy6ky*Myv zRP_0a-29o2{T~HiMmkSu<|6zLev7e)dcUZmY%xu}-kH8}MEFVm3@DmpZ}^K`{vQAV zHnNA)OVsqZ3X!VAj-EA=%2?CQ151U#`6NND_OQ>MAsreL@pJ3&3Gvwt%ElYIRv{XYQ)xNjn$wH9nWN|Vfv zTDRQeog&Z+)W84z|D{fl+{~wr-8awuLB+rdS)SAS0>s;rKptO^3(0>oFIu&pGRv7ke~*vHrpWr2{C1if7qc4u7%f`&gDQI1QLqKn?>>gT zCPPaJbjUnji!GWXem-Tf`=bv`2G=gET9my7EhuS(<^)P1HX zXLgMAYR5S>iPDrk(JkU0U@Y?Fq)(9yA3NmdwAe~|SEjt;R`r{4AMMAU5S%jxL(4rU zWtS^XfEbNuM3Y%5Pl5S9m{!5Jx{F-bLYtd;g{)=-gFnTLcYc?B%^I5*PB6^Uyc2=< zL9vJyWMSk@{(q?6ztnuWb>*38k#D`cqs`xH|9O>BdQL@kUQW-qf!$ucat=50_+Pna z@!!9lj%2QUN&6K(_qwowaQvs8@sJO$jpC=Qo=_#7I& z8b4MZNlYE0%XFW9+1X*R&Em|%XJ768X3QXD>7i}Kq50{KU&~?3%g*$aiM`dUVa~!+?XQ40^>jEHnSKWadJQ zXdM;6eJqpbE!`rW8z@t#vOKtSWPgbJVgpRO`y`zp~E# zw!S)eBzfNcSNL*zY^%mf(aX>+<(ppF*d7iliA7Uh!M~WjyZz?FW ze6L1LR(MKoHjf8jv375yC-`>%Jn3u9dz_$JF_xUVL$0(+d{ zNI?Zv!j@iFt>%Fc+^kG(_#yiRqP>Ky>?pmR)WZq#)?8(0`lv*S8nh~+gAuR!wy5A^ zUJx@&ROx_nQ+_!4HG@WOi!WgHuARrR*~Rr5N-EJ2gq@fo%fxleRFCZih0K~Md?dM> z2fnYt7_GQu-AWMl&Gc;hDj(ULS8BFACE`D1UzTHpRO=FqSFf*KGjcm|M5LO5EQtfP z%njvoHQf445IAC@Ec|-|i&Ch~09dMo45<9x?W5(@wgDkl0JWY2O#EwhHCBF5-6Qz`$_op&1RVRA&v zjafB-^B`9*+7?81+d*Lz;`s$1F7IhL0NylGZ|2fpX=ef*0}s4lQ5j*6e8 z(!KZB{qSoRHZMhb-irK7>^*ek4;sJb34pXx 
zGbN!a$@$Oz%EV^S$LceKRLe1K88^~|&NEEM{=TcCXGv{dRLfwF1!iPcrl&XEOV`<( zDV6oeG}F9nz6|#5Hd1%w&qnVhMi^auR1*oYWj9^vyWZ9O(86PHZ<>29*w2#ob-PWe z3b;XJyo)@mldI%ocTGG4(w;H5rI4cXfU-e1TX#c<5`CGb=b@Nqlc!XSrKRdn!Z#re zUjfa5wtCN;h)C%IHy?OR*&|`^C45oc!%5+L1*Xe4ylv~vAl$*N)uMO0=u;wnL6Hp1 z0)he4e43c`;hxI8%IdkgU!C~P_$|0Z^%FH?rv5$uyHfy8 z`v6`Z^As0pd_FnzUP8Q&uFlndnc?JT^Fa5vJmIym+@72xEng^ z^q4swk~%u5QP9|n&tC)Z$_^q-y|>%7#D5Nh?Z4Xc{0;Wil+&K8fBp(H%0_kvcVF^! zk&YaG{447`{p2Wm*YU!aU*Vf_>=6kg2Glp_!k3?M{rL-1K?M3TTB~Pd+2H;ee)${_ zJ|3{-lE56}y@QYMILxDyjHJU`Nc8z9dhd7DDk*4G?}mR5tj8WRv%L<1_4ug;rpn9( zOmWZb)lSNE)_bR4ZMKlV_CS9ads_w({FPClQTO9&O{qr%T@t&NrKA9J&~6;+TRv1U zEjouup&@2GattC1`z_f?-Y>mWdHi%B;ajT&J|7}ib&^Be5We~0Qv|V3&P>n`XoWrB z{OLdHeq~i;ah?cJU z@v##0YHDeWYAs!@F3DZ_R9|w{qTb7ZnSeg~ZP2H#$C<`g8_DV2*%te0hbs?*YwV6B zcz;nA71bY0BBDR29^4yjm=Ll0H{KPKVCS-tLb}Jl;*VB>_;qcxuPdhVVjd^_%G<|B zT#^)#w!>(DIAf^Hi_uPFy)g2lZjAMQ-C(cq_lM>ND1+B`RJQ^W%hr=v!C0Ys1D#V` z7ohl%gadp_u*`2Lct6`vGSBq(x?xm;-PpsudokA{L-peZ4$vA0`gm^e^mC9v?R|$l z2ij$x-f`J?%MG6Di^Ny;bhd3R%1;rJwlet;wePRXT9F=%qBD^mH5#My1NUB39hCp0 zuAJ3(QJ;3-yf&E zin+zNx?wIK^CT_Ih%3dPmgf9vL5F2ziV3G)AkC_NjXB<{xW;_k3%&8OOCzFwV^2MO zkkfFa!f(_@wIAxQBbg7%mv5jD1<46){tfx$O(C9KSAgkuS=JS^Iy8q{(t{mkqk~@i z8h?g9Yh`0MNv4wMO)OjJDtDLh{=(vD*IwLu9K(U4`!r9-KqAsRt{7b>6UZGNNtVca zdz-T5#@moUKUY~DKh;3?s-)w@OvBPT<2LIva!V8Y%sbqB(!&QAm6y0nA@J2(@wK%T zZp}^?mZ|<1{>CjGGt18~+>v5A04U37iRWy|l!pF=8NYz%fm-+it@$P|P_X5r7(zt|2N0oaFmTW`meFxd9ajTp1ECHJ6{ z9+@c~*Z>o7N=S6^O2i@Yo?tXIXQXbm4h4tAI?CMjFdV;bzy*=L({cB^4+sZA%eZ$& zx%g#%#3)HPaJg0YSZeY$ZZwWo_Asw}LP<(se^R!$;Wr?I^mQ02DN^9FuPW1}gQN+91p01ki>&9Ze&PRj27Kz?*b6xTM zLNRq6I%+bjt8hG*5up!+OO>6PFB!@6m3m@2C+M~bTh*D;q|u6h6&qinYjASkfdQ}A ziUB3Pb~`A9j38T@ny?zRl5HB_H*ZS;_E085OB1zahPO-YgaD^fMv8%#(_KvByN-dr zZf0nRIWm3V;R$zqrGU(?pwo6SQV1az@%mj6sCe$sFq46`Zbadcf|v6Ac5D(w1CB58 z+mX303ue2-k4f>>8Vj+zw3jlLuCSLf{+b~)T?D~ZSPnSd;Z@A~*vDsGSURGp^dPPbggeWr%k)Sy|k#j3Ig&`@VH$W*uMIy{+@Bv2;Vc*uCLA#EGY#O zB|!@+s~laSnI#_YLC?2VAM~~@Bu_&uI%MF77->fjN1`3zN?iC3GmLP@j_rQI;QOlv 
z{~H~p(cz1Ew)=KAIOOJ)8HnS}jEOy_5EM~*i#%}OtV-i-HplVwXi|L5oM+!h(WdHP zQ+3p8J~M3+ox4p=ApG$=<n7@*JTtPOdC`rlGtU&Lb zARjL;wllw9JfvsJjwUWe5HmK83F>6~#ik`2#RAIgD$ka_C_R!e6IH1JQivd0P zf@u>IlMe5CSd;u@j2KbgML2igCCUSc>Fuu-Sf`=3Lexrn+fihHB0~~SCug2Fxc*}T z8(SkWz{l?hn7|x?i3v@8S(v`0?qpix*4AYdV;bgLLd!2hA3ZXTS1nH!VP~C9x2X4? zL~V0mu;fvR8rf0nohXFdtrGpmt#+=rdwAc%;`AG*Tn7Te2^yQaN?=BCbc|Fr-N&8b z2~O*{0jYEEsHFZeg4}xkgUYFI*Iwrz>Ax?ZND$cRAL3}=2FT~XvA?sFo{n2qKI3`u zEABgdY?#rR%or@83x3J-H}Bks4H3)1FT+xxIh5q+4P%3s{`3ab>xZcqrPY|fza_l~ zrP{j5I;Aws&?hdQN8hatYqt63LSKnA$o+{RvXu@4vI8@-Ezv%T;zjvcI$ zG)~V;E{-6j(6is(C2;zseRlA_bKMc*+y_p1e75Gn=fy7}2uf^^j*iIoduq;DAmwD5 z)3z6Jdf+;Eq;F9VZ65OO02C4HsBa~Y%Y*3a6|84J=H)NP+w8=Q&qga=7;u1n9uZ*o z;iI)$0v}v6Oh*GgyN4fNeK19lJWH>4 zKS|&4@JI!-R77I-kuncVH{$QP-QQNGqIw;Aya6VW!%7SPdHc`U|GP=q z%o=aOc0%;<^*iI9AHZ?)nFd1{?`~j~m1WqYvH8>yU~jqeZuBb{xl`Py-&vni&Fb7X zLx0$rEBMoHBGI<0M02Z$MA)Pc+F&tjFc~~_r^~LC%&)yrQ)Ym?@;khL-6*km!ldYf z)pwZYql6{>^SxY-BO+#AV(8shiWxMtRx0w$EfyZWnHhva%AB&d5ev+>fO3ae|G+?NZQlDZW z9((u)>lv{kKKwzIf9@~(GyN~%`Mcsl)yXg?H$buco%l$;SV!qZF;Bp`xPQ4>@@EOA zf0gi)vYw^?r3C8l$G!64qCSqP?tDdDhU14}>us{!VyO1?Y4YWt#G_Yn@x@Q_UxQ+s zx9+f@xY{pp3L9%z5#5OIFiZOWe`$~UpE~^Wry`sGTR`lYnf^nNkD@@V8-T0*a;Apf zp;xjDe2}-GJwn;UIfN7D5Ml`8cc7!SbzUGT!ry;LuVI3cF))8K`$=0UPXNbW?!t*I zTSp=RkiFH8sOjchvvVDUf!M`F+y~MF{1pl^dWc@3e*vf7D*Uk%x`2 zW)d+g@h^1??`*w!YU?^4&d+wE^Vx`03f1={(dcmLKqSXPccc8uIFhOUP~CsUVtZ#&G^0RUDK&3_x7N}GZnS;{6|wO2_{ngZ0J7=GE~+`c&<<>@T5MwJG6KX zd*rG8LKe=x-5x|I6r-X7N+`UUE)X+njY1FjNDIW8YzAcIS4xJ5rG$+qhi6{#S4$*d z6-{EqcSXb)rfM+`vDEjxr>2vx_s`Y!!`4Q zSZU6V_~asKd^UjD7+JMSJGe5IcX7mbxADSTCz z#_&ldhs#abMM_4br%Q5a>$OZ|x^xWjPKS0P>*2hT)4j7N8R8L8CYx3ZX;8j&{xreQ zPDfPS;6I!4A3gE83lrhuew2dcOA|(hYvYgK{j;N;8OvnPP)hug1wOpDjlmd-tYjt;XVI?>IPx2gD{;^my|V7L`O z2F>03w368EnYV2FXK(u2F_Br@SE#cW0?)JGy{EQt>$+fcKp?-AQys%9i?eIH*PI!{ ze4L_=is8wUA*rwKF!byE&D{{t$!~Qo%8PY|-z6=$uJ&p68b%;a?kX+Z!m5Ym!DhZc zf*Bx8?vQ7zadSN`<8)sZ>ewH@{wajHwm_x-@PdMe4f}`3oZ|F_BmxMPqC67vI@ey! 
zovSplAzwA3_-#PC+w@N|z>uW!^WpvREB~iY7sR>M6?x>Lp#K;fbClTa5Ec^iJ-n4W&4IeqMMVsI68H zrjc?OD@bGkO1&S>$pn#;rD<_D$g z|BOEVuf_m51r}}zPq76Yj1ke?e!`(&e!uZkZL;?0bXVwX_tv1%gSjH3xM7low;;`%!0|)y(Z9`p{ zYaI31(jBUNoy0a817{l4sj_^U+N-Mv)xTSj9yoJi!w=s-Md{|}l5I!CU=vt2v(MVH zauZv0-xwUZiQbs__69>+Q4B#sK*iy zGs#0i@ME0gMc<3qy7P-y@$z$tjx%ICk8yj{4#0~;H_~+!E}-oLfScAFZ z9B9SmKFg6FVQN`dWVmnb8h`9FHaI%ZYt`J904nN1h3QvTS(Y3M8H)}iezKn6RHWuo zU;7*bJDCf-NC8YlWW;$2w!N{?HfBD2qTL0ThUdTs}j4W)xtT$&&}u;B{Hz@bCIz^!{gxF}JRP`a1=kY{9a-*OquuY92<97mCUe_SK=fiM~90;%RdpS zyb1ZJN%%|*cJaZSr3O;SCv5RHQB1~{tDX~G29WUTQRZ?zjK4yeqZG$3ExFENy&?@_ zLH;x88^D4Q%H7rM)`!b%3=~=~cO-Y*T67V!3AWI0s$9CF&%a;FZuw(@6J?b z#il+Qb|{LlE?EhQe5TXl&CgKpzF{IQ;f3PDs1cgXXap$o9+T{l#G2iMX7TJ+TKllg zg-Np{qBD%Of|pNDE?Q18sSIiuli*7$T|GVzi*#L)?gx78PV7;50_m$LT5YEDF;OcF z&TpQ}3iPjGmf`h1F5E6%h%U~1m2`8}@>g7U#-Nmpd-|e1(n|l(U$s=KOe4|#X{Ryn zPBE&QB`tSd$&r+zdGpnM2T5r^0KWaX#3Rk;&(oc6xH#I%Ch{n6e8HKo-$UW$ z<)0%bb+5V2od^|mJ$j~`n{(0s`o)^1plzw9YKYcjqBKI%gTEYoO0GN{IU2rf2o*~n z%a>|YjYa~&_Wdh~=s|49!NK&D2=j)gy$nxQ^jy3Kh};@;xg`GKjgqc8EF-^`3%4Ir zIcHqMOsQ)A@^DB~@iI=&{t;2sKn>Or zTO47oL#oP$y*(=2xgh|Y5}a{kxOYcX)CA+@5wk6tzV=Lp@O9|v`G$&Yro(IvVjB*Wk=X}}tHF7zpBisy;2%odj6a?mS38it(?p@~%D zkhjcnE;j~HDd#7RkwQ)<5AGJUeJKMQ!Q&N|6XklGSz_$iZJBlAt&J>*NOa{p<2c%? zgTxZ2iG*-Bw<(o^N=2ZSX5Qf*znU#C0NXR^)~Fp&&r!`&&)0(}(m$S10*!ud-S5oI z&Ald(;hz#=YhvgWUgxB1QDizUhyb^(_h4i)T61hYM4W$6gmu8ag0cX^6y@xn( z@=}vIa)WYF-*)v|npPd(m5*YcPcv25-okgIcG@whGS!5nG8L_N1V|ewP$PB5$fB;E z%^K7?>nZ^5%5kS}kA)?C0rt?*ZfW6z{Q>0MJ{#J1CF_>O5+k~rhp=Ze^wMvaciQvi zBP4en-{oYv-)OpH3{Y)}8DCS`X6V6lzO8DL0=K6jvz^nGr8~$ndK0J2;-Wr&4-1W% z;x@tPx_Hz(?e6aGoRpMzm#C<|Z&ISuIXrsQ%5s7#9;LWWhoLB(z9Y#cqo1Yy*8)8H zE9ddICv>;F-9p`! 
z@W?2uGk7LDbR<;pgUU2h(*Fy^&6n}MQM_HPwCWD8U;Oh4Zu2zWrVgJu0w^-jD&L8Y zDMmf^p1$~GhJP1g9bzf+-n@U-%9$exGBdMfk6wP5o=!gX4ukAY`t8NzFs;N3%D$V$ zD5g+u?x7LuUKh*OGgpz!P>W$0Eq6(*rRIBy^eoci<}9jZ0$0_ooZpd6@H6JjZ4o0v za4zDOQ`*p-x6GPzooU2`hBXH{-Pcl%`rE0>2gTN3Ak``1@PqL`7G<96S)hh1n36;u zM9nAe%E&ihhG4H-J|fX(#rUeNpmp}_x$BG@(_u|fdEA2_KG8)Zi;C5XjzvSpRdKIX z4X4u8B_pT^{QZDX@iaKXAh@zsu#X3c%#3JPM~|mka={>_vrZ-3&MqDvVhHAyzGW!$ z_YB$uOMf%2Zdie;q-^T9un6Bu_d~Euaa&c6a=gT-^R~qHzzBnKMK=lNEh~H4XVcZ9 zD7rFT4O@E>YDV9@>RIXV`sw3x?0RJrA5DBsD#0XwUnbYk&uL!7VX%MKMXLY>oO2P^ zffOHj;+jIyAn%(=>A^>kZXSV!H0aZXv537x$diJVnha#sY=?AAy^{a(x+tw_(1xFC zjnqJQg?r2id*SfRD?U00#vxuZD*epEqdAAD!|umJl$z`@k@BDIxVs$^oiRX@-X(cQ z74Hukj_{rK&^B*IPUGSXis-g==#QkeHdv6E*edOl<+#%1X4+|6Wxv}f1|2ug;H49l zV0St<%#g8Z+L?X^*JaX7UIy_+(v+|JR@XXZ>Y~?$oLy9EU$ls`szi;*5O~%Nn)UvG zbqtw^((<_UK(uB)jQ z{`47VV>=cJa}aIIcU`5&MV48p3MO)Gm)e^WqG8DD&;#)rMh1s z-ww6M>|%vE3cZz~dSO!iblqj7|J%KyTD4GIl*02rz7a8VNr05(%umHk=djy?abi zel^s$H7K*GooH7IRyJ$nNOY$OiuT)u3@QhM5r@3r);n!%LZsbhAo*1}feK|8=QUQ< ze7nRAu>mGAn|2suR@Qql*0cvoMw_ekNp5~*hN$i1L@P&eXH5OGy;R8M+~$I_?v=8C zfDqOJLbR;ICv!ojkbxBbUEhx>y`o(k=4vy~_gn`gV;zgp-Vlwct?vkDwjWfyJR1@H zv|FM2sSRGU;Tu&_s~oT4$wyw$ae673xt&cFQdTXuB)xiXb+UXLtV12E_xZsIk8ziU1}k_llg0LR%EJi5WqL${;fuT_M*ZEfUy(2QGB94hMn`J6_Fn z7Pyd(6UG)nC+MG3Cze4aQW6z!VYonmMjp z%NP@*DUcLb3!9R_oBMNv(&B3uah6%v_T@yG5Kc4ryOh<5{lRMI@ z1Jl^OMIPC^YjkIC-UIA1lZ=+d?ON&Rf;9nb1fEznnf(3b0RX^!%=chwRJ(#f!q2HN zA`^5Fb7k6SMieuNvle~0C>05W`D`KRM46pWUO%V$KYYn~T_izfH-Or;*}%EVHIC8< zys+WMw~u9ZCNE7W$F~}Q%tBB{f0!x`>Mgu&&Dswxs4}|8K~UY(67G^u@d8;>6T)RL z<$r8`q6%Lx+~?KEPAr+H!|LgQwH8i9O*9uf8GU#Hx=V(Ry>|rC_5{X0m{7W2`oZ-k z6S(!ehr24|Ejhm_d$}rJ*=p)Xn3mAs8;E&Pnrm&dt6V-gdkK7hi`T8y_q(wJM zohE2^e&0<@s?&@Y6gC>y^Z#w9L)0PDP}DbkeBLaiim9ihdrxsmk--O{0ZD5Om83a3 zl3JH7m%Q{^osYmHS-e+Sv}&nw;e^|fMoxZY+PIsr7CuJw>1t3Ltg>FbBBU7Yr6mBq zpc~n#rnsamQc_CIo{&o!sC3p0FyVD%>~S_T>_A{>sD+vr(9^ldoQZ=Ja-sb*n+j5K zm%em+`#1?Bt`SKvhKpM7QtJQ(U1tQV^%cNwOij6Y>r4RA!tlfUO2{$tXg`CNi^ynl 
zD=>{@NCVfCa&&qFlznki&gYP>HU?SGw!15kP<~{ar!lQQ6JDk>yR0PFr{w#GaWizO zy+TP}N)CT3eu++;tGk`*dnJ^=Ujbwu)|S7NYsC5_>SL+;JdpS$PmppVb$z?pe-*B} z$QQDgOL>%>7VJj%rBCnX+BV zybCi37_v>=9qcpV#F^}|iI}zSfDfli+K3t`o!(L*$p>4yXopxc3md~|)4@18XVB%o zIX5?%9*;7kwG}DS2Ey)Ez94<13+rI1WK{gQy$sMN-s$2uo;UQcS?4qM>zO3Kj?B(gIwOs zaRC?m8E!wajB=>_-ZgXyxEh#kr3=i*i??)pgw*PfkE!IhlZ+LeTUQJX{0PfF)^1Xy zfY^0jtB!9h&|?K+K@YH~N71df0BGJnVM4I2OeUE)j~QV&N0?2fI0Jd7Xbe(cHe^6f z2k0wYG1iV-wt1@(D8#LaI&+J02N%}5#wH`%j2Ul{eGUONa}cZscUvC6%1iW$1;t9} zm`)^2(}-j@JFZ$X+)y*NMsyZg7)$xl%Bd&TrzHf=mKWw(=-!!5XDSwH``0mP^}v>-K0PL9(G` zCr6R*RZ~ zlvrZ)nS<2ojdtvCl*w^8gI)D!`P*W~=^EhQZN81vr*5Yu!|#eh#EGc^WdfW!iyr~m zrMTNNn+0Hztm{P{UTi9s5!*X_%g>+F2yM$c+Qd2wH1uU`&P{^i#U`MB&TSSn5tH-n z@Ou{QUJdmm$1bKb|7DpmwMvQntBt7$c+0+K0MjC>#X|B;e@`?>ehuf9L$f;g)x&(C zZ9;X_&1-3R^?3|EkSz(h6ONxAH6kq`l6W@c5MKvVa$I}r`*zUMlkX5EFjEp792SNG zFgy&VY0g^r|F~B|l=8Qko$Q#Rc!I?9tgow;!Q-yS?>_P%(A_C9=J8H`BQBMBF91-f zUZi5;GVoO0O318dYWLZ_Gk5pzhw1sUFJ~64{3&~?gKl2JZ8aDKv$hK<{LEPs@?uy@ ziD_JRcgi#$DpTx`!0I+IylSy*PXU$mAQ-}}`kP@3p3T3x9ClGO;~c7ntA^3BtN;O; z3hPEK7YC`wVWbsaiuCvg9}WGgGI(xyjGGsK7G{IUoQ-7tAv1n31sbS$g?nG6ZDNs|E0^k57+&8Yp}}7l>h2_ISgTR_TC& zPd}hY#FTl-pQ`yTmGiH}K*)U0d34#9CY~X4=KAZ;zN8d3i~3Xt3@mQC05J+Qbi6Ho z&uceG;nr)xAHsBafVkBBnfC+QWFEay6Vr?^RNB+-VHNB9ZPF7;Pl_scY(U6vbW_&J zo54>p*s83W0A%{~r_dgoO*fz`Ggrg}PVr+R23_F_6Vi)Y3l@@sCspU~=DEFa=_Or|_snD^3?-0I-L3}FuFfmy)YnxIfYkdmGXbUFM>2LT-`>32zxdRWw$yD~ zCUq0RWnVa+zE#5zF<(N+O{^}bC_Hb3_a+QQ0BdF?@0_IJ^dFry#{c^rMSpz``&ZZK z2V%Pc39Qb+IpMvc3b&Ik%60EMY(TI_vu#M*K$KYb?6`HD$$rsbs=%!=$oX8e0PkwH zopmFAhrNzgBK#)gO2q5bI%aj`6|Q^w00k@E2Gr#%f8TB4fKaVIWn-hB37 zpMQZmh&2S(JMVcHMWskbM^5^TJh-Aqb#D3*t?spNiD~)r_5A0qh7cbfKZ@@-&yPP% zl%kWWq7cQpd$&HS^@I|1R%g&mWrfP!KBA4?0VKhYwCiv^TDEj`?VtY7N>sNlef^U* z!Di$82*OuTlFRbRjU2sDUGS(-O?Z{DLdC@U>NzOK@1@GXy(LLBrR=WW(ge7@XtYBB zz8{jQ6d{5P2mml!eF~_d+}f2KgxHL=2+zwi?=r1`34#Q4k{#e|J=y?7@gEhIA(}Y0 zxQHq^$H-q$vC9aGHJ>Y3#CS7!Ca~X zv@Fkz$FN4E(=ramj&P%`>`n4!!fy*YW-Wfa<2v$P1r=GM%8jUBu({7CE{!MnNGsAOOtZk 
zXtJTo+Xuz3%|2JvmH6h=hvCm&)FTZNScb~fk;oI;_s&c4syTP_DsHr^&5EL7(6w1h zjT!jJxUIYnWAa?VBNx$y#aH8L557RNtdqYY&UWfL6E z9@;D0L5e%NI1wFn`oB>>A8xA#CxWn-_&P%m3%ofR%U;=^t$9bef;N#Kt3eJy z4oDqNZT$~vIDiJPAL_0+dp~ZULE@3zr377VSM`)FWJ;x_&_PDp`%y$)l&OQ!+w}lf z$x?5}EVp4Sh15XwEdb|4f&s9>_`*m^SV0TyY^nVg#Q{{MD=12SG>luUSk*u`VONJq z4^rhF!Jt){vYE0L<%g70Iwp))(xbl0*on7k;1ohAH)osKwEwv=qrbX_f8Uz*plqh- zOSny?xanlc@t3Y?7pw)us~z8-KbAB*=gun9eff)m6u*Y z&HdmL*!yLLdAznGVZHsGM6(C=wgtT^t4}ZUmVV#PQ%^>V9U6rQyxMIT>POjP;0!xr zveWl0*;&I_;uUn-PwuPePXX^{a~HA=t~+ph;8ME}s?vcnQ;#x0Y+S8${d`Zl)=h3R zA_6s6I%tqv!iG!UAG#NXI+{?l#bpW;KzBWOmre$Fe@7LI#yw0gc8ps>s95Mtmrv$<-m{ zyHH`n0N|0C#SO07>akJJy*RolG*I?bSExuN$IBgOk;3eM%iy9b#YOx}qi=_4u9|U> zfr>S@>YjsFaNfe?lbg{k)?50u<<-421!ie3=A=}2?R|+flSsD~uKNzx2hl|kb#`XG zAo1}LET8-1F`Hg!RkwHRqE&SDP3!{{1+5<$P%dWZg@S(LA&x%S z^sE~HTG3&##%n?HmQ*IATIL;a9DVH7vxUHFhHo5Mwp?Bv_4sT{_7DOrXKwjVazUOK zXN_5(gv)>px2wL4oip~TutUV43%8Wv%I`%EB}UBg;E)R%tZR~(Ra*PlhX{$1Zf(LV zC>H?d!*vR-!brd zXHHp;ry*dBR(!`a%w%96w5E7v#Ji_?!_Rov`l3aG8*#3Taiv*W0F_ueinAFj@I=h% zCcDcxblCPhJ$s+Q(l0lB=vmbV;JuSVs>|DqTl?R(mpGZ{jW+a8VWD-JWO-1RbKedt zi_!78RWPd;!PLnI!ye!HY%`gb&T|CgotsK{zI7j&W#-tX8l&R&ds=n$*BbTZYx(z6 zSe#%6){X#5D@q6CaY30EI;z19p@Zma9e_mmF?Ye2&Sa>iUtxghjw4>@4q*> zYc7$F+1fE{&Yu6&KmdK$wDGKD;6l(Gz$vpj&5FRK#Ht}BH)SV`ilH}@pYD&7@KR0nG$C8#FoEYV+$3Ya9?_KB*Ssj_d&!+B1BLGL?-nWP0!{9brU?GJ zLIUJTuAwwPb9eiV6kXhtKjS5hM-v<04`(K6Lo-d1)oG=GafHHwU3Xwe3fgymE<7w# zO_8UbJHK9aR7=Y?s$uN4X>sIvxgbjt_<2kFv^4^lN>Ww|y7+lLD_wiC4N(>NJK2#_ z+Pm_LlON6d%#@{168s}MYJkH#2au`vV~6NQ|COwL(|UG%O>I z)*(Hc5vA;z;RUb0_oYPM>O_l;#U|98;H_&u#n-!BL8m4(8Tr4TddfM@ z$|$Xyt1p~HyZInD?N|d)EW+(CC1j$pva?}aQ${(EOj)t@a;Fdx}JN(cx4{J40ewBmOnVQU55sUdpJ+T%gsjV zX|7Bp7YI&_qN}Qd*=ihX)YR(G)(#a@5(-M38oOz0E*=w7W$@_r_^Oy-`FQABd_@_}2lN77U~X4B4+T7{TB7ZTVq5TMEYffJaI3q)F=w7X1K z=^5EmH3r2vlYz3}V(p+1!H&Ka%DS)NiwCaja|!*jdUnA{kkBdKso|-iAgMP|GmRyV zlQvf*@7cO-XI>ZpUU>!MwIRa>c~cE>JJoMo5U?J;zc)&@Fq1KYz)OQ}6n^ zAcAV-y4c@M;OF(fxRdVu?)&w-!X-Y3o{hy} zam|w;J!$tg0+$`x}y)wZF~K5g>lf)*%<#8?)ZJkz~a&97P3r2$%o)Tklqac 
z!_M6m`ay%fcwWW}4!nEl8l0grH`2b18}OYFB~&cz=!Me5@`(-*qJuyJ3#Ge0bxuW< z+g|F##uboC$<|Xt)7z~W-)tj@)rk|qmlAhXix;xgaL4($hG^G zw1Tuqi=hC#UM^4}UMR+Q(>vq;V(%;C+G^H))6!CbLMg?qMT-{+6z^MHgC#h$I0Px~ zQo$RvxD|&4LLmf-x5eE;aAn7;^uS6*lHdSec#ILoG4@1a#c#fF6orz0ZvMxB`)79E zc(ZUu4)}T~K3TndXmQ)_2A|NTmO)YJ%Q$^q5g$fm`@y=V{`dME&$o~srQRX-_+if{ z1@;z|y-o`j$OGDP1AC4*4!+RvC7_RXA`y*8Ob?xqY%0*FdLg40O}Qr;29r@N-l)>C z4o~>dtq@w0VKlOTG+JFr?W11M0Nqa*tOItin8$VMb!nfxB@ndC@~s<*g){OxEXEIO zPHfFjk3&c7>n1j7Lfo=tV$V+6ys6(-fQBkL5>H}{Kz;DToYlx>WR%*AUk1v8e~_8Pr5y2*j871)zIZ}m?*&JvX|s=f=$XA$hVD+w-Mc9oA>l+9a>u>Q2ZElZuf7(B&eqp{Nx751C;YNO#G}J%0vl=IpHea+epk6v- z!aTBa?=}Af^T-0Bl0G#|<*SihcnP#x#mYH#@ZR*~V8T#0p4 zT1Rv_=`Ll1@apP0ywGVDwD*_%bD?dgqZfN89fK?+`lS_x60$ohQDvT1@4 zdL0^PzN&?_+1C{u_iR#<=a~TY^fnmGR|)nFtYU#M8Cyn1hU^|wP8#{Qp3feilvClY z{7o4d@DC>NSMtn(4U!3Z6^1R&IWe^J>#|yIW&<1Yj0YSJZxh=D!~5W(<`F$P{r42w z$B`~EBx1OeGd@?|t(;gnJ+qX)w8Hw}oMYK${)zANQsze0eZO%(o*sJSm=Yg4#p|PO z_N}?YpUQyI9z{ZdZm~>L=w^f$-z^fN6BaZ`9VW7u=NfU810o+-aS6vZoBMyElSKCR zfuWKfcALZdMV5lc->N@Mz`>J=dy^}$^4;&xX2;B8OyTp9dFqj`za=U zn9fLqCsjG0=%IuGt(}ZqS<$L$r1rQ~4<zgNe8w+$zMRAQqAUsLo1db#0ESOi*B8IrbNq0$CMygZK)7|sF-jun69 z>8gv8k=c~?>9V1;munl*HiwzAPKVI;GJH5M3JJBWLP6)la?j7T7P~}!o+B?whDiel zM;^9U8mP#a`(^4UIJ358eWNB!kzko)YlIf9?#UU(@O}SCw3_{2MZmdAAW*pTDJZPc zxVEwE?g2Wna8jjeE{2X;4j!&JH8GZ{CojTTpEF21X4#y;)g_kbQNOy;g-j?$=qP`& zR20!f*S)pMGYnckK0!pS=DcBsjLZy}Kt`wvOSyKHyI?vFp#O^SO*qPAjgSg0AZ= zAFl|z@+YdkNawT@%@MI~yq`B?o(b$-qV;0c$KY6lBqS0yZw^MmN^Q~Y;SuBE&@n;_ zQ9<*?hfuODS@(8JZaz{%6`5}3=Qunlx65z_m4CBa1N`4XLj9Ttf5*VRvLrC@&L^>~ zsJeBmqx!t{nF%(%#YiXU#eU|O9;xCTf>X0%QRNVDLPJ7+c)EiQM=ETB5TR+4QOiUL z2SrMRi<{6idbN56)w@w#I@tJ@R{QH2*CM9j`7`pMd1Sc=W0**Rn1x1hq}}^mFX=Eu zSf!Gtc$TZys|O{7QiaXZ%;Vw@xjdf|mfyh;itLg-k1tac&_8l#%(OQy)g(*tA}Izb z>bvPO3aCsAwQQZIwu*sxK@9lm@mRNes8q$(6T0C>r-E5N@;#y9Egfg!sV%LRwS#~J z&)`&0L2{agq*Skwir~4_^ z_h<>rzp5H<$D5sY#1ub5-l-Rw2i5P3t?1c+>WfckduzI{gl%-)%U!O^7ar;WsyGN; zs<}guBVwgTg4k*(pI0Wky&WVhS3ZsBNd*zNQ_M(=aJ(p1W{cmchT@JAM5$Phe){rj 
z+NvX;m{;e#%}Sl)!kqz(ZFg&KG4Wu*&{3Ao~?@0AO!)m%_>|h>3WhbYn$INY*%bAA0+YFqbd5uhyGrQVBbcIP*bD8R0piF{FdOYU!F+KYKS;m zqqlU=(n^fsF0Q*%aUVzb#e6ZaSGYX^$#_K1x3g!m{|rIv77jt{@+;|cuO3rM%F_5{ z9Cr*f>24YydORH|yPpj!us<~OoUtEwC>S2zUg>TPV+m60lUBV!R_aVL5i>%ONIKvh z*Pqv|?^!95ypwrdJS7aN5V{?FK%3-%X8NFDodbPKG;v~zjx_-lJZQdCP zRx~$C!AvPR@}<`%rl!Ei7-uY7Gvd13Qw_XdwB@yJS?rwhE<4GLI~#m0X}X?I+3i|n zAJ(gq+3DA`g0EGB>fZMT`ZH30`*wff`$#6OQ%~HIz;NmsoA1Dmr|_GV)vP|8RXc8@ z^dI0B;!(Gpg5!%!-Q^oCR~cEYtW7N$>Dd!oL>kF4N(}?2A)Jv3+L2;>q9AFF*2#o@ zC`Bm(JN${`^GSXTB~Gsd4)HjSBcR_eGDs{x{FF#UzqcGE-ef%_SDN!YEww_C&COw$N$S*DJokMZ+9*I6-T@Z}|gok2Oi%LSR&CoO}x@9Y#T_fw9UF&78>7ld2`(Uvk=U%Z=Xq3!8d+|cJMTw;9 zTBLpW$lg@N_7A|#(|G1;aoaJL^%*p^ZVT;Ughd7NdGQF@^J|K*%3lNNXY?<#=3j8$ zP$IKJJ2b!iLO`~wbi|~Oo15UL`N(r7!8=e+LiPD|)Jk?xXv?JsT3JkS&0Yji)X+=6 zs@y)t64uck3pBynzIx(i!4GtztQ{lZn15YNoMK_?P}n~7zV-nb+eOwBm;Iy0Q=GB0 zwHob(#MWinom$zQ8){H?BQwa;CVQWIn-iS+S>M`o<$5kb~%lp zonFys;hM468S!B|L3ThL*JeQ5#^Um|?K#G9Uc<RE|?ALeVn5RuViVAvZ4{DU>VNLZbAgzhQk~0+JMwa{J021XTsmg8Xm$3DEPSSc>@ieG$S}PA8 z|Kx!E5&m_UPm4T~i@fH0H9oTH0cnzK-I=9aee*Km*Vo>B?~QDFk=-gETp(~yA}`ij z_o%xeQ9^rB3`iPGYn$E4{Ov4X7xH|Exk{@lk4sWnTl1V6=R zS+gtBx0$Y2aaeeasmM-H;UlF{ zVxTH;(ruZ(Ng98;nr=Rps_!V)U8dh+X8laMN)@#JP{{T#8N~0y@e4-M?9%*qgwGFu z4$r!g}{q+(5{}=3H=Sdz#EU?1^14J ztNNP02gIzu3OJy@q4N_LbR?_fy^lw@s~jBJ?!zrU!u2DeXOP~Ypu5sSt~r7F=#qLel}QoJ#&fW-|K+#y%-dGzv-lpSmo+rgVoRXl@Sc2 zA#-`H&Qvs3Xc`n#H~8dp9VYK3*cddvuQEHBr|P>qq^>Vgp^EJS@v?S%<|idn(-*dn z&9q}XCB#h1*z+G(<&-RF)Kiuh=oIcgVXc`SobQ-ZAzL{JQQDcZhMI^e+bN0ESM9zf zkQbS~d$0-JinbD$mj4+&#YwE11%E|1{>zy+*MofDYE*Ty%Cm6`fr@)9#Jl2V#~dcE zto*F(z|W|k{o+a(Ay$-LYgC#muMFxn@E~`T& zB~yGIOZEoBv|Rl!370a%B1GH*ltV~qh@sv;{)qhw~OaWKDD>$<)5 zo|VrM8Y-#GiI=-+N_k+{t;ewUKD67KAR$i=-XQ2xGa00`D%8^>xjMDKW5)*)YI-xI z$E7M0ERuvnpXq4HNd^4?c-r1Ip2SuZ z-aqZpe?{h?Kyu?CZBVkvrhM;dV9c)gNSMv(FLDw;=*1aK{)__uBCqxTZsw~wZj~n8 zO{2FH4k#(YO;y2(Dz~e@GtY4=4-Qrwo=^P!R!F08b9grMNg z>b@R(FUvH<)>Uw*;)2sGt7Jp^Yy#0{hndNX(2L?35;7d;{M%XqDMKz>mqJf|$0!(7 
z_Qw4IMa<4G@Fcq#9C(t8BiZ7@s$7{E&Fy=%yqEb%?~zqmj8=h>&I~USDkXd@)w1^` z>Uga5Y287UM1W%FT-N@)A(kEyP-E0{_yeFMDW7<0=BQL~CD15_!i_WS5(z1eFFH+s z*fN(UzeC5|<3D)T0&)H1Gnx75>9@zE3v5eEQx9dQIvaBhA2+v;Vu3Yxp7(=y?BI&l6F<@d$z1n}@gRHO}A8Zw{tY*Pg@ zP`&0X+Xd4faj)(g4T{iD$_B3)CJPxQb(4z*JzC2p3}6u*)}U}>hU9^*9zKtXo9d8G zwo5?gd$_JCVHt56n63`DC)291a>yTmY&+LgodUcEL7}+z$oytkRd32Y-@>O^nOy8y zekJ=UuC{lh;rp1i#%0pyxSPKVOfCbvj}8y30ntc|{tnL%z=x23z*@S-S%YdQ{QU0# z*d;p%{|RQ7#!hB1)-9L^-EAtI_h_r93Y7u#(Y@{jn-jjfBbXe~VI2W+<`a&N<4~q< zT=OA`s1|NgyJqQ$es_+LccF#dk>*Dc##m+~h<}shxJwfh;pR&yL zrg;h+Vsqm_(tRPD6cljYtuBQvz=$#68ql6MPKqf=Kmp9G0M-@oL7_MRof|C%|pY2 zVMa8;lFfTk$fce)36Z~6bT(tWVeag15;C;^QGOGScY{I1f%T~V?h2(rg`J~|I?8KC zQt?z$O2WlFGpE0)eJVU&(aL)r_Ka%`8bPFzu$^Z)bi`cjqK`g^2i|u_jX9vfI<;=6 zDTe#2Et+*NTc=Vm;*FMN6D(-`uH)%F63){#Su^@ayTR%bE`I^t{{tvKSmtUq)*C&# zzbon>o7}OJewyd_Zs_FQdStwa0mo)}#s=9PSI@Bjp+UxtqUip^&2D5sLWm7F}Slg-HV z9RKO>!{+}Xx_rSRZWf(=+*G+&TOFQ-7M;%2b;^JkdoVeJu<@OS9yIf(cr+A)98XGe z&I0NW8YV$(_}?oo>;r-{yfs|j`_7aT<;~#he;6)nO)pWKJC-ikJwwR zJOAt&9r!yT0Dt3U!w%K)=UnkLhV`?#1aH0@+iP5`D73_khwq2ar>|9I6^$AXeJ|w& zVq6xgq_~ZfR9Maz!6Ju^F%7%*YOhN)sr$<*lVCVCFd4yNeOA+$?|1gm>G^weyw9}_ z{w(YtWtV^E|I55y{CeRZA`aY?@G_rQ*g2e;BKQI52ffYvLbKFDUxj$bJT)j)Z=5#+ z*GV3#@uojkIwh*k1aF*g=rRE8wDmjZ(5c95;T$6zP4q4kxTxe6GfxAHFMhL z#&^5#%DYtVSUr|0-4W+p=L~6>%G#64D0zCa45(;|?&kg&KhhFiVhbcSml-1-TgNfV z8EN}*W*&b8d0SrLHyeZzpX=|*EaV~GMB>~;DikIcJ z6qX0oS|zG(dW<4a$ha-YK#%?Y-If}88iPh>JMa5dQ|^D(1b6*sY8bfe{~gY$Fg8~b zk(%_`oMGx-5>5|Kwt>Oe&p)=Ix`?5rgUp~;|5alN2iuP3%7}8i0|#-=tDCM2HvFfc z@3*Hs?E`j|ltcP|04nM)y19p$OJN23dAs2OK8JbgwrgE3kD~cJ>;lLnEMNsY-C0p# zY3UgyQ1@Q{!@zbq_23H+%@4J2idVQ)Q zj@rAA`<^Fn4-{AH6M|H5I>4GEBfGwzVos?D?mP*ox2ijT2s&t>V?cCp*MmmBt0-H+u3u@ZK0*C}Chms?Cj?r889PjH%K*X)FR zBG>VJ7f*1*OnQkQ2j0-5zgn=D;`sqKsC0-98(&xl8Y!lgl3#5J-$S2Q%maIjTQpT% zQ;i!^b2>ar?AOX;+g@y0kPOPzW{X*gOsyJGd&q0FT8?TT_zl|`erj8<26VVHLor3 z_dV_3o@B&UtRyHV02NMX9Ik!%TmH|~zt@+0L?ppe*&?qj5!5qPD*H}>`?qW1&1S=3 zgGcu*X_#8OH-J?U?MvKWx*(jh>0pv}WiN)W6pb1`0KG4XnU0?wtxBEr(#vlLZD9iT 
z((sR(L?_KHtD1TIT#TsGegLHU+7T49CS{lhzv}Ua5;3lyOFi?1g$u|sOCQpkpsEJO z*B87#Z4pI{3@cbKdu_-^Ssfh-r`uh+Xn$0?pRZx}(F{vH1&V)z-*%2lnqM}X4_rJaJeQM z5zbQ%eDl?ae|tQ;a@;*M3MDqFX=xz~pe} zl1IAi^RIb^vx)n4RQJan)#K$U@+PRBNuFS$gva!jeb|j1`f)O(iR6oZq^A>w2z=!T zh}5*wY)r$DkJPBXTlVYD^r@X#b8NMtPti3@(I~>dN+DL=ndiLVZX$IUyKxr1JC&1M z?@H}?d8rfieE`jGG8oZtbK<@VPR$Q9cK6h~2Ft0B70t6-^?oC3-u|&z^9l?d1*n}q64}gVL{(8WLY}V<-36P#5|1=&kMH5wR#krTL z^8>J`JtKRlCmFlyzISH(18}a7B&ZaXQkI);lL*E90PqoHEf*(PR&2JicC3l#qkJzu z^nc%NQ9yOtT)^&ZtX6zk)BdY;@T6=H^Go@tunWhvILwoPS7GRc^BuyDxm2zfZ;19s%!pG>bxJzP5Q%YORTyM9mHd~|?}Sz&2qbx%xmS-TOpSF&_Z z#u&upu=}&k^i(RX4A(WdEqK2`C=}i)T6*)r7i6`q2`xQ{lM5@$5?Eo2`{LS%3){qENBTHTZA+awhXIGdo>t)^S@E%QhdD`y93n{B zb#3j9e}tCCIWgNy7mebkos zH^D*XTl>JgilQ}7hKkun>g@?o{NSeVtv#$QsZ3_?;PUq^O2JE%vlfJ#D`6VSR3(2A z_qqHt;2O9@FSqPaJYzAd^*2$TF51c*vjIP)++2Y#;wlMLdd!Zh<3C%cwy%x140xtm zyzi%&KtmlzdcUxi%dv{oFrxx|)wTqV{a`*^5NVyhXAWyWEG<(1-DOPbnr716)pJ8{ zKU?X{_2L6M*bhLVtI4HF2V%R$OQa+BLcy@0M2KbT8D>L~Ycn_H$vIYDy2{9NQQ-%G zS2haCD#s#PvM!g(6N6Lcn{}UZb}L%gZ%o_ZfzRLzCp4lyw9uE2Us-+sXUBoZ?!(<~ z+-1172hO{P!(Llb{AjwxEgAhr(s;9MjhmA=xWkt7d~Ue27qY8V@JtIqXS?$owH+2n za?1?@TT|GT&nw=5f>I~R$OryTFXReC^xkR1z=J=F0pOjKWuo54sjprO$DGMJcg5e6 zcLTC$W~-YEN#k8CZ-W3&*6P28JJ-G!n8q;vXu>* zRv4ODuO$4Pg3EdPG;YpYA|fHg0)&btHhB{=e_F@>=boerBqzp)zHWKs_01pkv_bDU>wTw%!A>HEH<%6G5{l4cr?H2mz?}JuYlB*b;B=h=r{wo4s&vCncZ?u6*DoItKr`r38ix37Z$~2 z_6_q9Tz8en82z|H)SB+AH@9cOVp^NWifx(nF`#FRSN0dxBvvLZGyO>+F9zP1D@MzS zrHrMaWpr`_U9VplM77xH0`Y%~EiWH=t>;zAIh=Ob>Yv^-=W-mLY-opIf!+a|#S58) z!-r*`3f!iNtXX0S#@Uw5viR_t$9XhJ7SRx$zm>aiROkEHcte7}ThJ!C%30;j+_X*; zBQU1kk5DlEP5@t%8XHecy}d>2WO^B@)~k_B;OSc6jmJBrT1y8RWvaLGt#m)YI=hC@ z?(zu#0B(O+@R(XVT}bDinw$vsP>3PAqLb^3h-R9{_El_na$ZsNM>?@a zt(S8j&YbxOci{^zW+#uhl)?}-OfiK9@3Ry*q-nH@ie<#B@XnX+%_cV*be$VRb%uy; zpC>;*aPa(nYF@`P9Hw$@HD!xOs_7T7WS}|xDBeWzMJGU2!C~ppgKA~Zs(ji6?Jo45 zS!wJZLJh4O`-Bna+bsAuO-oA;98d==km`4~%Pn$W?AfDh{?dJ`XEK3*%zOZV=E3?Nxbf374huNop}uUquS6b?Hl~w9OOA&D1kC;a$Q=gna|m 
zMS0(#*2I}mCorXDw#rgnk~5VfyISW3og(fp5(X(>*F^R$pN;qE4fb8Lh9ecM-YkDc z9A|wfvF@(7wrVdQoPh0Rqr-cjId>hsG32ZbMF$Ooy4tuPPKEo$F>nr-^{cOY8abwU zEp*71){5Aa8f{Td%^DU*S?_`QuJp)*inB~^D;wFi>$KRx1%YpK^;(Jaj^}y~Do4Gk^ z^x?1W`XhlCbxA7q{#a@ZaTI<-b$ezPPszWd{DKtAou*E-{ZKFiJT+hf4&gdz?@G#1`YJr5owrgO>_5EsH6P!WMm7+VuKCd0_t z$lWq%)*Kk0Gaz=8u`FTFT`|kfWO_Ic<&0Y>VLX~|3q`z5MRwn2#HQ3*bUND;%VJx7 zoILcG&_XCxn$Yr(!xjl(Z>E!5f@|aDysM9I*4uwM7|_MH+DS>aECcVjv^OutldZyV z{#dlKk41`+wo`OuseKEE2vKbM4}e0kjJ78^Uo*qas9x&JzCZ$h$G7BBS^3|CBe=)v z>4fCku{_cu%~pBZMV@&1$jRaLK8f&7%Jv19MdN&~DAO{^8}XA30q>}na>2lyrChfo!qOYZ`TG~-R-zPzQ`&6gHb zaT?{#mUXti|H$u3-a$fT%j6d`?(zmv8c8h&MdNw1c^{2p+w~h(FOSp=2Tz45f~vVR zRl!#Bk}P1eY8R1Z`=`?;AT3zrX{+~5b7@vlYp8h#5d;3_VmcxbU?0oJKs zof^l_?Koh&Ik+1k1xt9fK#2XO}8%l}O+C~^k5%`aH$cc>6eG&$TnZ}h{2iARBYxWIJ z&QT@}O@C4A73s62Hkt*(aBx5T{O+>2MK-m=axleE&7Iy5YN|(FALYMkO+I-lc^Ubw zp}5%)O&0`Zixr`2=xkeeX}i6@nU6W-KemH_+By0N9ouzfz{b*K8pU`7kd@*sLZF-V z+WfGkxm}q)Mxkss1zuF1HFIp2>P%^9v~LQu9}4!Xt;RBa?s(v)WFMZ1xzQtWBARJw z`8VIsH-yd~fKO<9Lx*CjH7Yl72(N}wc3f8vhM#o%>>HcWu;gpzCUuzVRg34oW{$Ey zoD#b`n@-bI?^p!sK6PQ@(p}EiovSvLAC`xE@B^a>uKH6uEm7R7$uCIhbA4%!Q(9H5 z#xKfuwZC`)PnOoR4f|G$SF>!O2N@chqV7OFT`y4$eSP@8+l(YMrXwJj~M$w=Rjy0F8o&+0C|us=f(4gUZI*8|%)zdGL&&XNptd*J!Eq zo#fkn5?#UU4xEO;J#fSEL%|TKF}*c0(jNfzl>nT!X-ED_L5^vAL7r~#M)p#8rO{Bw z=%}%Meuj~d*NKwD*ue%7TuLYW9%Wm(-5hI-Kd)%1=EO>hRB_|yz4S!b{&-p@y^U8- zfbv{v-itE_NBXc8_2fElkD?XKwVt`5`e&9qE7KoHg`?CCZl$&rcn7LXONe1u9qH3o zuLyYVZ~c0>@YkdAssl2s&e=!n_;+9$AtlFMHzOvWE*xwE-dl_fJg+P{IAU|VyKrS5IhC55c#T& zjY4YHlJld{i$>h)Ohe%KT_mdfl-lx?vG{S1CB=m@KcB%5K;RvQ5MTBI{U*HYRkk+Y z>r(Sl!g{u-6B44nUBbSuuDV;>CQj7Ldts3~$exqbdP@AD?Nr&73(5L@^CR(P9!kp{ z@_k6+Yr-Ip?n!eJE_O!efo+sk{%2=Z$G+JgfIYj+gBvPNTYFaT&%9^619cj2g!fkE z0NpH+U4mWIyj8cfUP6H&Wv4r&kN4q`;oi=Y#<{_utyhDzwphxWg5+wc{J?ya*SqkW zW-;`L+5vEj@)skqn5wKN5#o0KBZZQAj$8E~8-D=m?pOtiMv=Y63hBj^(GpfGzKGPL zu*?nJcVu~M0!%R0+J)^f&3=QQ6yT%*D?KD#8f-@Y@{y%;K0dqe9 zZ~GP`i4ScrW?v2Y3ZDlKW|l+!#(wjC6mUs|Yum4H;HlXj;0NH+sPQuP{?V7rU6jBX zxBo$)g)i$5z@x_u-x!WU~J?ZY+ 
zMsYDa;17-=cYPV9ouVG z)B~c$WwNyP3ZpGPV9^-BrJHpfJ}G z=$1Lp_=u{wGD0%lHN$FFZ8_HM`nwx~v&46(3yTUrE;rhK<^a8I8ov-JFdm*F-@9B- z_{NI(%o-d)BlN6}ya*&LLR-#6rh9}K_~x=jz6V4*k`yevDRW(Brx9D3_i;(Rb+|XI zun$X*@0ZoU3}5s;r=YlXkdR)L_4p%FsX}WIKFSuXsNcqCDgq5l%)dxYQ0PId zT-i}HfycF;@ z!Uo)C)BT=Tp}fpw{1(kHn;=Z*l`o31bm&gIo(wdRGOjy)qOHqG`kczt+opTJA;g?= z<~t=Fk+Va9EyFD(0jVO?SW!F94*k|mI&SnhGrc#d@ppBv*<)Cn-nHkSlC(6TSyOAjhLAbRyqL{AM2A z+v2G2P;4SSF*%Qy*l_2_t|=Nbn%IuG&Ftc*aL7@nhA?iqDw$`yi40#(fy3)id--$DzxMIzG;Et3oU`b4?e9-0G(7IW*yx~ z8jI-xNvH?O>l|1fEnyhEFq5h?-8Og7Y-K(5b5kl*wM2j?o|CLt=2z8z)nT~oXMNuM ztRo<}7yO!f8^Htx8>g--v?;;SwO?maG6ElOw2p2J;q(P$HP!!G)2_=0ep?i{QrwtZ zX0fg;EM}@^p^LF~SuVsMX_mP14tv~Lv8fkZi|FfZ5ws59?1|5p3mWj;AbDeGB$6rG z;L*ceDkCq_iznByEk+T8CB{T{>zO0#>5wm&Y^&Tkcv?!+noLo;wYuFfNT0sD-63za zhE|z6k!Rz!Sxd)@E@jQp*9_7uuUL$za8PC9mW4u&Nzp2mSU@m^A+T4^WEo~y^v+T>g zkctWxf&=JGLYG~4EF#xd*gnn_&uH5~d3D0EeXqQ~?gyY%^)_QU$Ue~e)65$lY8o!T z3t7u<6Z!kMZq$R9WM<1e*Cj>KKe2pusXD@04;im0tZu!T&g@-aYgg}3hWT#kblsDQ zlR+8}+d2%<^S+5pvLwMIvRK$RxGS8&TQ@_SwdeUh=u;ERV)~ES`P?=YElMj+P8?-y zH}_?PKK#{jezuJap zJU?a_lRuFHA~`mbH+I!|KW~YcH^f0^r<#hz_c{Aq1hVd&iXx??#gth(=x+CClu8zc zUgXQlns^mj6Oo$j^Jq@3R_m?Tcu#g#$}wq>zK_J437uqbgGONN$2&n_=(93lSs7h* z7au9lrCD6xAynfh81p;l&Ah~3ftND~O!po9x$3AQgqY1W;hb>;VmRzQzW7?=EMoM1>r=KCsVE-4v(4|mI#yuBfhjRU>M(2Y3xfA^ zNS|LZ@`_=CSDp-IE=GQ#9s$2>6}x2jPjYf zV64S9$~585!j>uw7qhiL0PKO1_neqT%~WMi6UiFC3TX77Q#L_WZ-jNQtXNMi^mZ`I zF&D?saBS29s$AKw3m(QcOukRZZFHAvQD!1}7qwWVgZUWNM3;s2gJa%MXm@WI`K!hly5}vI%m&5)<9cYVpqOwKnqdP z76(%!#ZGXXwVbA&xy-s%^AzQ0u1L`}{ZDjkl8m>sy2_o3xnW{@H_WE(CLauu+SK2A zhY%x6u$snfZ2Eq7ED*tUvEqF7I^<&u-x}~vvW4DwvstxMCw@op@o(0O+dH6`4DZBy zf=jl!y#8C4vlXR+Mk-OHPd;3jSmu0hQIA1fm1kS2LlkPGN|70Vx?I>xouN|yw#k)NeU+Su$>^Twrq7Dne zEIJm#$r#XQ&cz$QuFhBFg;U~E_cbz!U%7Zil(RnF_;COygn9e}FjuSiwaPYuVWylW zpX~gdv4$4f@M{M0JdN1EvA+ViL05gIj6AU@_Fw(l(yzWjRt*+23=ImF{><6+wPHpw zbB0%>IQ>9G@`Kpz4?ou~Vj9Q`#I-o3T0Yp6cCH(1rN3q!`e8)Oy;A*CZB%}XaFuX& zWdSp!)~n0ndoO1S0=Pnh$~8*STTF>?u|WBfRB`}<9s>aX1*zZ9;{Nl1e1d2~=+Du> 
ziXi)7NX2Hn5fgY~b1E!@e#L7)I35w4GQ=gx&f?tJtS!4xZ-_tn`z!B?<_WccR`C`y z&eK6CfybBD8R)AjKYzQy4{f+_ODC4%Uexax6CPfgkR~N5g;HxP^W%~b-1_?LW(>!w z9rJc?Z$TGW3M>4o!JpySp?Idt{!CJ3PX;4(>$VLwc1YJ|A^k!nWiTLz@hoGmMC`Nh!FtJxeJ91h|51y(q1pp?$BL=`ivtkG$p zoqV+I@Po+P{qlqw`Q4F1Ih2@9nWy4=o_2DUv6xPc*O=S$&hngI3XvcsdNhYt8zwkU zbf@W>id@@$dpR`hC%65$bGFP&G(v~zS$&~oq$SftqkrdbW}aSIJMO8r329mPdyFR7 z1S27(KH$%G@-4XGWb<=4x!^pRQTT3vwwtMcA=&%@U~n7i9dp~!gpF|Tqq@wtdIL?! zDSgqVNF8Ld4iCJS6g7)zvC6FQRbGIXFAYTRKHg~@JYugRf1>K;=@2Ug*2%=Eo$UJe z6NTUXN$Kkb%_=j(1bjPEb5clY0=X6HMD`P-<7$P4%aXj&g&$YtWG1tB5aF2g}rf;jghvHa||>srTtVbA=f zhr_o&d{^Vm{wWj?S&M+~UIc8<<0yMJ-Ssi^0AK1{_%|&_HZlfmZEVH)n1mH+ltE(P z#8L{xQJY^E9rw2IWgk;K%--?QW~L z5(53AiG-Mi z+cnlRDVODk*JVHAUh?zqv`FG|oO_xXf3_qw7AD-DmcWR?1vzWLl;Q~%Y3zp1~vRFPBo zNi&Q-G|@T?@ELa}KJ#xp<_QlHY8g2or97@S?2^)Je<>y{wVf^@$t@iv(0FfF>(Wg4 zES&@#+SxB$9UA=G3SaB!3VnRYJ@4+osX$x-S4(DbF#WH^ebtWp)}m9ddzB9p`8dy^ zMnXJ|EfHItQ|Y|g{vDR@4i~%3u6sFYUt|CIuYV?cxl#j|!e7R7rTK8D-yu-=w_mpa zfQWIvRRnd8k?pw}1z1*-%kS(_Yj0~dvwgBIHapi5Pl~V5bE*jweCgc7)gX{Ar&KKS z1E6h28>0B!#TE5QpUO78uBDKMNq9r`OvAqd?~j#{(}8z&O%x>GW%_<{cuj!+Hlmy( zyVM}<8u4{>r3ZiYIZyV-atppgALG#BdEXY*)XBnMDxypNcp%OL3KFjl<9X$e7xSN8 zpW@mBxESy8bEil z-shZq&$;*ge(&$?Kc;KCLRYP>uC7}3tY?+gl4F@B4ttsnldc~+!K>ulSctVZ>XQph ztve4!zh6}9S!57C)usS0F2-QzT7XJgF=}r7?M`oQKtIr8f1sTuuou<0Cv$k){NR7U{>^7gd3Lgp(5U}cfEILzzb&q&wpphGh-N(@N z^#CMg@6#hV5IT{`;#UKDy}&xy&bj_5ULa}_+W5UCnKX33zkI;E9gUBjutD93AqUwHKpa zu%?YVYYq~>Od?jV7ckB66)8fMJn#2^)cJ2Koqk<)y<>8(z+dnA3HGH2$`WepcFx#D z;LNb;x8^eajDcOa`r(wjOG|U<#Rk9ax){gAx=fUJD&Y;Kq=ZVbax`%p7~Hb}23yTz6L5ly74>DhgnwU0l`P(YRO zS2|Rzx7>W{+;d0`;p0mhZg~6Wg~Wt+-Ow@h7QyjSRY`l}L0|5q@X~3Jm7y zHDsIAgQhSkCY|(ku$kw+8`Qop?2?vpJQ5i4fj@I3Z!~jcm+RTf_#sd1ae6O{<5K(H z;IXr0*5GI4xSy-qydSSmm@B-grNL6Ni{TL4ugukiMc`|%fgTL#Srp3Uteb~2g=kD6 zs+h&Q7!AS#wj+WW`j6_H42vJUsDxh?y2cCYPkO(@ziMBaz32Kdu5YTxyiksun>Tbe z=19^;u)X++xNqx*mqJ!`Dti(_Tdr`?-u%EgzWyF;|6w;$FjZys_(n)(-7+LvwfMM3 zC&IZDt!?BYR%Pvm$-E+wNobGkk&y+?2vG=RA^D0(WqCQK6VPgCvsB3beWd6-b;Hma 
zOYIwLSYH1-dx5Fxa}(oQ@Pr*lFNkhiBW4aW367eT<xddze>*{Xpw( z66SUPf%dX`(r}^*}F)k)}#9FJw6fr{%3pr@?_Z~hbv5?%`!X= z8QVW=;V2E#TC@X-TqIZ_iz8&-g&ftJNNR8+p=m1B_b9a&cmaKV(9{p<+_7Dc3)-f& zQnKHjMX6crkb+sQ^b}*(J*h?y2qvQz+p&V%Un4cK(oGwKt1nIndhbM**KBCIy4UHB zR=;*uO37YJ2!TDaJ;{p64y{$-9!c>R#*KImzx|Sv9KsQ)ekiZjA2?H5s*9!Bbl-q8 zJ(iJuCzZ+Bd?~^MK)M_<$kE!Tycg_OSf2$82@GoY`Xa|MBaE9w=VU_NV5(@sIb2E< zH5?ImEMzsY3w-vd>Rb6!o(ly=xgCI&&Sl3jeF6OoqWr`2i?0#mmLZifI3Y&zKhR#G zKk1-6F;zC3sIueFf(ez`NtbaS6NU3pDaLjI=Jg7)h*NqBTV%MDQU$0BqyQMPL&Rsr ztyDsaOkfhwTz)@S+v*J;V&mAJmPO?LyDB>pt&Kij3F-A96592UPtRY9N?n_UD`sdi zT<>6xC-E-$@=I9bGH9nb+Oy2Xhs$ct9LytJPuVFnM(=k=&?pd$qevWKE50-wGO-uclp{n#qjlxZMmR}ZCF(5*K83jMRuY|Kc zIpOkh*l)|?(MgV1ReODb9G`R=v126HvswBu>C#Y5f$mxqR~9sXz>~))VKqQt+s+CK zXQ}}kPNX?Lj(N5cCYud1pqME1piIn6sF(lA1c$}nac20@uT1KZ6^O#Vj5ky<0*71T zM#|h~?5JTmP348u>(pj30iNkE=CuhIm7x(-R8jdw;5q3&uhMZODUn8`-gKSm2PI6Wu2EY5c&E7xE;`ribQV zB3NyJ9iW=cO*2$A@~y5 zfD=CYH217!00~*$zSv6L(PJDQsd^juYc6f8Ed5S-MQx6ErVFZW+xvFSvWc#?`H`lDmbSNA02ArdO7XIa#VmlxM4(H2@`^;*(aeWt3nfOodzBgruZ=B3 zYsh*jXNFaJHl#L%qVOqj>=4q*lOOH zoRsz~+j+1mX0a$*62@;mXhd- z=kZduh)!f}KyKD(c;9vqyX-+(*Py;6Ggx0nJ2D*|mg+wzEZS8pdB!m6EywHBhN;Z= z^9S>#ei;FJlgnQv9nhC+Y3xA5r>8#I66nefPZ{^Vr4h|24D-_UK5_BUisZ$|4Vbsi zxrto!lv?eo+X{~>KPfpzSGC14LL|um_EGDG_SwCUS5w-kJ#O6xG6CBqb!fv1G=^%e z1q~fMdb+|1NfU$$4bWWPL8qwG(tuI)Qp&|9ktkI=D$Vwm(%+=Mkx-K0sX#(x(=4USTjOTg-;caDwUN>?ST@Xi zNJ)~yzt^G!6S<$m*;0uHg-NQTj%lI?F8W|{ zcT?Hr1H#TwdrgnWQrn&big5^x@-vZlx$SF#)~`n_%1EeKAx5pzPq6oFJalfC1ZtX= z9m(h2s%n-ui%&1zXU-5gH;c~S2wWL~{XpXyg}jwGwZiQ13Fxdg_^2*9y$e6xWqc9X z5~5L^t}jF=3Ws(t7a9kCEsvfEQ&teq-Uf?4+sgqnrKAY=6UeDDC^j6pM{FX)1?}qt zEH(Ukm1YF!EJIv8bQ3dtB(A8uMX^=KmD}XvAav&s8^r&3-O;d2Tk9dBaeiU^gqq#u zZjAO!-zGwj>=Ad8@tegM8xn#wb#!yAHX<#Lm!&Xb9}@9XI^mSK-_S3 zeNw$QtHiIkR>v-&UD2iYG~=7g0}DREbU&P|K?ke~FbMiJ<1C-QFc>WIk(=!8_C>Ar)Jfi`}O_>Gl=SW7JUe{wq+p2$*J~XoTEBXIbb7Rw8q23Sj83`G?sTD15q`i!-L4JdmlmMb z{@qFpZ1tS6{A+Q~;n}GC6B84fK0;)g+|omIE2*@&Jc zbmf+y#ifUB_nGpwDJ`WrLlY#WGwyOGdByN|Fdjws+A}k=4x!@(4tlnXfeVupb*|7aX{{QeX 
zP$nSpQe(`&caiF_>*4(kVzH8XFzP*6+o95^Vr1s(=CTrXOzzfaQ`KIr=uO_2VqJc1 zplPF9^#jd*1RLh?vpC6>g9s4kp|<-K@=ASI^+Tq_le~k-twySiB8SmZoCE{IsxD?= z;VyX5Z!&bgFL&T(oQqzzK5!zt_Eo4mFQ^z(fYs14Kqjwm=C!2jtJkC+^(&s*kBiFw zsOx|CY4{`J=MRxmz<07_p~7Q7`>CT@9TwpMpNN`GI#szC$>j_<8azP6)CZnC6dwZ6 zZ0*_xII4UFs2eh$LTP=-T9Wx83T9I#2?b8 zU=-ztI}M&m5)03kQ>>Q9STKd)V1B1XU!ua>`QZc`qJThPzD7CeDI;ih;?(;Tox|{s znD}z!cX`?L}`n#AjP=rO6L3pD5n_M(WfskE38VFtI3ts zsYrFEuxAv`mSX()!>x%I1fm)@e#f;Ay7M_cz3cZY3*p{v0%TF8l%MJG++iw35#C}< zh1a%>XX%*-f>m;g0UZkZ0@H?!2dlp-L2z_5;xtHlEf8(wz#EsxG%n42004!OLPszX z(pzR&*Q)w{RYFwC(^hBu9f#k%Uv}39b3(Ab$&*@Mz)Uu&7(pAR`$AY`7xtj}CBqwv_+FDb?Yf>N5O0zg=gRMAiXeX=*=-g;Q1OQrQBP%z#O#b0 z%Q>|2q$0}l)5K1%qE1RhRN=~6FC<`aYWezWue_nDH$_|77*OtO(ooE_ITQdu>K13^ zi%dh#5U?c@o?k}$WZ;~d)S_MlW=}Sa7YXew7U@sOy57|rL2DTirdyycZZGS%5P7AW z$pC(NdpE4$p?AuZAN#JIy{Tj0RXCpM@b?}Jpf_7|#4bSg+zR}dT3BG%<)|{W{avxb ztth1Sxs5MCt8S?%f=`o9cH1dnLAZul-~nAM3`6E#hlm0Vmwx@A2N7Day|Czlb7jmI z!5+3(4P1940mX{^9*DBD*gWvM2zF!)ue2<@E;3Q9k=CA~s_eGdJ)Wtr7OxER`}?Kh zBP_?16%05V-~W2s@9`Vh4(plljws5!?_Pu!0!!O4`inJ7)gUr_N0>Ah9D6}O&>F@d zbEug|f!0@Izs7W|la1aHg#(c-V80KaG_R0~bJ%mCdVAVAykAwnvvln4P*kJQ zze|L&KX6pZBvMg-fE`%J;|v3m2^%kE#;32Gh~P@<4N~TromOa)IRQ)z;S3%$24uCK z=@vN$y#ZK5*Su6zbbL5C%)=;9aIvc1sB6j!ls+hbecA=UEDgHBf7r6*#q`PDs!UgZ z_6p59iD&+hM?>&b#HD+8CYrtqR7#{XhK#xMOPv4SRO5F%>h!k?tT*YE-m&gX1nt&OTsP~efnPnPL+|07V6-ft5HAwS%d*e3cydfqaR#WtNSOOo zi~l(I|4PDalaQw=P_CSX_owJ5bIT&FFELEnH6JVjgH=PrH{8WZ^$8YFYq7m){Y_imW1^ajk z4dY30rRgM7R%(LJC<6TE6Q zDK26dfKm@?cra4)lNk2Ysx&^DD6P*BwXzb@FqT=@bvP72MB0S3*Cs_M`C6!;OuG=i zq3V}*2C^@+!ws!AA2S#HYQmjusg*siblQQ0S!1;DPPc0Vv(Nw9sQ;t?|3?y7-ZtNd zhYnP}YRH}bDFKR{ssQDju=P9I;1rpol?3}r@S=c^gG1bo0LmT|yBFt$HS3l7162q3gg=BR~ zRYk{B`RS&R!tf(J!lRQ)KlR+64^vEaBTvuQlDBuwD4H-LM?9Tc&Ap^PxAgeIg`;&5 z+S+s~M7phR=s(cR>u9d0Q9u;l_Xgx36^Xo~6ETkac)l@)**R+ z+|lt-##`!ianuMAPaRRZ1K5z9R&p#^=l&Y+yt&FaE~3b04zl)e2dJruF1e2n5-k`# zmm#Dmd|>eVY>K)#IB+^6p! 
zAw5y=l(5=oA)S#I2nfi#8AY#%wjqXBW)5xU(l%FS^hH#jk#v(5iSupKF;vT+34v+Pi7c#9fDtzGseB2-U|;veVt!p{s{U0Py{kv-)r^Fy&QJo3it2S8 z$}DYh!!nU!!?zntr!9p}Nb()bjo{T)b`9SL0Yc|8zT^5G3FGhaSCv`cwXW*AFbv9X zb)$AnI(k4;?LJxxh1K~Vk>omfsU{=|-9QWef(~e=fNYS#`@e+6(Cx_176uZyoYES>T6RA7Au?p#H$ zs?^-X_21v|(#*?S_Y(QsGhq;T3tVWEBS(@Sq700#y6qa8jQqT65M!(k4ELdxB`vrz z6n+q^YjhU)VB*C#T@Z{zh%i;8hlY!wsMo@p%M4HqQC00QXm3zA3jdg|g)O;YUr`p9 zAix}^`4W6vk>G^m>3YrDpUwXFw$ndTp!ZNZ)!V$;4CT(;?9L}>Ac9P4L+G4N$g!0T z?X-H?*&Fz2v?Bl?ctE6qyZXll@=4(^^Olww-UaLLh7$6V^YMl@Y6JpPrLUWWZlU*F zutgY&J_J_EX^mDA>uldF#^Vp)NXRs|Tm_(qfXG8pV{mdnU>Vj=z(nz-kQdngB=c-y z0!48U&6~{W{R3_3Bsny>u$}SBcW9IZyd#KnJ77u?uf)7#s}9m~Xt01#=-a$ddU5?I zXVGD>W@XWqBD87b*zYCN+byV&sce$p1)}K8AzZtRP}ki$q6#*lIwNMP7&%jxA2oph z6=!ToQA88>wa@WWyq~hhw5xvbGw4H<>MmNR<3&plqQ1nBf<^2y3myZMj_JjH`&Y*O zeIo^*y4Ro_a{*oiGSf23RGoKnnJiUKKJH44oV2_=lZtKPKkt|)Hb9$Q<+(CF0(x^_ zF#h^W^GVwi6O#1v!lGD)H@@-SgB!cw8Gc^)j|m-)`e{Y<5nhj^caeGXX9H`gAvZxy z#X^>vVH(GD6G2{VufE$!96>iMJ4n-f6DRT`5~d;6OiR;gjstg`Opj6}LAfzX0BHVF zWqoxmWy27;45htunf)A(DDUjHwgsyvYHyqE4R095BFyk?HJ|f1$_%`KymK9Q9ut}5 z(qdv^T+v^W4zO$q@*sA0&J@Te(17}wtran?ix^N^=G#5~nm;OUEZv2%H{KPlAb^(@w3I%0IsqfB?8_&kze1Kh{qzE?`0b@qJE z>(QO<=;ifq27O_PmjXA}bLPToSL?R5SBBS{ICFpY{C_=T8qIorZ9=?=$}3H9kv3td zp%YLWiMd0oKW-ciMS#hx(4U`jCW#bOY8B|jWLLBo4>p>lk>5`3)tF~5MQCkZh;W{3 zUqO>sXdCm1+*1O0iNUEv#~LZE<}39_oRz}R?Nzw;$W-eaL0uG5F-M=tqOJn+eiwXe#6=^4om$q5l{j``xWCYy&K<7Czq z>AUPlt@`eDpMmV9FiDTRcRb$L*jA(uk<%s-XD`bHwc8hnhzR!x*r+pw@Ie=e8^ZdP zl@)_q7s`IIBUPQ_c5(QzuF=tZ= z8*#no<}Q@wK${uYD9B%x->V9k4!T`KEZ7>dTsfH#1VcE)$nz>1n}@kDsTxnqQ%a*2 zNCVlX=OVeFs~0HbI>vf%DsSPg>UR4=!B=0sA^|a|xlB3lh@=yloXezfQOi6l30+*1C$C1jmlhsd^8`uR8GSl-o*mEt?deZb=d&A9;#z;#c5& zzEV{<-I=qPWy{^+AEurR()Q*`w_z;J$@Vf;-(bgU}jy& zTk1&3Gm}h;sUmg2w0Z*Oa<7-d-~p>-xQ%^;!Qg%Dj}i)?>|8g$m<|P{L+m37^tta4 z$>-D$?z_CWA+GNi`*aiVR+t=b%@6Zi+K$~7qeJ+-SOzKH?T;5v-tU+*O)pR93=2qP@qnN?6?m;DnA;NU=|->`Z(=>FDyy9@EQH9yZhThd*jGL+RmE z_c$F}5?lH?miJU^XxmIcIz#~XxVBAq$hDU79y}6!?UWCnj$!^r8LUPghDjlXvR~pW 
z`+i%}ip=0wWtzCqQpbnLnln&1YR)QD@N}l#TF&cjp`z+@EA`oQ*K* z+wjmiI3zMdf}nw+@>#^)o4a=RGWUTl@{Wo}(PW_OLan3@ixo`nAV@Mh2_(dz{+;K) z4nlwZA36(eB?q#Q;hg)Hdk@D50w^Zw-WNHp;v9ZWWyH-|Ao5MopBO9Z%urg5^;4Pv zmGW;_icG3zmD>{zMR4ksP@aD4V+tNQE_1I{ZJ|~=4ju?CNEWb@4w2k44C7p<)QHLs z8=0^s-FMm03k`}Mqzhg9fwuFQip;muF@=T<647}8{XEdMv#J#~mUXrXJ&dzt)0JkZ zn5v7Sm)mL&f_BE2vAlVpCS#|mu+I8GSMkBqlr>dccEWzwyB1Vq6xvSS7oYCn?$47wU?Onnc2aYC(0+b(Dr}f;s)Pi8TT0 zGs;W5Cur5x4y~5X)olYN&!~VSn(!D4=G`vM6K1B`_rHYu=ZuFcD4+FAu-^#U3L>$G z+*z`c+}NBJ|9f4x)Ii5^W{R+5@qm28lCa#En+S%GC!Y=^tncKhVlgeSlXK6he;rDkybPEc0>rjSkK- zihLQE0#02K_;Bdv-hEh`qZZxHXoYJtRz7|r?Ph0JgTgXDkARQzJh-`_q6w9>VxBb_ zro^bV)nn|p5^Gy=8g9@w#w^G}!?J0;zqm1C{EU7=&f7jW{jTk3BAbe76}y1Bpo@%7 z51$a1SOhl9@WjG-G2vt12u?nf%YeHmdPb`rYT1hFlVYCp`j-H+-`(yfNOAtRPoMt+ zQp8A}M@ArCh`1Ho$CC^9y$`-BM_N4A}oAFchoFR=^b`s*KPA;%c-VRCL&l|_ae1hJVtXADfflW`TK zuL|v0lbVVi-M3RsF-eh;siYuM&5$ukDl+MJ7txF^A4xMW2kVO(vowV2JyHF*Bn2vD z&R`L(JeaYWV*gI_-I8)-rDVK#GU6$mPSnt9tR3C(ix?G(y7A2I(iCM4gH~7m(8#FS zXra9`V#Z4#QA29WsJl&?`Ql4vI#SpvYk;9D9R@_HuUuyWM!hV`n@)!)MMr8B-%*z$ z2Yf5jtPyLidHMLR=tXvTgb?nG^0AGFhsXkrB`F%Ff@zms`wUV_u9l?|AmQE6u9v+) zpUAAzgNf^^=>Ba(D7x;x@?m!K?tbuT0mtwD_*?e>aH2eIO=fGg@RVr>Q{oa(lzcBF zcsZa*Ak!lS&>LGV74Ke0*Zp>PH-qB#z)7Q-DHn*$deV%S$#gU=tgpO z=3SpeiZcU&nt@3?owFvfnYgAnCdSb$^=pxmnRGeTiLEdMEdr6=JkQb_$$H^ZlPYX>F~@Sw zUS`3m6KV?#VltQ3otNx2^qmC+tfU}=P@gBT14^wH@Lkq9PC!{);FC}R{~G-LdLb`1 zHQ8RNb}tXL^qgk zpk3GJbJhfm@@;4Ob!K{lv!<~#sg|vn$lU0fm;`>KG6z1naT*;uY3n@SjNkJ7M`+#b zKcUipCcvm$sKTWkCDn?*U*P{l1#oQaVz@g=(UfO54*4XC8}+=eiEET+#+rqT^Qk>! 
zWw>-=70#JQB;4WN4kii#4R-hEa3do!U{h(1?a57`ihkS}I_fq{-tjW#t5xRXnw^Ug zxu2^h=JVV`%KrQw#JQ%6h!O`)a<#SFHA&GH;$W?M=?7X&@nt;M@k~+btohKFVet1{ zhlf?oEIr?jXoNlLR?4@njy*nHq@kVDhZtGg7pu}j^ZR47QpNJ75C*9nPf$2#6c~@c zDvNo;)D<<&`L50^qm=br8i8`0;HRSMK4EN9S|N$X^;`1)v(gORz>dX#lsDm>MiJnd zLRiUpJZ)S5&N2o>W**K*TUcr8L?d6nD;0RvEzt)jEH~nNjD}XyWmL>#+tt^(HWlJM zc|e(us7046_Z6)4Qh48I$_q6jAu}#(Z_5kOi%>lvCiDLm{itZx9nN8@e5YilTCGwzEl4|^7?3#K;G1(|cZXKXtu zze~c?u72~_JkmM4A|>r!fAoMR?=(@j5oi>5JY0zv%cMQh0qud0mWu9mIDnI=u@*ZNLW% z=Ra(Wn(nydz--O+vd)3%K~^xHK7^jdI+!tQNNwBKw2skv!X;9i+l(6CXN;8u(v2nM zQ$uZXxRuV?j!o}dg*A%fKmGHz|5;P`49C+N(UW%==cy48`?&fts(JZ?OuciEl%a@9 z$Qu#n{Sy>X84T*^d-Y0goOyQ_G}e4sY>4w*N|>PX7ci_A-e+$e6g-vo>xMMJNR1TH z=`2&GcOT^N>k|*iVWkSTt6{uwXFEF*yMlC{2q@BCu}6+Fxd3B}1+l<7>q|74Ox`cU z+3%+LqEVv%s)?0p^+N^0fN(+Bv$5hx87Oa|g_;upuqF8{B%CUzJ|f01Y?$~E&(!i# zfNWD5j@DsHd=?&-g4J((q6KY`<~`(20&`N zXxg59Agu!AaDL?pH}UuJ z_lNs#DHnpvGPI$`(^^Ga>IU-YVtG!^@xw$lQw^&3y1J$#pt~&BeheGg4rwnc`pjVm zq}{GJCZ8Tr2|bP4AF!82q8n`##5hm{IwkSZWEEzGNEY_f>%cuUop`7owAMAGBgQR> zHI=I<3`4D*CFRaXW*rsZSBU@l@c%gTh`1Cl3^lw|tz_<3c@PkbJ|^ zQwLm-zQ;E+u!iMN^_6ZZOlsP9I1o|c0DM9JGHV(WaHPaMbdcQ*r3V{RQJn$hkkP56HDO}3))DUIS4mFvFPc>Byzi%U0X z__EV+g6e+5qwClK_+bN#{{}U0e~!=Oi(a_%^y;F6qDtC|%v6CUDgMY;PyG^~x~%s3 zh-WOB5?h0%Tn&R}#jbg3LK_I&>iPa%$)ETC@4kK*Vl46LfxZlI#-}7Dqfvo^E)53D z{6#`Qe!qQ%*EFA!g=RSp!kd+ncVo{LYZJ?-wXW#bXU-vCya^n4JsZ1)5|c-MN6XAJ zDxLORW!BXEheXQ5FM@N8Jji+?_ze_f92{!Pt%IZ$D2xnuMj6&}hv?t+>(X6q+wor+ z7hhy!x-S;Ew#^r*SY--WdWy=!CZZey$KI~hO<#*OO^}&Cku%AErL9ddqUKBw>%`Q{ zZue42XnUi`pEYzMH7Kf+7qsm6l|Bqau`Tehgjb9Zx~|CP*BF~~^QB&cv5K-kHw)UT znXx+-@`fjRbjlKV{dk7D>C4IT0nz5b_SV5Et3LHU2}u#>Vb?9CLi^t@73`)E14m(> z{dn3UdMJL&NWEA*2p+=^t#mYBFFXsh!Axm3zvAN)I@j4eixmMzaC#6;qMvozG13Jr zNOLO~mF{sBu8oyUfkq1lY+Nc6i9ZcX-QrSY8H{LebC{|RK%yK&V!5;^LL!myveT?~ zDUC10ZVAOQ%wGYl;=(9o_?R|ij~nGeN6r7JGPo*cJ~UJ!aJh=juTPN`Jdkpx^GNxI z6|?AtT_Maugj45nP-w-#Mv-J{Iu{WEq^=o7Me_sgQk?4r$`?spsrfe3%S(mmlB82) zz!Hd5k6h=qp<@FBx1DzkAJA_j4sCYuqsqi*_*jXE&=SLxCS+ETB>- 
zIgU@D^?t3{e^4yd3Uc$Haq}Tb)1U9cA7||4kxjOk2`#PiO5V`2t)wi&q78)pvN{H4 zfB>bnkQDgt!M*->W{NaVXLHWU3oRZN-{3{F0p_P{l8Gop)|dUqHr|rl9e4zA4R0H_ zY+Rgo991{Sr=N3tf$@F>nZ3YXS(wQ=#Js>+hSC|2kDZo69vOO4Yg6(&ZIU7^$lM=E z?nr8jS09V|43o#)2=3@-7O?Tvmoyiv2fBfSpi8e}zafit=4lc>+|ho-xu}JdZXM4H z)Oai?IuggMkkqpXlM=1zN;3L^b&EPohWTKmYaS*Xy92DEvLcmTjCVI z{teweoZ!6YdSv!El%sq>(9x>Mu;q%0-_rQ^VCKKY^^qh{SY!%6G`idBE@e5J@`Akxdm1~sEF$t-ZF zCk#^$g%rguloT9U-+o;r)151&W$41Bd7PxzRw+!}2u;!3O{q9CkV7P5M9%$Hf&X^j zHxvPM&auDUArxH~p0IR8ZZKDdLSV+Axa2DPHX{xB#IU}2dbaFo%XIWw%?@Po?V5hS zB39Wy#QSDUL-TxhbqM#w>sR=X%eXgQkN<5F&dYG$v6K2Dr!L)q`j3rOSyWpWtjv2d z`gi8T^FzS2G_v^E6mL1nCz)Dj9&;IU9jpy>==qT{g z^|E9y%r}RZKeB4?6k1i9dAaaZd)HiNnb?|&jpfwW?TBC(`jpf17+7_cOI06JQdBqZ? zSi#GecZX!CxJG&VWO!9n)Ew2f)X_ABO%c=RX=Mr&C#5c6TleMj&C3L=CoL(AFEbtM zJ%Q%;3x*29W8C5r>BHS{-Xr&h7wNUzL4}}v<7HV(2{F$}+S>CITdEYa^bJUzv&;#X z*Cfu+^@Ub9pN)R<=g|+mp5ZPa;CqMY-iC^Wqr>mbbo;<})CDPv!=_^#dTiMx7ixI8 z9AtP5d`|T{o1WqNqxBw$yDiBT*sHtxD5)gw8e(S1(@5TUp%naQC==oWaf82l_8$TA zhBBn96-|J*1nsfjXvBTi^Ml5kG&R@jCk3vV?v)_Tnxp3-5cV9l#LN_bDF94A+sxt{ zA*~++9bIUCIS`4mmws z=)Hn&@(9IK?D|pkbMs62AZ8=3XD1zE8N?`u6-~p{Zmpt^%zAG*YR4q#s6rK-J0g4P z$`Zowya>(a8QBJ2Wp~jkoPxZpGCWQtHGM``g;0E9GX^3z=&47g=!0aIjsmYWB&fKc zGS&qpB5H676i&{*pL!O*T8t=2W6wPLo5M~t>&pu?N6}#9nTTny&R7l2K4nM(8KF5X z#369+mstL{tGr<4zJyd@f{DV*x0X6Fm+9)>-OFHS=7fpphC$Jy)cXrx8z?+0zloX4 zWk(}pB%Skp2+O}LruC53uU$M_o=wbq zL!=-`sLsIFd>cTR8fjVi`a{OtUytp-9gzNteKW=0Y?8Mx7U06Ru=5hw24lH((0n4p z%oeS#At2tMs1R^esPo-DtR8&OCtQ&wGdLim zWE~`%a+ctRIdRb%AE3-1Rg*35%jeDUZkf;t6}x`kn(-iVP9ectA+maPreS(PRo?D_ zo%+2_K3L@~C%?5}*<_AOPfx%anKsusHZYGBjbSHx(87o zWS5HjoXoQ5Yx{cL$4DR$INcRn8xzTRQHR@)=C(3+ZaXf!2ueCtizoj%ttn8Klw|5h zGc2q2BKiE0F$}I`YP{@Wn8Nt#7~Dc`Q{Gg592VcPQ7Gg=Xvb&NH^U^t*`3O{W0qsw@{8e}(9E5z>HUM&IMO3v7?-MGnYH4NIUzijJsJbIvB9WCtD3KE|2=AgWa-edoUAlN$du0XWTO+O;T)Lb@C^0 zL{elFJF@Z0j8~M1JO)5Iq85F=qd-_?MA%dnP6aoSe(j=J^9K*GRn6)yn zHhbr>82%-noMp=QkkT^r4;Te70HM?>i7B(-3`Kl3#nzv}#*JO|%ITnGp<$NpKejY! 
z1KvBYQ{qAzJkVopD6okn%d`x}TmrMi1ubE+TyW+z&bjX1Bya`HRfb36O!PSb?LZbq z?Qo50#R#3c!~)L+^zX(*Va+Srd!PD@sX{8h(8%TWSl|p{888O&$@n<0WH+kep!CN@ zQPV+L1BLf8prRUZK;iX*tt_t2t z6l;j$sHYJR>C=3GkHnAhRFGh9|< zsH2p7^DztC-}m@m=+^Ef>f3oxWEbl+zYMK0W$P`qC@qi*+>Pcpn_CH~&)MVKSyoYF ziB3!{ZyW%5-G9UO*JhI{FkEu~Zjx*bv|@4JfI9QBWaa5i2JJB~PA2v+6`^vDKsX1= zW_Us%^t{O6)CCiPGAsZ)gTn${jvP9#8k1}oyH8hYsFTm1TBPx4l(kvto@61EJ75Py zDnZfu%Su2N@xRy3f4Au$LVxax{Z}6fegBe*cq;*NH z?C@ssaWaWxH zUeW^FLM`*DH|)y~qWhx&WJqVulGBONpNm=bS7&eJXGWFjjV^ zvTV^>X>5GDZZ;@B&8Z1Qki@+BGD4{P@7FHEFoY;ScUlVB<(B}s|pj!3Y;kYmS1-}%%9%s;jsi3UW1V2BD z75`gYxAfE81?z-RNH9x-fS*v{y4o3Nqc-hdDKP3r>VSjp(nE+E6YU-D)zJb(#MVhP zikJ;qGJ8jlEUFWbQBh_qCtMI1?)h2+#gS~h{QYNc^4pNW=#qrhyV=Qsr7Xo+kZm@- zgnp-P#Q1|N^uo;t2L6N9)m+-@$?qb2-hyAXdem5h8?VW4cc}I{Z;X~xUj=4ZhR#-* z=B(F*=9MEHcZhd@2eR-@UMfbuq+H7)>Ep-LKo=FzF)l$0@Plmh7QBaCHYwI}ltt5# zy9o{LzLiY0GllXBS7_iTJ zD3!BA5zn`Kwr$)*A{8byAFX|l**@-;%XP+?*O8mp`2&7ElGbb9f~~2Kk0xa|2I-cx zGo=~&F&$YZJ>?1&0^mIE3zi~>(jy01`dONs28i#hkI{rWwWY28cR!~vBJ_&{2+nK35gg66hy(=cK>v1^-G zgF^|!N*mp{H38L;@Ss3eYbD8uVuy)a*5#ws4~VeH%DUdE4$q8564Q-3lau__NQ|DU zf_*^6C->yRaUjPBvWNncLwiPb=_^%Cc{_vG8wJe^rF%2RUwnp{mG4O>#rQP5|Kn-z zz>oXqrhe>#Y_i9GI}JS0L`ALV*Um$ z*h;t?7akO*b`Q4p70`g%o9W)Gkntq~w!%7L&}H`M3WtbxXh zTKPsoyo|@Bx_Oh;D{-u%MQPWq!TfxdbSv z>fAQ6*$VmY+LdK)PKBPu$y*69uF(^2RNyKvoO5#ir(vSFe%%)~0SE35ikXVpav(G--h23zuV+_zP-a-0 zi#)IlMs(RNkoeG`UOIM#>X+8;U* zCT#k@9cpg$w!r5MFIu4$AR>C*zFa`3A{;vB-k_<-OGeoSh8C+viF?MhJ}-}@4+VLQ zIEzv72kd`GK`9N`VTyiJHQcJ@>s&L{yJj=+ctIQd#q^tZ!>-{}c!&5mZ&1ksJ8p73 zk{YqX5?fu@AaRZ}bR7~`EeC%7S4v{XRQ(1YIusX#9mRToXFZpYxM8HZ2wUa&2Flr| z7NlAEeY@vZ>FGbv2*a;s9Af86f8BwGwr9aTVX_1kThq*Y!IYe-_A9y^L4qG?e_nNf zFQ6N}v^!&;`t96S&6Jm5MU~MM>8N#;YaBIHcEFDbEuJscrVsDQ(@X|==(j}#EQ1ni zGvJz~Z?0zQ6)2QEsw}zg?Ufp^7V9Onr#-5ey`#t#9(`;ZpUAVImumGn<9QWNdw%mq zlCB%8;+F(0fJKCwY5UPOX!v`YdDOmx%wV4>CqI?RT~6DA^6nG<1m|vKCTJW00O(ur zR9|LIM}J73vR3-|R4?AJ*9_@CbqkrZh?_V(em74_V@DoklCYK6>hbPm%2JRdklaP* zo6tUt$KhL9AJ9nBLq*&e-j7XG@O;9)Plv^OOVP_shk__nj;QynUz0Ysmv0Y?XfOK8 
zxBX_qf5#YOdw6z?6tef+Yl2*pOpMPxc8l&3>2o&MxYD6`{bknk08gMjnK!rx*f!F! zRHm^hd$wY1<{o##-W>$Jg$!Kij~{U{Zf~JVaES|QP&CdPW#+H?tYBf~;XBB)KGFTs zT6XJoY3XB-Zf%CmXpe(9vS2?`z6Aze?|f2a;>O;gViHKvTc%AN^3zyRsIcE zC-M4ODp8$)H! zDosRsQ;G-ykuIGjO?pj$(3B3LNbk*pbPPy`fOHay0YWb#y|;wkq<5ruym9Td_Hyrg z&c5f2bM8H3Tz~kz4rk_kXZ^l8=kq=fkqen-t^VU)g96u?H?0aEE2Df&lbi2PWOFSc zQo{~r-K<{G7VAcVpG6tN_#Qb-Bm>_uA_Y@J;*Hy?fvRwOm%ukh!$VOB3ukTV67x;}Q7UM<@myx(dsBJ3Q;XDWjel-h{udO90(s?_y(S*BK5Q|HqE$^ef3=_05>FhOf z|BPZ$P`G;x2%Hd*x?4Vi3Nwz3f4=!Fr5j`}-xZ=$XMC~os`XEU$Ld5vnFQbSlk)}9qvM4E#5Vbi8QqAA^Yg|70*)(s++YA#nHv(R{k zYuQ`!QXkO@?t(@$cK0<-1CFDH8uxIx1##y3(cIlbTLW#eMl+?E#B43`Y-+) zn(afm6F^QGV#~@HC~wFJvxnV#VQbn$WSs{Fjl3Dro}*9se0-laA6Zg8r|g7)c#pn2 zpb&=^lR^`{RoWS`9ui%C>Z`*H%bIxr?$pGn^3IVC*8l=o%BD3#K+B*RkJfRabB<B#iSY~yOcc53RKJ@#*zior9Iqapx}fhOrR9GOIw{BQud`U zm$|3J@k$7Ha9HaTVCr}{XvcU|Boe5VvH$`X1Mm%Qg=bw`a?#4MCWE3FM>sc^q`B(J zb2-+yd1c7&R(hH)>BSx{c^J&->Yi-nY#1fOco<^3xs=Oe(4mx1gfGd8cD=i4Bqk^} zzkE>ptbA`%zPdzK?tnAp{_n3woIgVn$*K_QZwP>NcN;&p%IO@?-5`%$3_;9fJpUsZ z4$gm=rTmkVltFRW&`u+f_)@{rbAUfwX%RU6#U=f=8|>}y{&c@BePq&p0^{X$kHp2W zVdMI4+YGCX-^+;RruQ(b20|X+kdwGHrGdwncrDX_R717fsr%d&nxXLUyC~sc0R_sg z-`N?b8&LNTGqVJimeesnzsXDLiOGW>!GIrtm-6wCvr6MWSDMVyX8P|z+DL|1XVlpqe^k7&H#YkIR+dZeQKQ79JJ_e?b;HTcj!441)= zj0Y&YgN@5TpK$D9#yZ#*v9rqO9`;2>a2lN*xMye5RW%hV%gP-wYYhX~ z_J^KG_4u*keGf=HdrqQzplyP^Ou5^>L@yaH_4^O~N%xr)ajwE3Sct5t)T3_)$X|Xy z@VE42rwXJw(M?xoYxkMnT(l2KOJ!o&CC?M1yPo=S{hk5td9<`e@)068l#HrKQ)RTY zX4`u9F`s8JPjI(Vp)$U8rY;~ZQWmJawo_mX)~ekpmwmb~*3+Tmef;`FRnt}_SJcDI zxpr^F{o!uc)$ylVVm#6o2Dj#(X>rDCxw2;#6LjZ$81h3bPi=0n$yZ?>R-ix|O9Fe$ zWZJ&|a;-7cF64TYaAb`bP1GAEbww*?1d$CLv(n&J<^Bg>!kX~nLtj@LeMETW-SOAA zk~**yWU{hXqP|N71Z{^qKiAg~mRjp~Wgh zoM&JuIGc{ksDzN{V}aJ&6`X75KOu-sz5^{3`~lIrIH? 
z^1K-KEEa`Y3!loDgulEbS*X4iv-C%N5-?QnkfV>w^kI=pv1G2?x=HNQ9O)4{Bmy}l zd0^BCq8=;7cz2iQm#7^FxqUa;H_N^1a%}J_0ncy0Zd!ai@O6RPoLkb1HH~+G1-dWc z;axqaBYa34(MVJ7pz6BRBM{SlU1^dVBHy2HVA!rjKX2I?|SV_+h@Y)urU>PlizHw zR}x}*fviw?qo^3^R&Ri+$=_d^t9YrNs;9H_x+f_nhq5bQ)%Qr=ZzcM#YjDAsYmrOs{9S6o1OQk$fV(R*futZV(%Zl@D2$(~sK~@0~whF$iW_Km=f( z5PVhO<#H<@VKCwZp~PeLpPw6I$Cc%R=VTT#ah3PrAl{kta~rgjwTN*t;OU)ze_XNh z2kfxtHGV0XQX-HGeB|4B-flBH)_-0dK6jfRw5!b))6tnLKmn|Wx^KpBc+Uq;R@pQs z3Rp^dagFU2=CmqIA$Jx9h$20ji6x30w97uCyuv{gkr6?grglrSZcDtaB8Vz6M>#XD zb%qD&9dc|BU54dYhTY7>fD|DEJd*V5<8H>0ZA*E!LfwPAeUa;OF@hlYC$QN+zsW-5 zyB1z0uI-5cR7${<+oudyp)+s-;n@z`nJ0baPOlUrhIYGBFRT51&l#BB4Ug3c-K}8E zz8Ux6eX=}H4%G^v7BdYe5Y-+^Rr?0B#BS(}-+K89R*Tr+jTQx`wJwgHa^b7A!NZ7r z6`ek7wmqvY-Z8D->mV@x#_T6f`LJx*s#WRcR9e>vGd1n75(bm(aK+esdP_{xERNhQ zO8)WIt9$aW^e9ojlqbAv@Kf-B!$j=fIkhcMi>=Ore6+haTHC^&%Y7w`r&ZUCVjU`s zj=(GFb00j1^gUTVyqC_z^wyDAx!P!?t>ml@yPFexT-3PkKhb;?I@F;5L)RNRVu)QCIk*Ebp^C}(N35et6SK7YK6{yc9x z9qvdUT)s?7`AhQf_Z0Li?U(E^D@btjz4k)1kVTvoge+>Q{SnE-dTXp0=n_##TqM%M4T$VCXl@nsitB&QjRgoe^MVBNc)q)21Z?MG^@4IDR?^=- zP8?1&l)SA1VvG~+bddFKA~XZazMyLOFyw6-%tQF0AUF%ZQX>fD`OVCHdN=n64h?ZD z7RGP;^E@b>YqzSWQ``FU7d4755! 
z3tR+Ni{|xvGgAiTW_t9xda~H*;NE`mN26gM{~FVWWXXJOg{rNNaDj?p?qt78ziCfy zS^7{-%yZ)whJ{ivDdke>3joGy+wrs@;8vb%5yxhsJEcCiq@WpC&(MCbTG`tNFSdnu zAKWB`B=;yZc(`-f^@Z(;uit>M6H*mgcSZ>f?Uz2W@b9~tN8X!009CbaP&<<^K=kaD zhFMjTE%&^Zbf$s%_=4Cy-3%!Bsc4;=c3hwbUT@g`gF%YEqyPm9xrp~kZN9P3g$ zym8o7DE*)iw*;#qUqgC9&*Z~9D19674RA^rd5I#O>cXGQYyA8K3$uA~UM^6~=Wln& zGp2A{bd4%_z_6@I>4~;cVRuALhvZ;Clom)&J$feg{AH*hY@(i@&ddFI<)Avx2*8-x|vG1A9P8 zPfisIKa80PkCSy3dNf)ERY0}@FZa)^c-{}$2%auy@o)7(C7)&#wgGZUB0oO6386&NVz?t0>Q4nU;+9;Z?*?krV`G3u(S|g z8n5Ctm~J(8UJ#rdUyhg0S54<~4(u5zo$gk97cCcIeD6wm(XE*x?-8@e=UlPuYPy9D z#}mm5riO*HLogxvEyoCS-!F^ttt+uLm0WeQ&|cG*z7?PMYk+3t9$b#&K<#l5O_ikN zDqYoBZpVxt;q#W23}3!Rs{kjxup2v>s5TzG<`3y~;AUG;60`ZRaBM$k?t0O)h-yQx*ER58u$r| zMc84;3ow(~UKWxRvErUPz|SdQZWruaDCky0+l9dA0BN|bl9Vfv*5D3MHYY)?^OWk09vi5DorlOhs`N;FLnPt2D ze)B@lcWo*pRs@c?Ia^+>A79p$84&ZKj*2Us;xrM=>QF zVBN&oqwU}?{kY*iWYhnBE2!Qf`$**c79kB)fgv28 zopmTLM9hB4y1WdANl%7_pjSjH1sU-Jgc++K`1mmKkNe7%DW&BF?Db^1war{=RvPO) zd3X5TC>teP!)c7JLTW``fTv`88}T#UZpcL~b}|T8uzM)-v{==vG6g=hT&VH^byeE% zj3s;cP2k!rJ8QoDe9D!I6qO$?;Mz9x0W#axYu=81&_+VK@??mgLsXS6WmL;MMq~~$ zOJL{!tXNEQTF>%5okNfEZ!yRGelxC*{#dF0HDDh9CoS9bJz}+f;-`Lsab=Ga+Y7^L z4`oBb8oKCIZ87lQ=qWOysIyd)=y_Ua^Mf9>2e9Xenr|4LW0_{QRVIL)I zb@HMMh?U$Hf^Q~9Mm?H{gwVp?%Hkb(Eq+h5t7VC156^RhRs$oEgb(H6XPS&*1ly0Y zf`t_5Xxn)6M|iP-jZkz*NLonH@2j=lEI2v0-hGTlyIx}gc4?qH^Ok^^?h@GUXZ>=Q zuWxTCh^ov@t534#qv-h2i)Xb0!ye5?Lfh%|+H!j#+U8jNo6Ndac6|I0@EyC(xaA@{x3+zQ zhxIL+p%~9iMG%!r%Xmz6NQq;CG_GZivQxG@q?Mm+GRxdlg3RYggBSuvV`In2gk;rP zWEKaS#nr;BlUN;2d%Rha8_G5AG!c;rJu)?Ss$52fMA~{oiF}eB)n-cL?Y~*8i@Gaw zd(L1vj~DSKz^<(E97b9Mijgx>Md1_&Xkz9RrT%0_y=Z)dBD;4~0!HZ#VIgAV zWFGRo`RJ>gIJheeoErUH1DIeDR-isBgsj+qqY<2EbB+S#rHBF$4AF|e@xT80u*tiIK)uAp5jjr~NJUA)4(@VVM-D5Vy^A!q7` zWed;}%nv!8Gl#=+uzflxcaOL9kr{;LC(#rcZRk|UYj2(07>-;7QH*?T+cA=U8UFZE zsXow;*={Y3$H5ILncmhZTD$>RZ&~?)Lm|y(8um2H+zDA|rfvC?smg=-!AWCu+v5G@ z$3rES49APAbmo>(cV~XM?=J#O- zSMVh31c@0RKy!LSN~4BF7-W3YLD6*l%3CewdM;bar2#tbM^ep5s~w63>*7iZJh}%`O132JFhR 
z;hdV@K4Ny&{@Q6~RB1L5WxF*K#vJy@%-l)7W8wE6&WB}7OG3M|U9<95rt_R2Lg)M8 zAU5}_`oIsezyDEdkqrFXLh$FO@c;RG{pSMZc*m%MjJ$sN7;e>(>uvaQV=RL4kgb$)X%?Z;)i*la9ZY{QKYk z0jj89Qsdx}isXJ4eNFwcd*M?hy}GXaIJv8B!Cx2jzoz}g{?~WJzbE{QT+w<;-hC`l zyFuI6|I%Q12GRXqOqlfL#zB~FI_*{Aj$Gw7x9Rb|dNwCSV!eG$lp|6eMVl7#ekkV4 zFD3r>iQfE?>34pc`$M`G#;3#zGS4d9$XT5o zLZY1G3x=wf)U`)7qo}@fa0UM2{=bH`K|Z?8M59vu?-KPq?&Py!22YAmFi~FQZs*@< zlU)`B!M+-{4|SN+Tt7DHENoTa?5NFhSBj>xa31STSA#0j+Rz7O^bEx;aSv1T}&L}14j&~AQDaI1B!q{E};F z55OMV+$r~bvdbrKqm=nRV~vZ38H(|4ya{6octj802X;10O#o0(hz)9V~M|(7`ww=pC0o8fPiJPt3x=k;`1y#(SQ$Ti&vNLP< ze{XMAKkp{kQ@UQC=9gt{`pBYinb`smRseQ0JMu37Fq~Yex~{ibO9-{dl`c{QhAb{3 zF0-ZCq<`sxKTRU>3MC}MIa|lR8uWYF!oGDM+B;9{>8;Oy`u?9JJmHC4F*=i4%X8J$?l-Oz2WKWO7h+S?7jPPTlvp|et-U3YYCn!ZVmCk zja6Z3x{PTYhd0=NVwd%%yYgQV@-3<^?| z;b{&jS9+sDGUV4_0Bcn_uaDLi+FdiX3io<^m44v7G_*U*^%GWBpGnuKplB1Mj`tcH z0oko&8PsZ0_~;NtN?74blDr+$3FYm7M_H;s+ctxh#FNz#40h6$Rt&BD=-@6`aJ}GO zv|f(wew6B9k(@2yBXO*%t(|0EYZ+Tu+R%v0REsmR$7p;aAmaKx70d6I&x|w0wnP` z+EXUz1ok}!vbXN1DqUF*O)?7W;R|XfBGMnzi#D<Jo zjQ6m{Q`bJn25RQFZUe39j1w^40on`M_3x|uzqS`T8p|XW`A=@eQM*ZH4MQQnTij&u~(MBvvJ*a zT`A7W&pO~=XX?L&0mChT*j86RYMU~x^}7-kMk%Fa9+nImT{#~SzKrl6nap1)w&~p5 zK@NJ4u5SOpdAeE;Iq(QbO-WbZ&=F6LR%W0o@{iitj|v}YRPZ=$mrNhYdzdxUF1KNl zReQ1UGq33@r~bUdg3kPdf6l~Z?b6oSR)Wjb7FLwB;3n9Hy5gN%F0=zaTdCoaozW7^!tLP6?{ksXi7(Q{FgNMy2&8f(Nlk7Y-Pb^}SaD3lBWZ`|bGLcGPFlT)7@J5kygeEl53^0VQ3e-}Z~lRnAzwO)_E|@_B-pC?+yV7O z)^yXgz}4Y0hnA3z7omGyhno=U6!=F1e7JgnAW7b6!g$$cA8IXc{Rv6XBY*rKICi@% zNBvh`I1Rs!u|I9nc)L-Au>s{nbB(rE-xvu^Y)mf-s6zt5h;|FN6s!!L@ztJ5=t53~ zGm&A~xVWJj7l!QeX>p(jc5Mf->9cug)pzg04)Kzb)FoOX9Rnn?G!yjzoK2548IL4~ zF72(8qcXm=mA9`mm`3!0(zI^wDIM1z>aLpxVRkz@SPeu)_oUfrHb3rWe@gf^t+3>~5A@?{?Nqi;JVOP&90n=jf`VM<rdB9Uv2<{>C{(XDCk`g`uxytH3iK-KcD~OU&*x+widWb zyL_WO>_m*g{LbA&8r+5y^6a|YA@d_@$-V)CDt>M>QK%vl-bx9C;3>ABSVbY(=H6zJB_qON8{s?jtNxVe;48C!?N@5TGbl;XiQF zYjDT^G0JGbOXZ!NvC!5UM(Z|eG1-dQA-`jy*yux{8^tz}?S)yP1z(kocFKA>%p9bD z6^*BIBHa1G&_CxLTfB;nF7-#Ou<~rTDD@u$@IUbu^hS5#r3;!j=oc(no^P?3@xaMk 
z1OcsQ&`ip*gVtb4r7F>-ZYiPXoJD^h+*spZ@2KPkh7;+H<*%Lvi&EPgx};fJGVf*U zip9e-lqv-wRvg7|eg_hLJp4{cfp!KY$dM#j}%Ws3^uljrPdWaMengY}wXBZ6`o z;)~yoLRRyU%7F!oPRe~13<(a#lg^{{s_y}zt&7MQGe;fsLsp39K;tr;SF`hi0_So8 ziFR^*k*9V;<~r9OB`ACom?2P?6ZcGY6{O)G+!K--(mBY#`-0@<)yLr>ama{1SESg` zZxWzqt*;qPj0ZUajK12~TSc_YcFreRQt8?qricNVXw}fK$kJas&<9M1$--D<*68tD zD?^)g5u$B{`et&oF&WRT%KKpbO+@;>uAr=U>#LK@k9(bCZia+l<8yFv5r-83MG!*b z?3m4N4?r7-jk}<@lV5e?xZlOtV3{pSOhibT_Ia!6<{?zN@z?(O)4oaH_h#^P65+w| zw#f1;lDbra$}jo4?p(@yrejm6)VpC?A2kJ2aY-TCP*E%)!zJgylf#GahiD^LFB%vL z`B1g2;kB%jlct&J^%5Be!O75}`6s9irGnD#kyD|g8F+@3Ew^AG+Erhh|CyKupDeV{ z8O|vo@u)~gTd3ZXb2PZ#Gll*M64_3NJ!vaa#bV4qkPgyd$;i3rwreS(gUgB}n8cFK z98}NHxA;T^X0FjMj#upt>t3e6;0k6V3-%IpvGwx?Mg!3$r9vM_Cu+(T825 z%tx8YcfUh|AyVQswj@~aO2n1UD>{s*EAr43p;QjsXiMRHlpJB^_xydvAI@^W-Ot@-a^arN-hnza>jQ~cHt;@^XK!zbl+cBDzGqc19Pm}S!&H?y(QAY z?aa7?sKU`oW4b#^;}7B@eVqk8AQOwz6xaULj1}Cn^67jf zTO9O+C?_~$^4NQ9S#+uu-LxAZ%)|0y8Tf92zc$UU$No1cRio?%)hkw8`I(XQ5ps|Z zq#3`a{ZHDr^MgOZ_nrvFep6ts?zkRLjG#=9gr`Ahlk@@v)WJay|I81!tIn~^O3R5# zW$jV6&O6HM0TW>VuEy#>H}2blyl9A49^uQP>o)Y7l?AjTso^CEei|dN7K`2hc>X>KX;@Xv@+xKAi3|s^_eSYnj}HkBzQA&r6F?=A8}Y z%%`&=h?z3vA6B&T2RAY2jo0V`2=TGJoZLJ@jzy{lhA)ektYy2x!j`4n)b>A|%=H(YXZc904i+Hkuf7a;dGpv749Cpuf=)t2_ zHD$k#IQd~I90UQRqSYo!U*!+4aAaH^iVnc0pZ2p*9--DGQJ5LFtpEFFUTgb$%U1ap z#^2x<2Bh^2h&p9ySNGywZifFUlQ0?+I%yLqDyHihAN8{pGX7`HbjsjDwMjdi#7n*U z_(Z?eEu(ob-tQgr|H7QcmPW8NR8NAF*&&vM+i(u)* z%Q!`{JC3xT!mPE9RJPqfcKmUNsN_oI2Lgqu-BOb1>IHM$!DDf|H(A@B<2bH<{dD(N z&tXE+3`{O<|J?~4|J!r?@AvWldXAO<4;%b{uMFP8ZoCnCBU(#75?Dm~z^KOIhV-8^ zh2Wu+$n0RT@SIE8Lrk8VB6g!(5Z+TqQYt4Wt7A0ib8m@+8*UrLvsQ=irm%nOy7r~g zabd|75ndqW$N z^hcqjKGdT><;=<5fJV?e`v_>;gm)=weON-wfIiYWujme`G(ocYuBLjA)B_;v;)f(eCDYd(>fZ*jPx((F>Y%Bp}N=Q zG$aN!H7V}>nipy8Sc$PQ_f5`l}z87Ehm9H%4`D0z@hQjrfET9i~r$9OfdD8gSfh6S#dgvBHJ~hOSD{i*K3SCO~ z1Bb{{Uox>8B9>N_!0)a(SxsR)2E9bo9MwxF&Tqp;M6d_Y_JU%QqF@D8a<=CTTt4nB zom))r(1PU*D#p&UVo-2QJ6G7YlXg6!Q_dp?9H>R{WcFs-lYW*@rbKBVH~Hdt8p%t& z{jPqJljh=0UCW{_8KpOj(RzA?9^cLHyGD`TiW;u6#qNU)LjYwlGlvI=I3JA4+%HY` 
z(|p7&(;R71A}Fb4xNQm6<2(Y10+S|L49={-5@`D}tS=yAXNSGQXAFSh0G2d^S_~{$ zB&7Ev@oW%yhXwwM%GP{YzZk2K3bNRW;qW)D8u9Ee;tx-3w@wq|A9tQ_i#LyS_uqfq z6GF;*!f#4NQMP|H%38P)Yw1E?M)x|oT_h6iQ7+_dCGowm0BBr`V8UW*zA!xb)k^{6 zM;m&smT_ga!g&embplDV%vb3J?}dCBlPo1!&BtvRV=L(_*c6VkhCsNZrx(3OQm;EL zw){D>NuUO6&Ut2j4%#0D;-mU@UmspvWp(uv-%vp|-7`Ltjhdz%35qTjpfq`Vly57+ zpHPabUJR3sMHO$y+Cw*;FcaFEAT$_4l@KICjJhV#v$ZH83Y(qEzh3o=gIb2B^V}EL zw7$Hk*{8bXR-Gketot}FWi$tQYVxLB7{WHKomS6wI-CG_;qAsVa;mw$*Y^z@?QFZs zu`j6*o7_XCMVtiCWT0*B#se-mG}h1Or1gKtGAAl$x=@bT^7lZc5o7J0qvQ+Q3TZEXaTNEY z)8q|JWWBr5X$>kHrdsR8i!#i6V57>Lyzp3^Ev0|S>J5XjgYBc2=T*meJch5ZlyF3I z;W{=d(=AcA^v1_I^SH^bx|k*2tFBJVPs`=-(!7nNuE?0)RBNk;7kRUz!vbC0EBaPW zpe2igRXzv3PCjG#A*@jFA~QgTxIf5H zy1L+V#qKy3Su(a{b=bERPnGceFX!)wwrjTLxV9+>vRSE36q)X;I`bNrct2U}wJJQx zK?qU}fqq1bW;rlO|Z|_Bm1{&h((0Keph`?SMH<5K8-2a(7QFgh%z>D?j@{j4NzB z`Yp(^r`})DatF&-sP+`f=0glRSxb+rEL*s*+dyX zb1?NZYpIAkgc>~!lwKyf?p}p#5#c{J^?t!iDvy458SU(r$#Yh>`^|A>!O$;P_o@hI z_Ug{awD$YDE}k^=7ok{gK1}dr)0vaTw~AC#|1`HGKj(U5ya<6Lm*5F#w0e?JX{<>&FFmxfSwKPQAsJ`CgkzmU05`vNMri+bbRonKFUQRQsF!@FvlE-6! 
z4V=w~HQ#L-k#<)9`XDrFq{a$x*Pa>ZxceID^lL-lFnZD6F=?C55#G)6{ zp|cc8RbVl9WWtf_CO`_-?p>lNewNy}?fj)@p7idgc?wSfcW9Ad!c0=Q_#V4Ji@9wy z&!tpww8AR9>gails~(*FY|xlm}ERVQlu z4d#y271hhNE!kEmPOd-6sY27ILuMWeWydrrUsm>}%tomIkomIFlDu@E$pm6%PBe9~ zn4DeCsZ!f4Yduu=!8LXa{ba_kj{fEH9~K~r#YvbflT)z7R=-oFcLS|{;p`z$AaG2K z@*v~;u+Hsbs%LZ5q+ApY@?Z)0Ezk`gj48kb7wPHWyMv^mw^L0XsVpG7vv|e$0|$9= z-;I@LE51#U=6tp(z3VnmN6{|OD%M=4fY}I#~r!$+TG!A?ysF*^J#eS+?v=X*U8Wj<5fF; zFPzE#c;pr)TE`npbpC}aN@R)GJ3N@Wr}$yNlqTP%P8^xRSSJg&F{QqFoygFRV-Q5j z*R~M1HFTVS#7$;;E-t-6PM(&hsjckOfROo_Z(+Z>7H`D0t-pN4Az>Ww1{HsrUp8Qh zk#qCpo`SVVDfe`j@_upOJQ0^jCTZ`Dgr9}=DLg?6r%a-q6-YDuqZe|vq_h;g=yzbm z39CbbuqS*jKH4Wr!*Q3fU_|E+90~J~jr$@}-x$MPbZo#x8*!M1GaZ|eUy8+{?I&|n zJ}8dvTFSH)^&E=`qFS@iF;fzId%@dgYiGLN3+yv5oQ+qI?KG~nYX?y5TyqIEq#L&T zV$@@EHqG>*y`@4s$vcY?>g&dSVu9d+NYc&hwR0t8M0qkx>I0VBl;lnF*xyyO~o-WQK)yF}Yt`V=Ey=VPk zY10d8cFT!kQniEG<3Kmu&MAI%o`t>w6qXS^f%ON@RGBl*#+|?1_-6~Zc|u#y#^xk0 z0bNsLRh@s_5G#U;BU{}{x`-6z`F`50ev^S4BToX>VI}T&$Zsy~N)8 znHGK{0k9!ZM<&Lzg~(AwR;5P6Xtig|eknNXzj}v774Z#-(8W6%ebJ5Y+0pG}MjX3* zBr<6tAP(5>HJRT}@$}1fT~-f2D$fsB)4ppU!asm)dmylTGP`#I<#>2qBoW=X-R<2tGX-#x9cQbj+ z^MJXq4tA<-qrc^!jO(Je;7IbIjeBG{2Ds=DH%@oDr1fd;N|+t)c4c5ZUHo}O@!_&$ zHJh4z?-BL&_e=*P&tM9Kg-B@e!HAGrembvdz+(xE(Pu z$4Si3Q2b~Ya;C6Y5=|QJ3b|V^`M&s}kJ3)mKb%?K$ZFZ*m&_X!v9oX#0SW1mH#|>` zmjC$v=F^h5UtVgftSdCUMdw19W8}r``sB@r3lo;R*_Cupj( zt_w-xw1dNQdz%;lKC0H`d#mvP-8=)H$Y~@}leR^J!}cim8!1JJ=+s?TiGzR#TW7bZ z;=|Gnl?>0aqQLA7Z(waakGd;O?&eriw_hA?>IN0R!o%LJ&X=syM>r_!3#z?jn(WCl z;r1)N`m1qqfOTFM;}%yFC9?vg4mbQEpBZaS4oQV04Fbb8)*>nxw&io+eP;CakpU2f z^HhQ--`|7)U1gJ0IpAKd)wN0|i#|Xt5i$C9@>JvbTqfans{FZDZs)>FW^BG(Sgz;V zEV`NEYa8E+KN_)PquHu`ztNVCpWAq&m&5*@>fraopDtxOjfrfU?nTtP?Q(*GA_Dr2 zTfP3h>U`#z`=vhrWa8%X+&XctuQB~_LH=stYE$c)*{x5mQP<|DT3;eCX=ZvO&j&p~ zoSATPUF+dS5V#V6<>}5Hl4H@_boR5(&@E_!QMX!k1!O@1wedD_=^Z6poQE6e*N*_r z^crnw2#c9!kuCctGO;`VcsMPiIaK3Fmrxm>7xtR&2qc&6zSA8bx@}N-m{0x?m4D)v zdC1i%X+U^=*pCm83H5V`)YQ3=K5(X>B4QNW++-}LnkHVi}os%}<%o^armK1R%Uwwy=fFY`FzMx&3NsAVPl?!y@ 
zrAAA=_{9-CBQ(9_U`>pg5BHlA@mZymR^~Ho#BosQn{QFCzo>OTy})-@>8t2wZm<&c znJ7g>mJ|8Tz`m}mdp$H@!mkpZreX0I_nD~A&rON=MnJ+FAA|7|xz;u6fQj2v3q8+q z`9Qq-OL4sW)Da-#w;%ZgS&(2k+XnU{PLzkVq$%?0ro>I_h4s>Uy=ucLmD7SQpz}Gp z6VDhuLH^uU&TOCBnoB9P%wi)-R^35RPW%bhX>Uu56tZ`(Z#5(VhdN++&*Awu2F#WZ zj;N%xJNd>=xSJYQJLwv_XE0lGc&cr$-n^&oRv88|pM5G@AY(M(xG+>1$r?R;3RJ1i^2M*2(u~os-N`tUF+O5CYI8ANd zx^DNzlPy$J#P&Fi@o%l4bCT0CZIE4d_{j9hhpE>}TuDQc9LD*FK9d!j8k}oacYJ?) zEg0j+t0cq@Nhex;P5M@-v9!;S);6MW$DF^4`22mU{)l&q4yJYS`yEgbmos9?j$cqy zuhn?8fU5A2opQtJ*=^5!?#E(jLH8p+I9^9`vU3}M7Zv}b^Zg$iBA(@hgDmVcexu7y zR(RYoA}9>xjH2JA)7(g%(R$7s2bqJ{gF9i-`dFf_Alz05UaNBBocC1n?`Sgp9PV8El%!~sa=19Eeb?HiJwY56mw+!W3CB6%{*moYa zF&XVpd-my-HL*HG+xo)T60NBqksW75Z;(X}Wl6-BX;YPHC347amC2)V;t;)ope(1~ zQxvWCP)H+CQdawx@g~wJ0HOv zYb$v(uNK-rTc$vlXjeA+ZeJz*9umtFt4Y7)8flArOD00}+Ok6hElhD=59S&yTuQa? z;GGg>rSamspf7?u=5@A2$QnawwNGLiJOdQ#d%x;crx!8GS@ukR)XB`o&mT%G00uIKiXj$Vq;xwvxn0HPAuv|t!=Cx)lIOa~F{+q!%Ta25{|Bo*j~Szs zw)G=kZxSV!ve1{RwCAqo_g3QRw>k~K*9X`khWJKwi+kNd;v%@O>*&bIVS~H0j_6R%kK8{tx1I1w^SpEk=7 zUMhG83_%dqdu0g-d4RuNRsuA^4Io43#qbUpXi* zZpB%%6Tn2k&+qOkB_X0_D=>kB10WH*vg7xkHfel`_uc6t5P1#`sC_|Y12>*0iE6jK z`I&Mrbw+Wol@73fw|FPr;h8fOJ|I*EYT5!-m(_ifvV$as)oAxLdK)^GOJsKMFfFS~rs(LZ-TX ztFPsL=gj{FUdyr))cev-Mu5MWexR$9#YeN}Cr_cE_n@XW>K+artbvR+-a9eEG9@2g zW}dy&7dnBIuN6$MZ*9+X?V;6GHr*x5be<1o5F0*N4lqEIm@L#eb+;PQJ3rj?6SZDE zi}#2kGbzh_nie}$Ia?0O0nb#MZM^$|vx4_I~X_VVT{D+`0jCfSID5M*iK7^VXe`Pp5)%KAz(3O@atR%V6n8D zO0;dCud`NovO=h!Jg5*j&ls7m`8f3rph2@t@$TrdrF!&T&Uz8s8O7q2r@hUa#GRb# zv2LLYMzD#&!u&5n3@j}SEbzh27IQ7dv6Kb^6gEV@(TYwk>^rzb&}+dCs?^RWFIGg4 zg=vHlaC~p`I^dz?$5MUkc5HPNy&nnfdxi~YZbtl9mIxfxlO&0EPgmv!PfZdpV;a4` zh*4j8Sr@A5iXpaNtg3y^Rb+dOZYQ-t^}c8ADMnX{{C>lI_fH+B*lAmm|ELC2&$hQ8#=&8Y8X z6UIMiA(a}=UvJy6=~iE5pUKG*xaPp`Z-?R;Yd!7}FnOcCD6f6g)97;N4%4ljCquK2 zkV|UI?`7|*dPWGD@O{$2hGxjAwQh_&gB4JLRhV0 z>E@cb)(pn*&Y-@Xc~ASoe7g~Z;suO&kZii&ntr26wq}OAB>~uNb9Y^>cuSuQ(Ipm$ zbM5m&<8rg`7UulujuZh?2O$$G@4N!nJ=FypuRcRcb*@f!g$x>N-`|k7*{De{7;Bbs 
zcn+9Yi8&viKqVeP4bCz71TQA%8cD3$M8jV>HSY?UZ@R=-mIQDu_zDuME9+Ref+Y7d z4UVZ8Y_yTRVQQ5@W3DIs&B=Ndn*MjiNEYo*mvmi^PK%fAUPo-iNyJFcXv%m$&N%{#T}Y$}XSJZKwXG zti49wb2k6OKzH7@DI8NCay~5LRdjUr2}!3&bEvNddUhz+dh32_xYCgv*J?f=d~+snB@K|;U@)D%D-iw^ag%ywLj7#st^R}G7AFDLExmx zt;+}#-)6ZJfh$=HDZ6egsXy*_*gi4$(NW{zyS3(H76HoG%V_oO4>uinuj~bR#`t%@ z)>1dpJL)R8-e2~-0D1qz^mJ-$I^Iu6x=exWjJZqGyR?|}UjdGS*)R1RXK&@io{0rs zs(lEU4zl^4kYr4669s8HAMCEK32IB_hej_nD9IOZ8YIab*KK~rx!sJt$w?uat<>Ea zlfIx2xXF>wVke;g)i_OO+U_q}_n#DCs&m(U? z&7cNU9KT&Ee07`J z$;HY*o~@>V|6y>u{jc86G@9+KkK<`oJG#(X9y(~IF>P9 zT0$%}EmCx7f|zJD)D}VzPc5k`5ld7nq_$90s;YLe$NXpJc`?t6=R9Z5bI$YbzWCky zdvU+#-gAEUem_5WC6};rHml=i{(vDx+>*bMu3MJ6-LX%13(RmFhxY2DR7GCjS@~&z zHzX5EZl?`jC^J4BUXJh6RY6eUaBN~@yBLTbElvIl;O!qdzV6QQ&YT%Nd&6rk$;HvfO zZZgMyE)xE4A6);gV0Y<`-xA7BkmD^Tqme@E?JuAiWa>pJGO3|^X6Sm1f^upQyfQ;{ zf5LQ$=l#+Z9OTF1t6e$SZ5|hTZOwGbwMY%0$GqK9*h1wj%Hu=u2cAAjc0HiiR!bFD z!8WBYFMFTq7o3-@{jUO@YaVSgf^Hro^S?VBh1?qSxn6-9seT)M$T%$`U`T2+>s9Fr zAcv-LpeysZF;9~oJMlRF)2DNqn>z1DTSH+=_kI^B^bdjFMafm)t!uP!%lqkw-|OWn zggjRTBoO^0C}czC;6~Vw+)4t4_~ly$na9Hj=^V%QHw!%(CVN_@I>OFQ(qa$?cxO#j z5WAm!U>;rA^6)!7{5ZRE}n`8x^&J^})j!sga1x;F1cZ~rXS#9rLZ{^~9I8hfo! z>B|miHJ&Bnw#1a)2-RSN_jSMVKneZI0lyj>Henyr@beCcQNG<98>+`PG~WUo?~Qjn zWBOw6J*092eT=m6&9+6ojNJ_I#$)?rcJ!p_6mKoFUKp~ZLP}~qdINll6hDaJ!(Tjl zzQ!zIKyS%J7^a`=H%NoT9PcX&*87)DF&3cNDH|Iofamcqw?Ey8V+~`t8CpX6=WOe# z+|D1M>OU=)jQHQ)0p-X}y^-`jH^ocHZAT9BM}_r9Q8#-h90z;u7*a{&br#*kfa5;z7p< zpWvFfCoRqf?#;QDIlvy%Sb;_&%Cq+`w5<5hrf~wusiT66F8dyKUU3_+#YP6sH9~Hy zypw0@ig~mK?to0k%5-?f_C2^z+Dv9zX9$)Mx+%lc8X=J(!}vA#s7zOZUJcnp*ESW^ z<%B8N%suK-+`|Owa;nXq5;CafGt%Y;UZZ6+zU(}EoDM6xzE@M(nIwr}zI&fU*Y0*Q z<2xiKy*Nmd-gsTsMP!t9yzRelHfZ_W(~=GK2kKF{C?JnQI2g#|NP@?x19hCmhcvOOkv@(&Jz>-Pp-QYzFObvP#`;M6Tv!tlSY*-4pmzTp0y`bLK{~9^jz}D!h-dLz}a*pb=C<&@d5gba#BXZDas-c%_CgVO48yM~Jxd>ufG6 zOI}MzqGO2?^I7XO1}6mF?xEX(>&`A(v0J^Q$w9x#X+l9PD__*r^p9pgF@Nm5smhG? 
zshF=%Ab43pxU#yUO}!S8lB@kpoCJ^}mc2f+ zfYV%+cnLYFp@7cUXpT{I{D=-yFE+F#d^@{rL0KV!BD{jKN=qg7+JK!u9>N&W`;)4) zk0lm&m|;}?F^+i_U}yd6l4iGt$i?Fik`xu|X6v=qDu`l*W5O-d^gXkQ~wP~r2|J(%SY z6@Sh7jsAwQtz(xRvG@^AagoZqdJy5$m^oAjnc4H<&$!EKnbLey*I5GluK q10{3Ca-b);Q_pZPN=j);ia9;S4nTFIe+Snjy#4F)|KG9L&cJV1{YE7K literal 0 HcmV?d00001