From 9008cfc22a6989624e15b0855b4db99e5baaf3f9 Mon Sep 17 00:00:00 2001 From: MDSALMANSHAMS Date: Mon, 29 Sep 2025 12:11:14 +0530 Subject: [PATCH 1/7] Segmentation Tasks with RETFound --- segmentation_finetune.py | 363 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 363 insertions(+) create mode 100644 segmentation_finetune.py diff --git a/segmentation_finetune.py b/segmentation_finetune.py new file mode 100644 index 00000000..51ce8a59 --- /dev/null +++ b/segmentation_finetune.py @@ -0,0 +1,363 @@ +import os +import argparse +import time +import logging +import numpy as np +import torch +import torch.nn as nn +import torch.optim as optim +import torch.nn.functional as F +from torch.utils.data import Dataset, DataLoader +import eyepy as ep +from albumentations import Compose, Resize, Normalize +from albumentations.pytorch import ToTensorV2 +from huggingface_hub import hf_hub_download +from util.pos_embed import interpolate_pos_embed + +# Minimal logging configuration +logging.basicConfig( + level=logging.WARNING, + format="%(asctime)s [%(levelname)s] %(message)s", + handlers=[logging.StreamHandler()], +) +logger = logging.getLogger(__name__) + + +# ------------------------------ +# Multi-eye Dataset for .eye files in a folder +# ------------------------------ +class MultiEyeSegmentationDataset(Dataset): + def __init__(self, folder, transform=None): + """ + folder: Path to a folder containing .eye files. + transform: Albumentations transform applied to both image and mask. 
+ """ + # List all .eye files in the folder + self.eye_files = sorted( + [os.path.join(folder, f) for f in os.listdir(folder) if f.endswith(".eye")] + ) + self.transform = transform + # Build a list of (eye_file, bscan_index) tuples for all volumes + self.samples = [] + for eye_file in self.eye_files: + volume = ep.EyeVolume.load(eye_file) + num_scans = volume.shape[0] + for idx in range(num_scans): + self.samples.append((eye_file, idx)) + # Cache loaded volumes to avoid repeated disk I/O + self.cache = {} + + def __len__(self): + return len(self.samples) + + def __getitem__(self, index): + eye_file, idx = self.samples[index] + if eye_file not in self.cache: + self.cache[eye_file] = ep.EyeVolume.load(eye_file) + volume = self.cache[eye_file] + image = volume[idx].data # assume shape: (H, W) + image = np.stack([image] * 3, axis=-1) # convert grayscale to 3 channels + # Convert mask from bool to uint8 to avoid OpenCV errors + mask = volume.volume_maps["drusen"].data[idx].astype(np.uint8) # shape: (H, W) + if self.transform: + augmented = self.transform(image=image, mask=mask) + image = augmented["image"] + mask = augmented["mask"] + mask = mask.long() + else: + image = torch.tensor(image, dtype=torch.float).permute(2, 0, 1) / 255.0 + mask = torch.tensor(mask, dtype=torch.long) + return image, mask + + +# ------------------------------ +# Segmentation Model Components +# ------------------------------ +from models_vit import RETFound_mae # Ensure this import is correct + + +class SegmentationHead(nn.Module): + def __init__(self, hidden_dim, num_classes, img_size, patch_size): + super().__init__() + self.patch_size = patch_size + self.h = img_size // patch_size + self.w = img_size // patch_size + self.conv = nn.Sequential( + nn.Conv2d(hidden_dim, hidden_dim // 2, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(hidden_dim // 2, num_classes, kernel_size=1), + ) + + def forward(self, x): + # x: [B, num_tokens, hidden_dim] + B, N, C = x.shape + x = 
x.reshape(B, self.h, self.w, C) + x = x.permute(0, 3, 1, 2) # [B, C, h, w] + x = F.interpolate( + x, scale_factor=self.patch_size, mode="bilinear", align_corners=False + ) + x = self.conv(x) + return x + + +class RETFoundSegmentation(nn.Module): + def __init__( + self, img_size=512, patch_size=16, hidden_dim=1024, num_classes=2, drop_path=0.2 + ): + super().__init__() + self.encoder = RETFound_mae( + img_size=img_size, + num_classes=num_classes, + drop_path_rate=drop_path, + global_pool=False, + ) + self.seg_head = SegmentationHead(hidden_dim, num_classes, img_size, patch_size) + + def forward(self, x): + B = x.shape[0] + x_tokens = self.encoder.patch_embed(x) # [B, num_patches, hidden_dim] + cls_tokens = self.encoder.cls_token.expand(B, -1, -1) + x_tokens = torch.cat((cls_tokens, x_tokens), dim=1) + x_tokens = x_tokens + self.encoder.pos_embed + x_tokens = self.encoder.pos_drop(x_tokens) + for blk in self.encoder.blocks: + x_tokens = blk(x_tokens) + x_tokens = self.encoder.norm(x_tokens) + tokens = x_tokens[:, 1:] # exclude cls token + seg_map = self.seg_head(tokens) + return seg_map + + +# ------------------------------ +# Loss Functions for Imbalanced Segmentation +# ------------------------------ +def dice_loss(pred, target, smooth=1e-6): + """ + Computes Dice loss. 
+ pred: logits [B, num_classes, H, W] + target: ground truth [B, H, W] (long) + """ + pred_soft = F.softmax(pred, dim=1) + num_classes = pred.shape[1] + target_onehot = ( + F.one_hot(target, num_classes=num_classes).permute(0, 3, 1, 2).float() + ) + intersection = (pred_soft * target_onehot).sum(dim=(2, 3)) + union = pred_soft.sum(dim=(2, 3)) + target_onehot.sum(dim=(2, 3)) + dice = (2.0 * intersection + smooth) / (union + smooth) + return 1 - dice.mean() + + +def combined_loss_fn(outputs, targets, ce_loss_fn, dice_weight=1.0): + ce_loss = ce_loss_fn(outputs, targets) + d_loss = dice_loss(outputs, targets) + return ce_loss + dice_weight * d_loss + + +# ------------------------------ +# Metrics Function +# ------------------------------ +def compute_metrics(preds, targets, smooth=1e-6): + pixel_acc = np.mean(preds == targets) + intersection = np.sum(preds * targets) + dice = (2.0 * intersection + smooth) / (np.sum(preds) + np.sum(targets) + smooth) + union = np.sum(preds) + np.sum(targets) - intersection + iou = (intersection + smooth) / (union + smooth) + return pixel_acc, dice, iou + + +# ------------------------------ +# Training and Evaluation Loops +# ------------------------------ +def train_segmentation(model, dataloader, loss_fn, optimizer, device): + model.train() + running_loss = 0.0 + for images, masks in dataloader: + images = images.to(device) + masks = masks.to(device) + optimizer.zero_grad() + outputs = model(images) + loss = loss_fn(outputs, masks) + loss.backward() + optimizer.step() + running_loss += loss.item() * images.size(0) + return running_loss / len(dataloader.dataset) + + +def evaluate_segmentation(model, dataloader, loss_fn, device): + model.eval() + running_loss = 0.0 + all_preds = [] + all_targets = [] + with torch.no_grad(): + for images, masks in dataloader: + images = images.to(device) + masks = masks.to(device) + outputs = model(images) + loss = loss_fn(outputs, masks) + running_loss += loss.item() * images.size(0) + preds = 
outputs.argmax(dim=1) + all_preds.append(preds.cpu()) + all_targets.append(masks.cpu()) + epoch_loss = running_loss / len(dataloader.dataset) + all_preds = torch.cat(all_preds, dim=0).numpy() + all_targets = torch.cat(all_targets, dim=0).numpy() + return epoch_loss, all_preds, all_targets + + +# ------------------------------ +# Main Training Script +# ------------------------------ +def main(): + parser = argparse.ArgumentParser(description="RETFound Segmentation Fine-tuning") + parser.add_argument( + "--data_path", + type=str, + required=True, + help="Base dataset directory with subfolders: train, val, test", + ) + parser.add_argument("--epochs", type=int, default=50, help="Number of epochs") + parser.add_argument("--batch_size", type=int, default=4, help="Batch size") + parser.add_argument("--lr", type=float, default=1e-4, help="Learning rate") + parser.add_argument( + "--img_size", type=int, default=512, help="Input image size (square)" + ) + parser.add_argument( + "--patch_size", type=int, default=16, help="Patch size used by the encoder" + ) + parser.add_argument("--drop_path", type=float, default=0.2, help="Drop path rate") + parser.add_argument( + "--output_dir", + type=str, + default="./segmentation_output", + help="Directory to save model checkpoints", + ) + parser.add_argument( + "--finetune", + type=str, + default="", + help="Path to pretrained RETFound checkpoint (or repo name for download)", + ) + parser.add_argument( + "--dice_weight", type=float, default=1.0, help="Weight for dice loss term" + ) + parser.add_argument( + "--ce_weight", + type=str, + default="0.3,0.7", + help="Comma-separated weights for cross entropy loss (background, drusen)", + ) + args = parser.parse_args() + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + os.makedirs(args.output_dir, exist_ok=True) + + # Minimal transform: resize and normalize + transform = Compose( + [ + Resize(args.img_size, args.img_size), + Normalize(mean=(0.485, 0.456, 0.406), 
std=(0.229, 0.224, 0.225)), + ToTensorV2(), + ] + ) + + # Create datasets for train, val, and test + train_folder = os.path.join(args.data_path, "train") + val_folder = os.path.join(args.data_path, "val") + test_folder = os.path.join(args.data_path, "test") + from torch.utils.data import DataLoader + + train_dataset = MultiEyeSegmentationDataset(train_folder, transform=transform) + val_dataset = MultiEyeSegmentationDataset(val_folder, transform=transform) + test_dataset = MultiEyeSegmentationDataset(test_folder, transform=transform) + + train_loader = DataLoader( + train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0 + ) + val_loader = DataLoader( + val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0 + ) + test_loader = DataLoader( + test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0 + ) + + model = RETFoundSegmentation( + img_size=args.img_size, + patch_size=args.patch_size, + hidden_dim=1024, + num_classes=2, + drop_path=args.drop_path, + ) + model.to(device) + + # ----- Load Pretrained RETFound Foundation Weights ----- + if args.finetune: + if os.path.exists(args.finetune): + checkpoint_path = args.finetune + print( + f"Loading pretrained weights from local checkpoint: {checkpoint_path}" + ) + else: + print(f"Downloading pretrained weights from: {args.finetune}") + checkpoint_path = hf_hub_download( + repo_id=f"YukunZhou/{args.finetune}", + filename=f"{args.finetune}.pth", + ) + checkpoint = torch.load(checkpoint_path, map_location="cpu") + if "model" in checkpoint: + pretrained_dict = checkpoint["model"] + else: + pretrained_dict = checkpoint + for k in ["head.weight", "head.bias"]: + if k in pretrained_dict: + print(f"Removing key {k} from pretrained checkpoint") + del pretrained_dict[k] + interpolate_pos_embed(model.encoder, pretrained_dict) + model.encoder.load_state_dict(pretrained_dict, strict=False) + print("Pretrained RETFound encoder weights loaded.") + + # Create weighted CrossEntropyLoss for 
class weighting + ce_weights = [float(x) for x in args.ce_weight.split(",")] + ce_weights_tensor = torch.tensor(ce_weights, device=device) + ce_loss_fn = nn.CrossEntropyLoss(weight=ce_weights_tensor) + + def loss_fn(outputs, targets): + return combined_loss_fn( + outputs, targets, ce_loss_fn, dice_weight=args.dice_weight + ) + + optimizer = optim.AdamW(model.parameters(), lr=args.lr) + + best_loss = float("inf") + for epoch in range(args.epochs): + train_loss = train_segmentation(model, train_loader, loss_fn, optimizer, device) + val_loss, all_preds, all_targets = evaluate_segmentation( + model, val_loader, loss_fn, device + ) + pixel_acc, dice, iou = compute_metrics(all_preds, all_targets) + print( + f"Epoch {epoch+1} - Train Loss: {train_loss:.4f} | Val Loss: {val_loss:.4f}" + ) + print(f"Metrics: Pixel Acc: {pixel_acc:.4f}, Dice: {dice:.4f}, IoU: {iou:.4f}") + if val_loss < best_loss: + best_loss = val_loss + checkpoint_path = os.path.join( + args.output_dir, f"checkpoint_epoch{epoch+1}.pth" + ) + torch.save(model.state_dict(), checkpoint_path) + print(f"Saved checkpoint: {checkpoint_path}") + + # Evaluate on test set + test_loss, test_preds, test_targets = evaluate_segmentation( + model, test_loader, loss_fn, device + ) + pixel_acc, dice, iou = compute_metrics(test_preds, test_targets) + print(f"Test Loss: {test_loss:.4f}") + print(f"Test Metrics: Pixel Acc: {pixel_acc:.4f}, Dice: {dice:.4f}, IoU: {iou:.4f}") + + +if __name__ == "__main__": + main() + +# example usage: +# !python segmentation_finetune.py --data_path ./data/ --epochs 50 --batch --data_path "Data" --finetune "" --epochs 50 --batch_size 1 --lr 1e-4 --img_size 256 --patch_size 16 --drop_path 0.2 --ce_weight "0.3,0.7" --dice_weight 1.0 --output_dir "./segmentation_output/2" From 6bfad42e9d3cea5e011436af0e98dd1a6240483f Mon Sep 17 00:00:00 2001 From: MDSALMANSHAMS Date: Thu, 8 Jan 2026 15:28:22 +0530 Subject: [PATCH 2/7] Segmentation --- .gitignore | 1 + Segmentation/main.py | 0 2 files changed, 1 
insertion(+) create mode 100644 Segmentation/main.py diff --git a/.gitignore b/.gitignore index 34a74493..aeeccf09 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,7 @@ __pycache__/ # Distribution / packaging .Python build/ +Data develop-eggs/ dist/ downloads/ diff --git a/Segmentation/main.py b/Segmentation/main.py new file mode 100644 index 00000000..e69de29b From 08a2c6cec5d3b9f023c112cadb67419f6a5c6132 Mon Sep 17 00:00:00 2001 From: MDSALMANSHAMS Date: Thu, 8 Jan 2026 20:47:17 +0530 Subject: [PATCH 3/7] Refactor --- .gitignore | 1 + .../segmentation_finetune.py | 726 +++++++------- engine_finetune.py | 296 +++--- engine_segmentation.py | 52 + examples/RETFound_MendeleyOCT_demo.ipynb | 320 +++++++ main_finetune.py | 896 +++++++++--------- main_segmentation.py | 122 +++ models_segmentation.py | 43 + models_vit.py | 210 ++-- requirements.txt | 23 +- util/datasets.py | 162 ++-- util/lr_decay.py | 146 +-- util/lr_sched.py | 40 +- util/misc.py | 738 +++++++-------- util/pos_embed.py | 184 ++-- 15 files changed, 2249 insertions(+), 1710 deletions(-) rename segmentation_finetune.py => Segmentation/segmentation_finetune.py (97%) create mode 100644 engine_segmentation.py create mode 100644 examples/RETFound_MendeleyOCT_demo.ipynb create mode 100644 main_segmentation.py create mode 100644 models_segmentation.py diff --git a/.gitignore b/.gitignore index aeeccf09..bc0b8174 100644 --- a/.gitignore +++ b/.gitignore @@ -105,6 +105,7 @@ celerybeat.pid # Environments .env .venv +venv3.9 env/ venv/ ENV/ diff --git a/segmentation_finetune.py b/Segmentation/segmentation_finetune.py similarity index 97% rename from segmentation_finetune.py rename to Segmentation/segmentation_finetune.py index 51ce8a59..b4db1a8c 100644 --- a/segmentation_finetune.py +++ b/Segmentation/segmentation_finetune.py @@ -1,363 +1,363 @@ -import os -import argparse -import time -import logging -import numpy as np -import torch -import torch.nn as nn -import torch.optim as optim -import torch.nn.functional 
as F -from torch.utils.data import Dataset, DataLoader -import eyepy as ep -from albumentations import Compose, Resize, Normalize -from albumentations.pytorch import ToTensorV2 -from huggingface_hub import hf_hub_download -from util.pos_embed import interpolate_pos_embed - -# Minimal logging configuration -logging.basicConfig( - level=logging.WARNING, - format="%(asctime)s [%(levelname)s] %(message)s", - handlers=[logging.StreamHandler()], -) -logger = logging.getLogger(__name__) - - -# ------------------------------ -# Multi-eye Dataset for .eye files in a folder -# ------------------------------ -class MultiEyeSegmentationDataset(Dataset): - def __init__(self, folder, transform=None): - """ - folder: Path to a folder containing .eye files. - transform: Albumentations transform applied to both image and mask. - """ - # List all .eye files in the folder - self.eye_files = sorted( - [os.path.join(folder, f) for f in os.listdir(folder) if f.endswith(".eye")] - ) - self.transform = transform - # Build a list of (eye_file, bscan_index) tuples for all volumes - self.samples = [] - for eye_file in self.eye_files: - volume = ep.EyeVolume.load(eye_file) - num_scans = volume.shape[0] - for idx in range(num_scans): - self.samples.append((eye_file, idx)) - # Cache loaded volumes to avoid repeated disk I/O - self.cache = {} - - def __len__(self): - return len(self.samples) - - def __getitem__(self, index): - eye_file, idx = self.samples[index] - if eye_file not in self.cache: - self.cache[eye_file] = ep.EyeVolume.load(eye_file) - volume = self.cache[eye_file] - image = volume[idx].data # assume shape: (H, W) - image = np.stack([image] * 3, axis=-1) # convert grayscale to 3 channels - # Convert mask from bool to uint8 to avoid OpenCV errors - mask = volume.volume_maps["drusen"].data[idx].astype(np.uint8) # shape: (H, W) - if self.transform: - augmented = self.transform(image=image, mask=mask) - image = augmented["image"] - mask = augmented["mask"] - mask = mask.long() - else: - 
image = torch.tensor(image, dtype=torch.float).permute(2, 0, 1) / 255.0 - mask = torch.tensor(mask, dtype=torch.long) - return image, mask - - -# ------------------------------ -# Segmentation Model Components -# ------------------------------ -from models_vit import RETFound_mae # Ensure this import is correct - - -class SegmentationHead(nn.Module): - def __init__(self, hidden_dim, num_classes, img_size, patch_size): - super().__init__() - self.patch_size = patch_size - self.h = img_size // patch_size - self.w = img_size // patch_size - self.conv = nn.Sequential( - nn.Conv2d(hidden_dim, hidden_dim // 2, kernel_size=3, padding=1), - nn.ReLU(inplace=True), - nn.Conv2d(hidden_dim // 2, num_classes, kernel_size=1), - ) - - def forward(self, x): - # x: [B, num_tokens, hidden_dim] - B, N, C = x.shape - x = x.reshape(B, self.h, self.w, C) - x = x.permute(0, 3, 1, 2) # [B, C, h, w] - x = F.interpolate( - x, scale_factor=self.patch_size, mode="bilinear", align_corners=False - ) - x = self.conv(x) - return x - - -class RETFoundSegmentation(nn.Module): - def __init__( - self, img_size=512, patch_size=16, hidden_dim=1024, num_classes=2, drop_path=0.2 - ): - super().__init__() - self.encoder = RETFound_mae( - img_size=img_size, - num_classes=num_classes, - drop_path_rate=drop_path, - global_pool=False, - ) - self.seg_head = SegmentationHead(hidden_dim, num_classes, img_size, patch_size) - - def forward(self, x): - B = x.shape[0] - x_tokens = self.encoder.patch_embed(x) # [B, num_patches, hidden_dim] - cls_tokens = self.encoder.cls_token.expand(B, -1, -1) - x_tokens = torch.cat((cls_tokens, x_tokens), dim=1) - x_tokens = x_tokens + self.encoder.pos_embed - x_tokens = self.encoder.pos_drop(x_tokens) - for blk in self.encoder.blocks: - x_tokens = blk(x_tokens) - x_tokens = self.encoder.norm(x_tokens) - tokens = x_tokens[:, 1:] # exclude cls token - seg_map = self.seg_head(tokens) - return seg_map - - -# ------------------------------ -# Loss Functions for Imbalanced Segmentation 
-# ------------------------------ -def dice_loss(pred, target, smooth=1e-6): - """ - Computes Dice loss. - pred: logits [B, num_classes, H, W] - target: ground truth [B, H, W] (long) - """ - pred_soft = F.softmax(pred, dim=1) - num_classes = pred.shape[1] - target_onehot = ( - F.one_hot(target, num_classes=num_classes).permute(0, 3, 1, 2).float() - ) - intersection = (pred_soft * target_onehot).sum(dim=(2, 3)) - union = pred_soft.sum(dim=(2, 3)) + target_onehot.sum(dim=(2, 3)) - dice = (2.0 * intersection + smooth) / (union + smooth) - return 1 - dice.mean() - - -def combined_loss_fn(outputs, targets, ce_loss_fn, dice_weight=1.0): - ce_loss = ce_loss_fn(outputs, targets) - d_loss = dice_loss(outputs, targets) - return ce_loss + dice_weight * d_loss - - -# ------------------------------ -# Metrics Function -# ------------------------------ -def compute_metrics(preds, targets, smooth=1e-6): - pixel_acc = np.mean(preds == targets) - intersection = np.sum(preds * targets) - dice = (2.0 * intersection + smooth) / (np.sum(preds) + np.sum(targets) + smooth) - union = np.sum(preds) + np.sum(targets) - intersection - iou = (intersection + smooth) / (union + smooth) - return pixel_acc, dice, iou - - -# ------------------------------ -# Training and Evaluation Loops -# ------------------------------ -def train_segmentation(model, dataloader, loss_fn, optimizer, device): - model.train() - running_loss = 0.0 - for images, masks in dataloader: - images = images.to(device) - masks = masks.to(device) - optimizer.zero_grad() - outputs = model(images) - loss = loss_fn(outputs, masks) - loss.backward() - optimizer.step() - running_loss += loss.item() * images.size(0) - return running_loss / len(dataloader.dataset) - - -def evaluate_segmentation(model, dataloader, loss_fn, device): - model.eval() - running_loss = 0.0 - all_preds = [] - all_targets = [] - with torch.no_grad(): - for images, masks in dataloader: - images = images.to(device) - masks = masks.to(device) - outputs = 
model(images) - loss = loss_fn(outputs, masks) - running_loss += loss.item() * images.size(0) - preds = outputs.argmax(dim=1) - all_preds.append(preds.cpu()) - all_targets.append(masks.cpu()) - epoch_loss = running_loss / len(dataloader.dataset) - all_preds = torch.cat(all_preds, dim=0).numpy() - all_targets = torch.cat(all_targets, dim=0).numpy() - return epoch_loss, all_preds, all_targets - - -# ------------------------------ -# Main Training Script -# ------------------------------ -def main(): - parser = argparse.ArgumentParser(description="RETFound Segmentation Fine-tuning") - parser.add_argument( - "--data_path", - type=str, - required=True, - help="Base dataset directory with subfolders: train, val, test", - ) - parser.add_argument("--epochs", type=int, default=50, help="Number of epochs") - parser.add_argument("--batch_size", type=int, default=4, help="Batch size") - parser.add_argument("--lr", type=float, default=1e-4, help="Learning rate") - parser.add_argument( - "--img_size", type=int, default=512, help="Input image size (square)" - ) - parser.add_argument( - "--patch_size", type=int, default=16, help="Patch size used by the encoder" - ) - parser.add_argument("--drop_path", type=float, default=0.2, help="Drop path rate") - parser.add_argument( - "--output_dir", - type=str, - default="./segmentation_output", - help="Directory to save model checkpoints", - ) - parser.add_argument( - "--finetune", - type=str, - default="", - help="Path to pretrained RETFound checkpoint (or repo name for download)", - ) - parser.add_argument( - "--dice_weight", type=float, default=1.0, help="Weight for dice loss term" - ) - parser.add_argument( - "--ce_weight", - type=str, - default="0.3,0.7", - help="Comma-separated weights for cross entropy loss (background, drusen)", - ) - args = parser.parse_args() - - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - os.makedirs(args.output_dir, exist_ok=True) - - # Minimal transform: resize and normalize - 
transform = Compose( - [ - Resize(args.img_size, args.img_size), - Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)), - ToTensorV2(), - ] - ) - - # Create datasets for train, val, and test - train_folder = os.path.join(args.data_path, "train") - val_folder = os.path.join(args.data_path, "val") - test_folder = os.path.join(args.data_path, "test") - from torch.utils.data import DataLoader - - train_dataset = MultiEyeSegmentationDataset(train_folder, transform=transform) - val_dataset = MultiEyeSegmentationDataset(val_folder, transform=transform) - test_dataset = MultiEyeSegmentationDataset(test_folder, transform=transform) - - train_loader = DataLoader( - train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0 - ) - val_loader = DataLoader( - val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0 - ) - test_loader = DataLoader( - test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0 - ) - - model = RETFoundSegmentation( - img_size=args.img_size, - patch_size=args.patch_size, - hidden_dim=1024, - num_classes=2, - drop_path=args.drop_path, - ) - model.to(device) - - # ----- Load Pretrained RETFound Foundation Weights ----- - if args.finetune: - if os.path.exists(args.finetune): - checkpoint_path = args.finetune - print( - f"Loading pretrained weights from local checkpoint: {checkpoint_path}" - ) - else: - print(f"Downloading pretrained weights from: {args.finetune}") - checkpoint_path = hf_hub_download( - repo_id=f"YukunZhou/{args.finetune}", - filename=f"{args.finetune}.pth", - ) - checkpoint = torch.load(checkpoint_path, map_location="cpu") - if "model" in checkpoint: - pretrained_dict = checkpoint["model"] - else: - pretrained_dict = checkpoint - for k in ["head.weight", "head.bias"]: - if k in pretrained_dict: - print(f"Removing key {k} from pretrained checkpoint") - del pretrained_dict[k] - interpolate_pos_embed(model.encoder, pretrained_dict) - model.encoder.load_state_dict(pretrained_dict, 
strict=False) - print("Pretrained RETFound encoder weights loaded.") - - # Create weighted CrossEntropyLoss for class weighting - ce_weights = [float(x) for x in args.ce_weight.split(",")] - ce_weights_tensor = torch.tensor(ce_weights, device=device) - ce_loss_fn = nn.CrossEntropyLoss(weight=ce_weights_tensor) - - def loss_fn(outputs, targets): - return combined_loss_fn( - outputs, targets, ce_loss_fn, dice_weight=args.dice_weight - ) - - optimizer = optim.AdamW(model.parameters(), lr=args.lr) - - best_loss = float("inf") - for epoch in range(args.epochs): - train_loss = train_segmentation(model, train_loader, loss_fn, optimizer, device) - val_loss, all_preds, all_targets = evaluate_segmentation( - model, val_loader, loss_fn, device - ) - pixel_acc, dice, iou = compute_metrics(all_preds, all_targets) - print( - f"Epoch {epoch+1} - Train Loss: {train_loss:.4f} | Val Loss: {val_loss:.4f}" - ) - print(f"Metrics: Pixel Acc: {pixel_acc:.4f}, Dice: {dice:.4f}, IoU: {iou:.4f}") - if val_loss < best_loss: - best_loss = val_loss - checkpoint_path = os.path.join( - args.output_dir, f"checkpoint_epoch{epoch+1}.pth" - ) - torch.save(model.state_dict(), checkpoint_path) - print(f"Saved checkpoint: {checkpoint_path}") - - # Evaluate on test set - test_loss, test_preds, test_targets = evaluate_segmentation( - model, test_loader, loss_fn, device - ) - pixel_acc, dice, iou = compute_metrics(test_preds, test_targets) - print(f"Test Loss: {test_loss:.4f}") - print(f"Test Metrics: Pixel Acc: {pixel_acc:.4f}, Dice: {dice:.4f}, IoU: {iou:.4f}") - - -if __name__ == "__main__": - main() - -# example usage: -# !python segmentation_finetune.py --data_path ./data/ --epochs 50 --batch --data_path "Data" --finetune "" --epochs 50 --batch_size 1 --lr 1e-4 --img_size 256 --patch_size 16 --drop_path 0.2 --ce_weight "0.3,0.7" --dice_weight 1.0 --output_dir "./segmentation_output/2" +import os +import argparse +import time +import logging +import numpy as np +import torch +import torch.nn as nn 
+import torch.optim as optim +import torch.nn.functional as F +from torch.utils.data import Dataset, DataLoader +import eyepy as ep +from albumentations import Compose, Resize, Normalize +from albumentations.pytorch import ToTensorV2 +from huggingface_hub import hf_hub_download +from util.pos_embed import interpolate_pos_embed + +# Minimal logging configuration +logging.basicConfig( + level=logging.WARNING, + format="%(asctime)s [%(levelname)s] %(message)s", + handlers=[logging.StreamHandler()], +) +logger = logging.getLogger(__name__) + + +# ------------------------------ +# Multi-eye Dataset for .eye files in a folder +# ------------------------------ +class MultiEyeSegmentationDataset(Dataset): + def __init__(self, folder, transform=None): + """ + folder: Path to a folder containing .eye files. + transform: Albumentations transform applied to both image and mask. + """ + # List all .eye files in the folder + self.eye_files = sorted( + [os.path.join(folder, f) for f in os.listdir(folder) if f.endswith(".eye")] + ) + self.transform = transform + # Build a list of (eye_file, bscan_index) tuples for all volumes + self.samples = [] + for eye_file in self.eye_files: + volume = ep.EyeVolume.load(eye_file) + num_scans = volume.shape[0] + for idx in range(num_scans): + self.samples.append((eye_file, idx)) + # Cache loaded volumes to avoid repeated disk I/O + self.cache = {} + + def __len__(self): + return len(self.samples) + + def __getitem__(self, index): + eye_file, idx = self.samples[index] + if eye_file not in self.cache: + self.cache[eye_file] = ep.EyeVolume.load(eye_file) + volume = self.cache[eye_file] + image = volume[idx].data # assume shape: (H, W) + image = np.stack([image] * 3, axis=-1) # convert grayscale to 3 channels + # Convert mask from bool to uint8 to avoid OpenCV errors + mask = volume.volume_maps["drusen"].data[idx].astype(np.uint8) # shape: (H, W) + if self.transform: + augmented = self.transform(image=image, mask=mask) + image = augmented["image"] 
+ mask = augmented["mask"] + mask = mask.long() + else: + image = torch.tensor(image, dtype=torch.float).permute(2, 0, 1) / 255.0 + mask = torch.tensor(mask, dtype=torch.long) + return image, mask + + +# ------------------------------ +# Segmentation Model Components +# ------------------------------ +from models_vit import RETFound_mae # Ensure this import is correct + + +class SegmentationHead(nn.Module): + def __init__(self, hidden_dim, num_classes, img_size, patch_size): + super().__init__() + self.patch_size = patch_size + self.h = img_size // patch_size + self.w = img_size // patch_size + self.conv = nn.Sequential( + nn.Conv2d(hidden_dim, hidden_dim // 2, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(hidden_dim // 2, num_classes, kernel_size=1), + ) + + def forward(self, x): + # x: [B, num_tokens, hidden_dim] + B, N, C = x.shape + x = x.reshape(B, self.h, self.w, C) + x = x.permute(0, 3, 1, 2) # [B, C, h, w] + x = F.interpolate( + x, scale_factor=self.patch_size, mode="bilinear", align_corners=False + ) + x = self.conv(x) + return x + + +class RETFoundSegmentation(nn.Module): + def __init__( + self, img_size=512, patch_size=16, hidden_dim=1024, num_classes=2, drop_path=0.2 + ): + super().__init__() + self.encoder = RETFound_mae( + img_size=img_size, + num_classes=num_classes, + drop_path_rate=drop_path, + global_pool=False, + ) + self.seg_head = SegmentationHead(hidden_dim, num_classes, img_size, patch_size) + + def forward(self, x): + B = x.shape[0] + x_tokens = self.encoder.patch_embed(x) # [B, num_patches, hidden_dim] + cls_tokens = self.encoder.cls_token.expand(B, -1, -1) + x_tokens = torch.cat((cls_tokens, x_tokens), dim=1) + x_tokens = x_tokens + self.encoder.pos_embed + x_tokens = self.encoder.pos_drop(x_tokens) + for blk in self.encoder.blocks: + x_tokens = blk(x_tokens) + x_tokens = self.encoder.norm(x_tokens) + tokens = x_tokens[:, 1:] # exclude cls token + seg_map = self.seg_head(tokens) + return seg_map + + +# 
# ------------------------------
# Loss Functions for Imbalanced Segmentation
# ------------------------------
def dice_loss(pred, target, smooth=1e-6):
    """Soft Dice loss.

    Args:
        pred: logits, shape [B, num_classes, H, W].
        target: ground-truth class indices, shape [B, H, W] (long).
        smooth: additive smoothing that avoids division by zero on empty masks.

    Returns:
        Scalar tensor: 1 - mean Dice over classes and batch.
    """
    pred_soft = F.softmax(pred, dim=1)
    num_classes = pred.shape[1]
    target_onehot = (
        F.one_hot(target, num_classes=num_classes).permute(0, 3, 1, 2).float()
    )
    intersection = (pred_soft * target_onehot).sum(dim=(2, 3))
    union = pred_soft.sum(dim=(2, 3)) + target_onehot.sum(dim=(2, 3))
    dice = (2.0 * intersection + smooth) / (union + smooth)
    return 1 - dice.mean()


def combined_loss_fn(outputs, targets, ce_loss_fn, dice_weight=1.0):
    """Weighted cross-entropy plus ``dice_weight`` times the soft Dice loss."""
    ce_loss = ce_loss_fn(outputs, targets)
    d_loss = dice_loss(outputs, targets)
    return ce_loss + dice_weight * d_loss


# ------------------------------
# Metrics Function
# ------------------------------
def compute_metrics(preds, targets, smooth=1e-6):
    """Pixel accuracy, Dice and IoU for binary {0, 1} numpy masks.

    NOTE(review): Dice/IoU here treat the arrays as binary foreground masks;
    for multi-class use they would need per-class computation.
    """
    pixel_acc = np.mean(preds == targets)
    intersection = np.sum(preds * targets)
    dice = (2.0 * intersection + smooth) / (np.sum(preds) + np.sum(targets) + smooth)
    union = np.sum(preds) + np.sum(targets) - intersection
    iou = (intersection + smooth) / (union + smooth)
    return pixel_acc, dice, iou


# ------------------------------
# Training and Evaluation Loops
# ------------------------------
def train_segmentation(model, dataloader, loss_fn, optimizer, device):
    """Run one optimization epoch; returns the dataset-averaged training loss."""
    model.train()
    running_loss = 0.0
    for images, masks in dataloader:
        images = images.to(device)
        masks = masks.to(device)
        optimizer.zero_grad()
        outputs = model(images)
        loss = loss_fn(outputs, masks)
        loss.backward()
        optimizer.step()
        # Weight by batch size so the average is per-sample, not per-batch.
        running_loss += loss.item() * images.size(0)
    return running_loss / len(dataloader.dataset)


def evaluate_segmentation(model, dataloader, loss_fn, device):
    """Evaluate without gradients.

    Returns:
        (mean loss, predictions, targets) with predictions/targets stacked
        into numpy arrays of shape [N, H, W].
    """
    model.eval()
    running_loss = 0.0
    all_preds = []
    all_targets = []
    with torch.no_grad():
        for images, masks in dataloader:
            images = images.to(device)
            masks = masks.to(device)
            outputs = model(images)
            loss = loss_fn(outputs, masks)
            running_loss += loss.item() * images.size(0)
            preds = outputs.argmax(dim=1)
            all_preds.append(preds.cpu())
            all_targets.append(masks.cpu())
    epoch_loss = running_loss / len(dataloader.dataset)
    all_preds = torch.cat(all_preds, dim=0).numpy()
    all_targets = torch.cat(all_targets, dim=0).numpy()
    return epoch_loss, all_preds, all_targets


# ------------------------------
# Main Training Script
# ------------------------------
def main():
    """Fine-tune the RETFound encoder for drusen segmentation on .eye volumes."""
    parser = argparse.ArgumentParser(description="RETFound Segmentation Fine-tuning")
    parser.add_argument(
        "--data_path",
        type=str,
        required=True,
        help="Base dataset directory with subfolders: train, val, test",
    )
    parser.add_argument("--epochs", type=int, default=50, help="Number of epochs")
    parser.add_argument("--batch_size", type=int, default=4, help="Batch size")
    parser.add_argument("--lr", type=float, default=1e-4, help="Learning rate")
    parser.add_argument(
        "--img_size", type=int, default=512, help="Input image size (square)"
    )
    parser.add_argument(
        "--patch_size", type=int, default=16, help="Patch size used by the encoder"
    )
    parser.add_argument("--drop_path", type=float, default=0.2, help="Drop path rate")
    parser.add_argument(
        "--output_dir",
        type=str,
        default="./segmentation_output",
        help="Directory to save model checkpoints",
    )
    parser.add_argument(
        "--finetune",
        type=str,
        default="",
        help="Path to pretrained RETFound checkpoint (or repo name for download)",
    )
    parser.add_argument(
        "--dice_weight", type=float, default=1.0, help="Weight for dice loss term"
    )
    parser.add_argument(
        "--ce_weight",
        type=str,
        default="0.3,0.7",
        help="Comma-separated weights for cross entropy loss (background, drusen)",
    )
    args = parser.parse_args()

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    os.makedirs(args.output_dir, exist_ok=True)

    # Minimal transform: resize and normalize (applied to image and mask).
    transform = Compose(
        [
            Resize(args.img_size, args.img_size),
            Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            ToTensorV2(),
        ]
    )

    # Create datasets for train, val, and test. DataLoader is already
    # imported at module level; the redundant local re-import was removed.
    train_folder = os.path.join(args.data_path, "train")
    val_folder = os.path.join(args.data_path, "val")
    test_folder = os.path.join(args.data_path, "test")

    train_dataset = MultiEyeSegmentationDataset(train_folder, transform=transform)
    val_dataset = MultiEyeSegmentationDataset(val_folder, transform=transform)
    test_dataset = MultiEyeSegmentationDataset(test_folder, transform=transform)

    train_loader = DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0
    )
    val_loader = DataLoader(
        val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0
    )
    test_loader = DataLoader(
        test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0
    )

    model = RETFoundSegmentation(
        img_size=args.img_size,
        patch_size=args.patch_size,
        hidden_dim=1024,
        num_classes=2,
        drop_path=args.drop_path,
    )
    model.to(device)

    # ----- Load Pretrained RETFound Foundation Weights -----
    if args.finetune:
        if os.path.exists(args.finetune):
            checkpoint_path = args.finetune
            print(
                f"Loading pretrained weights from local checkpoint: {checkpoint_path}"
            )
        else:
            print(f"Downloading pretrained weights from: {args.finetune}")
            checkpoint_path = hf_hub_download(
                repo_id=f"YukunZhou/{args.finetune}",
                filename=f"{args.finetune}.pth",
            )
        checkpoint = torch.load(checkpoint_path, map_location="cpu")
        if "model" in checkpoint:
            pretrained_dict = checkpoint["model"]
        else:
            pretrained_dict = checkpoint
        # Drop the classification head: shapes differ from the seg model.
        for k in ["head.weight", "head.bias"]:
            if k in pretrained_dict:
                print(f"Removing key {k} from pretrained checkpoint")
                del pretrained_dict[k]
        interpolate_pos_embed(model.encoder, pretrained_dict)
        model.encoder.load_state_dict(pretrained_dict, strict=False)
        print("Pretrained RETFound encoder weights loaded.")

    # Create weighted CrossEntropyLoss for class weighting. The model is
    # built with 2 classes, so exactly 2 weights are required.
    ce_weights = [float(x) for x in args.ce_weight.split(",")]
    if len(ce_weights) != 2:
        raise ValueError(
            "--ce_weight must provide exactly two comma-separated values "
            "(background, drusen)"
        )
    ce_weights_tensor = torch.tensor(ce_weights, device=device)
    ce_loss_fn = nn.CrossEntropyLoss(weight=ce_weights_tensor)

    def loss_fn(outputs, targets):
        return combined_loss_fn(
            outputs, targets, ce_loss_fn, dice_weight=args.dice_weight
        )

    optimizer = optim.AdamW(model.parameters(), lr=args.lr)

    best_loss = float("inf")
    for epoch in range(args.epochs):
        train_loss = train_segmentation(model, train_loader, loss_fn, optimizer, device)
        val_loss, all_preds, all_targets = evaluate_segmentation(
            model, val_loader, loss_fn, device
        )
        pixel_acc, dice, iou = compute_metrics(all_preds, all_targets)
        print(
            f"Epoch {epoch+1} - Train Loss: {train_loss:.4f} | Val Loss: {val_loss:.4f}"
        )
        print(f"Metrics: Pixel Acc: {pixel_acc:.4f}, Dice: {dice:.4f}, IoU: {iou:.4f}")
        if val_loss < best_loss:
            best_loss = val_loss
            # Distinct name: do not shadow the pretrained checkpoint_path.
            save_path = os.path.join(
                args.output_dir, f"checkpoint_epoch{epoch+1}.pth"
            )
            torch.save(model.state_dict(), save_path)
            print(f"Saved checkpoint: {save_path}")

    # Evaluate on test set
    test_loss, test_preds, test_targets = evaluate_segmentation(
        model, test_loader, loss_fn, device
    )
    pixel_acc, dice, iou = compute_metrics(test_preds, test_targets)
    print(f"Test Loss: {test_loss:.4f}")
    print(f"Test Metrics: Pixel Acc: {pixel_acc:.4f}, Dice: {dice:.4f}, IoU: {iou:.4f}")


if __name__ == "__main__":
    main()

# example usage:
# python segmentation_finetune.py --data_path "Data" --finetune "" --epochs 50 \
#   --batch_size 1 --lr 1e-4 --img_size 256 --patch_size 16 --drop_path 0.2 \
#   --ce_weight "0.3,0.7" --dice_weight 1.0 --output_dir "./segmentation_output/2"
import os
import csv
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from typing import Iterable, Optional
from timm.data import Mixup
from timm.utils import accuracy
from sklearn.metrics import (
    accuracy_score, roc_auc_score, f1_score, average_precision_score,
    hamming_loss, jaccard_score, recall_score, precision_score, cohen_kappa_score
)
from pycm import ConfusionMatrix
import util.misc as misc
import util.lr_sched as lr_sched

# NOTE(review): the original patch removed and re-added this file with
# identical content; only a single copy is kept here.

def train_one_epoch(
    model: torch.nn.Module,
    criterion: torch.nn.Module,
    data_loader: Iterable,
    optimizer: torch.optim.Optimizer,
    device: torch.device,
    epoch: int,
    loss_scaler,
    max_norm: float = 0,
    mixup_fn: Optional[Mixup] = None,
    log_writer=None,
    args=None
):
    """Train the model for one epoch with AMP and gradient accumulation."""
    model.train(True)
    metric_logger = misc.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    print_freq, accum_iter = 20, args.accum_iter
    optimizer.zero_grad()

    if log_writer:
        print(f'log_dir: {log_writer.log_dir}')

    for data_iter_step, (samples, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, f'Epoch: [{epoch}]')):
        # Per-iteration (not per-epoch) LR schedule, stepped on accumulation
        # boundaries only.
        if data_iter_step % accum_iter == 0:
            lr_sched.adjust_learning_rate(optimizer, data_iter_step / len(data_loader) + epoch, args)

        samples, targets = samples.to(device, non_blocking=True), targets.to(device, non_blocking=True)
        if mixup_fn:
            samples, targets = mixup_fn(samples, targets)

        with torch.cuda.amp.autocast():
            outputs = model(samples)
            loss = criterion(outputs, targets)
        loss_value = loss.item()
        # Scale the loss so gradients accumulated over accum_iter steps
        # average rather than sum.
        loss /= accum_iter

        loss_scaler(loss, optimizer, clip_grad=max_norm, parameters=model.parameters(), create_graph=False,
                    update_grad=(data_iter_step + 1) % accum_iter == 0)
        if (data_iter_step + 1) % accum_iter == 0:
            optimizer.zero_grad()

        torch.cuda.synchronize()
        metric_logger.update(loss=loss_value)
        min_lr = 10.
        max_lr = 0.
        for group in optimizer.param_groups:
            min_lr = min(min_lr, group["lr"])
            max_lr = max(max_lr, group["lr"])

        metric_logger.update(lr=max_lr)

        loss_value_reduce = misc.all_reduce_mean(loss_value)
        if log_writer is not None and (data_iter_step + 1) % accum_iter == 0:
            """ We use epoch_1000x as the x-axis in tensorboard.
            This calibrates different curves when batch size changes.
            """
            epoch_1000x = int((data_iter_step / len(data_loader) + epoch) * 1000)
            log_writer.add_scalar('loss/train', loss_value_reduce, epoch_1000x)
            log_writer.add_scalar('lr', max_lr, epoch_1000x)

    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}


@torch.no_grad()
def evaluate(data_loader, model, device, args, epoch, mode, num_class, log_writer):
    """Evaluate the model; returns (averaged stats, composite score)."""
    criterion = nn.CrossEntropyLoss()
    metric_logger = misc.MetricLogger(delimiter="  ")
    os.makedirs(os.path.join(args.output_dir, args.task), exist_ok=True)

    model.eval()
    true_onehot, pred_onehot, true_labels, pred_labels, pred_softmax = [], [], [], [], []

    for batch in metric_logger.log_every(data_loader, 10, f'{mode}:'):
        images, target = batch[0].to(device, non_blocking=True), batch[-1].to(device, non_blocking=True)
        target_onehot = F.one_hot(target.to(torch.int64), num_classes=num_class)

        with torch.cuda.amp.autocast():
            output = model(images)
            loss = criterion(output, target)
            output_ = nn.Softmax(dim=1)(output)
            output_label = output_.argmax(dim=1)
            output_onehot = F.one_hot(output_label.to(torch.int64), num_classes=num_class)

        metric_logger.update(loss=loss.item())
        true_onehot.extend(target_onehot.cpu().numpy())
        pred_onehot.extend(output_onehot.detach().cpu().numpy())
        true_labels.extend(target.cpu().numpy())
        pred_labels.extend(output_label.detach().cpu().numpy())
        pred_softmax.extend(output_.detach().cpu().numpy())

    # Renamed local from `accuracy` to `acc` so it no longer shadows the
    # imported timm.utils.accuracy.
    acc = accuracy_score(true_labels, pred_labels)
    hamming = hamming_loss(true_onehot, pred_onehot)
    jaccard = jaccard_score(true_onehot, pred_onehot, average='macro')
    average_precision = average_precision_score(true_onehot, pred_softmax, average='macro')
    kappa = cohen_kappa_score(true_labels, pred_labels)
    f1 = f1_score(true_onehot, pred_onehot, zero_division=0, average='macro')
    roc_auc = roc_auc_score(true_onehot, pred_softmax, multi_class='ovr', average='macro')
    precision = precision_score(true_onehot, pred_onehot, zero_division=0, average='macro')
    recall = recall_score(true_onehot, pred_onehot, zero_division=0, average='macro')

    score = (f1 + roc_auc + kappa) / 3
    if log_writer:
        for metric_name, value in zip(['accuracy', 'f1', 'roc_auc', 'hamming', 'jaccard', 'precision', 'recall', 'average_precision', 'kappa', 'score'],
                                      [acc, f1, roc_auc, hamming, jaccard, precision, recall, average_precision, kappa, score]):
            log_writer.add_scalar(f'perf/{metric_name}', value, epoch)

    print(f'val loss: {metric_logger.meters["loss"].global_avg}')
    print(f'Accuracy: {acc:.4f}, F1 Score: {f1:.4f}, ROC AUC: {roc_auc:.4f}, Hamming Loss: {hamming:.4f},\n'
          f' Jaccard Score: {jaccard:.4f}, Precision: {precision:.4f}, Recall: {recall:.4f},\n'
          f' Average Precision: {average_precision:.4f}, Kappa: {kappa:.4f}, Score: {score:.4f}')

    metric_logger.synchronize_between_processes()

    results_path = os.path.join(args.output_dir, args.task, f'metrics_{mode}.csv')
    file_exists = os.path.isfile(results_path)
    with open(results_path, 'a', newline='', encoding='utf8') as cfa:
        wf = csv.writer(cfa)
        if not file_exists:
            wf.writerow(['val_loss', 'accuracy', 'f1', 'roc_auc', 'hamming', 'jaccard', 'precision', 'recall', 'average_precision', 'kappa'])
        wf.writerow([metric_logger.meters["loss"].global_avg, acc, f1, roc_auc, hamming, jaccard, precision, recall, average_precision, kappa])

    if mode == 'test':
        cm = ConfusionMatrix(actual_vector=true_labels, predict_vector=pred_labels)
        cm.plot(cmap=plt.cm.Blues, number_label=True, normalized=True, plot_lib="matplotlib")
        plt.savefig(os.path.join(args.output_dir, args.task, 'confusion_matrix_test.jpg'), dpi=600, bbox_inches='tight')

    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}, score


# --- engine_segmentation.py (new file introduced by this patch) ---

def dice_loss(pred, target, smooth=1e-6):
    """Soft Dice loss over softmaxed logits; target holds class indices."""
    pred = F.softmax(pred, dim=1)
    num_classes = pred.shape[1]
    target_oh = F.one_hot(target, num_classes).permute(0, 3, 1, 2).float()
    inter = (pred * target_oh).sum((2, 3))
    union = pred.sum((2, 3)) + target_oh.sum((2, 3))
    return 1 - ((2 * inter + smooth) / (union + smooth)).mean()


def combined_loss_fn(outputs, targets, ce_fn, dice_w=1.0):
    """Cross-entropy plus dice_w * Dice."""
    return ce_fn(outputs, targets) + dice_w * dice_loss(outputs, targets)


def compute_metrics(preds, targets, smooth=1e-6):
    """Pixel accuracy, Dice and IoU for binary integer/bool numpy masks."""
    pixel_acc = (preds == targets).mean()
    inter = (preds & targets).sum()  # bitwise AND: requires int/bool dtype
    dice = (2 * inter + smooth) / (preds.sum() + targets.sum() + smooth)
    union = preds.sum() + targets.sum() - inter
    iou = (inter + smooth) / (union + smooth)
    return pixel_acc, dice, iou


def train_segmentation(model, loader, loss_fn, optimizer, device):
    """One training epoch; returns the per-sample mean loss."""
    model.train()
    total = 0
    for x, y in loader:
        x, y = x.to(device), y.to(device)
        optimizer.zero_grad()
        loss = loss_fn(model(x), y)
        loss.backward()
        optimizer.step()
        total += loss.item() * x.size(0)
    return total / len(loader.dataset)
def train_segmentation(model, loader, loss_fn, optimizer, device):
    """Run one training epoch; returns the dataset-averaged loss.

    (Definition restored in full — the flattened patch severed it mid-body.)
    """
    model.train()
    total = 0.0  # float accumulator for per-sample loss sums
    for x, y in loader:
        x, y = x.to(device), y.to(device)
        optimizer.zero_grad()
        loss = loss_fn(model(x), y)
        loss.backward()
        optimizer.step()
        total += loss.item() * x.size(0)
    return total / len(loader.dataset)


@torch.no_grad()
def evaluate_segmentation(model, loader, loss_fn, device):
    """Evaluate `model` on `loader` without gradients.

    Returns:
        (mean per-sample loss, argmax predictions, targets) where the last
        two are numpy arrays concatenated over the whole loader.
    """
    model.eval()
    total, P, T = 0.0, [], []
    for x, y in loader:
        x, y = x.to(device), y.to(device)
        out = model(x)
        loss = loss_fn(out, y)
        total += loss.item() * x.size(0)
        P.append(out.argmax(1).cpu())
        T.append(y.cpu())
    return total / len(loader.dataset), torch.cat(P).numpy(), torch.cat(T).numpy()
AccuracyRecallF1 ScoreROC AUCPR AUC
0.70910.56160.60780.90370.6863
\n" + ] + }, + { + "cell_type": "markdown", + "id": "7ec435a7", + "metadata": {}, + "source": [ + "## 1. Install environment\n", + "1. Follow [RETFound README](https://github.com/rmaphoh/RETFound) to install environment\n", + "2. Restart this Jupyter Notebook\n", + "3. Select Kernel retfound" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7cbf5e93-6ca0-4401-88e6-64e39968e7cd", + "metadata": {}, + "outputs": [], + "source": [ + "import sys, torch\n", + "from pathlib import Path\n", + "import os\n", + "\n", + "PROJECT_ROOT = Path.cwd().resolve()\n", + "\n", + "if PROJECT_ROOT.name == 'examples': PROJECT_ROOT = PROJECT_ROOT.parent\n", + "os.chdir(PROJECT_ROOT)\n", + "\n", + "print('Project root:', PROJECT_ROOT)\n", + "print(\"sys.executable:\", sys.executable)\n", + "print(\"torch version:\", torch.__version__)" + ] + }, + { + "cell_type": "markdown", + "id": "ed67953f", + "metadata": {}, + "source": [ + "## 2. Prepare MendeleyOCT dataset\n", + "1. Download from the [shared data pool](https://github.com/rmaphoh/RETFound/blob/main/BENCHMARK.md).\n", + "2. Put the data folder under the project directory, e.g. \"RETFound/MESSIDOR2\"\n" + ] + }, + { + "cell_type": "markdown", + "id": "357be2fa-a914-4d1f-8759-76b2b1c3f20f", + "metadata": {}, + "source": [ + "## 3. Hyperparameter and path settings\n", + "1. Can choose finetune or lp (linear probe)\n", + "2. 
Model selection [info](https://github.com/rmaphoh/RETFound#:~:text=In%20train.sh%2C%20the%20model%20can%20be%20selected%20by%20changing%20the%20hyperparameters%20MODEL%2C%20MODEL_ARCH%2C%20FINETUNE%3A)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5f675843", + "metadata": {}, + "outputs": [], + "source": [ + "from pathlib import Path\n", + "ADAPTATION='finetune'\n", + "MODEL='RETFound_dinov2'\n", + "MODEL_ARCH='retfound_dinov2'\n", + "FINETUNE='RETFound_dinov2_meh'\n", + "DATASET='MESSIDOR2'\n", + "NUM_CLASS=5\n", + "DATA_PATH=PROJECT_ROOT/DATASET\n", + "BATCH_SIZE=24\n", + "EPOCHS=50\n", + "INPUT_SIZE=224\n", + "WORLD_SIZE=1\n", + "TASK=f\"{MODEL_ARCH}_{DATASET}_{ADAPTATION}\"\n", + "OUTPUT_DIR=PROJECT_ROOT/'output_dir'/TASK\n", + "print('DATA_PATH:',DATA_PATH)\n", + "print('TASK:',TASK)\n", + "print('OUTPUT_DIR:',OUTPUT_DIR)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fa3d8d10", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "id": "6ac04845", + "metadata": {}, + "source": [ + "## 4. 
Fine-tuning and testing RETFound on MESSIDOR2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d23ff751", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "import sys\n", + "\n", + "!{sys.executable} main_finetune.py \\\n", + " --model {MODEL} \\\n", + " --model_arch {MODEL_ARCH} \\\n", + " --finetune {FINETUNE} \\\n", + " --savemodel \\\n", + " --global_pool \\\n", + " --batch_size {BATCH_SIZE} \\\n", + " --epochs {EPOCHS} \\\n", + " --nb_classes {NUM_CLASS} \\\n", + " --data_path {DATA_PATH} \\\n", + " --input_size {INPUT_SIZE} \\\n", + " --task {TASK} \\\n", + " --adaptation {ADAPTATION}" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "b55116e5", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "F:\\GitHub\\RETFound\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\IPython\\core\\magics\\osm.py:417: UserWarning: using dhist requires you to install the `pickleshare` library.\n", + " self.shell.db['dhist'] = compress_dhist(dhist)[-100:]\n" + ] + } + ], + "source": [ + "%cd F:\\GitHub\\RETFound" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "374fdce3", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\albumentations\\__init__.py:24: UserWarning: A new version of Albumentations is available: 2.0.8 (you have 1.4.24). Upgrade using: pip install -U albumentations. 
To disable automatic update checks, set the environment variable NO_ALBUMENTATIONS_UPDATE to 1.\n", + " check_for_updates()\n", + "Traceback (most recent call last):\n", + " File \"f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\huggingface_hub\\utils\\_errors.py\", line 304, in hf_raise_for_status\n", + " response.raise_for_status()\n", + " File \"f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\requests\\models.py\", line 1026, in raise_for_status\n", + " raise HTTPError(http_error_msg, response=self)\n", + "requests.exceptions.HTTPError: 401 Client Error: Unauthorized for url: https://huggingface.co/YukunZhou/RETFound_OCT/resolve/main/RETFound_OCT.pth\n", + "\n", + "The above exception was the direct cause of the following exception:\n", + "\n", + "Traceback (most recent call last):\n", + " File \"F:\\GitHub\\RETFound\\main_segmentation.py\", line 122, in \n", + " main()\n", + " File \"F:\\GitHub\\RETFound\\main_segmentation.py\", line 90, in main\n", + " ckpt = hf_hub_download(f\"YukunZhou/{args.finetune}\", f\"{args.finetune}.pth\")\n", + " File \"f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\huggingface_hub\\utils\\_validators.py\", line 114, in _inner_fn\n", + " return fn(*args, **kwargs)\n", + " File \"f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\huggingface_hub\\file_download.py\", line 1221, in hf_hub_download\n", + " return _hf_hub_download_to_cache_dir(\n", + " File \"f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\huggingface_hub\\file_download.py\", line 1325, in _hf_hub_download_to_cache_dir\n", + " _raise_on_head_call_error(head_call_error, force_download, local_files_only)\n", + " File \"f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\huggingface_hub\\file_download.py\", line 1823, in _raise_on_head_call_error\n", + " raise head_call_error\n", + " File \"f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\huggingface_hub\\file_download.py\", line 1722, in _get_metadata_or_catch_error\n", + " metadata = 
get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers)\n", + " File \"f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\huggingface_hub\\utils\\_validators.py\", line 114, in _inner_fn\n", + " return fn(*args, **kwargs)\n", + " File \"f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\huggingface_hub\\file_download.py\", line 1645, in get_hf_file_metadata\n", + " r = _request_wrapper(\n", + " File \"f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\huggingface_hub\\file_download.py\", line 372, in _request_wrapper\n", + " response = _request_wrapper(\n", + " File \"f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\huggingface_hub\\file_download.py\", line 396, in _request_wrapper\n", + " hf_raise_for_status(response)\n", + " File \"f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\huggingface_hub\\utils\\_errors.py\", line 352, in hf_raise_for_status\n", + " raise RepositoryNotFoundError(message, response) from e\n", + "huggingface_hub.utils._errors.RepositoryNotFoundError: 401 Client Error. (Request ID: Root=1-695fc45d-79fe26933f26030a6b9841b0;54857051-1858-4db9-bda3-3c975c6024e2)\n", + "\n", + "Repository Not Found for url: https://huggingface.co/YukunZhou/RETFound_OCT/resolve/main/RETFound_OCT.pth.\n", + "Please make sure you specified the correct `repo_id` and `repo_type`.\n", + "If you are trying to access a private or gated repo, make sure you are authenticated.\n", + "Invalid username or password.\n" + ] + } + ], + "source": [ + "!python main_segmentation.py \\\n", + " --data_path Segmentation/Data \\\n", + " --finetune RETFound_OCT \\\n", + " --epochs 2 \\\n", + " --batch_size 2\n" + ] + }, + { + "cell_type": "markdown", + "id": "84ce93ac", + "metadata": {}, + "source": [ + "## 5. 
Evaluation-only" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0af0f8a7", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "import sys\n", + "\n", + "CKPT = OUTPUT_DIR / \"checkpoint-best.pth\"\n", + "\n", + "!{sys.executable} main_finetune.py \\\n", + " --model {MODEL} \\\n", + " --model_arch {MODEL_ARCH} \\\n", + " --finetune {FINETUNE} \\\n", + " --savemodel \\\n", + " --global_pool \\\n", + " --batch_size 128 \\\n", + " --nb_classes {NUM_CLASS} \\\n", + " --data_path {DATA_PATH} \\\n", + " --input_size {INPUT_SIZE} \\\n", + " --task {TASK} \\\n", + " --adaptation {ADAPTATION} \\\n", + " --eval \\\n", + " --resume {CKPT}\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "02d2dce7-31c2-48e2-87ce-9223b74cf94e", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "environment": { + "kernel": "retfound", + "name": "workbench-notebooks.m128", + "type": "gcloud", + "uri": "us-docker.pkg.dev/deeplearning-platform-release/gcr.io/workbench-notebooks:m128" + }, + "kernelspec": { + "display_name": "venv3.9", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/main_finetune.py b/main_finetune.py index 0f4513f4..91e192ee 100644 --- a/main_finetune.py +++ b/main_finetune.py @@ -1,448 +1,448 @@ -#!/usr/bin/env python3 - -# ========================= -import argparse -import datetime -import json -import os -import time -from pathlib import Path -import warnings -import faulthandler - -# ========================= -import numpy as np -import torch -import torch.backends.cudnn as cudnn -from torch.utils.tensorboard import SummaryWriter -from timm.models.layers import 
trunc_normal_
from timm.data.mixup import Mixup
from huggingface_hub import hf_hub_download, login  # login imported as in original

# =========================
import models_vit as models
import util.lr_decay as lrd
import util.misc as misc
from util.datasets import build_dataset
from util.pos_embed import interpolate_pos_embed
from util.misc import NativeScalerWithGradNormCount as NativeScaler
from engine_finetune import train_one_epoch, evaluate

# =========================
faulthandler.enable()
warnings.simplefilter(action="ignore", category=FutureWarning)


def get_args_parser():
    """Build the argparse parser for MAE fine-tuning / linear probing.

    Returns an ``argparse.ArgumentParser`` (``add_help=False`` so it can be
    composed as a parent parser). Defaults below are behavior-critical.
    """
    parser = argparse.ArgumentParser(
        "MAE fine-tuning / linear probing for image classification", add_help=False
    )

    # ---- Core training
    parser.add_argument("--batch_size", default=128, type=int,
                        help="Batch size per GPU (effective batch size = batch_size * accum_iter * #gpus)")
    parser.add_argument("--epochs", default=50, type=int)
    parser.add_argument("--accum_iter", default=1, type=int,
                        help="Gradient accumulation steps")

    # ---- Model parameters
    parser.add_argument("--model", default="vit_large_patch16", type=str, metavar="MODEL",
                        help="Model entry in models_vit.py")
    parser.add_argument("--model_arch", default="dinov3_vits16", type=str, metavar="MODEL_ARCH",
                        help="Backbone architecture key (e.g., dinov2_vitl14, convnext_base, etc.)")
    parser.add_argument("--input_size", default=256, type=int, help="Image size")
    parser.add_argument("--drop_path", type=float, default=0.2, metavar="PCT", help="Drop path rate")
    # --cls_token flips global_pool off via dest=; global_pool defaults to True.
    parser.add_argument("--global_pool", action="store_true"); parser.set_defaults(global_pool=True)
    parser.add_argument("--cls_token", action="store_false", dest="global_pool",
                        help="Use class token instead of global pool for classification")

    # ---- Optimizer parameters
    parser.add_argument("--clip_grad", type=float, default=None, metavar="NORM", help="Clip grad norm")
    parser.add_argument("--weight_decay", type=float, default=0.05, help="Weight decay")
    parser.add_argument("--lr", type=float, default=None, metavar="LR", help="Absolute LR (overrides blr)")
    parser.add_argument("--blr", type=float, default=5e-3, metavar="LR",
                        help="Base LR: lr = blr * total_batch_size / 256")
    parser.add_argument("--layer_decay", type=float, default=0.65, help="Layer-wise LR decay (ViT)")
    parser.add_argument("--min_lr", type=float, default=1e-6, metavar="LR", help="Lower LR bound")
    parser.add_argument("--warmup_epochs", type=int, default=10, metavar="N", help="Warmup epochs")

    # ---- Augmentation
    parser.add_argument("--color_jitter", type=float, default=None, metavar="PCT")
    parser.add_argument("--aa", type=str, default="rand-m9-mstd0.5-inc1", metavar="NAME")
    parser.add_argument("--smoothing", type=float, default=0.1)

    # ---- Random erase
    parser.add_argument("--reprob", type=float, default=0.25, metavar="PCT")
    parser.add_argument("--remode", type=str, default="pixel")
    parser.add_argument("--recount", type=int, default=1)
    parser.add_argument("--resplit", action="store_true", default=False)

    # ---- Mixup/Cutmix (0.0 disables both by default)
    parser.add_argument("--mixup", type=float, default=0.0)
    parser.add_argument("--cutmix", type=float, default=0.0)
    parser.add_argument("--cutmix_minmax", type=float, nargs="+", default=None)
    parser.add_argument("--mixup_prob", type=float, default=1.0)
    parser.add_argument("--mixup_switch_prob", type=float, default=0.5)
    parser.add_argument("--mixup_mode", type=str, default="batch")

    # ---- Finetuning & adaptation
    parser.add_argument("--finetune", default="", type=str, help="Checkpoint id/path (see model rules below)")
    parser.add_argument("--task", default="", type=str, help="Task name for logging/output grouping")
    parser.add_argument("--adaptation", default="finetune", choices=["finetune", "lp"],
                        help="Adaptation strategy: finetune=full fine-tune, lp=linear probe (train head only)")

    # ---- Dataset & paths
    parser.add_argument("--data_path", default="./data/", type=str)
    parser.add_argument("--nb_classes", default=8, type=int)
    parser.add_argument("--output_dir", default="./output_dir")
    parser.add_argument("--log_dir", default="./output_logs")

    # >>> NEW: training data efficiency <<<
    parser.add_argument(
        "--dataratio", type=str, default="1.0",
        help=('Training data ratio(s) for subsampling in build_dataset. '
              'Use a single float in (0,1] (e.g., 0.25) or a comma-separated list '
              '(e.g., "1.0,0.5,0.25") if your build_dataset supports sweeps.')
    )
    parser.add_argument(
        "--stratified", action="store_true",
        help="If set, subsample training data in a class-stratified manner (requires support in build_dataset)."
    )

    # ---- Runtime
    parser.add_argument("--device", default="cuda")
    parser.add_argument("--seed", default=0, type=int)
    parser.add_argument("--resume", default="", help="Resume full state (optimizer, scaler, etc.)")
    parser.add_argument("--start_epoch", default=0, type=int, metavar="N")
    parser.add_argument("--eval", action="store_true", help="Evaluation only")
    parser.add_argument("--dist_eval", action="store_true", default=False,
                        help="Distributed evaluation (faster monitoring during training)")
    parser.add_argument("--num_workers", default=10, type=int)
    parser.add_argument("--pin_mem", action="store_true"); parser.set_defaults(pin_mem=True)

    # ---- Distributed
    parser.add_argument("--world_size", default=1, type=int)
    parser.add_argument("--local_rank", default=-1, type=int)
    parser.add_argument("--dist_on_itp", action="store_true")
    parser.add_argument("--dist_url", default="env://")

    # ---- Misc
    parser.add_argument("--savemodel", action="store_true", default=True, help="Save best model")
    parser.add_argument("--norm", default="IMAGENET", type=str)
    parser.add_argument("--enhance", action="store_true", default=False)
    parser.add_argument("--datasets_seed", default=2026, type=int)

    return parser
========================= -# Main -# ========================= -def main(args, criterion): - # ---- Optionally load args from resume (when training) - if args.resume and not args.eval: - resume_path = args.resume - checkpoint = torch.load(args.resume, map_location="cpu") - print(f"Load checkpoint (args) from: {args.resume}") - args = checkpoint["args"] - args.resume = resume_path - - # ---- Distributed setup - misc.init_distributed_mode(args) - - print(f"job dir: {os.path.dirname(os.path.realpath(__file__))}") - print(f"{args}".replace(", ", ",\n")) - - device = torch.device(args.device) - - # ---- Reproducibility - seed = args.seed + misc.get_rank() - torch.manual_seed(seed) - np.random.seed(seed) - cudnn.benchmark = True - - # ---- Build model - if args.model == "RETFound_mae": - model = models.__dict__[args.model]( - img_size=args.input_size, - num_classes=args.nb_classes, - drop_path_rate=args.drop_path, - global_pool=args.global_pool, - ) - else: - model = models.__dict__[args.model]( - num_classes=args.nb_classes, - drop_path_rate=args.drop_path, - args=args, - ) - - # ---- Load pre-trained weights (if requested and not eval-only) - if args.finetune and not args.eval: - print(f"Preparing to load pre-trained weights: {args.finetune}") - - if args.model in ["Dinov3", "Dinov2"]: - checkpoint_path = args.finetune # local path - elif args.model in ["RETFound_dinov2", "RETFound_mae"]: - print(f"Downloading pre-trained weights from Hugging Face Hub: {args.finetune}") - checkpoint_path = hf_hub_download( - repo_id=f"YukunZhou/{args.finetune}", - filename=f"{args.finetune}.pth", - ) - else: - raise ValueError( - f"Unsupported model '{args.model}'. 
" - f"Expected one of: Dinov3, Dinov2, RETFound_dinov2, RETFound_mae" - ) - - checkpoint = torch.load(checkpoint_path, map_location="cpu") - print(f"Loaded pre-trained checkpoint from: {checkpoint_path}") - - if args.model in ["Dinov3", "Dinov2"]: - checkpoint_model = checkpoint - elif args.model == "RETFound_dinov2": - checkpoint_model = checkpoint["teacher"] - else: # RETFound_mae - checkpoint_model = checkpoint["model"] - - # -- Key hygiene - checkpoint_model = {k.replace("backbone.", ""): v for k, v in checkpoint_model.items()} - checkpoint_model = {k.replace("mlp.w12.", "mlp.fc1."): v for k, v in checkpoint_model.items()} - checkpoint_model = {k.replace("mlp.w3.", "mlp.fc2."): v for k, v in checkpoint_model.items()} - - # -- Remove classifier if shape mismatched - state_dict = model.state_dict() - for k in ["head.weight", "head.bias"]: - if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape: - print(f"Removing key {k} from pretrained checkpoint") - del checkpoint_model[k] - - # -- Interpolate pos embed (ViT) - interpolate_pos_embed(model, checkpoint_model) - - # -- Load backbone weights (non-strict) - _ = model.load_state_dict(checkpoint_model, strict=False) - - # -- Re-init head - if hasattr(model, "head") and hasattr(model.head, "weight"): - trunc_normal_(model.head.weight, std=2e-5) - - # ---- Datasets & samplers - dataset_train = build_dataset(is_train="train", args=args) - dataset_val = build_dataset(is_train="val", args=args) - dataset_test = build_dataset(is_train="test", args=args) - - num_tasks = misc.get_world_size() - global_rank = misc.get_rank() - - if not args.eval: - sampler_train = torch.utils.data.DistributedSampler( - dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True - ) - print(f"Sampler_train = {sampler_train}") - if args.dist_eval: - if len(dataset_val) % num_tasks != 0: - print("Warning: dist eval with dataset not divisible by #procs; results may differ slightly.") - sampler_val = 
torch.utils.data.DistributedSampler( - dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=True - ) - else: - sampler_val = torch.utils.data.SequentialSampler(dataset_val) - - if args.dist_eval: - if len(dataset_test) % num_tasks != 0: - print("Warning: dist eval test set not divisible by #procs; results may differ slightly.") - sampler_test = torch.utils.data.DistributedSampler( - dataset_test, num_replicas=num_tasks, rank=global_rank, shuffle=True - ) - else: - sampler_test = torch.utils.data.SequentialSampler(dataset_test) - - # ---- Logging - if global_rank == 0 and args.log_dir is not None and not args.eval: - os.makedirs(args.log_dir, exist_ok=True) - log_writer = SummaryWriter(log_dir=os.path.join(args.log_dir, args.task)) - else: - log_writer = None - - # ---- DataLoaders - if not args.eval: - data_loader_train = torch.utils.data.DataLoader( - dataset_train, sampler=sampler_train, - batch_size=args.batch_size, num_workers=args.num_workers, - pin_memory=args.pin_mem, drop_last=True, - ) - print(f"len of train_set: {len(data_loader_train) * args.batch_size}") - - data_loader_val = torch.utils.data.DataLoader( - dataset_val, sampler=sampler_val, - batch_size=args.batch_size, num_workers=args.num_workers, - pin_memory=args.pin_mem, drop_last=False, - ) - - data_loader_test = torch.utils.data.DataLoader( - dataset_test, sampler=sampler_test, - batch_size=args.batch_size, num_workers=args.num_workers, - pin_memory=args.pin_mem, drop_last=False, - ) - - # ---- Mixup/CutMix - mixup_fn = None - mixup_active = (args.mixup > 0) or (args.cutmix > 0.) 
or (args.cutmix_minmax is not None) - if mixup_active: - print("Mixup is activated!") - mixup_fn = Mixup( - mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax, - prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode, - label_smoothing=args.smoothing, num_classes=args.nb_classes - ) - - # ---- Eval-only: resume weights - if args.resume and args.eval: - checkpoint = torch.load(args.resume, map_location="cpu") - print(f"Load checkpoint for eval from: {args.resume}") - model.load_state_dict(checkpoint["model"]) - - model.to(device) - model_without_ddp = model - - # ---- Adaptation toggle - if args.adaptation == "lp": - for name, param in model.named_parameters(): - param.requires_grad = ("head" in name) - print("[Adaptation] Linear probe: training classifier head only.") - else: - print("[Adaptation] Full fine-tuning: training all parameters.") - - # ---- Count trainable params - n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) - print(f"number of trainable params (M): {n_parameters / 1.e6:.2f}") - - # ---- LR scaling by effective batch size - eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size() - if args.lr is None: - args.lr = args.blr * eff_batch_size / 256 - print(f"base lr: {args.lr * 256 / eff_batch_size:.2e}") - print(f"actual lr: {args.lr:.2e}") - print(f"accumulate grad iterations: {args.accum_iter}") - print(f"effective batch size: {eff_batch_size}") - - # ---- DDP (if available) - if args.distributed and torch.cuda.device_count() > 1: - ddp_kwargs = {} - if args.adaptation == "lp": - ddp_kwargs["find_unused_parameters"] = True - model = torch.nn.parallel.DistributedDataParallel( - model, device_ids=[args.gpu], **ddp_kwargs - ) - model_without_ddp = model.module - else: - model_without_ddp = model # single-GPU - - # ---- Optimizer param groups (after freezing) - no_weight_decay = (model_without_ddp.no_weight_decay() - if hasattr(model_without_ddp, 
"no_weight_decay") else []) - - - param_groups = lrd.param_groups_lrd( - model_without_ddp, - weight_decay=args.weight_decay, - no_weight_decay_list=no_weight_decay, - layer_decay=args.layer_decay, - ) - for g in param_groups: - g["params"] = [p for p in g["params"] if p.requires_grad] - - optimizer = torch.optim.AdamW(param_groups, lr=args.lr) - loss_scaler = NativeScaler() - print(f"criterion = {criterion}") - - # ---- Load previous full state (optimizer, scaler, etc.) - misc.load_model(args=args, model_without_ddp=model_without_ddp, - optimizer=optimizer, loss_scaler=loss_scaler) - - # ========================= - # Eval-only Short Circuit - # ========================= - if args.eval: - if "checkpoint" in locals() and isinstance(checkpoint, dict) and ("epoch" in checkpoint): - print(f"Test with the best model at epoch = {checkpoint['epoch']}") - test_stats, auc_roc = evaluate( - data_loader_test, model, device, args, epoch=0, mode="test", - num_class=args.nb_classes, log_writer=log_writer - ) - return - - # ========================= - # Train Loop - # ========================= - print(f"Start training for {args.epochs} epochs") - start_time = time.time() - max_score = 0.0 - best_epoch = 0 - - for epoch in range(args.start_epoch, args.epochs): - if args.distributed: - data_loader_train.sampler.set_epoch(epoch) - - train_stats = train_one_epoch( - model, criterion, data_loader_train, - optimizer, device, epoch, loss_scaler, - args.clip_grad, mixup_fn, - log_writer=log_writer, args=args - ) - - val_stats, val_score = evaluate( - data_loader_val, model, device, args, epoch, mode="val", - num_class=args.nb_classes, log_writer=log_writer - ) - - if max_score < val_score: - max_score = val_score - best_epoch = epoch - if args.output_dir and args.savemodel: - misc.save_model( - args=args, model=model, model_without_ddp=model_without_ddp, - optimizer=optimizer, loss_scaler=loss_scaler, epoch=epoch, mode="best" - ) - print(f"Best epoch = {best_epoch}, Best score = 
{max_score:.4f}") - - if log_writer is not None: - log_writer.add_scalar("loss/val", val_stats["loss"], epoch) - log_writer.flush() - - log_stats = {**{f"train_{k}": v for k, v in train_stats.items()}, - "epoch": epoch, - "n_parameters": n_parameters} - - if args.output_dir and misc.is_main_process(): - with open(os.path.join(args.output_dir, args.task, "log.txt"), "a", encoding="utf-8") as f: - f.write(json.dumps(log_stats) + "\n") - - # ========================= - # Final Test (Best Ckpt) - # ========================= - ckpt_path = os.path.join(args.output_dir, args.task, "checkpoint-best.pth") - checkpoint = torch.load(ckpt_path, map_location="cpu") - model_without_ddp.load_state_dict(checkpoint["model"], strict=False) - model.to(device) - print(f"Test with the best model, epoch = {checkpoint.get('epoch', -1)}:") - _test_stats, _auc_roc = evaluate( - data_loader_test, model, device, args, -1, mode="test", - num_class=args.nb_classes, log_writer=None - ) - - total_time = time.time() - start_time - total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print(f"Training time {total_time_str}") - - -if __name__ == "__main__": - args = get_args_parser() - args = args.parse_args() - - criterion = torch.nn.CrossEntropyLoss() - - if args.output_dir: - Path(args.output_dir).mkdir(parents=True, exist_ok=True) - - main(args, criterion) +#!/usr/bin/env python3 + +# ========================= +import argparse +import datetime +import json +import os +import time +from pathlib import Path +import warnings +import faulthandler + +# ========================= +import numpy as np +import torch +import torch.backends.cudnn as cudnn +from torch.utils.tensorboard import SummaryWriter +from timm.models.layers import trunc_normal_ +from timm.data.mixup import Mixup +from huggingface_hub import hf_hub_download, login # login imported as in original + +# ========================= +import models_vit as models +import util.lr_decay as lrd +import util.misc as misc +from 
util.datasets import build_dataset +from util.pos_embed import interpolate_pos_embed +from util.misc import NativeScalerWithGradNormCount as NativeScaler +from engine_finetune import train_one_epoch, evaluate + +# ========================= +faulthandler.enable() +warnings.simplefilter(action="ignore", category=FutureWarning) + + +def get_args_parser(): + parser = argparse.ArgumentParser( + "MAE fine-tuning / linear probing for image classification", add_help=False + ) + + # ---- Core training + parser.add_argument("--batch_size", default=128, type=int, + help="Batch size per GPU (effective batch size = batch_size * accum_iter * #gpus)") + parser.add_argument("--epochs", default=50, type=int) + parser.add_argument("--accum_iter", default=1, type=int, + help="Gradient accumulation steps") + + # ---- Model parameters + parser.add_argument("--model", default="vit_large_patch16", type=str, metavar="MODEL", + help="Model entry in models_vit.py") + parser.add_argument("--model_arch", default="dinov3_vits16", type=str, metavar="MODEL_ARCH", + help="Backbone architecture key (e.g., dinov2_vitl14, convnext_base, etc.)") + parser.add_argument("--input_size", default=256, type=int, help="Image size") + parser.add_argument("--drop_path", type=float, default=0.2, metavar="PCT", help="Drop path rate") + parser.add_argument("--global_pool", action="store_true"); parser.set_defaults(global_pool=True) + parser.add_argument("--cls_token", action="store_false", dest="global_pool", + help="Use class token instead of global pool for classification") + + # ---- Optimizer parameters + parser.add_argument("--clip_grad", type=float, default=None, metavar="NORM", help="Clip grad norm") + parser.add_argument("--weight_decay", type=float, default=0.05, help="Weight decay") + parser.add_argument("--lr", type=float, default=None, metavar="LR", help="Absolute LR (overrides blr)") + parser.add_argument("--blr", type=float, default=5e-3, metavar="LR", + help="Base LR: lr = blr * total_batch_size / 
256") + parser.add_argument("--layer_decay", type=float, default=0.65, help="Layer-wise LR decay (ViT)") + parser.add_argument("--min_lr", type=float, default=1e-6, metavar="LR", help="Lower LR bound") + parser.add_argument("--warmup_epochs", type=int, default=10, metavar="N", help="Warmup epochs") + + # ---- Augmentation + parser.add_argument("--color_jitter", type=float, default=None, metavar="PCT") + parser.add_argument("--aa", type=str, default="rand-m9-mstd0.5-inc1", metavar="NAME") + parser.add_argument("--smoothing", type=float, default=0.1) + + # ---- Random erase + parser.add_argument("--reprob", type=float, default=0.25, metavar="PCT") + parser.add_argument("--remode", type=str, default="pixel") + parser.add_argument("--recount", type=int, default=1) + parser.add_argument("--resplit", action="store_true", default=False) + + # ---- Mixup/Cutmix + parser.add_argument("--mixup", type=float, default=0.0) + parser.add_argument("--cutmix", type=float, default=0.0) + parser.add_argument("--cutmix_minmax", type=float, nargs="+", default=None) + parser.add_argument("--mixup_prob", type=float, default=1.0) + parser.add_argument("--mixup_switch_prob", type=float, default=0.5) + parser.add_argument("--mixup_mode", type=str, default="batch") + + # ---- Finetuning & adaptation + parser.add_argument("--finetune", default="", type=str, help="Checkpoint id/path (see model rules below)") + parser.add_argument("--task", default="", type=str, help="Task name for logging/output grouping") + parser.add_argument("--adaptation", default="finetune", choices=["finetune", "lp"], + help="Adaptation strategy: finetune=full fine-tune, lp=linear probe (train head only)") + + # ---- Dataset & paths + parser.add_argument("--data_path", default="./data/", type=str) + parser.add_argument("--nb_classes", default=8, type=int) + parser.add_argument("--output_dir", default="./output_dir") + parser.add_argument("--log_dir", default="./output_logs") + + # >>> NEW: training data efficiency <<< + 
parser.add_argument( + "--dataratio", type=str, default="1.0", + help=('Training data ratio(s) for subsampling in build_dataset. ' + 'Use a single float in (0,1] (e.g., 0.25) or a comma-separated list ' + '(e.g., "1.0,0.5,0.25") if your build_dataset supports sweeps.') + ) + parser.add_argument( + "--stratified", action="store_true", + help="If set, subsample training data in a class-stratified manner (requires support in build_dataset)." + ) + + # ---- Runtime + parser.add_argument("--device", default="cuda") + parser.add_argument("--seed", default=0, type=int) + parser.add_argument("--resume", default="", help="Resume full state (optimizer, scaler, etc.)") + parser.add_argument("--start_epoch", default=0, type=int, metavar="N") + parser.add_argument("--eval", action="store_true", help="Evaluation only") + parser.add_argument("--dist_eval", action="store_true", default=False, + help="Distributed evaluation (faster monitoring during training)") + parser.add_argument("--num_workers", default=10, type=int) + parser.add_argument("--pin_mem", action="store_true"); parser.set_defaults(pin_mem=True) + + # ---- Distributed + parser.add_argument("--world_size", default=1, type=int) + parser.add_argument("--local_rank", default=-1, type=int) + parser.add_argument("--dist_on_itp", action="store_true") + parser.add_argument("--dist_url", default="env://") + + # ---- Misc + parser.add_argument("--savemodel", action="store_true", default=True, help="Save best model") + parser.add_argument("--norm", default="IMAGENET", type=str) + parser.add_argument("--enhance", action="store_true", default=False) + parser.add_argument("--datasets_seed", default=2026, type=int) + + return parser + + +# ========================= +# Main +# ========================= +def main(args, criterion): + # ---- Optionally load args from resume (when training) + if args.resume and not args.eval: + resume_path = args.resume + checkpoint = torch.load(args.resume, map_location="cpu") + print(f"Load checkpoint 
(args) from: {args.resume}") + args = checkpoint["args"] + args.resume = resume_path + + # ---- Distributed setup + misc.init_distributed_mode(args) + + print(f"job dir: {os.path.dirname(os.path.realpath(__file__))}") + print(f"{args}".replace(", ", ",\n")) + + device = torch.device(args.device) + + # ---- Reproducibility + seed = args.seed + misc.get_rank() + torch.manual_seed(seed) + np.random.seed(seed) + cudnn.benchmark = True + + # ---- Build model + if args.model == "RETFound_mae": + model = models.__dict__[args.model]( + img_size=args.input_size, + num_classes=args.nb_classes, + drop_path_rate=args.drop_path, + global_pool=args.global_pool, + ) + else: + model = models.__dict__[args.model]( + num_classes=args.nb_classes, + drop_path_rate=args.drop_path, + args=args, + ) + + # ---- Load pre-trained weights (if requested and not eval-only) + if args.finetune and not args.eval: + print(f"Preparing to load pre-trained weights: {args.finetune}") + + if args.model in ["Dinov3", "Dinov2"]: + checkpoint_path = args.finetune # local path + elif args.model in ["RETFound_dinov2", "RETFound_mae"]: + print(f"Downloading pre-trained weights from Hugging Face Hub: {args.finetune}") + checkpoint_path = hf_hub_download( + repo_id=f"YukunZhou/{args.finetune}", + filename=f"{args.finetune}.pth", + ) + else: + raise ValueError( + f"Unsupported model '{args.model}'. 
" + f"Expected one of: Dinov3, Dinov2, RETFound_dinov2, RETFound_mae" + ) + + checkpoint = torch.load(checkpoint_path, map_location="cpu") + print(f"Loaded pre-trained checkpoint from: {checkpoint_path}") + + if args.model in ["Dinov3", "Dinov2"]: + checkpoint_model = checkpoint + elif args.model == "RETFound_dinov2": + checkpoint_model = checkpoint["teacher"] + else: # RETFound_mae + checkpoint_model = checkpoint["model"] + + # -- Key hygiene + checkpoint_model = {k.replace("backbone.", ""): v for k, v in checkpoint_model.items()} + checkpoint_model = {k.replace("mlp.w12.", "mlp.fc1."): v for k, v in checkpoint_model.items()} + checkpoint_model = {k.replace("mlp.w3.", "mlp.fc2."): v for k, v in checkpoint_model.items()} + + # -- Remove classifier if shape mismatched + state_dict = model.state_dict() + for k in ["head.weight", "head.bias"]: + if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape: + print(f"Removing key {k} from pretrained checkpoint") + del checkpoint_model[k] + + # -- Interpolate pos embed (ViT) + interpolate_pos_embed(model, checkpoint_model) + + # -- Load backbone weights (non-strict) + _ = model.load_state_dict(checkpoint_model, strict=False) + + # -- Re-init head + if hasattr(model, "head") and hasattr(model.head, "weight"): + trunc_normal_(model.head.weight, std=2e-5) + + # ---- Datasets & samplers + dataset_train = build_dataset(is_train="train", args=args) + dataset_val = build_dataset(is_train="val", args=args) + dataset_test = build_dataset(is_train="test", args=args) + + num_tasks = misc.get_world_size() + global_rank = misc.get_rank() + + if not args.eval: + sampler_train = torch.utils.data.DistributedSampler( + dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True + ) + print(f"Sampler_train = {sampler_train}") + if args.dist_eval: + if len(dataset_val) % num_tasks != 0: + print("Warning: dist eval with dataset not divisible by #procs; results may differ slightly.") + sampler_val = 
torch.utils.data.DistributedSampler( + dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=True + ) + else: + sampler_val = torch.utils.data.SequentialSampler(dataset_val) + + if args.dist_eval: + if len(dataset_test) % num_tasks != 0: + print("Warning: dist eval test set not divisible by #procs; results may differ slightly.") + sampler_test = torch.utils.data.DistributedSampler( + dataset_test, num_replicas=num_tasks, rank=global_rank, shuffle=True + ) + else: + sampler_test = torch.utils.data.SequentialSampler(dataset_test) + + # ---- Logging + if global_rank == 0 and args.log_dir is not None and not args.eval: + os.makedirs(args.log_dir, exist_ok=True) + log_writer = SummaryWriter(log_dir=os.path.join(args.log_dir, args.task)) + else: + log_writer = None + + # ---- DataLoaders + if not args.eval: + data_loader_train = torch.utils.data.DataLoader( + dataset_train, sampler=sampler_train, + batch_size=args.batch_size, num_workers=args.num_workers, + pin_memory=args.pin_mem, drop_last=True, + ) + print(f"len of train_set: {len(data_loader_train) * args.batch_size}") + + data_loader_val = torch.utils.data.DataLoader( + dataset_val, sampler=sampler_val, + batch_size=args.batch_size, num_workers=args.num_workers, + pin_memory=args.pin_mem, drop_last=False, + ) + + data_loader_test = torch.utils.data.DataLoader( + dataset_test, sampler=sampler_test, + batch_size=args.batch_size, num_workers=args.num_workers, + pin_memory=args.pin_mem, drop_last=False, + ) + + # ---- Mixup/CutMix + mixup_fn = None + mixup_active = (args.mixup > 0) or (args.cutmix > 0.) 
or (args.cutmix_minmax is not None) + if mixup_active: + print("Mixup is activated!") + mixup_fn = Mixup( + mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax, + prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode, + label_smoothing=args.smoothing, num_classes=args.nb_classes + ) + + # ---- Eval-only: resume weights + if args.resume and args.eval: + checkpoint = torch.load(args.resume, map_location="cpu") + print(f"Load checkpoint for eval from: {args.resume}") + model.load_state_dict(checkpoint["model"]) + + model.to(device) + model_without_ddp = model + + # ---- Adaptation toggle + if args.adaptation == "lp": + for name, param in model.named_parameters(): + param.requires_grad = ("head" in name) + print("[Adaptation] Linear probe: training classifier head only.") + else: + print("[Adaptation] Full fine-tuning: training all parameters.") + + # ---- Count trainable params + n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) + print(f"number of trainable params (M): {n_parameters / 1.e6:.2f}") + + # ---- LR scaling by effective batch size + eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size() + if args.lr is None: + args.lr = args.blr * eff_batch_size / 256 + print(f"base lr: {args.lr * 256 / eff_batch_size:.2e}") + print(f"actual lr: {args.lr:.2e}") + print(f"accumulate grad iterations: {args.accum_iter}") + print(f"effective batch size: {eff_batch_size}") + + # ---- DDP (if available) + if args.distributed and torch.cuda.device_count() > 1: + ddp_kwargs = {} + if args.adaptation == "lp": + ddp_kwargs["find_unused_parameters"] = True + model = torch.nn.parallel.DistributedDataParallel( + model, device_ids=[args.gpu], **ddp_kwargs + ) + model_without_ddp = model.module + else: + model_without_ddp = model # single-GPU + + # ---- Optimizer param groups (after freezing) + no_weight_decay = (model_without_ddp.no_weight_decay() + if hasattr(model_without_ddp, 
"no_weight_decay") else []) + + + param_groups = lrd.param_groups_lrd( + model_without_ddp, + weight_decay=args.weight_decay, + no_weight_decay_list=no_weight_decay, + layer_decay=args.layer_decay, + ) + for g in param_groups: + g["params"] = [p for p in g["params"] if p.requires_grad] + + optimizer = torch.optim.AdamW(param_groups, lr=args.lr) + loss_scaler = NativeScaler() + print(f"criterion = {criterion}") + + # ---- Load previous full state (optimizer, scaler, etc.) + misc.load_model(args=args, model_without_ddp=model_without_ddp, + optimizer=optimizer, loss_scaler=loss_scaler) + + # ========================= + # Eval-only Short Circuit + # ========================= + if args.eval: + if "checkpoint" in locals() and isinstance(checkpoint, dict) and ("epoch" in checkpoint): + print(f"Test with the best model at epoch = {checkpoint['epoch']}") + test_stats, auc_roc = evaluate( + data_loader_test, model, device, args, epoch=0, mode="test", + num_class=args.nb_classes, log_writer=log_writer + ) + return + + # ========================= + # Train Loop + # ========================= + print(f"Start training for {args.epochs} epochs") + start_time = time.time() + max_score = 0.0 + best_epoch = 0 + + for epoch in range(args.start_epoch, args.epochs): + if args.distributed: + data_loader_train.sampler.set_epoch(epoch) + + train_stats = train_one_epoch( + model, criterion, data_loader_train, + optimizer, device, epoch, loss_scaler, + args.clip_grad, mixup_fn, + log_writer=log_writer, args=args + ) + + val_stats, val_score = evaluate( + data_loader_val, model, device, args, epoch, mode="val", + num_class=args.nb_classes, log_writer=log_writer + ) + + if max_score < val_score: + max_score = val_score + best_epoch = epoch + if args.output_dir and args.savemodel: + misc.save_model( + args=args, model=model, model_without_ddp=model_without_ddp, + optimizer=optimizer, loss_scaler=loss_scaler, epoch=epoch, mode="best" + ) + print(f"Best epoch = {best_epoch}, Best score = 
{max_score:.4f}") + + if log_writer is not None: + log_writer.add_scalar("loss/val", val_stats["loss"], epoch) + log_writer.flush() + + log_stats = {**{f"train_{k}": v for k, v in train_stats.items()}, + "epoch": epoch, + "n_parameters": n_parameters} + + if args.output_dir and misc.is_main_process(): + with open(os.path.join(args.output_dir, args.task, "log.txt"), "a", encoding="utf-8") as f: + f.write(json.dumps(log_stats) + "\n") + + # ========================= + # Final Test (Best Ckpt) + # ========================= + ckpt_path = os.path.join(args.output_dir, args.task, "checkpoint-best.pth") + checkpoint = torch.load(ckpt_path, map_location="cpu") + model_without_ddp.load_state_dict(checkpoint["model"], strict=False) + model.to(device) + print(f"Test with the best model, epoch = {checkpoint.get('epoch', -1)}:") + _test_stats, _auc_roc = evaluate( + data_loader_test, model, device, args, -1, mode="test", + num_class=args.nb_classes, log_writer=None + ) + + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print(f"Training time {total_time_str}") + + +if __name__ == "__main__": + args = get_args_parser() + args = args.parse_args() + + criterion = torch.nn.CrossEntropyLoss() + + if args.output_dir: + Path(args.output_dir).mkdir(parents=True, exist_ok=True) + + main(args, criterion) diff --git a/main_segmentation.py b/main_segmentation.py new file mode 100644 index 00000000..3987e588 --- /dev/null +++ b/main_segmentation.py @@ -0,0 +1,122 @@ +import os +import argparse +import logging +import numpy as np +import torch +import torch.nn as nn +import torch.optim as optim +from torch.utils.data import Dataset, DataLoader +import cv2 +from albumentations import Compose, Resize, Normalize +from albumentations.pytorch import ToTensorV2 +from huggingface_hub import hf_hub_download +from util.pos_embed import interpolate_pos_embed + +from models_segmentation import RETFoundSegmentation +from engine_segmentation 
import ( + train_segmentation, + evaluate_segmentation, + combined_loss_fn, + compute_metrics, +) + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + + +# ========================= +# Dataset +# ========================= +class OCTDrusenDataset(Dataset): + def __init__(self, root, transform=None): + self.image_dir = os.path.join(root, "images") + self.mask_dir = os.path.join(root, "masks") + self.images = sorted(os.listdir(self.image_dir)) + self.transform = transform + + def __len__(self): + return len(self.images) + + def __getitem__(self, idx): + img = cv2.imread(os.path.join(self.image_dir, self.images[idx])) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + mask = cv2.imread(os.path.join(self.mask_dir, self.images[idx]), cv2.IMREAD_GRAYSCALE) + + if self.transform: + aug = self.transform(image=img, mask=mask) + img, mask = aug["image"], aug["mask"] + + return img, mask.long() + + +# ========================= +# Main +# ========================= +def main(): + parser = argparse.ArgumentParser("RETFound Segmentation") + parser.add_argument("--data_path", type=str, required=True) + parser.add_argument("--epochs", type=int, default=50) + parser.add_argument("--batch_size", type=int, default=4) + parser.add_argument("--lr", type=float, default=1e-4) + parser.add_argument("--img_size", type=int, default=512) + parser.add_argument("--patch_size", type=int, default=16) + parser.add_argument("--drop_path", type=float, default=0.2) + parser.add_argument("--finetune", type=str, default="") + parser.add_argument("--output_dir", type=str, default="./segmentation_output") + parser.add_argument("--dice_weight", type=float, default=1.0) + parser.add_argument("--ce_weight", type=str, default="0.3,0.7") + args = parser.parse_args() + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + os.makedirs(args.output_dir, exist_ok=True) + + transform = Compose([ + Resize(args.img_size, args.img_size), + Normalize((0.485,0.456,0.406), 
(0.229,0.224,0.225)), + ToTensorV2() + ]) + + train_ds = OCTDrusenDataset(os.path.join(args.data_path, "train"), transform) + val_ds = OCTDrusenDataset(os.path.join(args.data_path, "val"), transform) + test_ds = OCTDrusenDataset(os.path.join(args.data_path, "test"), transform) + + train_loader = DataLoader(train_ds, args.batch_size, shuffle=True, num_workers=4) + val_loader = DataLoader(val_ds, args.batch_size, shuffle=False, num_workers=4) + test_loader = DataLoader(test_ds, args.batch_size, shuffle=False, num_workers=4) + + model = RETFoundSegmentation(args.img_size, args.patch_size).to(device) + + if args.finetune: + ckpt = hf_hub_download(f"YukunZhou/{args.finetune}", f"{args.finetune}.pth") + state = torch.load(ckpt, map_location="cpu") + state = state["model"] if "model" in state else state + interpolate_pos_embed(model.encoder, state) + model.encoder.load_state_dict(state, strict=False) + + ce_weights = torch.tensor([float(x) for x in args.ce_weight.split(",")]).to(device) + ce_loss = nn.CrossEntropyLoss(weight=ce_weights) + + def loss_fn(out, tgt): + return combined_loss_fn(out, tgt, ce_loss, args.dice_weight) + + opt = optim.AdamW(model.parameters(), lr=args.lr) + + best = 1e9 + for e in range(args.epochs): + train_loss = train_segmentation(model, train_loader, loss_fn, opt, device) + val_loss, P, T = evaluate_segmentation(model, val_loader, loss_fn, device) + acc, dice, iou = compute_metrics(P, T) + + print(f"Epoch {e+1}: Train={train_loss:.4f} Val={val_loss:.4f} Dice={dice:.4f} IoU={iou:.4f}") + + if val_loss < best: + best = val_loss + torch.save(model.state_dict(), f"{args.output_dir}/best.pth") + + test_loss, P, T = evaluate_segmentation(model, test_loader, loss_fn, device) + acc, dice, iou = compute_metrics(P, T) + print(f"Test: Loss={test_loss:.4f} Dice={dice:.4f} IoU={iou:.4f}") + + +if __name__ == "__main__": + main() diff --git a/models_segmentation.py b/models_segmentation.py new file mode 100644 index 00000000..2bfe9132 --- /dev/null +++ 
import torch
import torch.nn as nn
import torch.nn.functional as F
from models_vit import RETFound_mae


class SegmentationHead(nn.Module):
    """Decode ViT patch tokens into a dense per-pixel class map.

    Reshapes the [B, N, C] token sequence into a C-channel feature grid,
    upsamples it by `patch_size` back to image resolution, and applies a
    small conv stack to produce per-class logits.
    """

    def __init__(self, hidden_dim, num_classes, img_size, patch_size):
        super().__init__()
        self.patch_size = patch_size
        # Token grid resolution; assumes square inputs divisible by patch_size.
        self.h = img_size // patch_size
        self.w = img_size // patch_size
        self.conv = nn.Sequential(
            nn.Conv2d(hidden_dim, hidden_dim // 2, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(hidden_dim // 2, num_classes, 1),
        )

    def forward(self, x):
        # x: [B, N, C] patch tokens (CLS token already stripped by the caller).
        B, N, C = x.shape
        # ROBUSTNESS: a mismatched token count would silently mis-reshape the
        # grid (or crash deeper down); fail fast with a clear message instead.
        if N != self.h * self.w:
            raise ValueError(f"expected {self.h * self.w} patch tokens, got {N}")
        x = x.reshape(B, self.h, self.w, C).permute(0, 3, 1, 2)
        # Upsample the token grid back to full image resolution.
        x = F.interpolate(x, scale_factor=self.patch_size, mode="bilinear", align_corners=False)
        return self.conv(x)


class RETFoundSegmentation(nn.Module):
    """RETFound ViT-L encoder + convolutional segmentation head.

    Runs the encoder manually (patch embed → +CLS → +pos embed → blocks →
    norm) so the full token sequence is available, then decodes the patch
    tokens (CLS excluded) into a dense prediction.
    """

    def __init__(self, img_size=512, patch_size=16, hidden_dim=1024, num_classes=2, drop_path=0.2):
        super().__init__()
        # global_pool=False keeps encoder.norm, which forward() relies on.
        self.encoder = RETFound_mae(img_size=img_size, num_classes=num_classes,
                                    drop_path_rate=drop_path, global_pool=False)
        self.seg_head = SegmentationHead(hidden_dim, num_classes, img_size, patch_size)

    def forward(self, x):
        B = x.size(0)
        x = self.encoder.patch_embed(x)
        cls = self.encoder.cls_token.expand(B, -1, -1)
        x = torch.cat((cls, x), dim=1)
        x = x + self.encoder.pos_embed
        x = self.encoder.pos_drop(x)
        for blk in self.encoder.blocks:
            x = blk(x)
        x = self.encoder.norm(x)
        # Drop the CLS token; the head only needs the patch tokens.
        return self.seg_head(x[:, 1:])
from functools import partial

import timm.models.vision_transformer
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from timm.models.layers import trunc_normal_


class VisionTransformer(timm.models.vision_transformer.VisionTransformer):
    """Vision Transformer with support for global average pooling."""

    def __init__(self, global_pool=False, **kwargs):
        super(VisionTransformer, self).__init__(**kwargs)

        self.global_pool = global_pool
        if self.global_pool:
            # Replace the final norm with one applied after mean-pooling.
            self.fc_norm = kwargs['norm_layer'](kwargs['embed_dim'])
            del self.norm  # remove the original norm

    def forward_features(self, x):
        batch = x.shape[0]
        tokens = self.patch_embed(x)

        # Prepend the learned CLS token, then add positional embeddings.
        cls_tokens = self.cls_token.expand(batch, -1, -1)
        tokens = torch.cat((cls_tokens, tokens), dim=1)
        tokens = self.pos_drop(tokens + self.pos_embed)

        for blk in self.blocks:
            tokens = blk(tokens)

        if self.global_pool:
            # Mean over patch tokens only (CLS excluded), keepdim preserved.
            pooled = tokens[:, 1:, :].mean(dim=1, keepdim=True)
            return self.fc_norm(pooled)
        return self.norm(tokens)[:, 0]


def RETFound_mae(**kwargs):
    """RETFound ViT-L/16 backbone (MAE-pretrained configuration)."""
    return VisionTransformer(
        patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)


# Mapping from CLI arch names to timm model identifiers.
_DINOV2_ARCHS = {
    'dinov2_vits14': 'vit_small_patch14_dinov2.lvd142m',
    'dinov2_vitb14': 'vit_base_patch14_dinov2.lvd142m',
    'dinov2_vitl14': 'vit_large_patch14_dinov2.lvd142m',
    'dinov2_vitg14': 'vit_giant_patch14_dinov2.lvd142m',
}


def Dinov2(args, **kwargs):
    """Build a pretrained DINOv2 backbone selected by args.model_arch."""
    arch = _DINOV2_ARCHS.get(args.model_arch)
    if arch is None:
        raise ValueError(f"Unknown model_arch '{args.model_arch}'. "
                         f"Expected one of: dinov2_vits14, dinov2_vitb14, dinov2_vitl14, dinov2_vitg14")
    return timm.create_model(arch, pretrained=True, img_size=224, **kwargs)


def RETFound_dinov2(args, **kwargs):
    """RETFound DINOv2 variant: pretrained ViT-L/14 at 224px."""
    return timm.create_model(
        'vit_large_patch14_dinov2.lvd142m',
        pretrained=True,
        img_size=224,
        **kwargs
    )


def Dinov3(args, **kwargs):
    # Load ViT backbone from torch hub (hub model has `head = Identity` by default).
    model = torch.hub.load(
        repo_or_dir="facebookresearch/dinov3",
        model=args.model_arch,
        pretrained=False,  # main() will load your checkpoint
        trust_repo=True,
    )

    # Figure out feature dimension for the probe.
    feat_dim = getattr(model, "embed_dim", None) or getattr(model, "num_features", None)
    model.head = nn.Linear(feat_dim, args.nb_classes)
    trunc_normal_(model.head.weight, std=2e-5)
    if model.head.bias is not None:
        nn.init.zeros_(model.head.bias)

    return model
import os
import torch
from torch.utils.data import Subset
from torchvision import datasets, transforms
from timm.data import create_transform
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD


def build_dataset(is_train, args):
    """Build an ImageFolder dataset for the split named by `is_train`.

    For the 'train' split, an optional fraction (args.dataratio) is kept,
    either uniformly at random or class-stratified (args.stratified), with
    reproducibility controlled by args.seed.
    """
    dataset = datasets.ImageFolder(
        os.path.join(args.data_path, is_train),
        transform=build_transform(is_train, args),
    )

    if is_train != 'train':
        return dataset

    ratio = float(getattr(args, "dataratio", 1.0))
    if not (0.0 < ratio < 1.0):
        return dataset

    seed = int(getattr(args, "seed", 0))
    if bool(getattr(args, "stratified", False)):
        idx = _stratified_indices(dataset.targets, ratio, seed)
    else:
        # Simple uniform subsample; seeded generator keeps it reproducible.
        gen = torch.Generator().manual_seed(seed)
        total = len(dataset)
        keep = max(1, int(total * ratio))
        idx = torch.randperm(total, generator=gen)[:keep].tolist()
    return Subset(dataset, idx)


def build_transform(is_train, args):
    """Return the timm training transform or the standard eval pipeline."""
    mean, std = IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD

    if is_train == 'train':
        return create_transform(
            input_size=args.input_size,
            is_training=True,
            color_jitter=args.color_jitter,
            auto_augment=args.aa,
            interpolation='bicubic',
            re_prob=args.reprob,
            re_mode=args.remode,
            re_count=args.recount,
            mean=mean,
            std=std,
        )

    # Eval: resize with the conventional 224/256 crop ratio, then center-crop.
    crop_pct = 224 / 256 if args.input_size <= 224 else 1.0
    resize_to = int(args.input_size / crop_pct)
    return transforms.Compose([
        transforms.Resize(resize_to, interpolation=transforms.InterpolationMode.BICUBIC),
        transforms.CenterCrop(args.input_size),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])


# ---- helpers ----

def _stratified_indices(targets, ratio: float, seed: int):
    """Maintain class proportions. Ensures at least 1 sample per class when possible."""
    labels = torch.as_tensor(targets)
    gen = torch.Generator().manual_seed(seed)

    keep = []
    for cls in torch.unique(labels).tolist():
        cls_idx = torch.nonzero(labels == cls, as_tuple=False).view(-1)
        if len(cls_idx) == 0:
            continue
        take = max(1, int(round(len(cls_idx) * ratio)))
        chosen = cls_idx[torch.randperm(len(cls_idx), generator=gen)[:take]]
        keep.extend(chosen.tolist())

    # Shuffle final indices (stable across seed).
    shuffler = torch.Generator().manual_seed(seed + 1)
    order = torch.randperm(len(keep), generator=shuffler)
    return torch.tensor(keep)[order].tolist()
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# Partly revised by YZ @UCL&Moorfields
# --------------------------------------------------------

import json


def param_groups_lrd(model, weight_decay=0.05, no_weight_decay_list=(), layer_decay=.75):
    """
    Parameter groups for layer-wise lr decay
    Following BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L58

    Returns a list of param-group dicts, one per (layer, decay/no-decay)
    combination, each carrying an "lr_scale" of layer_decay**(depth - layer).

    FIX: `no_weight_decay_list` default was a mutable `[]` (shared across
    calls); an empty tuple is equivalent for membership tests and safe.
    """
    param_group_names = {}
    param_groups = {}

    if hasattr(model, 'blocks'):
        num_layers = len(model.blocks) + 1
    else:
        # use the number of layers in the ResNet model as a default value
        num_layers = len(model.layer1) + len(model.layer2) + len(model.layer3) + len(model.layer4) + 1

    layer_scales = list(layer_decay ** (num_layers - i) for i in range(num_layers + 1))

    for n, p in model.named_parameters():
        if not p.requires_grad:
            continue

        # no decay: all 1D parameters and model specific ones
        if p.ndim == 1 or n in no_weight_decay_list:
            g_decay = "no_decay"
            this_decay = 0.
        else:
            g_decay = "decay"
            this_decay = weight_decay

        layer_id = get_layer_id_for_vit(n, num_layers)
        group_name = "layer_%d_%s" % (layer_id, g_decay)

        if group_name not in param_group_names:
            this_scale = layer_scales[layer_id]

            param_group_names[group_name] = {
                "lr_scale": this_scale,
                "weight_decay": this_decay,
                "params": [],
            }
            param_groups[group_name] = {
                "lr_scale": this_scale,
                "weight_decay": this_decay,
                "params": [],
            }

        param_group_names[group_name]["params"].append(n)
        param_groups[group_name]["params"].append(p)

    # print("parameter groups: \n%s" % json.dumps(param_group_names, indent=2))

    return list(param_groups.values())


def get_layer_id_for_vit(name, num_layers):
    """
    Assign a parameter with its layer id
    Following BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L33
    """
    # Embeddings belong to layer 0; transformer block i maps to layer i+1;
    # everything else (e.g. the head) gets the final layer id.
    if name in ['cls_token', 'pos_embed']:
        return 0
    elif name.startswith('patch_embed'):
        return 0
    elif name.startswith('blocks'):
        return int(name.split('.')[1]) + 1
    else:
        return num_layers
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# Partly revised by YZ @UCL&Moorfields
# --------------------------------------------------------

import math

def adjust_learning_rate(optimizer, epoch, args):
    """Decay the learning rate with half-cycle cosine after warmup"""
    if epoch < args.warmup_epochs:
        # Linear warmup from 0 up to the base lr.
        lr = args.lr * epoch / args.warmup_epochs
    else:
        # Half-cycle cosine from args.lr down to args.min_lr.
        progress = (epoch - args.warmup_epochs) / (args.epochs - args.warmup_epochs)
        lr = args.min_lr + (args.lr - args.min_lr) * 0.5 * (1. + math.cos(math.pi * progress))
    for group in optimizer.param_groups:
        # Groups built by param_groups_lrd carry a per-layer "lr_scale".
        scale = group["lr_scale"] if "lr_scale" in group else None
        group["lr"] = lr if scale is None else lr * scale
    return lr
- """ - - def __init__(self, window_size=20, fmt=None): - if fmt is None: - fmt = "{median:.4f} ({global_avg:.4f})" - self.deque = deque(maxlen=window_size) - self.total = 0.0 - self.count = 0 - self.fmt = fmt - - def update(self, value, n=1): - self.deque.append(value) - self.count += n - self.total += value * n - - def synchronize_between_processes(self): - """ - Warning: does not synchronize the deque! - """ - if not is_dist_avail_and_initialized(): - return - t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') - dist.barrier() - dist.all_reduce(t) - t = t.tolist() - self.count = int(t[0]) - self.total = t[1] - - @property - def median(self): - d = torch.tensor(list(self.deque)) - return d.median().item() - - @property - def avg(self): - d = torch.tensor(list(self.deque), dtype=torch.float32) - return d.mean().item() - - @property - def global_avg(self): - return self.total / self.count - - @property - def max(self): - return max(self.deque) - - @property - def value(self): - return self.deque[-1] - - def __str__(self): - return self.fmt.format( - median=self.median, - avg=self.avg, - global_avg=self.global_avg, - max=self.max, - value=self.value) - - -class MetricLogger(object): - def __init__(self, delimiter="\t"): - self.meters = defaultdict(SmoothedValue) - self.delimiter = delimiter - - def update(self, **kwargs): - for k, v in kwargs.items(): - if v is None: - continue - if isinstance(v, torch.Tensor): - v = v.item() - assert isinstance(v, (float, int)) - self.meters[k].update(v) - - def __getattr__(self, attr): - if attr in self.meters: - return self.meters[attr] - if attr in self.__dict__: - return self.__dict__[attr] - raise AttributeError("'{}' object has no attribute '{}'".format( - type(self).__name__, attr)) - - def __str__(self): - loss_str = [] - for name, meter in self.meters.items(): - loss_str.append( - "{}: {}".format(name, str(meter)) - ) - return self.delimiter.join(loss_str) - - def 
synchronize_between_processes(self): - for meter in self.meters.values(): - meter.synchronize_between_processes() - - def add_meter(self, name, meter): - self.meters[name] = meter - - def log_every(self, iterable, print_freq, header=None): - i = 0 - if not header: - header = '' - start_time = time.time() - end = time.time() - iter_time = SmoothedValue(fmt='{avg:.4f}') - data_time = SmoothedValue(fmt='{avg:.4f}') - space_fmt = ':' + str(len(str(len(iterable)))) + 'd' - log_msg = [ - header, - '[{0' + space_fmt + '}/{1}]', - 'eta: {eta}', - '{meters}', - 'time: {time}', - 'data: {data}' - ] - if torch.cuda.is_available(): - log_msg.append('max mem: {memory:.0f}') - log_msg = self.delimiter.join(log_msg) - MB = 1024.0 * 1024.0 - for obj in iterable: - data_time.update(time.time() - end) - yield obj - iter_time.update(time.time() - end) - if i % print_freq == 0 or i == len(iterable) - 1: - eta_seconds = iter_time.global_avg * (len(iterable) - i) - eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) - if torch.cuda.is_available(): - print(log_msg.format( - i, len(iterable), eta=eta_string, - meters=str(self), - time=str(iter_time), data=str(data_time), - memory=torch.cuda.max_memory_allocated() / MB)) - else: - print(log_msg.format( - i, len(iterable), eta=eta_string, - meters=str(self), - time=str(iter_time), data=str(data_time))) - i += 1 - end = time.time() - total_time = time.time() - start_time - total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print('{} Total time: {} ({:.4f} s / it)'.format( - header, total_time_str, total_time / len(iterable))) - - -def setup_for_distributed(is_master): - """ - This function disables printing when not in master process - """ - builtin_print = builtins.print - - def print(*args, **kwargs): - force = kwargs.pop('force', False) - force = force or (get_world_size() > 8) - if is_master or force: - now = datetime.datetime.now().time() - builtin_print('[{}] '.format(now), end='') # print with time stamp - 
builtin_print(*args, **kwargs) - - builtins.print = print - - -def is_dist_avail_and_initialized(): - if not dist.is_available(): - return False - if not dist.is_initialized(): - return False - return True - - -def get_world_size(): - if not is_dist_avail_and_initialized(): - return 1 - return dist.get_world_size() - - -def get_rank(): - if not is_dist_avail_and_initialized(): - return 0 - return dist.get_rank() - - -def is_main_process(): - return get_rank() == 0 - - -def save_on_master(*args, **kwargs): - if is_main_process(): - torch.save(*args, **kwargs) - - -def init_distributed_mode(args): - if args.dist_on_itp: - args.rank = int(os.environ['OMPI_COMM_WORLD_RANK']) - args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE']) - args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK']) - args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT']) - os.environ['LOCAL_RANK'] = str(args.gpu) - os.environ['RANK'] = str(args.rank) - os.environ['WORLD_SIZE'] = str(args.world_size) - # ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"] - elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: - args.rank = int(os.environ["RANK"]) - args.world_size = int(os.environ['WORLD_SIZE']) - args.gpu = int(os.environ['LOCAL_RANK']) - elif 'SLURM_PROCID' in os.environ: - args.rank = int(os.environ['SLURM_PROCID']) - args.gpu = args.rank % torch.cuda.device_count() - else: - print('Not using distributed mode') - setup_for_distributed(is_master=True) # hack - args.distributed = False - return - - args.distributed = True - - torch.cuda.set_device(args.gpu) - args.dist_backend = 'nccl' - print('| distributed init (rank {}): {}, gpu {}'.format( - args.rank, args.dist_url, args.gpu), flush=True) - torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, - world_size=args.world_size, rank=args.rank) - torch.distributed.barrier() - setup_for_distributed(args.rank == 0) - - -class NativeScalerWithGradNormCount: - 
state_dict_key = "amp_scaler" - - def __init__(self): - self._scaler = torch.cuda.amp.GradScaler() - - def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True): - self._scaler.scale(loss).backward(create_graph=create_graph) - if update_grad: - if clip_grad is not None: - assert parameters is not None - self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place - norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad) - else: - self._scaler.unscale_(optimizer) - norm = get_grad_norm_(parameters) - self._scaler.step(optimizer) - self._scaler.update() - else: - norm = None - return norm - - def state_dict(self): - return self._scaler.state_dict() - - def load_state_dict(self, state_dict): - self._scaler.load_state_dict(state_dict) - - -def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor: - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = [p for p in parameters if p.grad is not None] - norm_type = float(norm_type) - if len(parameters) == 0: - return torch.tensor(0.) 
- device = parameters[0].grad.device - if norm_type == inf: - total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters) - else: - total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), - norm_type) - return total_norm - - -def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, mode): - output_dir = Path(args.output_dir) - epoch_name = str(epoch) - os.makedirs(os.path.join(args.output_dir, args.task), exist_ok=True) - if loss_scaler is not None: - if mode == 'best': - checkpoint_paths = [os.path.join(args.output_dir, args.task, 'checkpoint-best.pth')] - else: - checkpoint_paths = [os.path.join(args.output_dir, args.task, 'checkpoint-latest.pth')] - for checkpoint_path in checkpoint_paths: - if mode == 'best': - to_save = { - 'model': model_without_ddp.state_dict(), - 'epoch': epoch, - 'args': args, } - else: - if epoch == args.epochs - 1: - to_save = { - 'model': model_without_ddp.state_dict(), - 'args': args, } - else: - to_save = { - 'model': model_without_ddp.state_dict(), - 'optimizer': optimizer.state_dict(), - 'epoch': epoch, - 'scaler': loss_scaler.state_dict(), - 'args': args, - } - - save_on_master(to_save, checkpoint_path) - else: - if mode == 'best': - to_save = { - 'model': model_without_ddp.state_dict(), - 'epoch': epoch, } - torch.save(to_save, os.path.join(args.output_dir, args.task, "checkpoint-best.pth")) - else: - if epoch == args.epochs - 1: - to_save = { - 'model': model_without_ddp.state_dict(), } - else: - to_save = { - 'model': model_without_ddp.state_dict(), - 'optimizer': optimizer.state_dict(), - 'epoch': epoch, - 'args': args, - } - torch.save(to_save, os.path.join(args.output_dir, args.task, "checkpoint-latest.pth")) - - -def load_model(args, model_without_ddp, optimizer, loss_scaler): - if args.resume: - if args.resume.startswith('https'): - checkpoint = torch.hub.load_state_dict_from_url( - args.resume, map_location='cpu', check_hash=True) - 
else: - checkpoint = torch.load(args.resume, map_location='cpu') - if 'model' in checkpoint: - checkpoint_model = checkpoint['model'] - else: - checkpoint_model = checkpoint - model_without_ddp.load_state_dict(checkpoint_model, strict=False) - print("Resume checkpoint %s" % args.resume) - if 'optimizer' in checkpoint and 'epoch' in checkpoint and not (hasattr(args, 'eval') and args.eval): - optimizer.load_state_dict(checkpoint['optimizer']) - args.start_epoch = checkpoint['epoch'] + 1 - if 'scaler' in checkpoint: - loss_scaler.load_state_dict(checkpoint['scaler']) - print("With optim & sched!") - - -def all_reduce_mean(x): - world_size = get_world_size() - if world_size > 1: - x_reduce = torch.tensor(x).cuda() - dist.all_reduce(x_reduce) - x_reduce /= world_size - return x_reduce.item() - else: - return x +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# Partly revised by YZ @UCL&Moorfields +# -------------------------------------------------------- + +import builtins +import datetime +import os +import time +from collections import defaultdict, deque +from pathlib import Path + +import torch +import torch.distributed as dist +from math import inf + + +class SmoothedValue(object): + """Track a series of values and provide access to smoothed values over a + window or the global series average. + """ + + def __init__(self, window_size=20, fmt=None): + if fmt is None: + fmt = "{median:.4f} ({global_avg:.4f})" + self.deque = deque(maxlen=window_size) + self.total = 0.0 + self.count = 0 + self.fmt = fmt + + def update(self, value, n=1): + self.deque.append(value) + self.count += n + self.total += value * n + + def synchronize_between_processes(self): + """ + Warning: does not synchronize the deque! 
+ """ + if not is_dist_avail_and_initialized(): + return + t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') + dist.barrier() + dist.all_reduce(t) + t = t.tolist() + self.count = int(t[0]) + self.total = t[1] + + @property + def median(self): + d = torch.tensor(list(self.deque)) + return d.median().item() + + @property + def avg(self): + d = torch.tensor(list(self.deque), dtype=torch.float32) + return d.mean().item() + + @property + def global_avg(self): + return self.total / self.count + + @property + def max(self): + return max(self.deque) + + @property + def value(self): + return self.deque[-1] + + def __str__(self): + return self.fmt.format( + median=self.median, + avg=self.avg, + global_avg=self.global_avg, + max=self.max, + value=self.value) + + +class MetricLogger(object): + def __init__(self, delimiter="\t"): + self.meters = defaultdict(SmoothedValue) + self.delimiter = delimiter + + def update(self, **kwargs): + for k, v in kwargs.items(): + if v is None: + continue + if isinstance(v, torch.Tensor): + v = v.item() + assert isinstance(v, (float, int)) + self.meters[k].update(v) + + def __getattr__(self, attr): + if attr in self.meters: + return self.meters[attr] + if attr in self.__dict__: + return self.__dict__[attr] + raise AttributeError("'{}' object has no attribute '{}'".format( + type(self).__name__, attr)) + + def __str__(self): + loss_str = [] + for name, meter in self.meters.items(): + loss_str.append( + "{}: {}".format(name, str(meter)) + ) + return self.delimiter.join(loss_str) + + def synchronize_between_processes(self): + for meter in self.meters.values(): + meter.synchronize_between_processes() + + def add_meter(self, name, meter): + self.meters[name] = meter + + def log_every(self, iterable, print_freq, header=None): + i = 0 + if not header: + header = '' + start_time = time.time() + end = time.time() + iter_time = SmoothedValue(fmt='{avg:.4f}') + data_time = SmoothedValue(fmt='{avg:.4f}') + space_fmt = ':' + 
str(len(str(len(iterable)))) + 'd' + log_msg = [ + header, + '[{0' + space_fmt + '}/{1}]', + 'eta: {eta}', + '{meters}', + 'time: {time}', + 'data: {data}' + ] + if torch.cuda.is_available(): + log_msg.append('max mem: {memory:.0f}') + log_msg = self.delimiter.join(log_msg) + MB = 1024.0 * 1024.0 + for obj in iterable: + data_time.update(time.time() - end) + yield obj + iter_time.update(time.time() - end) + if i % print_freq == 0 or i == len(iterable) - 1: + eta_seconds = iter_time.global_avg * (len(iterable) - i) + eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) + if torch.cuda.is_available(): + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time), + memory=torch.cuda.max_memory_allocated() / MB)) + else: + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time))) + i += 1 + end = time.time() + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('{} Total time: {} ({:.4f} s / it)'.format( + header, total_time_str, total_time / len(iterable))) + + +def setup_for_distributed(is_master): + """ + This function disables printing when not in master process + """ + builtin_print = builtins.print + + def print(*args, **kwargs): + force = kwargs.pop('force', False) + force = force or (get_world_size() > 8) + if is_master or force: + now = datetime.datetime.now().time() + builtin_print('[{}] '.format(now), end='') # print with time stamp + builtin_print(*args, **kwargs) + + builtins.print = print + + +def is_dist_avail_and_initialized(): + if not dist.is_available(): + return False + if not dist.is_initialized(): + return False + return True + + +def get_world_size(): + if not is_dist_avail_and_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank(): + if not is_dist_avail_and_initialized(): + return 0 + return dist.get_rank() + + +def 
is_main_process(): + return get_rank() == 0 + + +def save_on_master(*args, **kwargs): + if is_main_process(): + torch.save(*args, **kwargs) + + +def init_distributed_mode(args): + if args.dist_on_itp: + args.rank = int(os.environ['OMPI_COMM_WORLD_RANK']) + args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE']) + args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK']) + args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT']) + os.environ['LOCAL_RANK'] = str(args.gpu) + os.environ['RANK'] = str(args.rank) + os.environ['WORLD_SIZE'] = str(args.world_size) + # ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"] + elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: + args.rank = int(os.environ["RANK"]) + args.world_size = int(os.environ['WORLD_SIZE']) + args.gpu = int(os.environ['LOCAL_RANK']) + elif 'SLURM_PROCID' in os.environ: + args.rank = int(os.environ['SLURM_PROCID']) + args.gpu = args.rank % torch.cuda.device_count() + else: + print('Not using distributed mode') + setup_for_distributed(is_master=True) # hack + args.distributed = False + return + + args.distributed = True + + torch.cuda.set_device(args.gpu) + args.dist_backend = 'nccl' + print('| distributed init (rank {}): {}, gpu {}'.format( + args.rank, args.dist_url, args.gpu), flush=True) + torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, + world_size=args.world_size, rank=args.rank) + torch.distributed.barrier() + setup_for_distributed(args.rank == 0) + + +class NativeScalerWithGradNormCount: + state_dict_key = "amp_scaler" + + def __init__(self): + self._scaler = torch.cuda.amp.GradScaler() + + def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True): + self._scaler.scale(loss).backward(create_graph=create_graph) + if update_grad: + if clip_grad is not None: + assert parameters is not None + self._scaler.unscale_(optimizer) # unscale the gradients of 
optimizer's assigned params in-place + norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad) + else: + self._scaler.unscale_(optimizer) + norm = get_grad_norm_(parameters) + self._scaler.step(optimizer) + self._scaler.update() + else: + norm = None + return norm + + def state_dict(self): + return self._scaler.state_dict() + + def load_state_dict(self, state_dict): + self._scaler.load_state_dict(state_dict) + + +def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor: + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + parameters = [p for p in parameters if p.grad is not None] + norm_type = float(norm_type) + if len(parameters) == 0: + return torch.tensor(0.) + device = parameters[0].grad.device + if norm_type == inf: + total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters) + else: + total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), + norm_type) + return total_norm + + +def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, mode): + output_dir = Path(args.output_dir) + epoch_name = str(epoch) + os.makedirs(os.path.join(args.output_dir, args.task), exist_ok=True) + if loss_scaler is not None: + if mode == 'best': + checkpoint_paths = [os.path.join(args.output_dir, args.task, 'checkpoint-best.pth')] + else: + checkpoint_paths = [os.path.join(args.output_dir, args.task, 'checkpoint-latest.pth')] + for checkpoint_path in checkpoint_paths: + if mode == 'best': + to_save = { + 'model': model_without_ddp.state_dict(), + 'epoch': epoch, + 'args': args, } + else: + if epoch == args.epochs - 1: + to_save = { + 'model': model_without_ddp.state_dict(), + 'args': args, } + else: + to_save = { + 'model': model_without_ddp.state_dict(), + 'optimizer': optimizer.state_dict(), + 'epoch': epoch, + 'scaler': loss_scaler.state_dict(), + 'args': args, + } + + save_on_master(to_save, checkpoint_path) + else: + if mode == 'best': + to_save = { 
+ 'model': model_without_ddp.state_dict(), + 'epoch': epoch, } + torch.save(to_save, os.path.join(args.output_dir, args.task, "checkpoint-best.pth")) + else: + if epoch == args.epochs - 1: + to_save = { + 'model': model_without_ddp.state_dict(), } + else: + to_save = { + 'model': model_without_ddp.state_dict(), + 'optimizer': optimizer.state_dict(), + 'epoch': epoch, + 'args': args, + } + torch.save(to_save, os.path.join(args.output_dir, args.task, "checkpoint-latest.pth")) + + +def load_model(args, model_without_ddp, optimizer, loss_scaler): + if args.resume: + if args.resume.startswith('https'): + checkpoint = torch.hub.load_state_dict_from_url( + args.resume, map_location='cpu', check_hash=True) + else: + checkpoint = torch.load(args.resume, map_location='cpu') + if 'model' in checkpoint: + checkpoint_model = checkpoint['model'] + else: + checkpoint_model = checkpoint + model_without_ddp.load_state_dict(checkpoint_model, strict=False) + print("Resume checkpoint %s" % args.resume) + if 'optimizer' in checkpoint and 'epoch' in checkpoint and not (hasattr(args, 'eval') and args.eval): + optimizer.load_state_dict(checkpoint['optimizer']) + args.start_epoch = checkpoint['epoch'] + 1 + if 'scaler' in checkpoint: + loss_scaler.load_state_dict(checkpoint['scaler']) + print("With optim & sched!") + + +def all_reduce_mean(x): + world_size = get_world_size() + if world_size > 1: + x_reduce = torch.tensor(x).cuda() + dist.all_reduce(x_reduce) + x_reduce /= world_size + return x_reduce.item() + else: + return x diff --git a/util/pos_embed.py b/util/pos_embed.py index 4652ff22..11f7128b 100644 --- a/util/pos_embed.py +++ b/util/pos_embed.py @@ -1,92 +1,92 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. 
-# Partly revised by YZ @UCL&Moorfields -# -------------------------------------------------------- - -import numpy as np - -import torch - -# -------------------------------------------------------- -# 2D sine-cosine position embedding -# References: -# Transformer: https://github.com/tensorflow/models/blob/master/official/nlp/transformer/model_utils.py -# MoCo v3: https://github.com/facebookresearch/moco-v3 -# -------------------------------------------------------- -def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False): - """ - grid_size: int of the grid height and width - return: - pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token) - """ - grid_h = np.arange(grid_size, dtype=np.float32) - grid_w = np.arange(grid_size, dtype=np.float32) - grid = np.meshgrid(grid_w, grid_h) # here w goes first - grid = np.stack(grid, axis=0) - - grid = grid.reshape([2, 1, grid_size, grid_size]) - pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid) - if cls_token: - pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0) - return pos_embed - - -def get_2d_sincos_pos_embed_from_grid(embed_dim, grid): - assert embed_dim % 2 == 0 - - # use half of dimensions to encode grid_h - emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2) - emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2) - - emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D) - return emb - - -def get_1d_sincos_pos_embed_from_grid(embed_dim, pos): - """ - embed_dim: output dimension for each position - pos: a list of positions to be encoded: size (M,) - out: (M, D) - """ - assert embed_dim % 2 == 0 - omega = np.arange(embed_dim // 2, dtype=np.float) - omega /= embed_dim / 2. - omega = 1. 
/ 10000**omega # (D/2,) - - pos = pos.reshape(-1) # (M,) - out = np.einsum('m,d->md', pos, omega) # (M, D/2), outer product - - emb_sin = np.sin(out) # (M, D/2) - emb_cos = np.cos(out) # (M, D/2) - - emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D) - return emb - - -# -------------------------------------------------------- -# Interpolate position embeddings for high-resolution -# References: -# DeiT: https://github.com/facebookresearch/deit -# -------------------------------------------------------- -def interpolate_pos_embed(model, checkpoint_model): - if 'pos_embed' in checkpoint_model: - pos_embed_checkpoint = checkpoint_model['pos_embed'] - embedding_size = pos_embed_checkpoint.shape[-1] - num_patches = model.patch_embed.num_patches - num_extra_tokens = model.pos_embed.shape[-2] - num_patches - # height (== width) for the checkpoint position embedding - orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5) - # height (== width) for the new position embedding - new_size = int(num_patches ** 0.5) - # class_token and dist_token are kept unchanged - if orig_size != new_size: - print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size)) - extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] - # only the position tokens are interpolated - pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] - pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) - pos_tokens = torch.nn.functional.interpolate( - pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False) - pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) - new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) - checkpoint_model['pos_embed'] = new_pos_embed +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
+# Partly revised by YZ @UCL&Moorfields +# -------------------------------------------------------- + +import numpy as np + +import torch + +# -------------------------------------------------------- +# 2D sine-cosine position embedding +# References: +# Transformer: https://github.com/tensorflow/models/blob/master/official/nlp/transformer/model_utils.py +# MoCo v3: https://github.com/facebookresearch/moco-v3 +# -------------------------------------------------------- +def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False): + """ + grid_size: int of the grid height and width + return: + pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token) + """ + grid_h = np.arange(grid_size, dtype=np.float32) + grid_w = np.arange(grid_size, dtype=np.float32) + grid = np.meshgrid(grid_w, grid_h) # here w goes first + grid = np.stack(grid, axis=0) + + grid = grid.reshape([2, 1, grid_size, grid_size]) + pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid) + if cls_token: + pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0) + return pos_embed + + +def get_2d_sincos_pos_embed_from_grid(embed_dim, grid): + assert embed_dim % 2 == 0 + + # use half of dimensions to encode grid_h + emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2) + emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2) + + emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D) + return emb + + +def get_1d_sincos_pos_embed_from_grid(embed_dim, pos): + """ + embed_dim: output dimension for each position + pos: a list of positions to be encoded: size (M,) + out: (M, D) + """ + assert embed_dim % 2 == 0 + omega = np.arange(embed_dim // 2, dtype=np.float) + omega /= embed_dim / 2. + omega = 1. 
/ 10000**omega # (D/2,) + + pos = pos.reshape(-1) # (M,) + out = np.einsum('m,d->md', pos, omega) # (M, D/2), outer product + + emb_sin = np.sin(out) # (M, D/2) + emb_cos = np.cos(out) # (M, D/2) + + emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D) + return emb + + +# -------------------------------------------------------- +# Interpolate position embeddings for high-resolution +# References: +# DeiT: https://github.com/facebookresearch/deit +# -------------------------------------------------------- +def interpolate_pos_embed(model, checkpoint_model): + if 'pos_embed' in checkpoint_model: + pos_embed_checkpoint = checkpoint_model['pos_embed'] + embedding_size = pos_embed_checkpoint.shape[-1] + num_patches = model.patch_embed.num_patches + num_extra_tokens = model.pos_embed.shape[-2] - num_patches + # height (== width) for the checkpoint position embedding + orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5) + # height (== width) for the new position embedding + new_size = int(num_patches ** 0.5) + # class_token and dist_token are kept unchanged + if orig_size != new_size: + print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size)) + extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] + # only the position tokens are interpolated + pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] + pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) + pos_tokens = torch.nn.functional.interpolate( + pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False) + pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) + new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) + checkpoint_model['pos_embed'] = new_pos_embed From f5f9279af914a0299c0754e16873695deb2d13d7 Mon Sep 17 00:00:00 2001 From: MDSALMANSHAMS Date: Fri, 16 Jan 2026 10:42:13 +0530 Subject: [PATCH 4/7] training --- .gitignore | 2 + engine_segmentation.py | 12 +- 
examples/RETFound_MendeleyOCT_demo.ipynb | 1133 ++++++++++++++++++++-- main_segmentation.py | 72 +- 4 files changed, 1131 insertions(+), 88 deletions(-) diff --git a/.gitignore b/.gitignore index bc0b8174..f207be41 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,7 @@ __pycache__/ # Distribution / packaging .Python build/ +checkpoints Data develop-eggs/ dist/ @@ -19,6 +20,7 @@ lib/ lib64/ parts/ sdist/ +segmentation_output var/ wheels/ pip-wheel-metadata/ diff --git a/engine_segmentation.py b/engine_segmentation.py index 18a13dd7..3e966d0d 100644 --- a/engine_segmentation.py +++ b/engine_segmentation.py @@ -28,13 +28,21 @@ def compute_metrics(preds, targets, smooth=1e-6): def train_segmentation(model, loader, loss_fn, optimizer, device): model.train() total = 0 - for x, y in loader: + + for step, (x, y) in enumerate(loader): x, y = x.to(device), y.to(device) + optimizer.zero_grad() - loss = loss_fn(model(x), y) + out = model(x) + loss = loss_fn(out, y) loss.backward() optimizer.step() + total += loss.item() * x.size(0) + + if step % 10 == 0: + print(f" [batch {step}/{len(loader)}] loss: {loss.item():.4f}") + return total / len(loader.dataset) diff --git a/examples/RETFound_MendeleyOCT_demo.ipynb b/examples/RETFound_MendeleyOCT_demo.ipynb index 677e460b..bd7ad34a 100644 --- a/examples/RETFound_MendeleyOCT_demo.ipynb +++ b/examples/RETFound_MendeleyOCT_demo.ipynb @@ -4,6 +4,7 @@ "cell_type": "markdown", "id": "76b39fb1", "metadata": { + "id": "76b39fb1", "jp-MarkdownHeadingCollapsed": true }, "source": [ @@ -35,10 +36,186 @@ "\n" ] }, + { + "cell_type": "code", + "execution_count": 1, + "id": "rT2jO4AtHkjN", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 1712, + "status": "ok", + "timestamp": 1768473950572, + "user": { + "displayName": "MD SALMAN SHAMS", + "userId": "17411188514128174175" + }, + "user_tz": -330 + }, + "id": "rT2jO4AtHkjN", + "outputId": "5f092f62-ea87-432f-dfb4-4a5c4af56745" + }, + 
"outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n" + ] + } + ], + "source": [ + "from google.colab import drive\n", + "drive.mount('/content/drive')" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "odaiivGvH5e5", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 5, + "status": "ok", + "timestamp": 1768473955126, + "user": { + "displayName": "MD SALMAN SHAMS", + "userId": "17411188514128174175" + }, + "user_tz": -330 + }, + "id": "odaiivGvH5e5", + "outputId": "19f386ec-2157-4910-d427-0f16aeaca009" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/content/drive/MyDrive/Assignments/RETFound\n" + ] + } + ], + "source": [ + "%cd /content/drive/MyDrive/Assignments/RETFound" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "E8fpYFoWH-do", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 4991, + "status": "ok", + "timestamp": 1768473962175, + "user": { + "displayName": "MD SALMAN SHAMS", + "userId": "17411188514128174175" + }, + "user_tz": -330 + }, + "id": "E8fpYFoWH-do", + "outputId": "ec45e395-a59f-4566-af41-116c80d30455" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Requirement already satisfied: opencv-python~=4.9.0.80 in /usr/local/lib/python3.12/dist-packages (from -r requirements.txt (line 1)) (4.9.0.80)\n", + "Requirement already satisfied: Pillow~=10.2.0 in /usr/local/lib/python3.12/dist-packages (from -r requirements.txt (line 2)) (10.2.0)\n", + "Requirement already satisfied: pycm~=4.0 in /usr/local/lib/python3.12/dist-packages (from -r requirements.txt (line 3)) (4.5)\n", + "Requirement already satisfied: scikit-learn~=1.4.2 in /usr/local/lib/python3.12/dist-packages 
(from -r requirements.txt (line 4)) (1.4.2)\n", + "Requirement already satisfied: timm~=0.9.2 in /usr/local/lib/python3.12/dist-packages (from -r requirements.txt (line 5)) (0.9.16)\n", + "Requirement already satisfied: numpy~=1.26.4 in /usr/local/lib/python3.12/dist-packages (from -r requirements.txt (line 7)) (1.26.4)\n", + "Requirement already satisfied: matplotlib~=3.8.4 in /usr/local/lib/python3.12/dist-packages (from -r requirements.txt (line 8)) (3.8.4)\n", + "Requirement already satisfied: scikit-multilearn~=0.2.0 in /usr/local/lib/python3.12/dist-packages (from -r requirements.txt (line 9)) (0.2.0)\n", + "Requirement already satisfied: huggingface-hub~=0.23.4 in /usr/local/lib/python3.12/dist-packages (from -r requirements.txt (line 10)) (0.23.5)\n", + "Requirement already satisfied: tensorboard~=2.17.0 in /usr/local/lib/python3.12/dist-packages (from -r requirements.txt (line 11)) (2.17.1)\n", + "Requirement already satisfied: albumentations~=1.4.3 in /usr/local/lib/python3.12/dist-packages (from -r requirements.txt (line 12)) (1.4.24)\n", + "Requirement already satisfied: art>=1.8 in /usr/local/lib/python3.12/dist-packages (from pycm~=4.0->-r requirements.txt (line 3)) (6.5)\n", + "Requirement already satisfied: scipy>=1.6.0 in /usr/local/lib/python3.12/dist-packages (from scikit-learn~=1.4.2->-r requirements.txt (line 4)) (1.16.3)\n", + "Requirement already satisfied: joblib>=1.2.0 in /usr/local/lib/python3.12/dist-packages (from scikit-learn~=1.4.2->-r requirements.txt (line 4)) (1.5.3)\n", + "Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from scikit-learn~=1.4.2->-r requirements.txt (line 4)) (3.6.0)\n", + "Requirement already satisfied: torch in /usr/local/lib/python3.12/dist-packages (from timm~=0.9.2->-r requirements.txt (line 5)) (2.9.0+cu126)\n", + "Requirement already satisfied: torchvision in /usr/local/lib/python3.12/dist-packages (from timm~=0.9.2->-r requirements.txt (line 5)) 
(0.24.0+cu126)\n", + "Requirement already satisfied: pyyaml in /usr/local/lib/python3.12/dist-packages (from timm~=0.9.2->-r requirements.txt (line 5)) (6.0.3)\n", + "Requirement already satisfied: safetensors in /usr/local/lib/python3.12/dist-packages (from timm~=0.9.2->-r requirements.txt (line 5)) (0.7.0)\n", + "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib~=3.8.4->-r requirements.txt (line 8)) (1.3.3)\n", + "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.12/dist-packages (from matplotlib~=3.8.4->-r requirements.txt (line 8)) (0.12.1)\n", + "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.12/dist-packages (from matplotlib~=3.8.4->-r requirements.txt (line 8)) (4.61.1)\n", + "Requirement already satisfied: kiwisolver>=1.3.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib~=3.8.4->-r requirements.txt (line 8)) (1.4.9)\n", + "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.12/dist-packages (from matplotlib~=3.8.4->-r requirements.txt (line 8)) (25.0)\n", + "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib~=3.8.4->-r requirements.txt (line 8)) (3.3.1)\n", + "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.12/dist-packages (from matplotlib~=3.8.4->-r requirements.txt (line 8)) (2.9.0.post0)\n", + "Requirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from huggingface-hub~=0.23.4->-r requirements.txt (line 10)) (3.20.2)\n", + "Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub~=0.23.4->-r requirements.txt (line 10)) (2025.3.0)\n", + "Requirement already satisfied: requests in /usr/local/lib/python3.12/dist-packages (from huggingface-hub~=0.23.4->-r requirements.txt (line 10)) (2.32.4)\n", + "Requirement already satisfied: tqdm>=4.42.1 in 
/usr/local/lib/python3.12/dist-packages (from huggingface-hub~=0.23.4->-r requirements.txt (line 10)) (4.67.1)\n", + "Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub~=0.23.4->-r requirements.txt (line 10)) (4.15.0)\n", + "Requirement already satisfied: absl-py>=0.4 in /usr/local/lib/python3.12/dist-packages (from tensorboard~=2.17.0->-r requirements.txt (line 11)) (1.4.0)\n", + "Requirement already satisfied: grpcio>=1.48.2 in /usr/local/lib/python3.12/dist-packages (from tensorboard~=2.17.0->-r requirements.txt (line 11)) (1.76.0)\n", + "Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.12/dist-packages (from tensorboard~=2.17.0->-r requirements.txt (line 11)) (3.10)\n", + "Requirement already satisfied: protobuf!=4.24.0,>=3.19.6 in /usr/local/lib/python3.12/dist-packages (from tensorboard~=2.17.0->-r requirements.txt (line 11)) (5.29.5)\n", + "Requirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.12/dist-packages (from tensorboard~=2.17.0->-r requirements.txt (line 11)) (75.2.0)\n", + "Requirement already satisfied: six>1.9 in /usr/local/lib/python3.12/dist-packages (from tensorboard~=2.17.0->-r requirements.txt (line 11)) (1.17.0)\n", + "Requirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in /usr/local/lib/python3.12/dist-packages (from tensorboard~=2.17.0->-r requirements.txt (line 11)) (0.7.2)\n", + "Requirement already satisfied: werkzeug>=1.0.1 in /usr/local/lib/python3.12/dist-packages (from tensorboard~=2.17.0->-r requirements.txt (line 11)) (3.1.5)\n", + "Requirement already satisfied: pydantic>=2.9.2 in /usr/local/lib/python3.12/dist-packages (from albumentations~=1.4.3->-r requirements.txt (line 12)) (2.12.3)\n", + "Requirement already satisfied: albucore==0.0.23 in /usr/local/lib/python3.12/dist-packages (from albumentations~=1.4.3->-r requirements.txt (line 12)) (0.0.23)\n", + "Requirement already satisfied: 
opencv-python-headless>=4.9.0.80 in /usr/local/lib/python3.12/dist-packages (from albumentations~=1.4.3->-r requirements.txt (line 12)) (4.11.0.86)\n", + "Requirement already satisfied: stringzilla>=3.10.4 in /usr/local/lib/python3.12/dist-packages (from albucore==0.0.23->albumentations~=1.4.3->-r requirements.txt (line 12)) (4.6.0)\n", + "Requirement already satisfied: simsimd>=5.9.2 in /usr/local/lib/python3.12/dist-packages (from albucore==0.0.23->albumentations~=1.4.3->-r requirements.txt (line 12)) (6.5.12)\n", + "Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.12/dist-packages (from pydantic>=2.9.2->albumentations~=1.4.3->-r requirements.txt (line 12)) (0.7.0)\n", + "Requirement already satisfied: pydantic-core==2.41.4 in /usr/local/lib/python3.12/dist-packages (from pydantic>=2.9.2->albumentations~=1.4.3->-r requirements.txt (line 12)) (2.41.4)\n", + "Requirement already satisfied: typing-inspection>=0.4.2 in /usr/local/lib/python3.12/dist-packages (from pydantic>=2.9.2->albumentations~=1.4.3->-r requirements.txt (line 12)) (0.4.2)\n", + "Requirement already satisfied: markupsafe>=2.1.1 in /usr/local/lib/python3.12/dist-packages (from werkzeug>=1.0.1->tensorboard~=2.17.0->-r requirements.txt (line 11)) (3.0.3)\n", + "Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests->huggingface-hub~=0.23.4->-r requirements.txt (line 10)) (3.4.4)\n", + "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.12/dist-packages (from requests->huggingface-hub~=0.23.4->-r requirements.txt (line 10)) (3.11)\n", + "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests->huggingface-hub~=0.23.4->-r requirements.txt (line 10)) (2.5.0)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.12/dist-packages (from requests->huggingface-hub~=0.23.4->-r requirements.txt (line 10)) (2026.1.4)\n", + 
"Requirement already satisfied: sympy>=1.13.3 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (1.14.0)\n", + "Requirement already satisfied: networkx>=2.5.1 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (3.6.1)\n", + "Requirement already satisfied: jinja2 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (3.1.6)\n", + "Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (12.6.77)\n", + "Requirement already satisfied: nvidia-cuda-runtime-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (12.6.77)\n", + "Requirement already satisfied: nvidia-cuda-cupti-cu12==12.6.80 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (12.6.80)\n", + "Requirement already satisfied: nvidia-cudnn-cu12==9.10.2.21 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (9.10.2.21)\n", + "Requirement already satisfied: nvidia-cublas-cu12==12.6.4.1 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (12.6.4.1)\n", + "Requirement already satisfied: nvidia-cufft-cu12==11.3.0.4 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (11.3.0.4)\n", + "Requirement already satisfied: nvidia-curand-cu12==10.3.7.77 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (10.3.7.77)\n", + "Requirement already satisfied: nvidia-cusolver-cu12==11.7.1.2 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (11.7.1.2)\n", + "Requirement already satisfied: nvidia-cusparse-cu12==12.5.4.2 in 
/usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (12.5.4.2)\n", + "Requirement already satisfied: nvidia-cusparselt-cu12==0.7.1 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (0.7.1)\n", + "Requirement already satisfied: nvidia-nccl-cu12==2.27.5 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (2.27.5)\n", + "Requirement already satisfied: nvidia-nvshmem-cu12==3.3.20 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (3.3.20)\n", + "Requirement already satisfied: nvidia-nvtx-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (12.6.77)\n", + "Requirement already satisfied: nvidia-nvjitlink-cu12==12.6.85 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (12.6.85)\n", + "Requirement already satisfied: nvidia-cufile-cu12==1.11.1.6 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (1.11.1.6)\n", + "Requirement already satisfied: triton==3.5.0 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (3.5.0)\n", + "Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy>=1.13.3->torch->timm~=0.9.2->-r requirements.txt (line 5)) (1.3.0)\n" + ] + } + ], + "source": [ + "!pip install -r requirements.txt" + ] + }, { "cell_type": "markdown", "id": "7ec435a7", - "metadata": {}, + "metadata": { + "id": "7ec435a7" + }, "source": [ "## 1. Install environment\n", "1. 
Follow [RETFound README](https://github.com/rmaphoh/RETFound) to install environment\n", @@ -50,7 +227,9 @@ "cell_type": "code", "execution_count": null, "id": "7cbf5e93-6ca0-4401-88e6-64e39968e7cd", - "metadata": {}, + "metadata": { + "id": "7cbf5e93-6ca0-4401-88e6-64e39968e7cd" + }, "outputs": [], "source": [ "import sys, torch\n", @@ -70,7 +249,9 @@ { "cell_type": "markdown", "id": "ed67953f", - "metadata": {}, + "metadata": { + "id": "ed67953f" + }, "source": [ "## 2. Prepare MendeleyOCT dataset\n", "1. Download from the [shared data pool](https://github.com/rmaphoh/RETFound/blob/main/BENCHMARK.md).\n", @@ -80,7 +261,9 @@ { "cell_type": "markdown", "id": "357be2fa-a914-4d1f-8759-76b2b1c3f20f", - "metadata": {}, + "metadata": { + "id": "357be2fa-a914-4d1f-8759-76b2b1c3f20f" + }, "source": [ "## 3. Hyperparameter and path settings\n", "1. Can choose finetune or lp (linear probe)\n", @@ -91,7 +274,9 @@ "cell_type": "code", "execution_count": null, "id": "5f675843", - "metadata": {}, + "metadata": { + "id": "5f675843" + }, "outputs": [], "source": [ "from pathlib import Path\n", @@ -117,14 +302,18 @@ "cell_type": "code", "execution_count": null, "id": "fa3d8d10", - "metadata": {}, + "metadata": { + "id": "fa3d8d10" + }, "outputs": [], "source": [] }, { "cell_type": "markdown", "id": "6ac04845", - "metadata": {}, + "metadata": { + "id": "6ac04845" + }, "source": [ "## 4. 
Fine-tuning and testing RETFound on MESSIDOR2" ] @@ -134,6 +323,7 @@ "execution_count": null, "id": "d23ff751", "metadata": { + "id": "d23ff751", "scrolled": true }, "outputs": [], @@ -157,113 +347,909 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 4, "id": "b55116e5", - "metadata": {}, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 12, + "status": "ok", + "timestamp": 1768473965211, + "user": { + "displayName": "MD SALMAN SHAMS", + "userId": "17411188514128174175" + }, + "user_tz": -330 + }, + "id": "b55116e5", + "outputId": "2ecba17a-1652-4641-9760-f44f741b554e" + }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "F:\\GitHub\\RETFound\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\IPython\\core\\magics\\osm.py:417: UserWarning: using dhist requires you to install the `pickleshare` library.\n", - " self.shell.db['dhist'] = compress_dhist(dhist)[-100:]\n" + "/content/drive/MyDrive/Assignments/RETFound\n" ] } ], "source": [ - "%cd F:\\GitHub\\RETFound" + "%cd /content/drive/MyDrive/Assignments/RETFound" ] }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 5, "id": "374fdce3", - "metadata": {}, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "executionInfo": { + "elapsed": 12160408, + "status": "ok", + "timestamp": 1768486129477, + "user": { + "displayName": "MD SALMAN SHAMS", + "userId": "17411188514128174175" + }, + "user_tz": -330 + }, + "id": "374fdce3", + "outputId": "47f94636-3e02-400e-cf47-fba01f0b88bd" + }, "outputs": [ { - "name": "stderr", + "name": "stdout", "output_type": "stream", "text": [ - "f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\albumentations\\__init__.py:24: UserWarning: A new version of Albumentations is available: 2.0.8 (you have 1.4.24). Upgrade using: pip install -U albumentations. 
To disable automatic update checks, set the environment variable NO_ALBUMENTATIONS_UPDATE to 1.\n", + "/usr/local/lib/python3.12/dist-packages/albumentations/__init__.py:24: UserWarning: A new version of Albumentations is available: 2.0.8 (you have 1.4.24). Upgrade using: pip install -U albumentations. To disable automatic update checks, set the environment variable NO_ALBUMENTATIONS_UPDATE to 1.\n", " check_for_updates()\n", - "Traceback (most recent call last):\n", - " File \"f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\huggingface_hub\\utils\\_errors.py\", line 304, in hf_raise_for_status\n", - " response.raise_for_status()\n", - " File \"f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\requests\\models.py\", line 1026, in raise_for_status\n", - " raise HTTPError(http_error_msg, response=self)\n", - "requests.exceptions.HTTPError: 401 Client Error: Unauthorized for url: https://huggingface.co/YukunZhou/RETFound_OCT/resolve/main/RETFound_OCT.pth\n", - "\n", - "The above exception was the direct cause of the following exception:\n", + "Loaded 826 valid samples from Segmentation/Data/train\n", + "Loaded 200 valid samples from Segmentation/Data/val\n", + "Loaded 50 valid samples from Segmentation/Data/test\n", + "/usr/local/lib/python3.12/dist-packages/torch/utils/data/dataloader.py:627: UserWarning: This DataLoader will create 4 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. 
Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n", + " warnings.warn(\n", + "Position interpolate from 14x14 to 16x16\n", + "Pretrained RETFound weights loaded.\n", "\n", - "Traceback (most recent call last):\n", - " File \"F:\\GitHub\\RETFound\\main_segmentation.py\", line 122, in \n", - " main()\n", - " File \"F:\\GitHub\\RETFound\\main_segmentation.py\", line 90, in main\n", - " ckpt = hf_hub_download(f\"YukunZhou/{args.finetune}\", f\"{args.finetune}.pth\")\n", - " File \"f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\huggingface_hub\\utils\\_validators.py\", line 114, in _inner_fn\n", - " return fn(*args, **kwargs)\n", - " File \"f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\huggingface_hub\\file_download.py\", line 1221, in hf_hub_download\n", - " return _hf_hub_download_to_cache_dir(\n", - " File \"f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\huggingface_hub\\file_download.py\", line 1325, in _hf_hub_download_to_cache_dir\n", - " _raise_on_head_call_error(head_call_error, force_download, local_files_only)\n", - " File \"f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\huggingface_hub\\file_download.py\", line 1823, in _raise_on_head_call_error\n", - " raise head_call_error\n", - " File \"f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\huggingface_hub\\file_download.py\", line 1722, in _get_metadata_or_catch_error\n", - " metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers)\n", - " File \"f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\huggingface_hub\\utils\\_validators.py\", line 114, in _inner_fn\n", - " return fn(*args, **kwargs)\n", - " File \"f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\huggingface_hub\\file_download.py\", line 1645, in get_hf_file_metadata\n", - " r = _request_wrapper(\n", - " File 
\"f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\huggingface_hub\\file_download.py\", line 372, in _request_wrapper\n", - " response = _request_wrapper(\n", - " File \"f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\huggingface_hub\\file_download.py\", line 396, in _request_wrapper\n", - " hf_raise_for_status(response)\n", - " File \"f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\huggingface_hub\\utils\\_errors.py\", line 352, in hf_raise_for_status\n", - " raise RepositoryNotFoundError(message, response) from e\n", - "huggingface_hub.utils._errors.RepositoryNotFoundError: 401 Client Error. (Request ID: Root=1-695fc45d-79fe26933f26030a6b9841b0;54857051-1858-4db9-bda3-3c975c6024e2)\n", + "[DEBUG] Starting training loop...\n", "\n", - "Repository Not Found for url: https://huggingface.co/YukunZhou/RETFound_OCT/resolve/main/RETFound_OCT.pth.\n", - "Please make sure you specified the correct `repo_id` and `repo_type`.\n", - "If you are trying to access a private or gated repo, make sure you are authenticated.\n", - "Invalid username or password.\n" + "[DEBUG] Entered epoch 1\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather 
than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + " [batch 0/207] loss: 1.6072\n", + " [batch 10/207] loss: 0.5284\n", + " [batch 20/207] loss: 0.5346\n", + " [batch 30/207] loss: 0.4621\n", + " [batch 40/207] loss: 0.4229\n", + " [batch 50/207] loss: 0.4235\n", + " [batch 60/207] loss: 0.3985\n", + " [batch 70/207] loss: 0.3799\n", + " [batch 80/207] loss: 0.4031\n", + " [batch 90/207] loss: 0.3644\n", + " [batch 100/207] loss: 0.3574\n", + " [batch 110/207] loss: 0.4511\n", + " [batch 120/207] loss: 0.3842\n", + " [batch 130/207] loss: 0.3465\n", + " [batch 140/207] loss: 0.3782\n", + " [batch 150/207] loss: 0.3437\n", + " [batch 160/207] loss: 0.3442\n", + " [batch 170/207] loss: 0.3764\n", + " [batch 180/207] loss: 0.3371\n", + " [batch 190/207] loss: 0.3378\n", + " [batch 200/207] loss: 0.3950\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use 
sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "Epoch 1: Train=0.4005 | Val=0.2806 | Dice=0.5605 | IoU=0.3894\n", + "[DEBUG] Entered epoch 2\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + " [batch 0/207] loss: 0.3567\n", + " [batch 10/207] loss: 
0.2937\n", + " [batch 20/207] loss: 0.3549\n", + " [batch 30/207] loss: 0.2875\n", + " [batch 40/207] loss: 0.2788\n", + " [batch 50/207] loss: 0.2848\n", + " [batch 60/207] loss: 0.3167\n", + " [batch 70/207] loss: 0.2793\n", + " [batch 80/207] loss: 0.3208\n", + " [batch 90/207] loss: 0.2842\n", + " [batch 100/207] loss: 0.3256\n", + " [batch 110/207] loss: 0.2269\n", + " [batch 120/207] loss: 0.3196\n", + " [batch 130/207] loss: 0.2960\n", + " [batch 140/207] loss: 0.3300\n", + " [batch 150/207] loss: 0.3175\n", + " [batch 160/207] loss: 0.2969\n", + " [batch 170/207] loss: 0.3674\n", + " [batch 180/207] loss: 0.2710\n", + " [batch 190/207] loss: 0.3173\n", + " [batch 200/207] loss: 0.2744\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, 
dtype=torch.long)\n", + "Epoch 2: Train=0.3019 | Val=0.2676 | Dice=0.5939 | IoU=0.4224\n", + "[DEBUG] Entered epoch 3\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + " [batch 0/207] loss: 0.2541\n", + " [batch 10/207] loss: 0.2887\n", + " [batch 20/207] loss: 0.3042\n", + " [batch 30/207] loss: 0.2080\n", + " [batch 40/207] loss: 0.2231\n", + " [batch 50/207] loss: 0.3497\n", + " [batch 60/207] loss: 0.3842\n", + " [batch 70/207] loss: 0.2624\n", + " [batch 80/207] loss: 0.2801\n", + " [batch 90/207] loss: 0.3401\n", + " [batch 100/207] loss: 0.3218\n", + " [batch 110/207] loss: 0.2471\n", + " [batch 120/207] loss: 0.2856\n", + " [batch 130/207] loss: 0.3261\n", + " [batch 140/207] loss: 0.2053\n", + " [batch 
150/207] loss: 0.2462\n", + " [batch 160/207] loss: 0.2413\n", + " [batch 170/207] loss: 0.3874\n", + " [batch 180/207] loss: 0.2323\n", + " [batch 190/207] loss: 0.3317\n", + " [batch 200/207] loss: 0.2441\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "Epoch 3: Train=0.2727 | Val=0.2280 | Dice=0.6600 | IoU=0.4925\n", + "[DEBUG] Entered epoch 4\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + 
"/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + " [batch 0/207] loss: 0.2119\n", + " [batch 10/207] loss: 0.3079\n", + " [batch 20/207] loss: 0.2442\n", + " [batch 30/207] loss: 0.2435\n", + " [batch 40/207] loss: 0.3318\n", + " [batch 50/207] loss: 0.2625\n", + " [batch 60/207] loss: 0.2661\n", + " [batch 70/207] loss: 0.2390\n", + " [batch 80/207] loss: 0.2467\n", + " [batch 90/207] loss: 0.3138\n", + " [batch 100/207] loss: 0.2086\n", + " [batch 110/207] loss: 0.2197\n", + " [batch 120/207] loss: 0.2395\n", + " [batch 130/207] loss: 0.1610\n", + " [batch 140/207] loss: 0.3063\n", + " [batch 150/207] loss: 0.2948\n", + " [batch 160/207] loss: 0.2099\n", + " [batch 170/207] loss: 0.2426\n", + " [batch 180/207] loss: 0.1962\n", + " [batch 190/207] loss: 0.2409\n", + " [batch 200/207] loss: 0.2781\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than 
torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "Epoch 4: Train=0.2448 | Val=0.2342 | Dice=0.6503 | IoU=0.4818\n", + "[DEBUG] Entered epoch 5\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use 
sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + " [batch 0/207] loss: 0.3115\n", + " [batch 10/207] loss: 0.1931\n", + " [batch 20/207] loss: 0.2424\n", + " [batch 30/207] loss: 0.2089\n", + " [batch 40/207] loss: 0.1868\n", + " [batch 50/207] loss: 0.3348\n", + " [batch 60/207] loss: 0.2479\n", + " [batch 70/207] loss: 0.1659\n", + " [batch 80/207] loss: 0.2025\n", + " [batch 90/207] loss: 0.1686\n", + " [batch 100/207] loss: 0.2567\n", + " [batch 110/207] loss: 0.1917\n", + " [batch 120/207] loss: 0.2328\n", + " [batch 130/207] loss: 0.2900\n", + " [batch 140/207] loss: 0.2101\n", + " [batch 150/207] loss: 0.2216\n", + " [batch 160/207] loss: 0.2112\n", + " [batch 170/207] loss: 0.2272\n", + " [batch 180/207] loss: 0.1955\n", + " [batch 190/207] loss: 0.2354\n", + " [batch 200/207] loss: 0.3457\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + 
"/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "Epoch 5: Train=0.2316 | Val=0.2245 | Dice=0.6685 | IoU=0.5020\n", + "[DEBUG] Entered epoch 6\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than 
torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + " [batch 0/207] loss: 0.2025\n", + " [batch 10/207] loss: 0.2152\n", + " [batch 20/207] loss: 0.2111\n", + " [batch 30/207] loss: 0.2317\n", + " [batch 40/207] loss: 0.2358\n", + " [batch 50/207] loss: 0.1877\n", + " [batch 60/207] loss: 0.1501\n", + " [batch 70/207] loss: 0.2540\n", + " [batch 80/207] loss: 0.2082\n", + " [batch 90/207] loss: 0.2259\n", + " [batch 100/207] loss: 0.1742\n", + " [batch 110/207] loss: 0.1807\n", + " [batch 120/207] loss: 0.1410\n", + " [batch 130/207] loss: 0.3102\n", + " [batch 140/207] loss: 0.2125\n", + " [batch 150/207] loss: 0.2967\n", + " [batch 160/207] loss: 0.2384\n", + " [batch 170/207] loss: 0.1771\n", + " [batch 180/207] loss: 0.2419\n", + " [batch 190/207] loss: 0.3038\n", + " [batch 200/207] loss: 0.2155\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use 
sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "Epoch 6: Train=0.2164 | Val=0.2218 | Dice=0.6714 | IoU=0.5053\n", + "[DEBUG] Entered epoch 7\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + " [batch 0/207] loss: 0.1777\n", + " [batch 10/207] loss: 0.2458\n", + " [batch 20/207] loss: 0.2361\n", + " [batch 30/207] loss: 0.2720\n", + " [batch 40/207] loss: 0.2114\n", + " [batch 50/207] loss: 0.2370\n", + " [batch 60/207] loss: 0.2767\n", + " [batch 70/207] loss: 0.1874\n", + " [batch 80/207] loss: 0.1919\n", + " [batch 90/207] loss: 0.2213\n", + " [batch 100/207] loss: 0.2213\n", + 
" [batch 110/207] loss: 0.2263\n", + " [batch 120/207] loss: 0.1775\n", + " [batch 130/207] loss: 0.2739\n", + " [batch 140/207] loss: 0.3291\n", + " [batch 150/207] loss: 0.2236\n", + " [batch 160/207] loss: 0.2162\n", + " [batch 170/207] loss: 0.1912\n", + " [batch 180/207] loss: 0.1648\n", + " [batch 190/207] loss: 0.1859\n", + " [batch 200/207] loss: 0.2054\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "Epoch 7: Train=0.2038 | Val=0.2250 | Dice=0.6699 | IoU=0.5036\n", + "[DEBUG] Entered epoch 8\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or 
sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + " [batch 0/207] loss: 0.2239\n", + " [batch 10/207] loss: 0.1675\n", + " [batch 20/207] loss: 0.1499\n", + " [batch 30/207] loss: 0.2443\n", + " [batch 40/207] loss: 0.2731\n", + " [batch 50/207] loss: 0.1959\n", + " [batch 60/207] loss: 0.2296\n", + " [batch 70/207] loss: 0.2078\n", + " [batch 80/207] loss: 0.2190\n", + " [batch 90/207] loss: 0.2172\n", + " [batch 100/207] loss: 0.1288\n", + " [batch 110/207] loss: 0.1876\n", + " [batch 120/207] loss: 0.1710\n", + " [batch 130/207] loss: 0.2748\n", + " [batch 140/207] loss: 0.1927\n", + " [batch 150/207] loss: 0.1707\n", + " [batch 160/207] loss: 0.2482\n", + " [batch 170/207] loss: 0.1914\n", + " [batch 180/207] loss: 0.2109\n", + " [batch 190/207] loss: 0.1399\n", + " [batch 200/207] loss: 0.2101\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy 
construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "Epoch 8: Train=0.1969 | Val=0.2102 | Dice=0.6928 | IoU=0.5299\n", + "[DEBUG] Entered epoch 9\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + 
"/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + " [batch 0/207] loss: 0.1398\n", + " [batch 10/207] loss: 0.1192\n", + " [batch 20/207] loss: 0.1453\n", + " [batch 30/207] loss: 0.2658\n", + " [batch 40/207] loss: 0.2172\n", + " [batch 50/207] loss: 0.1517\n", + " [batch 60/207] loss: 0.1373\n", + " [batch 70/207] loss: 0.2481\n", + " [batch 80/207] loss: 0.2536\n", + " [batch 90/207] loss: 0.1781\n", + " [batch 100/207] loss: 0.2304\n", + " [batch 110/207] loss: 0.1664\n", + " [batch 120/207] loss: 0.3063\n", + " [batch 130/207] loss: 0.1773\n", + " [batch 140/207] loss: 0.2314\n", + " [batch 150/207] loss: 0.3274\n", + " [batch 160/207] loss: 0.1585\n", + " [batch 170/207] loss: 0.3672\n", + " [batch 180/207] loss: 0.2085\n", + " [batch 190/207] loss: 0.1775\n", + " [batch 200/207] loss: 0.1586\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than 
torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "Epoch 9: Train=0.1890 | Val=0.2178 | Dice=0.6853 | IoU=0.5213\n", + "[DEBUG] Entered epoch 10\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use 
sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + " [batch 0/207] loss: 0.1770\n", + " [batch 10/207] loss: 0.1227\n", + " [batch 20/207] loss: 0.1684\n", + " [batch 30/207] loss: 0.1987\n", + " [batch 40/207] loss: 0.1156\n", + " [batch 50/207] loss: 0.1463\n", + " [batch 60/207] loss: 0.1355\n", + " [batch 70/207] loss: 0.2587\n", + " [batch 80/207] loss: 0.0852\n", + " [batch 90/207] loss: 0.1101\n", + " [batch 100/207] loss: 0.1334\n", + " [batch 110/207] loss: 0.1751\n", + " [batch 120/207] loss: 0.2327\n", + " [batch 130/207] loss: 0.1607\n", + " [batch 140/207] loss: 0.1764\n", + " [batch 150/207] loss: 0.2233\n", + " [batch 160/207] loss: 0.1401\n", + " [batch 170/207] loss: 0.2363\n", + " [batch 180/207] loss: 0.2062\n", + " [batch 190/207] loss: 0.1247\n", + " [batch 200/207] loss: 0.1477\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + 
"/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "Epoch 10: Train=0.1837 | Val=0.2289 | Dice=0.6750 | IoU=0.5094\n", + "[DEBUG] Entered epoch 11\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + " [batch 0/207] loss: 0.2078\n", + " [batch 10/207] loss: 0.2047\n", + " [batch 20/207] loss: 0.1852\n", + " [batch 30/207] loss: 0.2619\n", + " [batch 40/207] loss: 0.1754\n", + " [batch 50/207] loss: 0.1004\n", + " [batch 60/207] loss: 0.1033\n", + " 
[batch 70/207] loss: 0.2141\n", + " [batch 80/207] loss: 0.1825\n", + " [batch 90/207] loss: 0.1713\n", + " [batch 100/207] loss: 0.1536\n", + " [batch 110/207] loss: 0.2070\n", + " [batch 120/207] loss: 0.1225\n", + " [batch 130/207] loss: 0.2451\n", + " [batch 140/207] loss: 0.1151\n", + " [batch 150/207] loss: 0.1468\n", + " [batch 160/207] loss: 0.1393\n", + " [batch 170/207] loss: 0.1352\n", + " [batch 180/207] loss: 0.1514\n", + " [batch 190/207] loss: 0.1821\n", + " [batch 200/207] loss: 0.1583\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "Epoch 11: Train=0.1776 | Val=0.2207 | Dice=0.6855 | IoU=0.5215\n", + "[DEBUG] Entered epoch 12\n", + 
"/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + " [batch 0/207] loss: 0.2290\n", + " [batch 10/207] loss: 0.1820\n", + " [batch 20/207] loss: 0.1776\n", + " [batch 30/207] loss: 0.1927\n", + " [batch 40/207] loss: 0.1674\n", + " [batch 50/207] loss: 0.1655\n", + " [batch 60/207] loss: 0.1710\n", + " [batch 70/207] loss: 0.1582\n", + " [batch 80/207] loss: 0.1816\n", + " [batch 90/207] loss: 0.1819\n", + " [batch 100/207] loss: 0.1738\n", + " [batch 110/207] loss: 0.2001\n", + " [batch 120/207] loss: 0.2124\n", + " [batch 130/207] loss: 0.1337\n", + " [batch 140/207] loss: 0.2463\n", + " [batch 150/207] loss: 0.1945\n", + " [batch 160/207] loss: 0.3018\n", + " [batch 170/207] loss: 0.1278\n", + " [batch 180/207] loss: 
0.2135\n", + " [batch 190/207] loss: 0.2194\n", + " [batch 200/207] loss: 0.2093\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "Epoch 12: Train=0.1721 | Val=0.2217 | Dice=0.6907 | IoU=0.5275\n", + "[DEBUG] Entered epoch 13\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use 
sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + " [batch 0/207] loss: 0.1652\n", + " [batch 10/207] loss: 0.1329\n", + " [batch 20/207] loss: 0.2092\n", + " [batch 30/207] loss: 0.1648\n", + " [batch 40/207] loss: 0.0932\n", + " [batch 50/207] loss: 0.1448\n", + " [batch 60/207] loss: 0.2008\n", + " [batch 70/207] loss: 0.1754\n", + " [batch 80/207] loss: 0.2081\n", + " [batch 90/207] loss: 0.1241\n", + " [batch 100/207] loss: 0.1880\n", + " [batch 110/207] loss: 0.1601\n", + " [batch 120/207] loss: 0.1282\n", + " [batch 130/207] loss: 0.1487\n", + " [batch 140/207] loss: 0.1517\n", + " [batch 150/207] loss: 0.1544\n", + " [batch 160/207] loss: 0.1018\n", + " [batch 170/207] loss: 0.1403\n", + " [batch 180/207] loss: 0.1175\n", + " [batch 190/207] loss: 0.1488\n", + " [batch 200/207] loss: 0.1415\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + 
"/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "Epoch 13: Train=0.1682 | Val=0.2622 | Dice=0.6457 | IoU=0.4768\n", + "[DEBUG] Entered epoch 14\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than 
torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + " [batch 0/207] loss: 0.2855\n", + " [batch 10/207] loss: 0.2600\n", + " [batch 20/207] loss: 0.1778\n", + " [batch 30/207] loss: 0.1939\n", + " [batch 40/207] loss: 0.1395\n", + " [batch 50/207] loss: 0.0920\n", + " [batch 60/207] loss: 0.1603\n", + " [batch 70/207] loss: 0.1238\n", + " [batch 80/207] loss: 0.1673\n", + " [batch 90/207] loss: 0.1552\n", + " [batch 100/207] loss: 0.1225\n", + " [batch 110/207] loss: 0.1592\n", + " [batch 120/207] loss: 0.1972\n", + " [batch 130/207] loss: 0.1940\n", + " [batch 140/207] loss: 0.1740\n", + " [batch 150/207] loss: 0.1977\n", + " [batch 160/207] loss: 0.1552\n", + " [batch 170/207] loss: 0.2466\n", + " [batch 180/207] loss: 0.2127\n", + " [batch 190/207] loss: 0.1929\n", + " [batch 200/207] loss: 0.1205\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use 
sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "Epoch 14: Train=0.1700 | Val=0.2158 | Dice=0.6897 | IoU=0.5263\n", + "[DEBUG] Entered epoch 15\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + " [batch 0/207] loss: 0.1854\n", + " [batch 10/207] loss: 
0.2445\n", + " [batch 20/207] loss: 0.0977\n", + " [batch 30/207] loss: 0.1831\n", + " [batch 40/207] loss: 0.2082\n", + " [batch 50/207] loss: 0.1230\n", + " [batch 60/207] loss: 0.2431\n", + " [batch 70/207] loss: 0.2035\n", + " [batch 80/207] loss: 0.1267\n", + " [batch 90/207] loss: 0.1210\n", + " [batch 100/207] loss: 0.1787\n", + " [batch 110/207] loss: 0.2013\n", + " [batch 120/207] loss: 0.2087\n", + " [batch 130/207] loss: 0.1681\n", + " [batch 140/207] loss: 0.1317\n", + " [batch 150/207] loss: 0.1911\n", + " [batch 160/207] loss: 0.1193\n", + " [batch 170/207] loss: 0.1786\n", + " [batch 180/207] loss: 0.1416\n", + " [batch 190/207] loss: 0.1293\n", + " [batch 200/207] loss: 0.1544\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, 
dtype=torch.long)\n", + "Epoch 15: Train=0.1661 | Val=0.2179 | Dice=0.6994 | IoU=0.5378\n", + "[DEBUG] Entered epoch 16\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + " [batch 0/207] loss: 0.1455\n", + " [batch 10/207] loss: 0.1057\n", + " [batch 20/207] loss: 0.1510\n", + " [batch 30/207] loss: 0.1456\n", + " [batch 40/207] loss: 0.2355\n", + " [batch 50/207] loss: 0.1749\n", + " [batch 60/207] loss: 0.1486\n", + " [batch 70/207] loss: 0.1764\n", + " [batch 80/207] loss: 0.1339\n", + " [batch 90/207] loss: 0.1780\n", + " [batch 100/207] loss: 0.1619\n", + " [batch 110/207] loss: 0.1672\n", + " [batch 120/207] loss: 0.2510\n", + " [batch 130/207] loss: 0.1626\n", + " [batch 140/207] loss: 0.1840\n", + " [batch 
150/207] loss: 0.1420\n", + " [batch 160/207] loss: 0.1930\n", + " [batch 170/207] loss: 0.1624\n", + " [batch 180/207] loss: 0.1335\n", + " [batch 190/207] loss: 0.1410\n", + " [batch 200/207] loss: 0.1213\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "Epoch 16: Train=0.1537 | Val=0.2204 | Dice=0.7009 | IoU=0.5395\n", + "[DEBUG] Entered epoch 17\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + 
"/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + " [batch 0/207] loss: 0.1523\n", + " [batch 10/207] loss: 0.1845\n", + " [batch 20/207] loss: 0.1752\n", + " [batch 30/207] loss: 0.2439\n", + " [batch 40/207] loss: 0.1606\n", + " [batch 50/207] loss: 0.1179\n", + " [batch 60/207] loss: 0.1376\n", + " [batch 70/207] loss: 0.1196\n", + " [batch 80/207] loss: 0.2147\n", + " [batch 90/207] loss: 0.1457\n", + " [batch 100/207] loss: 0.2614\n", + " [batch 110/207] loss: 0.2404\n", + " [batch 120/207] loss: 0.1848\n", + " [batch 130/207] loss: 0.1519\n", + " [batch 140/207] loss: 0.1041\n", + " [batch 150/207] loss: 0.1542\n", + " [batch 160/207] loss: 0.2578\n", + " [batch 170/207] loss: 0.1130\n", + " [batch 180/207] loss: 0.1311\n", + " [batch 190/207] loss: 0.1231\n", + " [batch 200/207] loss: 0.1946\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than 
torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "Epoch 17: Train=0.1509 | Val=0.2207 | Dice=0.6998 | IoU=0.5383\n", + "[DEBUG] Entered epoch 18\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use 
sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + " [batch 0/207] loss: 0.1491\n", + " [batch 10/207] loss: 0.1399\n", + " [batch 20/207] loss: 0.1529\n", + " [batch 30/207] loss: 0.2028\n", + " [batch 40/207] loss: 0.1463\n", + " [batch 50/207] loss: 0.1690\n", + " [batch 60/207] loss: 0.1515\n", + " [batch 70/207] loss: 0.1616\n", + " [batch 80/207] loss: 0.1476\n", + " [batch 90/207] loss: 0.1398\n", + " [batch 100/207] loss: 0.0772\n", + " [batch 110/207] loss: 0.0930\n", + " [batch 120/207] loss: 0.1421\n", + " [batch 130/207] loss: 0.1534\n", + " [batch 140/207] loss: 0.1148\n", + " [batch 150/207] loss: 0.1338\n", + " [batch 160/207] loss: 0.1688\n", + " [batch 170/207] loss: 0.1613\n", + " [batch 180/207] loss: 0.1792\n", + " [batch 190/207] loss: 0.1392\n", + " [batch 200/207] loss: 0.1403\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + 
"/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "Epoch 18: Train=0.1491 | Val=0.2116 | Dice=0.7052 | IoU=0.5446\n", + "[DEBUG] Entered epoch 19\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than 
torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + " [batch 0/207] loss: 0.1171\n", + " [batch 10/207] loss: 0.1994\n", + " [batch 20/207] loss: 0.1493\n", + " [batch 30/207] loss: 0.1619\n", + " [batch 40/207] loss: 0.1725\n", + " [batch 50/207] loss: 0.1558\n", + " [batch 60/207] loss: 0.1124\n", + " [batch 70/207] loss: 0.1338\n", + " [batch 80/207] loss: 0.1428\n", + " [batch 90/207] loss: 0.0972\n", + " [batch 100/207] loss: 0.1496\n", + " [batch 110/207] loss: 0.1751\n", + " [batch 120/207] loss: 0.1270\n", + " [batch 130/207] loss: 0.1437\n", + " [batch 140/207] loss: 0.2481\n", + " [batch 150/207] loss: 0.1262\n", + " [batch 160/207] loss: 0.1557\n", + " [batch 170/207] loss: 0.1365\n", + " [batch 180/207] loss: 0.0968\n", + " [batch 190/207] loss: 0.1993\n", + " [batch 200/207] loss: 0.1454\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use 
sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "Epoch 19: Train=0.1451 | Val=0.2132 | Dice=0.7078 | IoU=0.5478\n", + "[DEBUG] Entered epoch 20\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + " [batch 0/207] loss: 0.2005\n", + " [batch 10/207] loss: 0.2464\n", + " [batch 20/207] loss: 0.1515\n", + " [batch 30/207] loss: 0.2253\n", + " [batch 40/207] loss: 0.0954\n", + " [batch 50/207] loss: 0.1562\n", + " [batch 60/207] loss: 0.1923\n", + " [batch 70/207] loss: 0.1598\n", + " [batch 80/207] loss: 0.1240\n", + " [batch 90/207] loss: 0.1108\n", + " [batch 100/207] loss: 0.1739\n", 
+ " [batch 110/207] loss: 0.2150\n", + " [batch 120/207] loss: 0.1125\n", + " [batch 130/207] loss: 0.1525\n", + " [batch 140/207] loss: 0.1313\n", + " [batch 150/207] loss: 0.1273\n", + " [batch 160/207] loss: 0.1075\n", + " [batch 170/207] loss: 0.1172\n", + " [batch 180/207] loss: 0.3110\n", + " [batch 190/207] loss: 0.1552\n", + " [batch 200/207] loss: 0.1810\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "Epoch 20: Train=0.1420 | Val=0.2250 | Dice=0.7002 | IoU=0.5387\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than 
torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "/content/drive/MyDrive/Assignments/RETFound/main_segmentation.py:62: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " return img, torch.tensor(mask, dtype=torch.long)\n", + "Test: Loss=0.2997 | Dice=0.6362 | IoU=0.4665\n" ] } ], "source": [ "!python main_segmentation.py \\\n", " --data_path Segmentation/Data \\\n", - " --finetune RETFound_OCT \\\n", - " --epochs 2 \\\n", - " --batch_size 2\n" + " --finetune checkpoints/checkpoint-best.pth \\\n", + " --epochs 20 \\\n", + " --batch_size 4 \\\n", + " --img_size 256" ] }, { "cell_type": "markdown", "id": "84ce93ac", - "metadata": {}, + "metadata": { + "id": "84ce93ac" + }, "source": [ "## 5. 
Evaluation-only" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "0af0f8a7", "metadata": { + "id": "0af0f8a7", "scrolled": true }, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "'{sys.executable}' is not recognized as an internal or external command,\n", + "operable program or batch file.\n" + ] + } + ], "source": [ "import sys\n", "\n", - "CKPT = OUTPUT_DIR / \"checkpoint-best.pth\"\n", + "CKPT = r\"F:\\GitHub\\RETFound\\segmentation_output\\best.pth\"\n", "\n", "!{sys.executable} main_finetune.py \\\n", " --model {MODEL} \\\n", @@ -271,7 +1257,7 @@ " --finetune {FINETUNE} \\\n", " --savemodel \\\n", " --global_pool \\\n", - " --batch_size 128 \\\n", + " --batch_size 1 \\\n", " --nb_classes {NUM_CLASS} \\\n", " --data_path {DATA_PATH} \\\n", " --input_size {INPUT_SIZE} \\\n", @@ -285,12 +1271,19 @@ "cell_type": "code", "execution_count": null, "id": "02d2dce7-31c2-48e2-87ce-9223b74cf94e", - "metadata": {}, + "metadata": { + "id": "02d2dce7-31c2-48e2-87ce-9223b74cf94e" + }, "outputs": [], "source": [] } ], "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + }, "environment": { "kernel": "retfound", "name": "workbench-notebooks.m128", diff --git a/main_segmentation.py b/main_segmentation.py index 3987e588..642cd359 100644 --- a/main_segmentation.py +++ b/main_segmentation.py @@ -31,23 +31,35 @@ class OCTDrusenDataset(Dataset): def __init__(self, root, transform=None): self.image_dir = os.path.join(root, "images") self.mask_dir = os.path.join(root, "masks") - self.images = sorted(os.listdir(self.image_dir)) self.transform = transform + self.samples = [] + for img_name in sorted(os.listdir(self.image_dir)): + stem = os.path.splitext(img_name)[0] + mask_name = stem + "_mask.png" + mask_path = os.path.join(self.mask_dir, mask_name) + if os.path.isfile(mask_path): + self.samples.append((img_name, mask_name)) + + print(f"Loaded {len(self.samples)} 
valid samples from {root}") + def __len__(self): - return len(self.images) + return len(self.samples) def __getitem__(self, idx): - img = cv2.imread(os.path.join(self.image_dir, self.images[idx])) + img_name, mask_name = self.samples[idx] + + img = cv2.imread(os.path.join(self.image_dir, img_name)) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - mask = cv2.imread(os.path.join(self.mask_dir, self.images[idx]), cv2.IMREAD_GRAYSCALE) + + mask = cv2.imread(os.path.join(self.mask_dir, mask_name), cv2.IMREAD_GRAYSCALE) + mask = (mask > 0).astype("uint8") if self.transform: aug = self.transform(image=img, mask=mask) img, mask = aug["image"], aug["mask"] - return img, mask.long() - + return img, torch.tensor(mask, dtype=torch.long) # ========================= # Main @@ -72,7 +84,7 @@ def main(): transform = Compose([ Resize(args.img_size, args.img_size), - Normalize((0.485,0.456,0.406), (0.229,0.224,0.225)), + Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), ToTensorV2() ]) @@ -84,38 +96,66 @@ def main(): val_loader = DataLoader(val_ds, args.batch_size, shuffle=False, num_workers=4) test_loader = DataLoader(test_ds, args.batch_size, shuffle=False, num_workers=4) - model = RETFoundSegmentation(args.img_size, args.patch_size).to(device) + model = RETFoundSegmentation(args.img_size, args.patch_size, num_classes=2, drop_path=args.drop_path).to(device) + # ------------------------- + # Load pretrained weights + # ------------------------- if args.finetune: - ckpt = hf_hub_download(f"YukunZhou/{args.finetune}", f"{args.finetune}.pth") - state = torch.load(ckpt, map_location="cpu") + if os.path.isfile(args.finetune): + ckpt_path = args.finetune + else: + ckpt_path = hf_hub_download( + repo_id=f"YukunZhou/{args.finetune}", + filename="pytorch_model.bin" + ) + + state = torch.load(ckpt_path, map_location="cpu", weights_only=False) state = state["model"] if "model" in state else state + + for k in ["head.weight", "head.bias"]: + if k in state: + del state[k] + 
interpolate_pos_embed(model.encoder, state) model.encoder.load_state_dict(state, strict=False) + print("Pretrained RETFound weights loaded.") + # ------------------------- + # Optimization + # ------------------------- ce_weights = torch.tensor([float(x) for x in args.ce_weight.split(",")]).to(device) ce_loss = nn.CrossEntropyLoss(weight=ce_weights) def loss_fn(out, tgt): return combined_loss_fn(out, tgt, ce_loss, args.dice_weight) - opt = optim.AdamW(model.parameters(), lr=args.lr) + optimizer = optim.AdamW(model.parameters(), lr=args.lr) + + print("\n[DEBUG] Starting training loop...\n") - best = 1e9 + # ------------------------- + # Training Loop + # ------------------------- + best = float("inf") for e in range(args.epochs): - train_loss = train_segmentation(model, train_loader, loss_fn, opt, device) + print(f"[DEBUG] Entered epoch {e+1}") + train_loss = train_segmentation(model, train_loader, loss_fn, optimizer, device) val_loss, P, T = evaluate_segmentation(model, val_loader, loss_fn, device) acc, dice, iou = compute_metrics(P, T) - print(f"Epoch {e+1}: Train={train_loss:.4f} Val={val_loss:.4f} Dice={dice:.4f} IoU={iou:.4f}") + print(f"Epoch {e+1}: Train={train_loss:.4f} | Val={val_loss:.4f} | Dice={dice:.4f} | IoU={iou:.4f}") if val_loss < best: best = val_loss - torch.save(model.state_dict(), f"{args.output_dir}/best.pth") + torch.save(model.state_dict(), os.path.join(args.output_dir, "best.pth")) + # ------------------------- + # Final Test + # ------------------------- test_loss, P, T = evaluate_segmentation(model, test_loader, loss_fn, device) acc, dice, iou = compute_metrics(P, T) - print(f"Test: Loss={test_loss:.4f} Dice={dice:.4f} IoU={iou:.4f}") + print(f"Test: Loss={test_loss:.4f} | Dice={dice:.4f} | IoU={iou:.4f}") if __name__ == "__main__": From 3267906d510f5431bba263aa0775667d11203852 Mon Sep 17 00:00:00 2001 From: MDSALMANSHAMS Date: Fri, 16 Jan 2026 11:25:57 +0530 Subject: [PATCH 5/7] inferencing --- examples/RETFound_MendeleyOCT_demo.ipynb | 
123 ++++++++++++- infer_test_multi_iou.py | 212 +++++++++++++++++++++++ inference_segmentation.py | 147 ++++++++++++++++ 3 files changed, 479 insertions(+), 3 deletions(-) create mode 100644 infer_test_multi_iou.py create mode 100644 inference_segmentation.py diff --git a/examples/RETFound_MendeleyOCT_demo.ipynb b/examples/RETFound_MendeleyOCT_demo.ipynb index bd7ad34a..9df2fd56 100644 --- a/examples/RETFound_MendeleyOCT_demo.ipynb +++ b/examples/RETFound_MendeleyOCT_demo.ipynb @@ -347,7 +347,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 2, "id": "b55116e5", "metadata": { "colab": { @@ -371,7 +371,16 @@ "name": "stdout", "output_type": "stream", "text": [ - "/content/drive/MyDrive/Assignments/RETFound\n" + "[WinError 3] The system cannot find the path specified: '/content/drive/MyDrive/Assignments/RETFound'\n", + "f:\\GitHub\\RETFound\\examples\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\IPython\\core\\magics\\osm.py:393: UserWarning: using bookmarks requires you to install the `pickleshare` library.\n", + " bkms = self.shell.db.get('bookmarks', {})\n" ] } ], @@ -1269,11 +1278,119 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, + "id": "861c872d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "F:\\GitHub\\RETFound\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\IPython\\core\\magics\\osm.py:417: UserWarning: using dhist requires you to install the `pickleshare` library.\n", + " self.shell.db['dhist'] = compress_dhist(dhist)[-100:]\n" + ] + } + ], + "source": [ + "%cd F:\\GitHub\\RETFound" + ] + }, + { + "cell_type": "code", + "execution_count": 2, "id": "02d2dce7-31c2-48e2-87ce-9223b74cf94e", "metadata": { "id": "02d2dce7-31c2-48e2-87ce-9223b74cf94e" }, + "outputs": [ + { + "name": 
"stdout", + "output_type": "stream", + "text": [ + "\n", + "===== Running Inference =====\n", + "\n", + "DRUSEN-100580-1.jpeg Dice: 0.3590 IoU: 0.2188\n", + "DRUSEN-103885-1.jpeg Dice: 0.5493 IoU: 0.3786\n", + "DRUSEN-103885-2.jpeg Dice: 0.6422 IoU: 0.4730\n", + "DRUSEN-103885-3.jpeg Dice: 0.6422 IoU: 0.4730\n", + "DRUSEN-103885-4.jpeg Dice: 0.6587 IoU: 0.4911\n", + "DRUSEN-103885-5.jpeg Dice: 0.6587 IoU: 0.4911\n", + "DRUSEN-142234-1.jpeg Dice: 0.2874 IoU: 0.1678\n", + "DRUSEN-142234-10.jpeg Dice: 0.3077 IoU: 0.1818\n", + "DRUSEN-142234-11.jpeg Dice: 0.6610 IoU: 0.4937\n", + "DRUSEN-142234-12.jpeg Dice: 0.1434 IoU: 0.0773\n", + "DRUSEN-142234-13.jpeg Dice: 0.5970 IoU: 0.4255\n", + "DRUSEN-142234-14.jpeg Dice: 0.3913 IoU: 0.2432\n", + "DRUSEN-142234-15.jpeg Dice: 0.0000 IoU: 0.0000\n", + "DRUSEN-142234-16.jpeg Dice: 0.0000 IoU: 0.0000\n", + "DRUSEN-142234-17.jpeg Dice: 0.7333 IoU: 0.5789\n", + "DRUSEN-142234-18.jpeg Dice: 0.1111 IoU: 0.0588\n", + "DRUSEN-142234-19.jpeg Dice: 0.0000 IoU: 0.0000\n", + "DRUSEN-142234-2.jpeg Dice: 0.5764 IoU: 0.4049\n", + "DRUSEN-142234-20.jpeg Dice: 0.2250 IoU: 0.1268\n", + "DRUSEN-142234-21.jpeg Dice: 0.5943 IoU: 0.4228\n", + "DRUSEN-142234-22.jpeg Dice: 0.4255 IoU: 0.2703\n", + "DRUSEN-142234-23.jpeg Dice: 0.4754 IoU: 0.3118\n", + "DRUSEN-142234-25.jpeg Dice: 0.0000 IoU: 0.0000\n", + "DRUSEN-142234-26.jpeg Dice: 0.1892 IoU: 0.1045\n", + "DRUSEN-142234-27.jpeg Dice: 0.5161 IoU: 0.3478\n", + "DRUSEN-142234-3.jpeg Dice: 0.4615 IoU: 0.3000\n", + "DRUSEN-142234-4.jpeg Dice: 0.3585 IoU: 0.2184\n", + "DRUSEN-142234-5.jpeg Dice: 0.3864 IoU: 0.2394\n", + "DRUSEN-142234-6.jpeg Dice: 0.5495 IoU: 0.3788\n", + "DRUSEN-142234-7.jpeg Dice: 0.0000 IoU: 0.0000\n", + "DRUSEN-142234-8.jpeg Dice: 0.1793 IoU: 0.0985\n", + "DRUSEN-142234-9.jpeg Dice: 0.2857 IoU: 0.1667\n", + "DRUSEN-163081-1.jpeg Dice: 0.7474 IoU: 0.5967\n", + "DRUSEN-228939-1.jpeg Dice: 0.7668 IoU: 0.6218\n", + "DRUSEN-228939-2.jpeg Dice: 0.6174 IoU: 0.4465\n", + "DRUSEN-303435-1.jpeg 
Dice: 0.3904 IoU: 0.2425\n", + "DRUSEN-349021-1.jpeg Dice: 0.7522 IoU: 0.6029\n", + "DRUSEN-349021-2.jpeg Dice: 0.6172 IoU: 0.4464\n", + "DRUSEN-364469-1.jpeg Dice: 0.7781 IoU: 0.6368\n", + "DRUSEN-364469-2.jpeg Dice: 0.6948 IoU: 0.5323\n", + "DRUSEN-364469-3.jpeg Dice: 0.6527 IoU: 0.4845\n", + "DRUSEN-364469-4.jpeg Dice: 0.6513 IoU: 0.4829\n", + "DRUSEN-457907-1.jpeg Dice: 0.3455 IoU: 0.2089\n", + "DRUSEN-95633-1.jpeg Dice: 0.4597 IoU: 0.2985\n", + "DRUSEN-9800172-2.jpeg Dice: 0.6992 IoU: 0.5375\n", + "DRUSEN-9837663-1.jpeg Dice: 0.8452 IoU: 0.7318\n", + "DRUSEN-9861332-1.jpeg Dice: 0.8508 IoU: 0.7404\n", + "DRUSEN-9884539-1.jpeg Dice: 0.7580 IoU: 0.6103\n", + "DRUSEN-9894035-2.jpeg Dice: 0.7279 IoU: 0.5722\n", + "DRUSEN-9928043-1.jpeg Dice: 0.7005 IoU: 0.5391\n", + "\n", + "===== FINAL REPORT =====\n", + "Mean Dice: 0.4804\n", + "Mean IoU : 0.3495\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\albumentations\\__init__.py:24: UserWarning: A new version of Albumentations is available: 2.0.8 (you have 1.4.24). Upgrade using: pip install -U albumentations. 
To disable automatic update checks, set the environment variable NO_ALBUMENTATIONS_UPDATE to 1.\n", + " check_for_updates()\n" + ] + } + ], + "source": [ + "!python inference_segmentation.py" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "26ff35ae", + "metadata": {}, "outputs": [], "source": [] } diff --git a/infer_test_multi_iou.py b/infer_test_multi_iou.py new file mode 100644 index 00000000..e6ecf467 --- /dev/null +++ b/infer_test_multi_iou.py @@ -0,0 +1,212 @@ +import os +import cv2 +import torch +import numpy as np +from albumentations import Compose, Resize, Normalize +from albumentations.pytorch import ToTensorV2 + +from models_segmentation import RETFoundSegmentation + +DEVICE = "cuda" if torch.cuda.is_available() else "cpu" + + +# ============================================================ +# Metric utilities +# ============================================================ + +def basic_iou(pred, gt, smooth=1e-6): + pred = pred.astype(bool) + gt = gt.astype(bool) + + inter = (pred & gt).sum() + union = (pred | gt).sum() + + return (inter + smooth) / (union + smooth) + + +def dice_score(pred, gt, smooth=1e-6): + pred = pred.astype(bool) + gt = gt.astype(bool) + + inter = (pred & gt).sum() + return (2 * inter + smooth) / (pred.sum() + gt.sum() + smooth) + + +def tolerance_iou(pred, gt, k=2): + kernel = np.ones((k*2+1, k*2+1), np.uint8) + gt_dilated = cv2.dilate(gt.astype(np.uint8), kernel) + return basic_iou(pred, gt_dilated) + + +def lesion_level_iou(pred, gt): + num_gt, gt_labels = cv2.connectedComponents(gt.astype(np.uint8)) + num_pr, pr_labels = cv2.connectedComponents(pred.astype(np.uint8)) + + scores = [] + for g in range(1, num_gt): + gt_comp = (gt_labels == g) + + best = 0 + for p in range(1, num_pr): + pr_comp = (pr_labels == p) + best = max(best, basic_iou(pr_comp, gt_comp)) + + scores.append(best) + + if len(scores) == 0: + return 1.0 if pred.sum() == 0 else 0.0 + + return np.mean(scores) + + +def positive_slice_iou(pred, 
gt): + if gt.sum() == 0: + return None + return basic_iou(pred, gt) + + +# ============================================================ +# Model loader +# ============================================================ +def load_model(ckpt, img_size=256): + model = RETFoundSegmentation(img_size=img_size).to(DEVICE) + + state = torch.load(ckpt, map_location=DEVICE, weights_only=False) + model.load_state_dict(state) + + model.eval() + return model + + +# ============================================================ +# Preprocess +# ============================================================ +def preprocess(img_path, img_size=256): + img = cv2.imread(img_path) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + + tf = Compose([ + Resize(img_size, img_size), + Normalize((0.485,0.456,0.406),(0.229,0.224,0.225)), + ToTensorV2() + ]) + + aug = tf(image=img) + return aug["image"].unsqueeze(0), img + + +# ============================================================ +# Overlay visualization +# ============================================================ +def make_overlay(img, mask): + mask = mask.astype(np.uint8) + + mask = cv2.resize( + mask, + (img.shape[1], img.shape[0]), + interpolation=cv2.INTER_NEAREST + ) + + color = np.zeros_like(img, dtype=np.uint8) + color[:, :, 1] = mask * 255 # green channel + + return cv2.addWeighted(img, 0.7, color, 0.3, 0) + + +# ============================================================ +# MAIN +# ============================================================ +def run_inference( + ckpt="segmentation_output/best.pth", + test_img_dir="Segmentation/Data/test/images", + test_mask_dir="Segmentation/Data/test/masks", + out_dir="segmentation_output/inference_overlays", + img_size=256 +): + + os.makedirs(out_dir, exist_ok=True) + + model = load_model(ckpt, img_size) + + results = { + "pixel_iou": [], + "dice": [], + "tol2_iou": [], + "tol3_iou": [], + "lesion_iou": [], + "positive_iou": [] + } + + print("\n===== Multi-Metric Inference (Overlay Only) 
=====\n") + + for name in sorted(os.listdir(test_img_dir)): + + img_path = os.path.join(test_img_dir, name) + + stem = os.path.splitext(name)[0] + gt_name = stem + "_mask.png" + gt_path = os.path.join(test_mask_dir, gt_name) + + if not os.path.isfile(gt_path): + continue + + # ----- Predict ----- + tensor, orig = preprocess(img_path, img_size) + + with torch.no_grad(): + out = model(tensor.to(DEVICE)) + pred = out.argmax(1).squeeze().cpu().numpy() + + # ----- Load GT ----- + gt = cv2.imread(gt_path, cv2.IMREAD_GRAYSCALE) + gt = (gt > 0).astype("uint8") + + gt = cv2.resize( + gt, + (pred.shape[1], pred.shape[0]), + interpolation=cv2.INTER_NEAREST + ) + + # ----- Metrics ----- + piou = basic_iou(pred, gt) + dice = dice_score(pred, gt) + t2 = tolerance_iou(pred, gt, k=2) + t3 = tolerance_iou(pred, gt, k=3) + liou = lesion_level_iou(pred, gt) + pos = positive_slice_iou(pred, gt) + + results["pixel_iou"].append(piou) + results["dice"].append(dice) + results["tol2_iou"].append(t2) + results["tol3_iou"].append(t3) + results["lesion_iou"].append(liou) + + if pos is not None: + results["positive_iou"].append(pos) + + # ----- SAVE ONLY OVERLAY ----- + over = make_overlay(orig, pred) + cv2.imwrite(os.path.join(out_dir, stem + "_overlay.png"), over) + + print(f"{name:22s} IoU:{piou:.3f} Dice:{dice:.3f} " + f"Tol2:{t2:.3f} Lesion:{liou:.3f}") + + # ======================================================== + print("\n===== FINAL SUMMARY =====") + + def mean(x): + return round(float(np.mean(x)), 4) if len(x) else None + + for k, v in results.items(): + print(f"{k:15s}: {mean(v)}") + + print("\nInterpretation Guide:") + print("- pixel_iou : strict pixel overlap") + print("- dice : balanced for small lesions") + print("- tol2/tol3 : clinical tolerance ±2/3 px") + print("- lesion_iou : object-level matching") + print("- positive_iou: only slices with drusen") + + +if __name__ == "__main__": + run_inference() diff --git a/inference_segmentation.py b/inference_segmentation.py new file 
mode 100644 index 00000000..fae6f0c0 --- /dev/null +++ b/inference_segmentation.py @@ -0,0 +1,147 @@ +import os +import cv2 +import torch +import numpy as np +from albumentations import Compose, Resize, Normalize +from albumentations.pytorch import ToTensorV2 + +from models_segmentation import RETFoundSegmentation + +DEVICE = "cuda" if torch.cuda.is_available() else "cpu" + + +# ============================================================ +# Metrics +# ============================================================ +def dice_iou(pred, gt, smooth=1e-6): + pred = pred.astype(bool) + gt = gt.astype(bool) + + inter = (pred & gt).sum() + union = (pred | gt).sum() + + dice = (2 * inter + smooth) / (pred.sum() + gt.sum() + smooth) + iou = (inter + smooth) / (union + smooth) + + return dice, iou + + +# ============================================================ +# Model loader +# ============================================================ +def load_model(ckpt, img_size=256): + model = RETFoundSegmentation(img_size=img_size).to(DEVICE) + + state = torch.load(ckpt, map_location=DEVICE, weights_only=False) + model.load_state_dict(state) + + model.eval() + return model + + +# ============================================================ +# Preprocess +# ============================================================ +def preprocess(img_path, img_size=256): + img = cv2.imread(img_path) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + + tf = Compose([ + Resize(img_size, img_size), + Normalize((0.485, 0.456, 0.406), + (0.229, 0.224, 0.225)), + ToTensorV2() + ]) + + aug = tf(image=img) + return aug["image"].unsqueeze(0), img + + +# ============================================================ +# Overlay visualization +# ============================================================ +def make_overlay(img, mask): + # Ensure correct type + mask = mask.astype(np.uint8) + + mask = cv2.resize( + mask, + (img.shape[1], img.shape[0]), + interpolation=cv2.INTER_NEAREST + ) + + color = 
np.zeros_like(img, dtype=np.uint8) + color[:, :, 1] = mask * 255 # Green channel + + overlay = cv2.addWeighted(img, 0.7, color, 0.3, 0) + return overlay + + +# ============================================================ +# Main inference +# ============================================================ +def run_inference( + ckpt="segmentation_output/best.pth", + test_img_dir="Segmentation/Data/test/images", + test_mask_dir="Segmentation/Data/test/masks", + out_dir="segmentation_output/inference", + img_size=256 +): + + os.makedirs(out_dir, exist_ok=True) + + model = load_model(ckpt, img_size) + + dice_scores = [] + iou_scores = [] + + print("\n===== Running Inference =====\n") + + for name in sorted(os.listdir(test_img_dir)): + + img_path = os.path.join(test_img_dir, name) + + stem = os.path.splitext(name)[0] + gt_name = stem + "_mask.png" + gt_path = os.path.join(test_mask_dir, gt_name) + + if not os.path.isfile(gt_path): + continue + + # ----- Predict ----- + tensor, orig = preprocess(img_path, img_size) + + with torch.no_grad(): + out = model(tensor.to(DEVICE)) + pred = out.argmax(1).squeeze().cpu().numpy() + + # ----- Load GT ----- + gt = cv2.imread(gt_path, cv2.IMREAD_GRAYSCALE) + gt = (gt > 0).astype("uint8") + + # Align GT to prediction resolution + gt = cv2.resize( + gt, + (pred.shape[1], pred.shape[0]), + interpolation=cv2.INTER_NEAREST + ) + + # ----- Metrics ----- + dice, iou = dice_iou(pred, gt) + dice_scores.append(dice) + iou_scores.append(iou) + + # ----- Save outputs ----- + over = make_overlay(orig, pred) + cv2.imwrite(os.path.join(out_dir, stem + "_overlay.png"), over) + + print(f"{name:25s} Dice: {dice:.4f} IoU: {iou:.4f}") + + # --------------------------------------------------------- + print("\n===== FINAL REPORT =====") + print("Mean Dice:", round(np.mean(dice_scores), 4)) + print("Mean IoU :", round(np.mean(iou_scores), 4)) + + +if __name__ == "__main__": + run_inference() From a124e49c3d3efc0c57840648d2b554e57bac45d2 Mon Sep 17 00:00:00 2001 
From: MDSALMANSHAMS Date: Fri, 16 Jan 2026 15:03:19 +0530 Subject: [PATCH 6/7] changes --- Segmentation/main.py | 0 Segmentation/segmentation_finetune.py | 363 ---------------- engine_segmentation.py | 124 +++++- examples/RETFound_MendeleyOCT_demo.ipynb | 507 ++++++----------------- infer_test_multi_iou.py | 212 ---------- inference_segmentation.py | 125 +++++- main_segmentation.py | 131 ++++-- models_segmentation.py | 105 ++++- 8 files changed, 568 insertions(+), 999 deletions(-) delete mode 100644 Segmentation/main.py delete mode 100644 Segmentation/segmentation_finetune.py delete mode 100644 infer_test_multi_iou.py diff --git a/Segmentation/main.py b/Segmentation/main.py deleted file mode 100644 index e69de29b..00000000 diff --git a/Segmentation/segmentation_finetune.py b/Segmentation/segmentation_finetune.py deleted file mode 100644 index b4db1a8c..00000000 --- a/Segmentation/segmentation_finetune.py +++ /dev/null @@ -1,363 +0,0 @@ -import os -import argparse -import time -import logging -import numpy as np -import torch -import torch.nn as nn -import torch.optim as optim -import torch.nn.functional as F -from torch.utils.data import Dataset, DataLoader -import eyepy as ep -from albumentations import Compose, Resize, Normalize -from albumentations.pytorch import ToTensorV2 -from huggingface_hub import hf_hub_download -from util.pos_embed import interpolate_pos_embed - -# Minimal logging configuration -logging.basicConfig( - level=logging.WARNING, - format="%(asctime)s [%(levelname)s] %(message)s", - handlers=[logging.StreamHandler()], -) -logger = logging.getLogger(__name__) - - -# ------------------------------ -# Multi-eye Dataset for .eye files in a folder -# ------------------------------ -class MultiEyeSegmentationDataset(Dataset): - def __init__(self, folder, transform=None): - """ - folder: Path to a folder containing .eye files. - transform: Albumentations transform applied to both image and mask. 
- """ - # List all .eye files in the folder - self.eye_files = sorted( - [os.path.join(folder, f) for f in os.listdir(folder) if f.endswith(".eye")] - ) - self.transform = transform - # Build a list of (eye_file, bscan_index) tuples for all volumes - self.samples = [] - for eye_file in self.eye_files: - volume = ep.EyeVolume.load(eye_file) - num_scans = volume.shape[0] - for idx in range(num_scans): - self.samples.append((eye_file, idx)) - # Cache loaded volumes to avoid repeated disk I/O - self.cache = {} - - def __len__(self): - return len(self.samples) - - def __getitem__(self, index): - eye_file, idx = self.samples[index] - if eye_file not in self.cache: - self.cache[eye_file] = ep.EyeVolume.load(eye_file) - volume = self.cache[eye_file] - image = volume[idx].data # assume shape: (H, W) - image = np.stack([image] * 3, axis=-1) # convert grayscale to 3 channels - # Convert mask from bool to uint8 to avoid OpenCV errors - mask = volume.volume_maps["drusen"].data[idx].astype(np.uint8) # shape: (H, W) - if self.transform: - augmented = self.transform(image=image, mask=mask) - image = augmented["image"] - mask = augmented["mask"] - mask = mask.long() - else: - image = torch.tensor(image, dtype=torch.float).permute(2, 0, 1) / 255.0 - mask = torch.tensor(mask, dtype=torch.long) - return image, mask - - -# ------------------------------ -# Segmentation Model Components -# ------------------------------ -from models_vit import RETFound_mae # Ensure this import is correct - - -class SegmentationHead(nn.Module): - def __init__(self, hidden_dim, num_classes, img_size, patch_size): - super().__init__() - self.patch_size = patch_size - self.h = img_size // patch_size - self.w = img_size // patch_size - self.conv = nn.Sequential( - nn.Conv2d(hidden_dim, hidden_dim // 2, kernel_size=3, padding=1), - nn.ReLU(inplace=True), - nn.Conv2d(hidden_dim // 2, num_classes, kernel_size=1), - ) - - def forward(self, x): - # x: [B, num_tokens, hidden_dim] - B, N, C = x.shape - x = 
x.reshape(B, self.h, self.w, C) - x = x.permute(0, 3, 1, 2) # [B, C, h, w] - x = F.interpolate( - x, scale_factor=self.patch_size, mode="bilinear", align_corners=False - ) - x = self.conv(x) - return x - - -class RETFoundSegmentation(nn.Module): - def __init__( - self, img_size=512, patch_size=16, hidden_dim=1024, num_classes=2, drop_path=0.2 - ): - super().__init__() - self.encoder = RETFound_mae( - img_size=img_size, - num_classes=num_classes, - drop_path_rate=drop_path, - global_pool=False, - ) - self.seg_head = SegmentationHead(hidden_dim, num_classes, img_size, patch_size) - - def forward(self, x): - B = x.shape[0] - x_tokens = self.encoder.patch_embed(x) # [B, num_patches, hidden_dim] - cls_tokens = self.encoder.cls_token.expand(B, -1, -1) - x_tokens = torch.cat((cls_tokens, x_tokens), dim=1) - x_tokens = x_tokens + self.encoder.pos_embed - x_tokens = self.encoder.pos_drop(x_tokens) - for blk in self.encoder.blocks: - x_tokens = blk(x_tokens) - x_tokens = self.encoder.norm(x_tokens) - tokens = x_tokens[:, 1:] # exclude cls token - seg_map = self.seg_head(tokens) - return seg_map - - -# ------------------------------ -# Loss Functions for Imbalanced Segmentation -# ------------------------------ -def dice_loss(pred, target, smooth=1e-6): - """ - Computes Dice loss. 
- pred: logits [B, num_classes, H, W] - target: ground truth [B, H, W] (long) - """ - pred_soft = F.softmax(pred, dim=1) - num_classes = pred.shape[1] - target_onehot = ( - F.one_hot(target, num_classes=num_classes).permute(0, 3, 1, 2).float() - ) - intersection = (pred_soft * target_onehot).sum(dim=(2, 3)) - union = pred_soft.sum(dim=(2, 3)) + target_onehot.sum(dim=(2, 3)) - dice = (2.0 * intersection + smooth) / (union + smooth) - return 1 - dice.mean() - - -def combined_loss_fn(outputs, targets, ce_loss_fn, dice_weight=1.0): - ce_loss = ce_loss_fn(outputs, targets) - d_loss = dice_loss(outputs, targets) - return ce_loss + dice_weight * d_loss - - -# ------------------------------ -# Metrics Function -# ------------------------------ -def compute_metrics(preds, targets, smooth=1e-6): - pixel_acc = np.mean(preds == targets) - intersection = np.sum(preds * targets) - dice = (2.0 * intersection + smooth) / (np.sum(preds) + np.sum(targets) + smooth) - union = np.sum(preds) + np.sum(targets) - intersection - iou = (intersection + smooth) / (union + smooth) - return pixel_acc, dice, iou - - -# ------------------------------ -# Training and Evaluation Loops -# ------------------------------ -def train_segmentation(model, dataloader, loss_fn, optimizer, device): - model.train() - running_loss = 0.0 - for images, masks in dataloader: - images = images.to(device) - masks = masks.to(device) - optimizer.zero_grad() - outputs = model(images) - loss = loss_fn(outputs, masks) - loss.backward() - optimizer.step() - running_loss += loss.item() * images.size(0) - return running_loss / len(dataloader.dataset) - - -def evaluate_segmentation(model, dataloader, loss_fn, device): - model.eval() - running_loss = 0.0 - all_preds = [] - all_targets = [] - with torch.no_grad(): - for images, masks in dataloader: - images = images.to(device) - masks = masks.to(device) - outputs = model(images) - loss = loss_fn(outputs, masks) - running_loss += loss.item() * images.size(0) - preds = 
outputs.argmax(dim=1) - all_preds.append(preds.cpu()) - all_targets.append(masks.cpu()) - epoch_loss = running_loss / len(dataloader.dataset) - all_preds = torch.cat(all_preds, dim=0).numpy() - all_targets = torch.cat(all_targets, dim=0).numpy() - return epoch_loss, all_preds, all_targets - - -# ------------------------------ -# Main Training Script -# ------------------------------ -def main(): - parser = argparse.ArgumentParser(description="RETFound Segmentation Fine-tuning") - parser.add_argument( - "--data_path", - type=str, - required=True, - help="Base dataset directory with subfolders: train, val, test", - ) - parser.add_argument("--epochs", type=int, default=50, help="Number of epochs") - parser.add_argument("--batch_size", type=int, default=4, help="Batch size") - parser.add_argument("--lr", type=float, default=1e-4, help="Learning rate") - parser.add_argument( - "--img_size", type=int, default=512, help="Input image size (square)" - ) - parser.add_argument( - "--patch_size", type=int, default=16, help="Patch size used by the encoder" - ) - parser.add_argument("--drop_path", type=float, default=0.2, help="Drop path rate") - parser.add_argument( - "--output_dir", - type=str, - default="./segmentation_output", - help="Directory to save model checkpoints", - ) - parser.add_argument( - "--finetune", - type=str, - default="", - help="Path to pretrained RETFound checkpoint (or repo name for download)", - ) - parser.add_argument( - "--dice_weight", type=float, default=1.0, help="Weight for dice loss term" - ) - parser.add_argument( - "--ce_weight", - type=str, - default="0.3,0.7", - help="Comma-separated weights for cross entropy loss (background, drusen)", - ) - args = parser.parse_args() - - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - os.makedirs(args.output_dir, exist_ok=True) - - # Minimal transform: resize and normalize - transform = Compose( - [ - Resize(args.img_size, args.img_size), - Normalize(mean=(0.485, 0.456, 0.406), 
std=(0.229, 0.224, 0.225)), - ToTensorV2(), - ] - ) - - # Create datasets for train, val, and test - train_folder = os.path.join(args.data_path, "train") - val_folder = os.path.join(args.data_path, "val") - test_folder = os.path.join(args.data_path, "test") - from torch.utils.data import DataLoader - - train_dataset = MultiEyeSegmentationDataset(train_folder, transform=transform) - val_dataset = MultiEyeSegmentationDataset(val_folder, transform=transform) - test_dataset = MultiEyeSegmentationDataset(test_folder, transform=transform) - - train_loader = DataLoader( - train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0 - ) - val_loader = DataLoader( - val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0 - ) - test_loader = DataLoader( - test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0 - ) - - model = RETFoundSegmentation( - img_size=args.img_size, - patch_size=args.patch_size, - hidden_dim=1024, - num_classes=2, - drop_path=args.drop_path, - ) - model.to(device) - - # ----- Load Pretrained RETFound Foundation Weights ----- - if args.finetune: - if os.path.exists(args.finetune): - checkpoint_path = args.finetune - print( - f"Loading pretrained weights from local checkpoint: {checkpoint_path}" - ) - else: - print(f"Downloading pretrained weights from: {args.finetune}") - checkpoint_path = hf_hub_download( - repo_id=f"YukunZhou/{args.finetune}", - filename=f"{args.finetune}.pth", - ) - checkpoint = torch.load(checkpoint_path, map_location="cpu") - if "model" in checkpoint: - pretrained_dict = checkpoint["model"] - else: - pretrained_dict = checkpoint - for k in ["head.weight", "head.bias"]: - if k in pretrained_dict: - print(f"Removing key {k} from pretrained checkpoint") - del pretrained_dict[k] - interpolate_pos_embed(model.encoder, pretrained_dict) - model.encoder.load_state_dict(pretrained_dict, strict=False) - print("Pretrained RETFound encoder weights loaded.") - - # Create weighted CrossEntropyLoss for 
class weighting - ce_weights = [float(x) for x in args.ce_weight.split(",")] - ce_weights_tensor = torch.tensor(ce_weights, device=device) - ce_loss_fn = nn.CrossEntropyLoss(weight=ce_weights_tensor) - - def loss_fn(outputs, targets): - return combined_loss_fn( - outputs, targets, ce_loss_fn, dice_weight=args.dice_weight - ) - - optimizer = optim.AdamW(model.parameters(), lr=args.lr) - - best_loss = float("inf") - for epoch in range(args.epochs): - train_loss = train_segmentation(model, train_loader, loss_fn, optimizer, device) - val_loss, all_preds, all_targets = evaluate_segmentation( - model, val_loader, loss_fn, device - ) - pixel_acc, dice, iou = compute_metrics(all_preds, all_targets) - print( - f"Epoch {epoch+1} - Train Loss: {train_loss:.4f} | Val Loss: {val_loss:.4f}" - ) - print(f"Metrics: Pixel Acc: {pixel_acc:.4f}, Dice: {dice:.4f}, IoU: {iou:.4f}") - if val_loss < best_loss: - best_loss = val_loss - checkpoint_path = os.path.join( - args.output_dir, f"checkpoint_epoch{epoch+1}.pth" - ) - torch.save(model.state_dict(), checkpoint_path) - print(f"Saved checkpoint: {checkpoint_path}") - - # Evaluate on test set - test_loss, test_preds, test_targets = evaluate_segmentation( - model, test_loader, loss_fn, device - ) - pixel_acc, dice, iou = compute_metrics(test_preds, test_targets) - print(f"Test Loss: {test_loss:.4f}") - print(f"Test Metrics: Pixel Acc: {pixel_acc:.4f}, Dice: {dice:.4f}, IoU: {iou:.4f}") - - -if __name__ == "__main__": - main() - -# example usage: -# !python segmentation_finetune.py --data_path ./data/ --epochs 50 --batch --data_path "Data" --finetune "" --epochs 50 --batch_size 1 --lr 1e-4 --img_size 256 --patch_size 16 --drop_path 0.2 --ce_weight "0.3,0.7" --dice_weight 1.0 --output_dir "./segmentation_output/2" diff --git a/engine_segmentation.py b/engine_segmentation.py index 3e966d0d..c1c11229 100644 --- a/engine_segmentation.py +++ b/engine_segmentation.py @@ -3,58 +3,180 @@ import numpy as np +# 
============================================================ +# Dice Loss +# ============================================================ def dice_loss(pred, target, smooth=1e-6): + """ + Computes multi-class Dice loss. + + Args: + pred : Raw logits from the model [B, C, H, W] + target : Ground truth labels [B, H, W] + smooth : Small constant to avoid division by zero + + Returns: + Scalar dice loss (1 - dice coefficient) + """ + + # Convert logits to probabilities pred = F.softmax(pred, dim=1) + + # Number of classes (e.g., 2 for background/drusen) num_classes = pred.shape[1] + + # Convert target to one-hot representation target_oh = F.one_hot(target, num_classes).permute(0, 3, 1, 2).float() + + # Intersection between prediction and ground truth inter = (pred * target_oh).sum((2, 3)) + + # Sum of prediction and ground truth areas union = pred.sum((2, 3)) + target_oh.sum((2, 3)) + + # Dice coefficient → converted to loss (1 - dice) return 1 - ((2 * inter + smooth) / (union + smooth)).mean() +# ============================================================ +# Combined CE + Dice Loss +# ============================================================ def combined_loss_fn(outputs, targets, ce_fn, dice_w=1.0): + """ + Combines Cross-Entropy loss with Dice loss. + + Args: + outputs : Model logits [B, C, H, W] + targets : Ground truth [B, H, W] + ce_fn : CrossEntropyLoss function + dice_w : Weight for dice loss term + + Returns: + Weighted sum of CE and Dice loss + """ + return ce_fn(outputs, targets) + dice_w * dice_loss(outputs, targets) +# ============================================================ +# Metric Computation +# ============================================================ def compute_metrics(preds, targets, smooth=1e-6): + """ + Computes pixel accuracy, Dice, and IoU. 
+ + Args: + preds : Binary predictions (numpy array) + targets : Binary ground truth (numpy array) + + Returns: + pixel_acc : Pixel-wise accuracy + dice : Dice coefficient + iou : Intersection over Union + """ + + # Pixel-wise accuracy pixel_acc = (preds == targets).mean() + + # Intersection between prediction and GT inter = (preds & targets).sum() + + # Dice coefficient dice = (2 * inter + smooth) / (preds.sum() + targets.sum() + smooth) + + # IoU computation union = preds.sum() + targets.sum() - inter iou = (inter + smooth) / (union + smooth) + return pixel_acc, dice, iou +# ============================================================ +# Training Loop +# ============================================================ def train_segmentation(model, loader, loss_fn, optimizer, device): + """ + One epoch training for segmentation model. + + Args: + model : Segmentation network + loader : Training dataloader + loss_fn : Loss function (CE + Dice) + optimizer : Optimizer + device : cuda/cpu + + Returns: + Average epoch loss + """ + model.train() total = 0 for step, (x, y) in enumerate(loader): + + # Move data to device x, y = x.to(device), y.to(device) optimizer.zero_grad() + + # Forward pass out = model(x) + + # Compute loss loss = loss_fn(out, y) + + # Backpropagation loss.backward() optimizer.step() + # Accumulate loss total += loss.item() * x.size(0) + # Progress print if step % 10 == 0: print(f" [batch {step}/{len(loader)}] loss: {loss.item():.4f}") return total / len(loader.dataset) +# ============================================================ +# Validation / Evaluation Loop +# ============================================================ @torch.no_grad() def evaluate_segmentation(model, loader, loss_fn, device): + """ + Runs inference on validation/test set and collects predictions. 
+ + Args: + model : Trained model + loader : Val/test dataloader + loss_fn : Loss function + device : cuda/cpu + + Returns: + avg_loss : Average loss over dataset + P : All predictions (numpy) + T : All ground truth (numpy) + """ + model.eval() - total, P, T = 0, [], [] + + total = 0 + P, T = [], [] + for x, y in loader: + x, y = x.to(device), y.to(device) + + # Forward pass out = model(x) + + # Loss computation loss = loss_fn(out, y) total += loss.item() * x.size(0) + + # Store predictions and targets P.append(out.argmax(1).cpu()) T.append(y.cpu()) + return total / len(loader.dataset), torch.cat(P).numpy(), torch.cat(T).numpy() diff --git a/examples/RETFound_MendeleyOCT_demo.ipynb b/examples/RETFound_MendeleyOCT_demo.ipynb index 9df2fd56..019638ed 100644 --- a/examples/RETFound_MendeleyOCT_demo.ipynb +++ b/examples/RETFound_MendeleyOCT_demo.ipynb @@ -16,198 +16,23 @@ "\n", "**Date**: 08 Jan 2026\n", "\n", + "**Contribution:** \n", + "This notebook extends the original RETFound classification pipeline to **semantic segmentation** by adding a lightweight decoder on top of the pretrained ViT encoder and training with CE + Dice loss.\n", + "\n", "**Performance**:\n", "\n", "\n", "\n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", "\n", "\n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", "\n", - "
AccuracyRecallF1 ScoreROC AUCPR AUCDiceIOU
0.70910.56160.60780.90370.68630.48040.3495
\n" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "rT2jO4AtHkjN", - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "executionInfo": { - "elapsed": 1712, - "status": "ok", - "timestamp": 1768473950572, - "user": { - "displayName": "MD SALMAN SHAMS", - "userId": "17411188514128174175" - }, - "user_tz": -330 - }, - "id": "rT2jO4AtHkjN", - "outputId": "5f092f62-ea87-432f-dfb4-4a5c4af56745" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n" - ] - } - ], - "source": [ - "from google.colab import drive\n", - "drive.mount('/content/drive')" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "odaiivGvH5e5", - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "executionInfo": { - "elapsed": 5, - "status": "ok", - "timestamp": 1768473955126, - "user": { - "displayName": "MD SALMAN SHAMS", - "userId": "17411188514128174175" - }, - "user_tz": -330 - }, - "id": "odaiivGvH5e5", - "outputId": "19f386ec-2157-4910-d427-0f16aeaca009" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "/content/drive/MyDrive/Assignments/RETFound\n" - ] - } - ], - "source": [ - "%cd /content/drive/MyDrive/Assignments/RETFound" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "E8fpYFoWH-do", - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "executionInfo": { - "elapsed": 4991, - "status": "ok", - "timestamp": 1768473962175, - "user": { - "displayName": "MD SALMAN SHAMS", - "userId": "17411188514128174175" - }, - "user_tz": -330 - }, - "id": "E8fpYFoWH-do", - "outputId": "ec45e395-a59f-4566-af41-116c80d30455" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Requirement already satisfied: opencv-python~=4.9.0.80 in 
/usr/local/lib/python3.12/dist-packages (from -r requirements.txt (line 1)) (4.9.0.80)\n", - "Requirement already satisfied: Pillow~=10.2.0 in /usr/local/lib/python3.12/dist-packages (from -r requirements.txt (line 2)) (10.2.0)\n", - "Requirement already satisfied: pycm~=4.0 in /usr/local/lib/python3.12/dist-packages (from -r requirements.txt (line 3)) (4.5)\n", - "Requirement already satisfied: scikit-learn~=1.4.2 in /usr/local/lib/python3.12/dist-packages (from -r requirements.txt (line 4)) (1.4.2)\n", - "Requirement already satisfied: timm~=0.9.2 in /usr/local/lib/python3.12/dist-packages (from -r requirements.txt (line 5)) (0.9.16)\n", - "Requirement already satisfied: numpy~=1.26.4 in /usr/local/lib/python3.12/dist-packages (from -r requirements.txt (line 7)) (1.26.4)\n", - "Requirement already satisfied: matplotlib~=3.8.4 in /usr/local/lib/python3.12/dist-packages (from -r requirements.txt (line 8)) (3.8.4)\n", - "Requirement already satisfied: scikit-multilearn~=0.2.0 in /usr/local/lib/python3.12/dist-packages (from -r requirements.txt (line 9)) (0.2.0)\n", - "Requirement already satisfied: huggingface-hub~=0.23.4 in /usr/local/lib/python3.12/dist-packages (from -r requirements.txt (line 10)) (0.23.5)\n", - "Requirement already satisfied: tensorboard~=2.17.0 in /usr/local/lib/python3.12/dist-packages (from -r requirements.txt (line 11)) (2.17.1)\n", - "Requirement already satisfied: albumentations~=1.4.3 in /usr/local/lib/python3.12/dist-packages (from -r requirements.txt (line 12)) (1.4.24)\n", - "Requirement already satisfied: art>=1.8 in /usr/local/lib/python3.12/dist-packages (from pycm~=4.0->-r requirements.txt (line 3)) (6.5)\n", - "Requirement already satisfied: scipy>=1.6.0 in /usr/local/lib/python3.12/dist-packages (from scikit-learn~=1.4.2->-r requirements.txt (line 4)) (1.16.3)\n", - "Requirement already satisfied: joblib>=1.2.0 in /usr/local/lib/python3.12/dist-packages (from scikit-learn~=1.4.2->-r requirements.txt (line 4)) (1.5.3)\n", - 
"Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from scikit-learn~=1.4.2->-r requirements.txt (line 4)) (3.6.0)\n", - "Requirement already satisfied: torch in /usr/local/lib/python3.12/dist-packages (from timm~=0.9.2->-r requirements.txt (line 5)) (2.9.0+cu126)\n", - "Requirement already satisfied: torchvision in /usr/local/lib/python3.12/dist-packages (from timm~=0.9.2->-r requirements.txt (line 5)) (0.24.0+cu126)\n", - "Requirement already satisfied: pyyaml in /usr/local/lib/python3.12/dist-packages (from timm~=0.9.2->-r requirements.txt (line 5)) (6.0.3)\n", - "Requirement already satisfied: safetensors in /usr/local/lib/python3.12/dist-packages (from timm~=0.9.2->-r requirements.txt (line 5)) (0.7.0)\n", - "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib~=3.8.4->-r requirements.txt (line 8)) (1.3.3)\n", - "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.12/dist-packages (from matplotlib~=3.8.4->-r requirements.txt (line 8)) (0.12.1)\n", - "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.12/dist-packages (from matplotlib~=3.8.4->-r requirements.txt (line 8)) (4.61.1)\n", - "Requirement already satisfied: kiwisolver>=1.3.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib~=3.8.4->-r requirements.txt (line 8)) (1.4.9)\n", - "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.12/dist-packages (from matplotlib~=3.8.4->-r requirements.txt (line 8)) (25.0)\n", - "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib~=3.8.4->-r requirements.txt (line 8)) (3.3.1)\n", - "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.12/dist-packages (from matplotlib~=3.8.4->-r requirements.txt (line 8)) (2.9.0.post0)\n", - "Requirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from 
huggingface-hub~=0.23.4->-r requirements.txt (line 10)) (3.20.2)\n", - "Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub~=0.23.4->-r requirements.txt (line 10)) (2025.3.0)\n", - "Requirement already satisfied: requests in /usr/local/lib/python3.12/dist-packages (from huggingface-hub~=0.23.4->-r requirements.txt (line 10)) (2.32.4)\n", - "Requirement already satisfied: tqdm>=4.42.1 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub~=0.23.4->-r requirements.txt (line 10)) (4.67.1)\n", - "Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub~=0.23.4->-r requirements.txt (line 10)) (4.15.0)\n", - "Requirement already satisfied: absl-py>=0.4 in /usr/local/lib/python3.12/dist-packages (from tensorboard~=2.17.0->-r requirements.txt (line 11)) (1.4.0)\n", - "Requirement already satisfied: grpcio>=1.48.2 in /usr/local/lib/python3.12/dist-packages (from tensorboard~=2.17.0->-r requirements.txt (line 11)) (1.76.0)\n", - "Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.12/dist-packages (from tensorboard~=2.17.0->-r requirements.txt (line 11)) (3.10)\n", - "Requirement already satisfied: protobuf!=4.24.0,>=3.19.6 in /usr/local/lib/python3.12/dist-packages (from tensorboard~=2.17.0->-r requirements.txt (line 11)) (5.29.5)\n", - "Requirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.12/dist-packages (from tensorboard~=2.17.0->-r requirements.txt (line 11)) (75.2.0)\n", - "Requirement already satisfied: six>1.9 in /usr/local/lib/python3.12/dist-packages (from tensorboard~=2.17.0->-r requirements.txt (line 11)) (1.17.0)\n", - "Requirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in /usr/local/lib/python3.12/dist-packages (from tensorboard~=2.17.0->-r requirements.txt (line 11)) (0.7.2)\n", - "Requirement already satisfied: werkzeug>=1.0.1 in 
/usr/local/lib/python3.12/dist-packages (from tensorboard~=2.17.0->-r requirements.txt (line 11)) (3.1.5)\n", - "Requirement already satisfied: pydantic>=2.9.2 in /usr/local/lib/python3.12/dist-packages (from albumentations~=1.4.3->-r requirements.txt (line 12)) (2.12.3)\n", - "Requirement already satisfied: albucore==0.0.23 in /usr/local/lib/python3.12/dist-packages (from albumentations~=1.4.3->-r requirements.txt (line 12)) (0.0.23)\n", - "Requirement already satisfied: opencv-python-headless>=4.9.0.80 in /usr/local/lib/python3.12/dist-packages (from albumentations~=1.4.3->-r requirements.txt (line 12)) (4.11.0.86)\n", - "Requirement already satisfied: stringzilla>=3.10.4 in /usr/local/lib/python3.12/dist-packages (from albucore==0.0.23->albumentations~=1.4.3->-r requirements.txt (line 12)) (4.6.0)\n", - "Requirement already satisfied: simsimd>=5.9.2 in /usr/local/lib/python3.12/dist-packages (from albucore==0.0.23->albumentations~=1.4.3->-r requirements.txt (line 12)) (6.5.12)\n", - "Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.12/dist-packages (from pydantic>=2.9.2->albumentations~=1.4.3->-r requirements.txt (line 12)) (0.7.0)\n", - "Requirement already satisfied: pydantic-core==2.41.4 in /usr/local/lib/python3.12/dist-packages (from pydantic>=2.9.2->albumentations~=1.4.3->-r requirements.txt (line 12)) (2.41.4)\n", - "Requirement already satisfied: typing-inspection>=0.4.2 in /usr/local/lib/python3.12/dist-packages (from pydantic>=2.9.2->albumentations~=1.4.3->-r requirements.txt (line 12)) (0.4.2)\n", - "Requirement already satisfied: markupsafe>=2.1.1 in /usr/local/lib/python3.12/dist-packages (from werkzeug>=1.0.1->tensorboard~=2.17.0->-r requirements.txt (line 11)) (3.0.3)\n", - "Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests->huggingface-hub~=0.23.4->-r requirements.txt (line 10)) (3.4.4)\n", - "Requirement already satisfied: idna<4,>=2.5 in 
/usr/local/lib/python3.12/dist-packages (from requests->huggingface-hub~=0.23.4->-r requirements.txt (line 10)) (3.11)\n", - "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests->huggingface-hub~=0.23.4->-r requirements.txt (line 10)) (2.5.0)\n", - "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.12/dist-packages (from requests->huggingface-hub~=0.23.4->-r requirements.txt (line 10)) (2026.1.4)\n", - "Requirement already satisfied: sympy>=1.13.3 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (1.14.0)\n", - "Requirement already satisfied: networkx>=2.5.1 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (3.6.1)\n", - "Requirement already satisfied: jinja2 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (3.1.6)\n", - "Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (12.6.77)\n", - "Requirement already satisfied: nvidia-cuda-runtime-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (12.6.77)\n", - "Requirement already satisfied: nvidia-cuda-cupti-cu12==12.6.80 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (12.6.80)\n", - "Requirement already satisfied: nvidia-cudnn-cu12==9.10.2.21 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (9.10.2.21)\n", - "Requirement already satisfied: nvidia-cublas-cu12==12.6.4.1 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (12.6.4.1)\n", - "Requirement already satisfied: nvidia-cufft-cu12==11.3.0.4 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r 
requirements.txt (line 5)) (11.3.0.4)\n", - "Requirement already satisfied: nvidia-curand-cu12==10.3.7.77 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (10.3.7.77)\n", - "Requirement already satisfied: nvidia-cusolver-cu12==11.7.1.2 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (11.7.1.2)\n", - "Requirement already satisfied: nvidia-cusparse-cu12==12.5.4.2 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (12.5.4.2)\n", - "Requirement already satisfied: nvidia-cusparselt-cu12==0.7.1 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (0.7.1)\n", - "Requirement already satisfied: nvidia-nccl-cu12==2.27.5 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (2.27.5)\n", - "Requirement already satisfied: nvidia-nvshmem-cu12==3.3.20 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (3.3.20)\n", - "Requirement already satisfied: nvidia-nvtx-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (12.6.77)\n", - "Requirement already satisfied: nvidia-nvjitlink-cu12==12.6.85 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (12.6.85)\n", - "Requirement already satisfied: nvidia-cufile-cu12==1.11.1.6 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (1.11.1.6)\n", - "Requirement already satisfied: triton==3.5.0 in /usr/local/lib/python3.12/dist-packages (from torch->timm~=0.9.2->-r requirements.txt (line 5)) (3.5.0)\n", - "Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy>=1.13.3->torch->timm~=0.9.2->-r requirements.txt (line 5)) (1.3.0)\n" - ] - } - ], - "source": [ - "!pip 
install -r requirements.txt" + "\n", + "\n", + "\n" ] }, { @@ -220,17 +45,29 @@ "## 1. Install environment\n", "1. Follow [RETFound README](https://github.com/rmaphoh/RETFound) to install environment\n", "2. Restart this Jupyter Notebook\n", - "3. Select Kernel retfound" + "3. Select Kernel retfound\n", + "\n", + "> **Note:** Ensure the same PyTorch / timm versions as the original RETFound repository to avoid weight-loading issues." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "id": "7cbf5e93-6ca0-4401-88e6-64e39968e7cd", "metadata": { "id": "7cbf5e93-6ca0-4401-88e6-64e39968e7cd" }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Project root: F:\\GitHub\\RETFound\n", + "sys.executable: f:\\GitHub\\RETFound\\venv3.9\\Scripts\\python.exe\n", + "torch version: 2.8.0+cpu\n" + ] + } + ], "source": [ "import sys, torch\n", "from pathlib import Path\n", @@ -254,8 +91,14 @@ }, "source": [ "## 2. Prepare MendeleyOCT dataset\n", - "1. Download from the [shared data pool](https://github.com/rmaphoh/RETFound/blob/main/BENCHMARK.md).\n", - "2. Put the data folder under the project directory, e.g. \"RETFound/MESSIDOR2\"\n" + "1. Download from the [gdrive](https://drive.google.com/drive/folders/1gBFXrkhRpp8EbTBlTn72h-UvS6a1JYBv?usp=sharing).\n", + "2. Put the data folder under the project directory, e.g. \"RETFound/MendeleyOCT\".\n", + "\n", + "> **Note:** \n", + "The dataset used in this work has been **preprocessed and annotated for the segmentation task**. \n", + "> - Each B-scan was **horizontally cropped from the top and bottom** to focus on the retinal region. \n", + "> - Binary pixel-level annotations were created for **drusen segmentation** (0: background, 1: drusen). \n", + "> - Image–mask pairs are provided in JPEG/PNG format for direct training.\n" ] }, { @@ -265,49 +108,46 @@ "id": "357be2fa-a914-4d1f-8759-76b2b1c3f20f" }, "source": [ - "## 3. 
Hyperparameter and path settings\n", - "1. Can choose finetune or lp (linear probe)\n", - "2. Model selection [info](https://github.com/rmaphoh/RETFound#:~:text=In%20train.sh%2C%20the%20model%20can%20be%20selected%20by%20changing%20the%20hyperparameters%20MODEL%2C%20MODEL_ARCH%2C%20FINETUNE%3A)" + "## 3. Hyperparameter and Path Settings\n", + "- Backbone: RETFound ViT-Large \n", + "- Task: Binary drusen segmentation \n", + "- Loss: Cross-Entropy + Dice \n", + "- Image size: 256×256 \n", + "- Classes: 2 (background, drusen)\n", + "\n", + "> **Note:** Encoder check point can be downloaded from [here](https://drive.google.com/drive/folders/14SQdLuIxfkiqz_zmpvNkd9Ka4NTW3Fml?usp=sharing)" ] }, { "cell_type": "code", - "execution_count": null, - "id": "5f675843", - "metadata": { - "id": "5f675843" - }, - "outputs": [], + "execution_count": 5, + "id": "7f192e16", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Dataset: MendeleyOCT\\Data\n", + "Checkpoint: checkpoints\\checkpoint-best.pth\n" + ] + } + ], "source": [ "from pathlib import Path\n", - "ADAPTATION='finetune'\n", - "MODEL='RETFound_dinov2'\n", - "MODEL_ARCH='retfound_dinov2'\n", - "FINETUNE='RETFound_dinov2_meh'\n", - "DATASET='MESSIDOR2'\n", - "NUM_CLASS=5\n", - "DATA_PATH=PROJECT_ROOT/DATASET\n", - "BATCH_SIZE=24\n", - "EPOCHS=50\n", - "INPUT_SIZE=224\n", - "WORLD_SIZE=1\n", - "TASK=f\"{MODEL_ARCH}_{DATASET}_{ADAPTATION}\"\n", - "OUTPUT_DIR=PROJECT_ROOT/'output_dir'/TASK\n", - "print('DATA_PATH:',DATA_PATH)\n", - "print('TASK:',TASK)\n", - "print('OUTPUT_DIR:',OUTPUT_DIR)" + "\n", + "DATA_PATH = Path(\"MendeleyOCT/Data\")\n", + "CKPT = Path(\"checkpoints/checkpoint-best.pth\")\n", + "\n", + "IMG_SIZE = 256\n", + "BATCH_SIZE = 4\n", + "EPOCHS = 20\n", + "CE_WEIGHT = \"0.1,0.9\"\n", + "\n", + "print(\"Dataset:\", DATA_PATH)\n", + "print(\"Checkpoint:\", CKPT)\n" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "fa3d8d10", - "metadata": { - 
"id": "fa3d8d10" - }, - "outputs": [], - "source": [] - }, { "cell_type": "markdown", "id": "6ac04845", @@ -315,82 +155,15 @@ "id": "6ac04845" }, "source": [ - "## 4. Fine-tuning and testing RETFound on MESSIDOR2" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d23ff751", - "metadata": { - "id": "d23ff751", - "scrolled": true - }, - "outputs": [], - "source": [ - "import sys\n", + "## 4. Fine-tuning RETFound for Segmentation\n", "\n", - "!{sys.executable} main_finetune.py \\\n", - " --model {MODEL} \\\n", - " --model_arch {MODEL_ARCH} \\\n", - " --finetune {FINETUNE} \\\n", - " --savemodel \\\n", - " --global_pool \\\n", - " --batch_size {BATCH_SIZE} \\\n", - " --epochs {EPOCHS} \\\n", - " --nb_classes {NUM_CLASS} \\\n", - " --data_path {DATA_PATH} \\\n", - " --input_size {INPUT_SIZE} \\\n", - " --task {TASK} \\\n", - " --adaptation {ADAPTATION}" + "The pretrained ViT encoder is initialized from RETFound weights, \n", + "and a lightweight convolutional decoder is trained for pixel prediction." 
] }, { "cell_type": "code", - "execution_count": 2, - "id": "b55116e5", - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "executionInfo": { - "elapsed": 12, - "status": "ok", - "timestamp": 1768473965211, - "user": { - "displayName": "MD SALMAN SHAMS", - "userId": "17411188514128174175" - }, - "user_tz": -330 - }, - "id": "b55116e5", - "outputId": "2ecba17a-1652-4641-9760-f44f741b554e" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[WinError 3] The system cannot find the path specified: '/content/drive/MyDrive/Assignments/RETFound'\n", - "f:\\GitHub\\RETFound\\examples\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\IPython\\core\\magics\\osm.py:393: UserWarning: using bookmarks requires you to install the `pickleshare` library.\n", - " bkms = self.shell.db.get('bookmarks', {})\n" - ] - } - ], - "source": [ - "%cd /content/drive/MyDrive/Assignments/RETFound" - ] - }, - { - "cell_type": "code", - "execution_count": 5, + "execution_count": null, "id": "374fdce3", "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, @@ -1220,11 +993,13 @@ ], "source": [ "!python main_segmentation.py \\\n", - " --data_path Segmentation/Data \\\n", + " --data_path MendeleyOCT/Data \\\n", " --finetune checkpoints/checkpoint-best.pth \\\n", - " --epochs 20 \\\n", - " --batch_size 4 \\\n", - " --img_size 256" + " --epochs {EPOCHS} \\\n", + " --batch_size {BATCH_SIZE} \\\n", + " --img_size {IMG_SIZE} \\\n", + " --ce_weight {CE_WEIGHT} \\\n", + " --output_dir segmentation_output" ] }, { @@ -1234,77 +1009,17 @@ "id": "84ce93ac" }, "source": [ - "## 5. 
Evaluation-only" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "0af0f8a7", - "metadata": { - "id": "0af0f8a7", - "scrolled": true - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "'{sys.executable}' is not recognized as an internal or external command,\n", - "operable program or batch file.\n" - ] - } - ], - "source": [ - "import sys\n", - "\n", - "CKPT = r\"F:\\GitHub\\RETFound\\segmentation_output\\best.pth\"\n", + "## 5. Inference and Evaluation\n", "\n", - "!{sys.executable} main_finetune.py \\\n", - " --model {MODEL} \\\n", - " --model_arch {MODEL_ARCH} \\\n", - " --finetune {FINETUNE} \\\n", - " --savemodel \\\n", - " --global_pool \\\n", - " --batch_size 1 \\\n", - " --nb_classes {NUM_CLASS} \\\n", - " --data_path {DATA_PATH} \\\n", - " --input_size {INPUT_SIZE} \\\n", - " --task {TASK} \\\n", - " --adaptation {ADAPTATION} \\\n", - " --eval \\\n", - " --resume {CKPT}\n" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "861c872d", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "F:\\GitHub\\RETFound\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "f:\\GitHub\\RETFound\\venv3.9\\lib\\site-packages\\IPython\\core\\magics\\osm.py:417: UserWarning: using dhist requires you to install the `pickleshare` library.\n", - " self.shell.db['dhist'] = compress_dhist(dhist)[-100:]\n" - ] - } - ], - "source": [ - "%cd F:\\GitHub\\RETFound" + "The following script:\n", + "- generates overlay visualizations \n", + "- computes Dice and IOU \n", + "- reports final metrics on the test set" ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "id": "02d2dce7-31c2-48e2-87ce-9223b74cf94e", "metadata": { "id": "02d2dce7-31c2-48e2-87ce-9223b74cf94e" @@ -1383,15 +1098,65 @@ } ], "source": [ - "!python inference_segmentation.py" + "!python inference_segmentation.py \\\n", + " --data_path MendeleyOCT/Data 
\\\n", + " --ckpt segmentation_output/best.pth \\\n", + " --out_dir segmentation_output/inference \\\n", + " --img_size 256" ] }, { - "cell_type": "code", - "execution_count": null, - "id": "26ff35ae", + "cell_type": "markdown", + "id": "cdccae46", + "metadata": {}, + "source": [ + "## 7. Summary\n", + "\n", + "- Extended RETFound from classification to **segmentation** using a lightweight decoder. \n", + "- Reused MAE-pretrained ViT encoder for efficient transfer learning. \n", + "- Implemented training, inference, and visualization pipeline. \n", + "- Evaluated with Dice and IoU metric for fair assessment. \n", + "- Demonstrated adaptability of RETFound to dense prediction tasks." + ] + }, + { + "cell_type": "markdown", + "id": "20f7ee18", + "metadata": {}, + "source": [ + "## 8. Future Work\n", + "\n", + "To further improve the robustness and clinical applicability of the model, the following directions can be explored:\n", + "\n", + "- **Improve model performance**\n", + " - Explore stronger decoders and multi-scale feature fusion \n", + " - Incorporate boundary-aware or focal losses for thin drusen regions \n", + "\n", + "- **Reduce ambiguity in annotations**\n", + " - Refine labeling protocol and inter-grader consistency \n", + " - Introduce soft labels or uncertainty modeling\n", + "\n", + "- **Increase dataset size**\n", + " - Add more annotated OCT volumes from diverse devices and populations \n", + " - Include multi-center data for better generalization\n", + "\n", + "- **Advanced preprocessing**\n", + " - More accurate retinal region localization \n", + " - Intensity normalization and artifact removal tailored for OCT\n", + "\n", + "- **Architectural extensions**\n", + " - Deeper or attention-based decoder when computation allows \n", + " - Skip connections from intermediate ViT layers\n", + "\n", + "- **Hyperparameter optimization**\n", + " - Learning rate schedules, class weighting, and threshold calibration \n", + " - Validation-based model selection 
and post-processing\n" + ] + }, + { + "cell_type": "markdown", + "id": "b66a3d45", "metadata": {}, - "outputs": [], "source": [] } ], diff --git a/infer_test_multi_iou.py b/infer_test_multi_iou.py deleted file mode 100644 index e6ecf467..00000000 --- a/infer_test_multi_iou.py +++ /dev/null @@ -1,212 +0,0 @@ -import os -import cv2 -import torch -import numpy as np -from albumentations import Compose, Resize, Normalize -from albumentations.pytorch import ToTensorV2 - -from models_segmentation import RETFoundSegmentation - -DEVICE = "cuda" if torch.cuda.is_available() else "cpu" - - -# ============================================================ -# Metric utilities -# ============================================================ - -def basic_iou(pred, gt, smooth=1e-6): - pred = pred.astype(bool) - gt = gt.astype(bool) - - inter = (pred & gt).sum() - union = (pred | gt).sum() - - return (inter + smooth) / (union + smooth) - - -def dice_score(pred, gt, smooth=1e-6): - pred = pred.astype(bool) - gt = gt.astype(bool) - - inter = (pred & gt).sum() - return (2 * inter + smooth) / (pred.sum() + gt.sum() + smooth) - - -def tolerance_iou(pred, gt, k=2): - kernel = np.ones((k*2+1, k*2+1), np.uint8) - gt_dilated = cv2.dilate(gt.astype(np.uint8), kernel) - return basic_iou(pred, gt_dilated) - - -def lesion_level_iou(pred, gt): - num_gt, gt_labels = cv2.connectedComponents(gt.astype(np.uint8)) - num_pr, pr_labels = cv2.connectedComponents(pred.astype(np.uint8)) - - scores = [] - for g in range(1, num_gt): - gt_comp = (gt_labels == g) - - best = 0 - for p in range(1, num_pr): - pr_comp = (pr_labels == p) - best = max(best, basic_iou(pr_comp, gt_comp)) - - scores.append(best) - - if len(scores) == 0: - return 1.0 if pred.sum() == 0 else 0.0 - - return np.mean(scores) - - -def positive_slice_iou(pred, gt): - if gt.sum() == 0: - return None - return basic_iou(pred, gt) - - -# ============================================================ -# Model loader -# 
============================================================ -def load_model(ckpt, img_size=256): - model = RETFoundSegmentation(img_size=img_size).to(DEVICE) - - state = torch.load(ckpt, map_location=DEVICE, weights_only=False) - model.load_state_dict(state) - - model.eval() - return model - - -# ============================================================ -# Preprocess -# ============================================================ -def preprocess(img_path, img_size=256): - img = cv2.imread(img_path) - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - - tf = Compose([ - Resize(img_size, img_size), - Normalize((0.485,0.456,0.406),(0.229,0.224,0.225)), - ToTensorV2() - ]) - - aug = tf(image=img) - return aug["image"].unsqueeze(0), img - - -# ============================================================ -# Overlay visualization -# ============================================================ -def make_overlay(img, mask): - mask = mask.astype(np.uint8) - - mask = cv2.resize( - mask, - (img.shape[1], img.shape[0]), - interpolation=cv2.INTER_NEAREST - ) - - color = np.zeros_like(img, dtype=np.uint8) - color[:, :, 1] = mask * 255 # green channel - - return cv2.addWeighted(img, 0.7, color, 0.3, 0) - - -# ============================================================ -# MAIN -# ============================================================ -def run_inference( - ckpt="segmentation_output/best.pth", - test_img_dir="Segmentation/Data/test/images", - test_mask_dir="Segmentation/Data/test/masks", - out_dir="segmentation_output/inference_overlays", - img_size=256 -): - - os.makedirs(out_dir, exist_ok=True) - - model = load_model(ckpt, img_size) - - results = { - "pixel_iou": [], - "dice": [], - "tol2_iou": [], - "tol3_iou": [], - "lesion_iou": [], - "positive_iou": [] - } - - print("\n===== Multi-Metric Inference (Overlay Only) =====\n") - - for name in sorted(os.listdir(test_img_dir)): - - img_path = os.path.join(test_img_dir, name) - - stem = os.path.splitext(name)[0] - gt_name = stem + 
"_mask.png" - gt_path = os.path.join(test_mask_dir, gt_name) - - if not os.path.isfile(gt_path): - continue - - # ----- Predict ----- - tensor, orig = preprocess(img_path, img_size) - - with torch.no_grad(): - out = model(tensor.to(DEVICE)) - pred = out.argmax(1).squeeze().cpu().numpy() - - # ----- Load GT ----- - gt = cv2.imread(gt_path, cv2.IMREAD_GRAYSCALE) - gt = (gt > 0).astype("uint8") - - gt = cv2.resize( - gt, - (pred.shape[1], pred.shape[0]), - interpolation=cv2.INTER_NEAREST - ) - - # ----- Metrics ----- - piou = basic_iou(pred, gt) - dice = dice_score(pred, gt) - t2 = tolerance_iou(pred, gt, k=2) - t3 = tolerance_iou(pred, gt, k=3) - liou = lesion_level_iou(pred, gt) - pos = positive_slice_iou(pred, gt) - - results["pixel_iou"].append(piou) - results["dice"].append(dice) - results["tol2_iou"].append(t2) - results["tol3_iou"].append(t3) - results["lesion_iou"].append(liou) - - if pos is not None: - results["positive_iou"].append(pos) - - # ----- SAVE ONLY OVERLAY ----- - over = make_overlay(orig, pred) - cv2.imwrite(os.path.join(out_dir, stem + "_overlay.png"), over) - - print(f"{name:22s} IoU:{piou:.3f} Dice:{dice:.3f} " - f"Tol2:{t2:.3f} Lesion:{liou:.3f}") - - # ======================================================== - print("\n===== FINAL SUMMARY =====") - - def mean(x): - return round(float(np.mean(x)), 4) if len(x) else None - - for k, v in results.items(): - print(f"{k:15s}: {mean(v)}") - - print("\nInterpretation Guide:") - print("- pixel_iou : strict pixel overlap") - print("- dice : balanced for small lesions") - print("- tol2/tol3 : clinical tolerance ±2/3 px") - print("- lesion_iou : object-level matching") - print("- positive_iou: only slices with drusen") - - -if __name__ == "__main__": - run_inference() diff --git a/inference_segmentation.py b/inference_segmentation.py index fae6f0c0..b8e901ad 100644 --- a/inference_segmentation.py +++ b/inference_segmentation.py @@ -1,12 +1,14 @@ import os import cv2 import torch +import argparse import 
numpy as np from albumentations import Compose, Resize, Normalize from albumentations.pytorch import ToTensorV2 from models_segmentation import RETFoundSegmentation +# Select device automatically DEVICE = "cuda" if torch.cuda.is_available() else "cpu" @@ -14,12 +16,28 @@ # Metrics # ============================================================ def dice_iou(pred, gt, smooth=1e-6): + """ + Compute Dice coefficient and IoU between prediction and ground truth. + + Args: + pred : Binary prediction mask (numpy) + gt : Binary ground truth mask (numpy) + smooth: Small constant to avoid division by zero + + Returns: + dice : Dice similarity score + iou : Intersection over Union score + """ + + # Convert to boolean for logical operations pred = pred.astype(bool) gt = gt.astype(bool) + # Intersection and union areas inter = (pred & gt).sum() union = (pred | gt).sum() + # Dice and IoU formulas dice = (2 * inter + smooth) / (pred.sum() + gt.sum() + smooth) iou = (inter + smooth) / (union + smooth) @@ -30,8 +48,20 @@ def dice_iou(pred, gt, smooth=1e-6): # Model loader # ============================================================ def load_model(ckpt, img_size=256): + """ + Load trained RETFound segmentation model. + + Args: + ckpt : Path to checkpoint + img_size : Input size used during training + + Returns: + model in evaluation mode + """ + model = RETFoundSegmentation(img_size=img_size).to(DEVICE) + # Load weights state = torch.load(ckpt, map_location=DEVICE, weights_only=False) model.load_state_dict(state) @@ -43,6 +73,18 @@ def load_model(ckpt, img_size=256): # Preprocess # ============================================================ def preprocess(img_path, img_size=256): + """ + Read and preprocess input image. 
+ + - Read image using OpenCV + - Convert BGR → RGB + - Resize and normalize as per RETFound training + - Convert to tensor + + Returns: + tensor image for model, original image for visualization + """ + img = cv2.imread(img_path) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) @@ -61,18 +103,32 @@ def preprocess(img_path, img_size=256): # Overlay visualization # ============================================================ def make_overlay(img, mask): - # Ensure correct type + """ + Create green overlay on original image using predicted mask. + + Args: + img : Original RGB image + mask : Binary prediction mask + + Returns: + Overlay visualization + """ + + # Ensure uint8 type mask = mask.astype(np.uint8) + # Resize mask back to original image size mask = cv2.resize( mask, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_NEAREST ) + # Create green color mask color = np.zeros_like(img, dtype=np.uint8) color[:, :, 1] = mask * 255 # Green channel + # Blend with original image overlay = cv2.addWeighted(img, 0.7, color, 0.3, 0) return overlay @@ -80,27 +136,44 @@ def make_overlay(img, mask): # ============================================================ # Main inference # ============================================================ -def run_inference( - ckpt="segmentation_output/best.pth", - test_img_dir="Segmentation/Data/test/images", - test_mask_dir="Segmentation/Data/test/masks", - out_dir="segmentation_output/inference", - img_size=256 -): +def run_inference(args): + """ + Perform inference on test set. 
+ + - Load model + - Iterate over test images + - Predict masks + - Compute Dice & IoU + - Save overlay visualizations + """ - os.makedirs(out_dir, exist_ok=True) + # ----- Resolve dataset paths from single root ----- + test_img_dir = os.path.join(args.data_path, "test", "images") + test_mask_dir = os.path.join(args.data_path, "test", "masks") - model = load_model(ckpt, img_size) + # Validate paths + if not os.path.isdir(test_img_dir): + raise FileNotFoundError(f"Images folder not found: {test_img_dir}") + + if not os.path.isdir(test_mask_dir): + raise FileNotFoundError(f"Masks folder not found: {test_mask_dir}") + + os.makedirs(args.out_dir, exist_ok=True) + + # Load trained model + model = load_model(args.ckpt, args.img_size) dice_scores = [] iou_scores = [] print("\n===== Running Inference =====\n") + # Iterate through test images for name in sorted(os.listdir(test_img_dir)): img_path = os.path.join(test_img_dir, name) + # Corresponding ground truth name stem = os.path.splitext(name)[0] gt_name = stem + "_mask.png" gt_path = os.path.join(test_mask_dir, gt_name) @@ -109,7 +182,7 @@ def run_inference( continue # ----- Predict ----- - tensor, orig = preprocess(img_path, img_size) + tensor, orig = preprocess(img_path, args.img_size) with torch.no_grad(): out = model(tensor.to(DEVICE)) @@ -131,17 +204,41 @@ def run_inference( dice_scores.append(dice) iou_scores.append(iou) - # ----- Save outputs ----- + # ----- Save overlay only ----- over = make_overlay(orig, pred) - cv2.imwrite(os.path.join(out_dir, stem + "_overlay.png"), over) + cv2.imwrite(os.path.join(args.out_dir, stem + "_overlay.png"), over) + # Print per-image result print(f"{name:25s} Dice: {dice:.4f} IoU: {iou:.4f}") # --------------------------------------------------------- + # Final aggregated report print("\n===== FINAL REPORT =====") print("Mean Dice:", round(np.mean(dice_scores), 4)) print("Mean IoU :", round(np.mean(iou_scores), 4)) +# ============================================================ +# 
CLI +# ============================================================ if __name__ == "__main__": - run_inference() + + # Command line argument parser + parser = argparse.ArgumentParser(description="RETFound Segmentation Inference") + + parser.add_argument("--ckpt", type=str, required=True, + help="Path to trained model checkpoint") + + parser.add_argument("--data_path", type=str, required=True, + help="Root dataset folder containing test/images and test/masks") + + parser.add_argument("--out_dir", type=str, default="segmentation_output/inference", + help="Output directory for overlays") + + parser.add_argument("--img_size", type=int, default=256, + help="Input resize dimension") + + args = parser.parse_args() + + # Start inference + run_inference(args) diff --git a/main_segmentation.py b/main_segmentation.py index 642cd359..2b2c257c 100644 --- a/main_segmentation.py +++ b/main_segmentation.py @@ -20,24 +20,37 @@ compute_metrics, ) +# Configure logging logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) -# ========================= -# Dataset -# ========================= +# ============================================================ +# Dataset Definition +# ============================================================ class OCTDrusenDataset(Dataset): + """ + Dataset for OCT drusen segmentation. 
+ + Expected structure: + root/ + ├── images/ + └── masks/ (same filename + _mask.png) + """ + def __init__(self, root, transform=None): + self.image_dir = os.path.join(root, "images") self.mask_dir = os.path.join(root, "masks") self.transform = transform + # Collect valid image–mask pairs self.samples = [] for img_name in sorted(os.listdir(self.image_dir)): stem = os.path.splitext(img_name)[0] mask_name = stem + "_mask.png" mask_path = os.path.join(self.mask_dir, mask_name) + if os.path.isfile(mask_path): self.samples.append((img_name, mask_name)) @@ -47,61 +60,96 @@ def __len__(self): return len(self.samples) def __getitem__(self, idx): + """ + Load image and mask and apply transforms. + """ + img_name, mask_name = self.samples[idx] + # ----- Image ----- img = cv2.imread(os.path.join(self.image_dir, img_name)) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - mask = cv2.imread(os.path.join(self.mask_dir, mask_name), cv2.IMREAD_GRAYSCALE) + # ----- Mask ----- + mask = cv2.imread( + os.path.join(self.mask_dir, mask_name), + cv2.IMREAD_GRAYSCALE + ) + mask = (mask > 0).astype("uint8") + # ----- Transform ----- if self.transform: aug = self.transform(image=img, mask=mask) img, mask = aug["image"], aug["mask"] return img, torch.tensor(mask, dtype=torch.long) -# ========================= -# Main -# ========================= + +# ============================================================ +# Main Training Script +# ============================================================ def main(): + + # -------------------------------------------------------- + # Arguments + # -------------------------------------------------------- parser = argparse.ArgumentParser("RETFound Segmentation") + parser.add_argument("--data_path", type=str, required=True) parser.add_argument("--epochs", type=int, default=50) parser.add_argument("--batch_size", type=int, default=4) parser.add_argument("--lr", type=float, default=1e-4) + parser.add_argument("--img_size", type=int, default=512) 
parser.add_argument("--patch_size", type=int, default=16) parser.add_argument("--drop_path", type=float, default=0.2) + parser.add_argument("--finetune", type=str, default="") parser.add_argument("--output_dir", type=str, default="./segmentation_output") + parser.add_argument("--dice_weight", type=float, default=1.0) parser.add_argument("--ce_weight", type=str, default="0.3,0.7") + args = parser.parse_args() device = torch.device("cuda" if torch.cuda.is_available() else "cpu") os.makedirs(args.output_dir, exist_ok=True) + # -------------------------------------------------------- + # Transform + # -------------------------------------------------------- transform = Compose([ Resize(args.img_size, args.img_size), - Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), + Normalize((0.485, 0.456, 0.406), + (0.229, 0.224, 0.225)), ToTensorV2() ]) + # -------------------------------------------------------- + # Datasets & Loaders (ONLY TRAIN + VAL) + # -------------------------------------------------------- train_ds = OCTDrusenDataset(os.path.join(args.data_path, "train"), transform) val_ds = OCTDrusenDataset(os.path.join(args.data_path, "val"), transform) - test_ds = OCTDrusenDataset(os.path.join(args.data_path, "test"), transform) train_loader = DataLoader(train_ds, args.batch_size, shuffle=True, num_workers=4) val_loader = DataLoader(val_ds, args.batch_size, shuffle=False, num_workers=4) - test_loader = DataLoader(test_ds, args.batch_size, shuffle=False, num_workers=4) - - model = RETFoundSegmentation(args.img_size, args.patch_size, num_classes=2, drop_path=args.drop_path).to(device) - # ------------------------- - # Load pretrained weights - # ------------------------- + # -------------------------------------------------------- + # Model + # -------------------------------------------------------- + model = RETFoundSegmentation( + args.img_size, + args.patch_size, + num_classes=2, + drop_path=args.drop_path + ).to(device) + + # 
-------------------------------------------------------- + # Load Pretrained RETFound Encoder (optional) + # -------------------------------------------------------- if args.finetune: + if os.path.isfile(args.finetune): ckpt_path = args.finetune else: @@ -113,18 +161,23 @@ def main(): state = torch.load(ckpt_path, map_location="cpu", weights_only=False) state = state["model"] if "model" in state else state + # Remove classification head for k in ["head.weight", "head.bias"]: if k in state: del state[k] interpolate_pos_embed(model.encoder, state) model.encoder.load_state_dict(state, strict=False) + print("Pretrained RETFound weights loaded.") - # ------------------------- - # Optimization - # ------------------------- - ce_weights = torch.tensor([float(x) for x in args.ce_weight.split(",")]).to(device) + # -------------------------------------------------------- + # Loss & Optimizer + # -------------------------------------------------------- + ce_weights = torch.tensor( + [float(x) for x in args.ce_weight.split(",")] + ).to(device) + ce_loss = nn.CrossEntropyLoss(weight=ce_weights) def loss_fn(out, tgt): @@ -134,28 +187,40 @@ def loss_fn(out, tgt): print("\n[DEBUG] Starting training loop...\n") - # ------------------------- - # Training Loop - # ------------------------- + # -------------------------------------------------------- + # Training Loop (NO TEST DATA) + # -------------------------------------------------------- best = float("inf") + for e in range(args.epochs): + print(f"[DEBUG] Entered epoch {e+1}") - train_loss = train_segmentation(model, train_loader, loss_fn, optimizer, device) - val_loss, P, T = evaluate_segmentation(model, val_loader, loss_fn, device) + + train_loss = train_segmentation( + model, train_loader, loss_fn, optimizer, device + ) + + val_loss, P, T = evaluate_segmentation( + model, val_loader, loss_fn, device + ) + acc, dice, iou = compute_metrics(P, T) - print(f"Epoch {e+1}: Train={train_loss:.4f} | Val={val_loss:.4f} | Dice={dice:.4f} 
| IoU={iou:.4f}") + print( + f"Epoch {e+1}: " + f"Train={train_loss:.4f} | " + f"Val={val_loss:.4f} | " + f"Dice={dice:.4f} | " + f"IoU={iou:.4f}" + ) + # Save best model based on validation loss if val_loss < best: best = val_loss - torch.save(model.state_dict(), os.path.join(args.output_dir, "best.pth")) - - # ------------------------- - # Final Test - # ------------------------- - test_loss, P, T = evaluate_segmentation(model, test_loader, loss_fn, device) - acc, dice, iou = compute_metrics(P, T) - print(f"Test: Loss={test_loss:.4f} | Dice={dice:.4f} | IoU={iou:.4f}") + torch.save( + model.state_dict(), + os.path.join(args.output_dir, "best.pth") + ) if __name__ == "__main__": diff --git a/models_segmentation.py b/models_segmentation.py index 2bfe9132..9adef928 100644 --- a/models_segmentation.py +++ b/models_segmentation.py @@ -4,40 +4,135 @@ from models_vit import RETFound_mae +# ============================================================ +# Decoder / Segmentation Head +# ============================================================ class SegmentationHead(nn.Module): + """ + Lightweight decoder that converts ViT patch embeddings + into full-resolution segmentation map. 
+ + Steps: + - Reshape sequence → 2D feature map + - Upsample to original image size + - Apply small CNN to produce class logits + """ + def __init__(self, hidden_dim, num_classes, img_size, patch_size): super().__init__() + + # Patch geometry from ViT self.patch_size = patch_size self.h = img_size // patch_size self.w = img_size // patch_size + + # Simple convolutional decoder self.conv = nn.Sequential( + # Reduce channel dimension nn.Conv2d(hidden_dim, hidden_dim // 2, 3, padding=1), nn.ReLU(inplace=True), + + # Final layer → number of classes nn.Conv2d(hidden_dim // 2, num_classes, 1), ) def forward(self, x): + """ + Args: + x: ViT token embeddings [B, N, C] + (without CLS token) + + Returns: + Segmentation logits [B, num_classes, H, W] + """ + B, N, C = x.shape + + # Reshape sequence back to 2D feature map x = x.reshape(B, self.h, self.w, C).permute(0, 3, 1, 2) - x = F.interpolate(x, scale_factor=self.patch_size, mode="bilinear", align_corners=False) + + # Upsample from patch grid → image resolution + x = F.interpolate( + x, + scale_factor=self.patch_size, + mode="bilinear", + align_corners=False + ) + + # Apply conv decoder to get class logits return self.conv(x) +# ============================================================ +# Full RETFound + Decoder Model +# ============================================================ class RETFoundSegmentation(nn.Module): - def __init__(self, img_size=512, patch_size=16, hidden_dim=1024, num_classes=2, drop_path=0.2): + """ + Segmentation model built on top of RETFound MAE encoder. 
+ + Architecture: + RETFound ViT Encoder → SegmentationHead Decoder + """ + + def __init__( + self, + img_size=512, + patch_size=16, + hidden_dim=1024, + num_classes=2, + drop_path=0.2 + ): super().__init__() - self.encoder = RETFound_mae(img_size=img_size, num_classes=num_classes, - drop_path_rate=drop_path, global_pool=False) - self.seg_head = SegmentationHead(hidden_dim, num_classes, img_size, patch_size) + + # ---------------------------------------------------- + # Encoder: pretrained RETFound ViT (MAE) + # ---------------------------------------------------- + self.encoder = RETFound_mae( + img_size=img_size, + num_classes=num_classes, + drop_path_rate=drop_path, + global_pool=False # keep token sequence + ) + + # ---------------------------------------------------- + # Decoder head for pixel prediction + # ---------------------------------------------------- + self.seg_head = SegmentationHead( + hidden_dim, + num_classes, + img_size, + patch_size + ) def forward(self, x): + """ + Forward pass: + 1. Patch embedding + 2. Add CLS token + 3. Positional embedding + 4. Transformer blocks + 5. 
Decoder head + """ + B = x.size(0) + + # ----- Patch embedding ----- x = self.encoder.patch_embed(x) + + # ----- Add CLS token ----- cls = self.encoder.cls_token.expand(B, -1, -1) x = torch.cat((cls, x), dim=1) + + # ----- Positional encoding ----- x = x + self.encoder.pos_embed x = self.encoder.pos_drop(x) + + # ----- Transformer encoder blocks ----- for blk in self.encoder.blocks: x = blk(x) + + # ----- Final normalization ----- x = self.encoder.norm(x) + + # ----- Remove CLS token & decode ----- return self.seg_head(x[:, 1:]) From 98ab2487695484947529ae7c2eefa490d50b8c92 Mon Sep 17 00:00:00 2001 From: MDSALMANSHAMS Date: Fri, 16 Jan 2026 16:43:33 +0530 Subject: [PATCH 7/7] final changes --- examples/RETFound_MendeleyOCT_demo.ipynb | 69 +++++++---------------- tree.txt | Bin 0 -> 4484738 bytes 2 files changed, 20 insertions(+), 49 deletions(-) create mode 100644 tree.txt diff --git a/examples/RETFound_MendeleyOCT_demo.ipynb b/examples/RETFound_MendeleyOCT_demo.ipynb index 019638ed..7ea2b1bf 100644 --- a/examples/RETFound_MendeleyOCT_demo.ipynb +++ b/examples/RETFound_MendeleyOCT_demo.ipynb @@ -91,14 +91,15 @@ }, "source": [ "## 2. Prepare MendeleyOCT dataset\n", - "1. Download from the [gdrive](https://drive.google.com/drive/folders/1gBFXrkhRpp8EbTBlTn72h-UvS6a1JYBv?usp=sharing).\n", + "1. Download dataset from the [gdrive](https://drive.google.com/drive/folders/1gBFXrkhRpp8EbTBlTn72h-UvS6a1JYBv?usp=sharing).\n", "2. Put the data folder under the project directory, e.g. \"RETFound/MendeleyOCT\".\n", "\n", "> **Note:** \n", "The dataset used in this work has been **preprocessed and annotated for the segmentation task**. \n", "> - Each B-scan was **horizontally cropped from the top and bottom** to focus on the retinal region. \n", "> - Binary pixel-level annotations were created for **drusen segmentation** (0: background, 1: drusen). 
\n", - "> - Image–mask pairs are provided in JPEG/PNG format for direct training.\n" + "> - Image–mask pairs are provided in JPEG/PNG format for direct training.\n", + "> - Paired format: `images/*.jpeg` ↔ `masks/*_mask.png`" ] }, { @@ -994,12 +995,9 @@ "source": [ "!python main_segmentation.py \\\n", " --data_path MendeleyOCT/Data \\\n", - " --finetune checkpoints/checkpoint-best.pth \\\n", - " --epochs EPOCHS \\\n", - " --batch_size BATCH_SIZE \\\n", - " --img_size IMG_SIZE \\\n", - " --ce_weight CE_WEIGHT \\\n", - " --output_dir segmentation_output" + " --epochs 20 \\\n", + " --batch_size 4 \\\n", + " --finetune RETFound_OCT" ] }, { @@ -1099,10 +1097,9 @@ ], "source": [ "!python inference_segmentation.py \\\n", - " --data_path MendeleyOCT/Data \\\n", " --ckpt segmentation_output/best.pth \\\n", - " --out_dir segmentation_output/inference \\\n", - " --img_size 256" + " --data_path MendeleyOCT/Data \\\n", + " --out_dir segmentation_output/inference" ] }, { @@ -1110,13 +1107,13 @@ "id": "cdccae46", "metadata": {}, "source": [ - "## 7. Summary\n", + "## Summary\n", "\n", - "- Extended RETFound from classification to **segmentation** using a lightweight decoder. \n", - "- Reused MAE-pretrained ViT encoder for efficient transfer learning. \n", - "- Implemented training, inference, and visualization pipeline. \n", - "- Evaluated with Dice and IoU metric for fair assessment. \n", - "- Demonstrated adaptability of RETFound to dense prediction tasks." + "- Extended RETFound to **drusen segmentation** via decoder \n", + "- Reused MAE-pretrained ViT encoder \n", + "- Provided training & inference pipeline \n", + "- Evaluated with Dice and IoU metrics \n", + "- Demonstrated transfer to dense prediction" ] }, { @@ -1124,40 +1121,14 @@ "id": "20f7ee18", "metadata": {}, "source": [ - "## 8. 
Future Work\n", - "\n", - "To further improve the robustness and clinical applicability of the model, the following directions can be explored:\n", - "\n", - "- **Improve model performance**\n", - " - Explore stronger decoders and multi-scale feature fusion \n", - " - Incorporate boundary-aware or focal losses for thin drusen regions \n", - "\n", - "- **Reduce ambiguity in annotations**\n", - " - Refine labeling protocol and inter-grader consistency \n", - " - Introduce soft labels or uncertainty modeling\n", - "\n", - "- **Increase dataset size**\n", - " - Add more annotated OCT volumes from diverse devices and populations \n", - " - Include multi-center data for better generalization\n", + "## Future Work\n", "\n", - "- **Advanced preprocessing**\n", - " - More accurate retinal region localization \n", - " - Intensity normalization and artifact removal tailored for OCT\n", - "\n", - "- **Architectural extensions**\n", - " - Deeper or attention-based decoder when computation allows \n", - " - Skip connections from intermediate ViT layers\n", - "\n", - "- **Hyperparameter optimization**\n", - " - Learning rate schedules, class weighting, and threshold calibration \n", - " - Validation-based model selection and post-processing\n" + "- Improve annotation consistency \n", + "- Add more diverse OCT data \n", + "- Stronger preprocessing & normalization \n", + "- Deeper decoder (if compute allows) \n", + "- Hyperparameter tuning" ] - }, - { - "cell_type": "markdown", - "id": "b66a3d45", - "metadata": {}, - "source": [] } ], "metadata": { diff --git a/tree.txt b/tree.txt new file mode 100644 index 0000000000000000000000000000000000000000..ad8561c1e9d5a22b8ba217eeb12661b5bb7e2121 GIT binary patch literal 4484738 zcmeFaYm*(fkuADEPsIEOf1e;jBt?q6-)xB<`yScWk>v5c5fjnDXNwkJW;ZFxF~9xX zo2$TOJZkUVRlr)cNlC$ILbSV9CK8#6L?QwFfB)zAyH~sCyT`jHyEnVv?7rIlcK7Y> z5Ak2mch7bgyUY0d1F-hyJ>9alKeV) z^V{7QF{bzTj3?y#j~KmYlWd@ukl982=H>3k_$lRZ8NZVxaXo(HyV?KuB);DF9WeeQ 
zv=e_G#dlBQ@9$#_UQJR3wUSEG)!*#>&1;k9!?MuN%Z3MjnRMsXUS9Y+B~MSHrw>9a zU&jbMIlv91_-)wKt4Y%z$2k3Il2sbFC!tS&3GMkd{&)N@7@0$QMh^BN-VfdTCiL)& zK>IqZc{y6ypZ%PbGXC%Pqw&RL7Z3KzpBBGFn*8082K&j8zno-SYZQNn-}85$g#;gj zr#Lhk{C&2>`68tD*U5hW96h|T`y~F{4*&Sc^sno?zsDHE1MTCJSZoht-U`IKfeQ+H z%$o+xTL#Qe4Vbq@n4gC2+>9CVQ{foZn4efURq0dBt4gD4RFzKEs4A_hQ9XKf$D#wD zSy1mUP@9tk+?^=8b14!Jrk>*mg3Sq`Z>e4t~T9HZnI zBZu!(dA>UlrF;_63r+mN6ZCYJ6SIr^eTP8anjZH1eOx#{FJk!a9*x)5PWX!Y;8Qy%D_L3{=e+ z0rgfOBJ-!^^-ge#{)1PY1n!2G-VRiq=0gJa0u}xQt8$fG?*$S@3_hw~0>^R<%)=nt zaS=JO{h6>fBXKx0QD!^NrW^`Tkt;(U`WPp~WINBKlvxiSHgaB+hMMnNM(j_G*tf;l ztGOfgG^cz-mwYJo}tM#QuS8Gg-?$wz))=~Y;ihkFMe$R^Tvs7eAoajzJfEACE zcc&jf2F1y{(^8R9aq{lNc8;jiuRC?csVPo9ar$$$H?OljO8suLK0a)%uWhqHO1|sN zjdljAvDGpuGdXI$OKd0KS|8hHjg)%QW{;HEbrwmB+-8&1=yg^}jb3M$)aZ4VNsV4- zo3!X{)@clVU(SbBHo2Z_-)}MN#NSrd?6TXVYH5$WFjq2R;q0!YmF{53f3(Rr0E;CV*eb643d9&%z4s zg?8z7f#I`CRNZKmsJhWAQ8jz4tps#BQ1a^00o`0s@~WyUsWnR%4}2CHdpm3(`7Bih z9JnoUH9NyeyGmT8g_XEU8!K^@R#xIF?X1LAT3U&#w6%=uk%C7C9tkMDO_r<9+n|OG zH5l4eU5To*+!9r1ws?N(G^Mrs!CrIx1U8!b61#61uA9*o+VIu@D^9) z!CPG2nALJnj#?>Sm7i(lpz`1?uF8Y8xGEpk;;Ot@i>vcvrJi}TS9fN!mV)loW{c~Q zgU5zF7VNQKkM(+N*JHUlyT#5Rc1D!@gUB$Ks5(2SadnnZ`ARKQ+y5xYNQR z2hXV8Hd*m)leM8*(j#4uwcR$&nYiu3BL|PQ;a*1%zaDGzNEa)4hZImd>B05PnK;$t z;n(9OaZbU*ugBW%n5+#qAUWjlnaM6t|LNh^qra&1^6=}?U(}X*`1Q;d9#o}Gr1MM4 zn{bhN!Tf;R=5;4GP(285fO~s=sJhv*=2qtwYFyoHS>x(r-Wpfu#cEug7prk~UaZE| zd9fN-=Wl9UoyRG0J=zPuqnaaXZat%>%z2mmdTdykL$0~?XajC2bZCRelX?7@#~wV^ z;PGHe|AiPIr+$@D2qGI)Eo0wZX9qQ^F1D$0b+Jv2>ym@c#%g|bR#xNc?5xJsSz3*& zv$YaexhGoVdh{3f;;QDInqS>~R^#esv>MlAZ&+h`evaC?%k>0<6k#DggHY>ehTUs%oC8QB_s| zRO}?*jlZ=LC{a~2Ld&gcMrd(Wwov1GP?ffTOoi6Rz`E|m&K|0YmHJhp>f~CYsytUM z0o`boysGAJ+$D@%I8<`!vQ@xDjZwPoOlPw-s?NvMxGsKmT3hq0)7u(Xr@1w*Qg=%& z)6KgzzpALVwF=#gUh}KU+131dv={TGCX>=~>#<_^N)NvtYlCKZ`1M$u?krNP4<2j7 zou&@?dbCWJcdwu{K?Pyye$3x9RfZEx#UX^Y~uf8K+tbx?O@6 z*JEvXQ_gs*9DUypYw*s5-fps5-fps2;iMW?tMPgZJsu4XCAdEZq_Mrm3vDyuE*Xm!>eMOnqQAyDEF3Xem!=f+*_*o 
z_1J}SZ>i+h<9(HTKsC1>t3VXtur~1S!S#4ak8e=!QIy)CJIR2UH_!3PJ&GFFC0|cW zqTHjX<>0Xj-92orK6tFnGiu76nsU^1_oda+RqlM${JP}piAZjn<_hHwMJZp86qGv@ zHMgEP9WNO>W_aZeMJ)%9Rp{-}cDY6Q|=uq=R3N6qGv&rIsmo5^7wR96Z{9 zx8oh!;NceUSUR}X$p8^4PHQR8uOfS1qU!v4jjPk#8ds+aHLhpWJknKW_ev?~-VCa> zOqt!Q`PJnyN?Z@BZZ%x;s*7(*R9&=NqI!IS$0O+c0p9CF^||&;V2!HVU8`|*Gh~gc z^NTgE&T?y9o$c1RI_s@*b@p51>MXd%^;nxn%apTkDFx;1TjRRq;ITHu4yw4kl!J#` z53e4-;PDC`pP-x7OC8YpnR_v#W2Z-3sl1{j_H#%Vm% z1S#(?!#QZo3U`gDI?XD1)oE6Vs?)3zRi{}vtpUx_>OhIA8?6#mC)c~-vtb8XPgtVr zBv7L2^tD9Q&9Nn_YVCztFprjM?Ny`d;(@fH!)~rJ9!U4WVO6I^y%&h67jU7ftN`_e z>2^q^TyawY)}MD{_R>k9MOEd$TU=F4P~+-Gtdy) zD|NO+)#*TqsvE5mRX1A5ZQ)FqawP}vno)Ifg~!6#*c8F(JWP$N^Ds58OAb0OQ}e6S z+!|M>yEU#Vp2G^X)H78?*W&7YW6iHBs;l|+Xs>SPuI1Ka#k#tgmS2w*>vry1emz#K zdttBT*JH)H7xr3yJyz_|GTlv%wG?!x_*z_#H$!Gcvrpdg>zRv@4fF8pnTt`0?cvum z7vG9V)xobv8+12B*4nE(ec9rAtXOyYvgOxf71)RIXqm?+xltG4qU=XS{Ty zbuZ={*xRpoDSR}{P~?%~p9v07ABUZllUt*BaD)tI%oDr;zQRrXNh zVqZ?<6>D6N?y7uk&8x~Q)~HH7K#if+f1%#xlkj0cR{De*Rp%3GT%GOKxH|o+aaFT< zJ8C-3t@+jIZi(v|HC-;RmaeV}zs1#g+*-OGE%R`zn;%LUcro- zu3tvgSuX0< zbU^1_N(tz^ONpxTF1SMp>qX3lDs3$>Rk~VYsx-C4ROxAnsnXICQ>CMbt8U2sRJy+% zl~u~Qszp_03R+y9{v*3XDR@wIc2)AKvw{*;X9p#!&Js#gm0!i_2vjg@ql*?*WeF{= zDv#dcs%)XfRarxetD0$AT$P=*xGGC)aaA!)i|esAkCy3jhqV+uy6e#}kAA5@BJOH`eGm8d%VDp7UzRib+AOE)9n zepOVrrIm=zzG_^Zebu-+3oCIws5-kXdDYo%iK?^P5>;onC92MDOH`fRmZ(a*h0SW~ zk5aZjqbj{Y$*s~Gl(`+QUSCLm+vF>zji|esA-RatzUk|q)&*1S39#;VRH+5K>?#1#}4z6+Y%xxZ2&l*#;62v_LxEEYm zFIQq}G!~g>+-iUqepI&_pwIAEsP1!NYGU%1Q;m0NQ8hDpi>ey`daN{-+VZNg)D~4U z)3&G@`>#XEBv-PQ8yuvfgpr1t`^Zq5OE?O&5b>6>3)!BZDsylT`SUG&F!v`qipDb0pKc|Y-N>qnb9X?PQ|K$FwZdgZDRiZjPi)yE+<<&Di*;Q68%`=B|8VIN3ITS#W^E~1U%a6m|YxFy%T#bda2&A z*Z^*Da`5Vig>avjgI9;W;$2$@uMT^~`7H;p4tsU<{^R7h1Jz-#xK&2at3#`L zb?V=D|F-*|-EXQt{P%xHUJQR;>>lhccHhU(vEQySezW^x_x0|tyKi>)cYmJpYuIte z&3Js-`s`o0*LSaXFL&oc^qV^;`YkK^JqtQ+K|oE0UNRU{yn&u7KGbqA(eKD5Q=)$+ zN5@|EosfIVFRLX}qU%Q$r!CTZ1jvo+dC%y2-qZWe_ukjxsCh5Z9lWFF?UR^y9CF7h zoakQO^**xX{kB{qOLV=TLCk|{v-IwOS|dwz{iv4c`cW;>Kb4PaiLTd~65Xpa`njp( 
z{jOE+`gjJh3^HHIM}A`9zQothel`CNi!J$g*ldaKu-X#eVYel|!*WY}hwWy3uXMb! z@k++w)w106UJW-8VgD_?KB|rpN_4$PDAD!)phVZ}``wsFuo_9LH2rK++QD2SI>qex_4~7BkLVky}vBwuJ@NEy53)w=xTq7yC?C&7wQi5dA}N6t?xCuTHkL( zY=#%Y;;VgT zi?8;YExz7wmimbuF>Rz!^N&3-FTU4)z35)w;Pnh%zu@%>UZ3Fg2zq~j6SvrDOLuwe zHNM8z>wJx`*ZLY?ulF^+Uh`{wz3$iedhM_A)w4;7?wAd0eEn=#<9n?O=d!%=_i7Jr zLGbeLM8`;b@x9uK+Y-F|dwnx*CGhg^wJziXy!?Bu%j?^)kLZ*RPV{;4z1D?OeqR2) zk=v({DfII1^$w`E^YZWY4!Gyd%fAyHXA!;lUh73&vX_6a_1-pH?``wkj$I0`{JqwT zS`RP(PITlpy!hU^9VaWj{ClnUj@f!~KFTW}ul3^8q?doM^?Id`GnG#1psw4C@3k&e zSbF()kFVFdaDK-rA1}JYa?_d8bmy{OGB~q?7i=802RKQF_jf$!7*I`I;_LlFjjx~8 zYkYmoU*qfjT8*#wYc;;!uhsZ^zgFYxJx`6V_dPYf+V_;`@IYRC@93h&$IHJH9rfv6 ze6R1kWyqJ5+UfPoUcc=1%3iDW+N{@N9o`sgAG|%{h}95D;WPv4N*p`uHNM{8)cAUT zQ{(Hcs>avbRgJH=tQuc$TQ$Dkx@vs&Y*?Z@Dg$bKuXQ;p18V;D^KFf-{Tg4d_cgwH z)-BP!yz6I_l6Squmgst$D$(^eg%?dejkU^c%Q~n;*Xs<>vA^oLF%LIzWppp^`UtJ$ zU2RQR&*E$os><|h=Ner-|JCSfYXUmzE$+tOkc@iow#8S^P%XY%>sx&73{|6hd3V?p zGDaR>25ZGjvpB=xu-Fn^FZU8%?I~-?=ts5WT^)DeZ3pZIqdrrAQ!lVl8OXau^}eje z*ZZ;>U$2)nzFsqHe7$bg_zvwX^+!LG*ZiyF?pFWxu|mziItSbGubyj5?lJRv@x9&w zUewFK*LpD)UjDt-t3O*=YrnpppvCuEuXpa!@1WGu(bq$?_-^@ly#pdRr}gT0I9mC8 z=U)8|N6Ww0dcE4CKf75=N5B8m;(M)EzyH(n@15KAXE$5^y>q+%>}Jcq(|796KGxjh zUV1P8UhC4It!(AvwJ!bH%9ekxb?MJmw)}h7z5272HUIk4k1f8}e)XpxTmHS#3ErA? 
z`ZxXQ%2qyJ`^9@-Uio5vMrq3w`n8d;OZPt{UGh zAMe^ie}8DLJ&rxDTK?X-%PW1y4p%81+-T;UyBs@QHUHk&0eL;Ie7tj)V+X31zjy9( z>_FB0J4e^C166YG*nz6?z3U3B@VxT(`hKr;@Y1uO)A|!`HNM{8)cAUTQ{(Gxw#L_6ZH=$D+ZtbQxi!Atc58gMeW~Y@l6yzA zP~&^8*RlIr^Y67@R3>@H*NN_xjaM>Wujch>UXP}aMoNp)d&_$fiC~}7Q(K95C6I{) zx>{!tL*1U%p+||Awt4{1`L;enrhFJtTQjfnxEd=t?DF>_8$P!)e%}R8= zFDudY)>NYFtqC`T@}@EUEOk3H9Tj|zdpvRb1?Jeh8=&hovgBQ_ktMobBTIC>M&fK5 zG}5CpCAxl8OLV>5@5bB#d-8bT5?wEu5?!zFCAvN)DACnxSIpLVRMl(O8eJdHq?Ivh z9USpYx;GW8V-NbhK*lS(ZgjOZq3Sr@((I7?ry&)r>+hQOM_PP!)Y0N=Bh?yRKeDCl z)qb+(T|Gb5=<2wjMpyeu)RrJt1r@zFsL}O0Q}V7K)e>Dls>n^^EUaTi4DX)N^>T-Y z{497+k*azh87s3AU+*Vte7Ah`-mK(ZYBZ{_cljyG=eYK~WLyjr98$oHmQJ-(r*)|nDr@6Sqfbu5Cr4>5<}R!d|b z)tRIgU%e7-@zv6A@zr|Y;;S{k#n)YJ)xg5_Mvq zMAQum9-mX=>wQ9vuh*O!UmYj3^4IHT&A(ndYkaj&E7A42*;@M8P51Jz&W^PFd$rSR zyI$Go=c`gPySiyU-b3uaT%D#T^sa|JS2q9~X1&jcKmc`{EK` zZ%-w<-kwTyy*-uadV4C-_4ZVvdv!*?;=xIQdl6%%6|H)9Ez$MXTB7SUvP9QwuS%LU+hLqwBp`DH**NE72V~lO^NT8NC-PC8PIZCA!*+-I>fEJ=x(FU+vdheC?c6qkDPR+bJ@L99g|J zmFRkVD$(^8Ridl?I!ug)&D_-d_h@zr|Y;;ZMV7GG^yExy{eT6}fX z)Z)ACOY8A#?)ACh7T?Le*Rs5N@6~$sj8c!P-lA}V1=+)NjzvGC)cE=trN&q5e2K1~ ztx9ygMV09KnW{wB+iQugx7QL~Z?7e~*IxDWA8uzyO{-_chP-fzulEKuzTRR>d@s7* zAC$c7{XvPY_Xj1q-XE0cdVf%&>-|BA?(hfj1+MzOv`>`r9sZ!?-(j&OzQblqe23MR z_zt@*@g0_1;yY}&#CKS4iLZ`cvExPSa;!INd?)u_%ktWl*Q&fW<+UiUJ?UpdRN7!y z%~N@bTMA2j{cKg^>usvW*IQMMueYljUvF78zFzxle7F9q{e8*3W45aC^|5A+@3mh2 zX^GnV=x;S>@txY^^=w|h=Jje`pQeu%N}KZfONY0_J}vJU)MwUle{9Bg__Gq<;m=BZ zhs~Dw4y!Hk9d=vdJ1n=vci3);@37ty-)+D8`6_d-KgWaFw#4^ZuO}KQ`S)5cD%8B= z>$P6Y0$%>T)_XH_)yuyV9W_~AeDBJ>0e2)jwO@aSQ!5{@_3H0%YWa6cAFm#H@x609 z&f9zW_gWWDPkZ_I&b?mly}Wy)2KAhd`(W@MiQ_)@5?gCe*yq4)Hh6JSeSbe>0q={O z-t{--wCGwt)}m`;kQQA%rnTI)R@(BewbB+{8ymFfS|8A&E9Z?GTfIBkqC0I#o$G3O zcWS#j&(-qo)OK}_tL5FN?dtqi&AIvp@fO`VbEt0*Z+X|o)-Ad=Hf_|4PEB{}xihwN>bO3x!)_)l$Fq(p(e<%ciLRfGN_71?rbO3U zXo;@3(Gp#6r6s!FPD^yXrIzUG`66RGRvIO`b4(p8jgoigm^xM(CGS3sam00*bM-zY z;+GQLIqNw+yCbg4a#8P3)>6k=Yo9)<_aU$W)h>3!9Eo-OZA@9P{>^=@P> z7u;Osvn9Mz>_m6QkGI7YI<4HD@gv?tcgo#4>p92tPVBq+lp8xyq-**4A 
z`%U%dzay`SKks%AcF%YFeuKaNAMbzj+3rRB?)mP??&9e0x0JrveZBkZ?wj5H-Jb&q zC)w}DDBp-GtCU;4zWe*`b)cRPq235Ytc`h#z6uo@iLBDCiFXw$GRwG)S1AFU=t1QT z@<@=ghF5rW#G;=_B~YW@l<|se&fS>p?@A?5qI!6}C6mCXF;-ZIEA_R0!BnMLHL6Ok zHL6OkHR`9L(W+mjQpvTxy;Fx;o{RRNs&oK8Am3`D(t)}LTqOa#bcAY2?lfw=c8#i% zYmKUsYmKV0EzE_8Gt#>{s@bqb)yNg7h+O!hmPWH$R2{F#t|NL&HxeoI6}-Z0AtKVn zES`j@Moss2cB*;>^se z%DdDOxNVWZ9T6&ODd7pB1v<^b4jxu=h9K>J)Tq?iN+0ueU?4mii`tpz}2DRw$B@r9;Exb z6>|Y}if=^ebO7@!I0eT_UUBjUFB9SJ5S3iP>t_=tw7`X`(N|QYr+Gj*7vP2kNcPTq z@1o+hM%8$2@QU?Y@&~%nLe?Q^mr`F5yQP=3be=Ft^^?iB=)4P1@eXjxYO3U#P~l_X z30$ZeeNFQ#W^crAI$mM1uq|k;5*7SnC9BkdWL-qn$u+&r3MnWhP@<}Mg%3=AP`8dv zo;9QDcugmZ?+NFvlzqioUKzWzsG3;2Mb*UGh+5Km`(D_sPG3{>2H&EKUDEk)L@;=V zSH&yx81P!)Rf`HPVYNC=)4C|d@Jdwhsm%~!rBR}4yg-UDaLWKJRwn_(Eg4nED|8F~ zP{}L2RazJ6@_ESMAbL@*34n>rGc-##yR@jPu|kZUqCuU#Vs9I{2Bo*EF*UufF*R8% z)C{Iv3v5eesV%Ci|7kZTX@Hhj%vR7VRW>_GAfqZrE2C;wuPv{dI0$PC_!aEx=xi04 ze4t_$(B(y-`^XC-JErr2tQ^8xTcxcvrpB&nOpT@j6ICeK+0^+Jpkm)K?dm9H3S?wQ zl{$rt5F%E@?K)l|)07`o$`u*WROO}RwM12ED=?91)OrB)9(7)rWp$&4m=e*YGGYUd zC8|oUjIOa0t&>2Fs^c|{7NS-iuh=EUkJ1KEk*eJrz*-fnVC=stN2*3ujZ}@QnO#y< zNAlP@2>=y78!><`D+*N9cP0O#($yAKGg>XGZnUt*#QX~1s^hgp)x^S(K#8h}g zm3E&oT1vf2RY=eRB`R`4h=_D?TRNYDnE*aZ$1CP*?2aIZr$oi};3j|5aREOs(5XJKuuv#lCc zC4m}MHNVuT9=WP!t(sTWtOYL!4Nz8MAsWPbAnlOoGz+!v_`%+T4i)|y{ty`l9cqd+ zF&8Rl?iy9)v%qEYTgtfrdqdCyWxdSZ7^xCf<6Tmn03sJe@hTliYhs`(HLFF{=m1cW zA5`u;)R;Oxp#iBTS2Y_Ga&W7+*D zL~Gj?9r?%d&J8s>W(D}%WNm76jA6cmLyL|aD)gFnaj4ND6|74fl1cL%(U;!#)9MQU z-RcZ>7ZIJo-Z6)$B~zk%d0%={5hc;KbIBlomGAD*N+#7MV<#%bZ%%Zz+-u&|a<9>s z-W1o4CjZ@zD!dog_*g;c^*!agu!2Z)oOV>xDhew}5ASKGEupJ*rsiF(GeC#T8MSNW z4s9v#1aYFPpMi$yqD;D-qTqLtOV6Q5|V+J z%y)~ZC4+SaVkQso$Y-WFTQ8aR&JnF-Qsj=BO8wlFVpMov-a(@0J)>*ASj)TGi$U(0 z_oa8!GJa{>I-Y5HUwTVwi^_ku=!lizfmw;G9aZd+B7cq9TW?J@y4pw9=xU9u(bXDR zqpLNtMptVjGO5r?-f^PVNW_up{E=QGF>{o6o@jZm(bf8nT|CTtyaPqcJ3K&n7m5}= zovMZ2r0O9hI&={p466@)_6Pa4`i>MW@9kYFYV`Wf6eqe?-_wpgw9+v*AzNSHrJ|Kg zjjo+Nzr8#8jGA-oXhS3QvmWG9qN~RgE9_Lmq4xu*u0Vw9@QyXMRw^~NHVfI_8l&|9ExKAR 
zY1fdq$T)b<_r7>}cZ_Q0U7H=4-(92hU-LU`l<3%_f&a>P*=W(Bk%;jel_#vsq_(4`&zs3VHW;^Ugq?77e$sjYB^5Xia z7w0p|TWqwRy+v1RdwW-oTJH6oIa>7kmK(L)Yjm~Tk@baNckIz%g;%2Md9Ux*(T*y1 z_wnQK12~N&f^*^{iU+u8#AtBboAI`rUU#zgREjJBzfo+TLBH zMMqw|yu(O~o+4~ml_TP)(bcm@@&(v2NOv2lB?HOlJC2;_S}%s|06Z|Hudj(txd)&l z;!{VdExJ}`fR2h{WW^kuXKX#+*c;DxBPq2JQYq2ZyrV7&c@t!=^ipr{Owvjw?Jx0` zBW(R_gVb?HvaW``n$Z{>3??Vr0c`@<|2OMdi>7i?z`Q--zon$QhXNZPvhG+ z6U|5QclldH-+!7m?GdKx!8C%ecJC&v=fV3=6E82LKkE>&(}OtFfe8E4@`yN7i-;An z77?rBam3_lxNGo<0TZ!+norG$*L~NFd5x(V^%~PL?#O|t#|W7ZHRfFd<~;+Zy61@9 z^_Y6gMAV|^Q*W7AP3Za5TM=%G((|cC)Q^weZ|cWeKic}S*85A1x5R!nRtQ|Pj?NSC zJJ=!1nG-Fd!mFXjh_mwT4x>B*4LK=PXK3Q;8d1}0tY&e-i8-zDUYCe^9u?jU^@O?C z3U8M7-BVsxGuEi_haEEWJvuJPbeF4h#mJ=WIqFDK1&ktM0Vb+y&~xsgDf*6`Zk*;* z={fks>qPJxyp3R-PvlZDPVhs$dQ`6bdGGW*kQgk_BiIUn3`B@k7F*2s)@z+IA-gAkwb3t zwnSok3~^}=f>>;iW9lt)X%2$?u|3YG-ZGcwAjluv<9zC^XlV|D{INaGr&>GpVR zdatP;ZN10T`%8_tT%UuWX0xp|ye9`iB(}#874b$-4uZU~t$D071`2OpBWikGuEsSy zmWX;D72d2T2f;CBTaR^54uVK*YsB?A2;#79@p^I)#9}+gL~YQz90al0&N0{LAc)0w zj=4SuK`gd&%=I}4VzF&8dvXxOVv8IEs~u27@oe|J$}h0S;qg>&fj6<9#BW_p)eArE z{mti5EpfJc9KZMIUv&=0e)A%J@4NWz$44+<1Yh_)#NPX{UdJBkMbLc`^&zPCIEHo> zzlS`_v*-!_`!MRh3=O^j7wBE$9RExCo0KiveY`!IFi?wv3YKeAzAN$Z=qT2l_gxl+o3diBS^eiVB3cG8M{zwu>^#lyXHnHRg5 zM(asP>h&b|@8j?D;JVI+ky}ch|4!QYDl`*SLue0=`TSex-sMzV1nan%ctAxE>Vbfa z-aVNR57id6n9rus|31EY6JI|G)U)_`5Hx@VYgoe}`1>jJ9%Hn;Uw;p3_~LRUHSi00 zoCUw23yx8TwVd+Hpq#ix=Ja}GQt_@e&f7wlQ|34S*AIb?8m|ZO&Exn@yx09h$Y{Tx zY9);xLb4B6@Ry{LEb~$H?8S<2zY2PPn>a>R{Q5MSFZOEoK2B3#)0c-U)vo$7RhhwJ z;9IW8Sp673rQOjkS|s?`q?>7EklnvN?TY2`$p1AqGT`q;Xb}E}w?2-GF?qbO56_~% z%e4`@{3f_SPUoM|KA-eO#}9l0yv2jyA2Y$Lz<(4ppReHYGJ5~|J&%~^%JAuH`aPeKfwl%=$?tMxo=)0%9;375EB24=gV4nD=p$N0a%VA*!#e_Rxy2BT zEqpU@;nh=ZE+Qn-STmCmk!^}2pH7;OS!aYtSPkZB_|@bY$M7h*5uS+gE~JI{Xta-r z9G*`)jPV`eEOGQKY=IGz9`7v11KM{He@1)0G@?J`0jP0ntctPe)hhy`AEM6xo$YZu z#Y~B~?W(oITB=vuv5zw1=+e%8l=k}pA3fT5iYPqeMjEvr@%5?oGd@cD{m{1b=6=dY zzx87-N=y74V~*z$v!2JF6eBPBlAebxA~oa}u&!OZ-XsRw@p$Vv(z+OwSZk!>n>Y5! 
z*`-1Aad1&uB3tSzv|3yoz^$Wf}X`{^?06>(FAq0 zZGVh-9hoPrPO&mo*yh9NEBJaIBY+mZ6B!h)()uNUH3c}mj6XjGF8+m4MkY$xgVqDD zUCmL-OSG21>+=Y6ZFX7dK5ci=I%W+nnuo2$Me!`MFjrZVkMz zdStfDnT_^}cx-E*TV}@8l=M*m`@r_t$Z*!DmrFZycyaFf`^@$A$Y7-Xn`_f-@ib{i&fwKZTE-x z@_ul;hT4+frT$}XM1$I*?m92B{8wzQ%5M+Z$+GdcwtNusHuRzLEe}1H0x2;&P$Za z8cpNn=YD5Y>JM8Z0A+jJ*%^)LH*^l?(eVS;z@@d`txyE8_z6}ncf%fm%dtu z_Kl93Mmc(F?D^=bDI6R9LsoWeT9sgBf)@>YbK3OAeK~D@-Jf^u$%d2a+H5$#!)cG# zk^MkE4twyZx?Y~)Bg(p*I=@;o(yW}#x7x1DsijjRk78YT_P*ADwc+bmvvV78c(|*6= z4LPj&mKbccBSci~ND);hZ6fwaZafwsb++_SZ1<1J=*&lIY)Osn{xNe?osX%T;+iq1 zr9BEO!_2jA7NDfr?jP4)-$T^><2H&qAD2jt4L9Zew(q98-#56a&V7fQ;+ol& z>rvdbgD4sMTbN7w)(4zJ_gm7a&%}NdvYt41H15+euctMD0&TxWkM?-3JEMqebc3V$ zbB)()eR5l6t#58>`}NUHb*`^&ifiplZi;KhoYr;`xwdqU10E33F6vy@AW?hR&ZRvb4a{Zv^R$nB zvU6+AT()kCYqX1ciptHU_vld*+xFKPp_Ry0bLp=$Lh@aG>n%!`?Z;wi^Qwzmk-c1F zwXL50YI&_hs1xn~&VT=EdA}UX(~UK?itT>8Q_b(B!ge0LpkB*Gt*qyA(eQa}%8cz2 z@5)%Hr}~?IIZ2ka7DvPN$EaG5apK!l~q(1L$8n;C^r1 zDEn&4CE)D7rq5V;;9gGLeS}PGKZiIAg={XWB#}4G)u;WK`)n}X;(`eJO;l3i-u#iA zD(~qX(Hn*C)prw)YTn$^dsmoCESc8m^Zfhm+~p?^`2iQbeSX44>ncCuqC599E=tGz zkc;MWKjq^f-8cTR$0+;#pi63VKk1^h#E&u}f~};NMNlKLEoVF z_Djb#@^k6AC@s;H@j03?KHFobg+_Zy8f^8lBr@A$GNgU--0Hq_M(M2+yeo5adR?aP z#yE2GOFCVLQ+fY8ZgRUn-H~^H;Z1dv)Q5jI4^^bO3Iz3&dF^pb>*&+j!uR6bVbA*_ z`Njy`-h@c0XWcWm!nO0klK$#F)&Ac9Tz%9reM@J6KMOs=Ip97YMSbk^QT)2+9526J zdybditvi9suh!^neM{l8b650aZ2Qkf6X6)gc`n0E-N$~D{b!533wGE0UWI9MxIO+S z-`L#q^+>j6-17k;pAS0rUL;46a%a0_tT?)SX>-Xp#Al0h34M2j5~02&qW^{s;^Evh z@{||TOMh$UoH+_^_8mvLHGnd%Pwds25gwmeNtHUvHtWpjta^yTA)Rv3t98y#CB5?q zt<=BCl=}jmb@gSg(=JL&be^7&vEQ<}nA*pdU-$b9B8>Q)CH*DGJ{xCGd>TXwPFq+a z%W)afFS4y;r%}5gD%*Xrm6}E!d#TaO*i4PB_e+#J+j(rK?!z&yrKd}C@U`h3#<5%RwYO6XS>z9758Iee6XX5 zTa}L421kM|a$nd5fIcG=#~okPEUcg76I#j3dJJR?aLOFF&ExIchwFt8`KK4=Qa?e-TZ$iwH>f_fV;HK#FsYV@ZskNRJEAZss zZtd>vuI+x9)`+-O5dRNe(My=i*6EaCLha!t`7P9)z=JzD!wZMFSG#9l7hH|5S9|ET zvHM!d&gYvRgzff}A5G^w)>thyZ@i}^(pZm)w5A{9Q4Fav)?hoA%xFY#j8ZK*7o{bV z9hWF2$d)t2Xs?LJHq9V<)OLnZQ>bSkHF}<*M)s>IW413MO33qonrCKl;Oo`c?OdS7 
z*ZT>|oGmD5{R5HN9`g%%ZGwAOkH?KEcZ3*de68~9E?57%CY)(Lj$N*;);-9X9n*A5 zjV*32!Z(+p&uinqAEs!0?N@VOvKC=U|MW9_DJivoO_Efj&Lyp;aBL*9Pkj{{hV?hr zj}KO9XL`5hS%+@&OXOD4`t??1nbzw#c1P2h{FFODMqxe52nXp~)F$EWp7pfnTHvrZ zjy7wk*6oYa8|@!8>?q^JNRN+dkri0`bV7rB5%z)V0#v<>_qmKX*N!>mR%@Svt8slK zZ^yh3e~0b(d_W||lWecn*-@{173FeNT(qvzc^}>VHsPXI+Kh|FVViPMoZFm>(h{2- zw?*0>TiT$0WliKZju@36IcK}izVy8*;}CTZYOU7uuGeA8p6xN+WqgDE-!hs_CmN9n zzz*gwZRhf1TzjvnAf{%pU8*53B2qv0yPVP4K8&~zD+^YH_Qd&!@Ot;5=C-vqUR=^! zz2Dy_H??1z%SKJ%nCwcw1FcH_1}j;drTW$1z%pq?v{L???J@cCcc_~mQ_WdAm5MjP z-mK2uA3IRM%CM?56^pbvf{l?X_{PQJ45N z`Zuow7Mdq6(|Zc}|O?I97zd#987Sy<)U z(DmmrhO~vJ@$d72k(@_6Ydt(iagBCz_62p&TVJP3s*C~H9@qELNNx$$tT8TX*wrk1N&O>#du*&-R#Am$`|0MUif!M24dOo;PyPcfO8a_t)L@+z>~q zYj23-ckA-0{A$@4y^H#XW53B*hHYA@r+6~eyseD{ON8~Y{G-ScuwrA~y09K|@cwn& z>UnMV)$X_PU*GQj-_Xm;)$d@{s67bSH+V3M)l;C_xGst)6>J+q&Ad z5}o^J4Z^zo#uA;PFGoD}HZ*v7r-)f~>t1F}+mbe{2=abw zlr*rPN6rzaWH6`dC5ux-PbV#fH#m#Og-ReL>`{(T@GGPS0$<=9}@{A7^^TY;5QK8O^gCqO_#HjHKa_ae^OO zvE;j!=Db%}JNRSi$6-4!KPryzhh3zb;Xfuh!wX-8hfMzQ#EhCaO#R`ys2t@)Pp*zT7`Umu9s!%yzh>xk5P5J>&-OtRkR5|b^l zd-!7ypT-=5o&I%sP;$VQoH)jh*gd^uWbi5?@pW@$+cR>Z>>)APuIVAi@7d49;y6;h zvCYRbUN2(xl2%KPhCGW>6Dhql8pv-ra$}x^Ho#WdJ7Th3(@WYJ_E)~|8ZT0H^r){V zh_8O^>I=tQ*%xB2=?lN1?mY>M>iM2y!&ccZVzOPsU)H*HFIiD8Y?b{X=9+%+8;%oJ z&wcR}rN~y|-`Xdsg<0m~7WboW24Xe~cP#Ey3yB^Ph$1$W?bkVuHldU13$=&>CS1)d-x&j;lR7B^DmU@yg%zRiNsnmLbNU1u!S41 zf0+CTcMX>y(6-p_??DgJns0t-8iv>cbx><$#1R^;P9)CHZKPYdxID>RZZ1kov?F_f zb#o5A%)B?-c&znV|I>4f+QB7bLJK=J$9z2#^;@{-{$t`D_kX4Hhp4^!xHw0x{ELt{ zPRC$gPH&NaOnpdo^Zo1J{a$5_1ZYERzWJp^5j-Wk&ORtv-5Bjh>Z=S`Gv1v0p04x| zlz`Gdx0pTs!0HkCrzf2|{vS$zera}?vuo+h6!M4XQ-qIP;!hvN>@_}S)IPT5Ily$1 za5PRPGTV}Z{^r4B=j?F8K;4^T^l+|EO;5dE;;68l&kE`u9P_c98y-#hD#X9*-u}WW zsQ3KfyyrkW#w}+Lwo86jZ?SW}Pd#Gk-n$#qiQxV>Td9%1q%-AXuM1LQzLV+cG~tt| zIKcX7-P#npb16=G6E?cmwlF)V6C%$dSC3c=|HewK2Mec9;9b^#d%64P`!@>Ke@m(1 zRMOi1r#qu?t0eX-dcJRGj`bXz#h9lT7T|N%^RUD}at26Q5S;T`Hv`ZYBU6iW>o~uf zP}cKHEbO0Xa7HCaj+RK0zdJtT5Q+YU?LHZg&OL`{wPWC;(~gmg(vmTxHNfjE`F%={ 
zExzm5`*cfq|M#Oli@m^o7C8Ng*%4^`5*3XAbNdpt2yv8ZU#R5j13y@G3&OtrQu8_4-N9Qg(sal`$OLaFyx~ zMmQSRGWHEfW7Kh} zO0_k)C@rxz`uOBoQ(k$=chq^de-EG4my;#}_E1OOK|s|ObmhUce~;5YBVIVwPhoce z`KF#+JNpH>Jepqc7@MuqeP%!!nH3QGBuQa?j=8j7N{rw85$0C6Hjbf`*XXQoX319I zvT0h03xA&#xo9EEft{9C=A%_xp^pyiidO2Pvd@ZLw6c}ED9pWpi_#J=!5OiQri2IcN6`QMTt7Z;f5h=}>E6=ubkXN{yYT1w;G(p|8fnET zmyNh0-7(pBTOsy)@s$c4}sTv=dioSZt!^k5(n5dag_;}uhp2u+{8s1L>XFsrtOY77ASzwNycEVmqKd>8u zlO{MhbT-*c+Gjh9{7$N!xQrNmBtNr{Iz~F}Qoxo*RwVn-F;W+kFVM;i``fr59@0uX zySV*drLBl%lB`?5gA)jsv5PpC0i>jQY;~W_Lx;-f2N_CmjvKYpzGmO4Lh{KL`+j_< z7Cs&CWF$&iDlOgdanIDtJ|b87xcluYVzzB9$CAeDptkM>?G~H=u4$CM%q1(_PJs$Z z?AW?!{J6Utqj5c?wDuh_%B5{wkF7v8O(RW6-voLofp-Zw9YUO~@of+bcbw0&-FN7WvkqKUhz)D5ucQNaxv*ka$QbrA@;-QTyPR9plobmgJ>9ekC=~dabQOT)f(MBz& z;-ijgPQ^tV^_+^2c~o>NPUcb5sW?$YRsLv-_RV&Gw%;v7QfFK525e6~?-p!LyWCCK zo_4w0ur>8^H)3nrhjuHr=A+)t*qVB~+p#tM`EJP89Mrod+fz@wDcjR6cU!im&g^vB zjoF@hxm&X}^}L(2HRXD@XKU))M`?^K*p5VJ^)6A`G1HveA$P7ytNr+z$|_Gl&OUcd?DPs2K*>Ro@KB%m+lq(fXKc3Ps62zS9aY4i@C(b%=zN%b z)wR-F@z~>!7(11g2RnQoHGJtzy(W@+91#I3?ayNlE6?cbY7qWD#Q~sl(?p?`QK}e6 zU3@x8gnM(~&3L{7zuhw;oMqU~V{;Yq2zS(*RLO*@)qKy{bLT)mdoz z+lcn@8>nykDgFy1qVr=dZ^wASITD;+LPh#E-1seq{a5O^FWM%zY#G6V zVsDj{*&bdcxHO)W+19@Is#U~RTy3Yplmpuy&1FQKWr-Wj@GssY(Syp_k}a~k>2_z_ zTlPwN7s(tOx3^>eNf$}(qj72P1t{FI_5$O~yY8LR`bUh$r`Dc;!fiVR+%5*0d2GV1 zQ9nrU!NI)(xU(IvKU~J&&tm@z_rl-?MCB?DBde1-&~RTT_L$DXzVcZiH4VrGI|k}? 
zf0@To=wVxFsnYNHJg7n*>GgII+I-IN)i^T(U^}Os=fthzpnW_f4|i$~FsGi5hkDLE z9Uq*NPs;)3?9+09IsJ4ToZ2E%Ot4k|uZkJA;v>ZrTT(j|b8O2)8_Jtdgd^~CISE!}F z@4S{Wuh3G14}za%Whg7yPeU_V6K<}>U9sSG#Ea1r)_u->O>Zc`qTYtS;JpSXP22#I z-ot{IXyuMM!f8q9^$Y3kTXU%bpSPjreTn?49r`%(7BI(??S2mOt0bu4#kq5zF4sQU zsXg%gTlA;A=lW?_1@5}g)~B?imZKo7NFDZGQ+OS_3-@-{{#Ey1-sX{bbMx>zVm!_L zu*YPH|L;e$JaKvgI)b`>WPDu~fC{B_Mgoy^8H4yJ;uaA`>pl;H(?;-7=vBH~(>Dw4 zqm^Ir^7S#jeDX+oGhVIv^D0urug*AaA4PxclwMpz#D-UeoU>{6{Vog36zUt_F|v9O zjB`B5$0S>W>>$PT@`%3Xy4KETF^A%=em%j(!@K|LH{#pDl zc;PBluKW6hdm7;>8Ox?v)1{yLsli{S8<5j^y|bt=P7*maA3uhcDevyB$Gxoa+`?Y# zy9omiTp6V^gwRsFor4D^O8z;mIdO)@Wqp|< z;vL+kly+*gx9=b7<(%eLId1<-24^1AJqzf)KHh&9`j}5Nx%5LFbIo^dRni%e@O+1s zZxO!?zk+!JGX?$|XW9^zW0t}lM2J~NC^OT`wzia%eVucFnesgTfVytv_w!JD+&UYk zoTo|Mry-=E7%$)cF}Y;TAKMaf>;zWXY?4C*`(oudl;`oA&)+tB-biZ#>Qq#oL}ZQ;Hrcy^qkPB(mD*U#-| zPem)OyOg_@)S$V&spk5U0>%cbDp0G(5%Xi4$E)oHU$nZd=22lIr=D#NS;W@bk$v@CQGEs2*7(+%Z;WsJA;SQ~QdvqF7cpcsi-2 zy28`aUs88?S`PQuC7zC_TuXjR?QZKDPbH7Tb&sdwC|51ctL#q0)3GYNQ*qQ*>79y? zeN}uP4li|;-)ZgQ4-VRqeo{ArwOFIp1)&$nO5p9*^X?QzSN%tXfbF9kp!{(C}0jJ^eaFyn%_`&`G zPS&B?>Tnh3hs<4B<#`%DQC-nifu4?&TqSxs9_lL6Q}L0jOi#nXzADtyak8&U^;F!< zt5{FPNj_UVuW~&VPdG0zpzue~SG z(tF^DCvj4uy#+|uPxvfU4X{!-y)deJzj8i*sI+24vp7$rQ~tQ?0#eZ2v%JNz{dh!p8r`pqhIJ}iJ!H2_DIa7Q%T;-YM!{O=hOmk`;ubPQY&EI^cIyFbi znQXfmjI#pU)6G}pfbG_^6X~)|rybP}m=hx~P;KSgmXFT`$Jm=ePsfHk*$gj^DLaIHzpmnp&ql1^*@|y(8ecMAc&Zg+& zRouO-^P4p-{EIP8E6-EVJllO@d&dmphqGikcxG#k3}M+|plLLa7%i+Q2PXSP3ipmR{J!X$0g9bjTx~fwA$^7{^r0k?NU8 zFd3n;oo7f?Gbkx2xb>~}$R+bOZ0B-ut?dqRFd{dfyeJ>GWyIIs|H(aIcp;bc$Oqe! 
zpL&KlJrCu|>@@O#bPliFK0KcC%I-A09bfsKf~S2e!&C8rmE$RB*DK3YbHJ77DfpPL zOi#fDSFWd^eRyTN6*qOXdU)k~Dt_iGyidbJ9XGY>n}ePE^B}KVQ@9~w;u*ytv}w1tGU14igJ%X*ZXnq z2e8eav)-l9XD~^TZKjOs4)rLI3fq!eev2YkU^w<-NSkfVgS#S~RM}?g>CFk$pCIDI zU30jRy}l_PwN;3YFJq1KdeiaQxBl2pJJJg8B6^M}aZ9=>dP=L}B!gUy>E{INt~A!= z8aKVNp7+N5e5~=3I`2puVf!>9MrZ)6BHNA9PmvPanhySzZb-%ooHb1{i&fvH z`bfO7j9X<>UTyuofq&cmA?V+aKCj_<$?wwkk#U2E$1IDAz$M>OhuMC)`(w!H{xnAS zyAY01Ep-*qIdYZhcHV2d+o4U!+Cgvow3U76(=dKrOJI$L6QM^(`R(iQxwxyCuN@te zCasg-uGMdTH>Tf2S*PFp>RA7%ExgMa`;$L~|3M6c_u*034sV2*{umDB#C8eZ`g|(9 zF0>_axbBYBy0&3G?fD!BZq~?T$?ww7fEw^o0g+A^;D1?yz)@$rY$QgbL{ekhN?VOT z9}P8yW3pajHP_-i)%7^LKH_z0RefHUU-$S-e!JH1@w+j70I zj#<)S-+pb%bN-s^T)S@Pj5@Y%C(63`gI_Hfan?_#6MWyVTJhp_sh?ujdNi$@Kelk3xN>-&5bGJ6&4?4C+v_&3z={@6vA-m8b_AEsOY-qiXb`0qhzz1v!; zAJb)D9MEdG+9&&C0BX6K^V$mZ=-Ep2*x8C4*ofzGnn91#{2i{F^f~(_)ET9)J@!r34cxGE8Pyc8;?MJibp!10 zgP3<;L`}o9*pXd7hoKg%=akZe;5glrlixh*nM09JMc;oHQ2;zlZ+76jh+m$^-oSbZ zTn@<(I|@CKOY+vJz(9}Jb3NbTTpN1;p{|))i*QW0sh!kuyb$s9y_PQdj^DHWeE0kK zb91th^xx}cR%5~1h}w^J5juyw;j`FXg2(yi6i4^tr3!A;4{@iV$Ew&<%&m_@Lwok$ zh%v5dL|QZM^y2+HP^3yv%$8%G$A3JkIZi86LdIb<`Zz@EkS4n5?blQnwNjH^H0GM_ zqO`;kvgbrhfOUDAuenC8ony(>no;-p&Q>li`>6HIMP;ssE=oA}gi8K>dg`LNPmf)+=6dd;w8Rcd z&zbJh=zF_k-m8z>dguN5Fh=QZ#P$D(8EVPc(jT&Y7&e7m{ddz^=FQMO+v&!P{@DoC z1F0t|%DWZkKCVS>85L(Zzk@r+@9aK(Ki{pZPiB8h8okcj3?N!AYsM7Z;@^_Qcs5 zc`#HCorkx?S{5f8o(!ycfPsum&z>axK{~O18E3KJy~pl&=ed-TmD5~De)XBj3zVbn z(&l~I;G$J(i;Kowo1BzBJdcxR+5E!Cvh$ks4Q!die-vJ;KN|Yez<=|}n| zJRT4p$KP+_@B8uJiO1fP5A5k&UwXaIl6iqteQ{PjaxUGUkF1N*)g$ksJl6~tr6rof zh~jC?*bfFG2-L}pL=SDYtk+s{eB198kbC+ueo)<(>PiA0O#4cGJI) zHTmnfZQ#k2(>;%~^~eal8t|{_MmwDE@2QldS9>>kXY3lLyx=!6D%e4U&+hN@+zxxk z0Z8?X-HWuuVks4$ucs}${8DS{E;^+zaM393*hk~lr19C#os^FG0T=^Ha=&sGshiZ_=xU z>3sRf>L>4yji8LyR8cnlHRVK@+eh#0`E#3xacfM{eU}|)+i+2+ZO28WwJjH&+V)&j zTHEx|QB|tbmbuHoc5d6QzRYdkMQPpKhkNWEbLY6G(9?dI^X$sY(ALpiFknIEk0rfBMq8r`*e7iLNUx^D zs{bjn=k($g@9R})7gke`qVgSD!Mh*74Q^4#-mfDG_3OYGiQr$y*Pll%59SGEv)_ca 
z;4Y3)%-+*6i~+#cUnU9V`!dIP`Y9@fZVkvg-Q|T{9)(U|4uZ7bPQJBY3vhD!#gt!# zUH0SfmJdbWfc0Z=qrt*^eNRMK(53RE$B4X=k5}{&r*|>K_MHT$#k>p4P4~cz;iNY` z;8z}muERt3ODSRfZSpF~+xKJ5R{_1V#gf>YQf2;$GcxQAmir{Mat#jg*rwZD$8u5` z3;U{&M!$437DhbXN-*MYGalxXvHhQng_q@?#*d<($QF;C)5?6|?zo3C&XRV$7UJ=g zt3qx+gxsDFRsCkUVJ)faXM5_r*FTWI9{bkqr;!V0LD-g_LzSI4Oe=d9KdsvH_~~WO zwSivghQ(WV@8R4bcWyE%@sKLq}lE$_-N%F2Nwz5mzN6x$${iOMm18?HPMX}YanjhgqvNB;9Um8k{X0S~ zYVD4Zk50X#-Vn6f9VuVG<~vqCTJs$(CoM#4TZ~t|Bj)N+yJO~}vu{Vu zORL^-bJ3b-CR~)3WGtA+c@aM4*}xgJd9Ql+5r9-z()S`Q>AyZx!<+%zxnx}9J(rw| z(h|v1vtCWP!4xMg`HmyPwmq@bs||Udq<=pTdii5i{-nEL)9m>9w6ix-n>W{x-af98 zZyulYZrzKpf7}YX27_b7_B{3_v1ijSVIt{dQX*>n3K4rGrOSwt3;G$hc??w6e~8k2 zjJBXykD;65nlaXmEyt2=>jPE2CO+HJN4jVn_n|JTK3%2k*e=l+`d-wsqOxwuH?%^w z*a1Pc*;f-rzsKcCr*Ryd(nF=^#V}7eWBzQaryAu7l^FOLms7scZj>j)4QbyL_iTPQ9M*mUqt=%SJCg3NghRf5lEb|qa%J9TDb2l$T%+YhT|x0u5%pma`&GxW8owYJUD!AfAsM~%!Q*E z^VqIeGuFgmzG%OV>|8N%8~OR@&?jWubCyJ=}DE=ubnQx_NeB3B=+ z{gJJU-r>mCMR9+u>Z0~K=7asQtdHWcSl3V0ZIflJ?CK3}wXb7oC#5#lcF{TL3`Xf5KSxNoiEqE@bDTr}ou z9T%k~Ya!;b^Zc>oD`v3Sen@YlzdoqDX@)M{ODQ=99Go8lT>(&-R&hOMvP zq`-Dbf9c^acQ-yd@BGUfBUCo@-X4J!#iQL{r|N*`Q6119Nh4p3-4f(e|1$mkCib~8 zi;SYCdJyc4oOx$aX`=p*kzcI(X80aBa4dr z=Tm=i<6pYRzki&8`1?Q{>orhvPrr##7`sXGa`#`+GsRtmKSbZ2#-E!5S~2(Ly%8CZ z>Q#R)(h|SVh$H)dzFgE-WlKENE23HxH)Ig8UjvEyyTH4gr1QGar9Vx&gk99JxB4=b zv)ylF6uya{`|&f*_veA~yZHGce!i6=ei>*$*YNoJK*Em}?QF=hI4bDnU*ZQn{#wT4 zSAm4F2I_dP@gCXJ(D%3DdvWGir}@wv=;I&b=RZZf!qYs7pNH{tF6cdMMXTjR)M+_2 z99H@<>jomE2g`v4jY|=TK>A+UDi^)n+>VF|snGj|B1; z!6&Nc$K?pLJo~Ho`!?o<2k(2XktcoQ&4^kwQQAdB6L<+Euk^;HNz89Ts)+KxkEmrF z6_$-?>d*0`nQ`cWV2iLx4Ue!#9e=p}GR+T9Mf_#N=K@67UwSzgKJBY%gpg;#D*-s~ zbv<+dckHZL+tS9_A`|gdXws{QdSOu`*&t$m8!&Fri(dM}Jmx!3*wTPOywd4`@ujLW8uSGoDeIiG^Ax9j^#O<@0v!OS6_9NaF z^%@$4m{k+OmC>8Vu7L=9M*RB!#Lu`s0uhl1Ry>-S4TxpW$9YWarDruGB+3TgZ19Xslv?Tqd-E0@Hl=HIU%eVk!ZS^fe2gqDSj03ZyFJ;9KQ?RV12Ne z9|hj_EGm0)e)6#lF{3I*259KZNa&VK_jWz~SHkRuQwL-45a z-}K!-2BOBUfrybwd@6bkM92|it?{@eqQ(jlOJGz|?-jj9k1;DC3eea!(0~dbu8|`U zF)A2ujrI^R^blcPhcpUQ 
zkO4?LS6fJw)KboW3yOb<{z0N&$Imtr{VFK^_B~ZU#lw{!Zt21@V2{Y;{%8Db(N|VH zVAe#G>Fy(Z&9+{E6u0vNv`KimKW#FOpqN*Ni1*y%*jBc<>A@ag9~jMTyelYT^x%2m zf&4NBMNrC>Gh0X%6yek1dA8{zDCNqVE&BLhK@sD)&76gCtSfi6=wqUYwG=9Q+!X&1 zbhef1yfVn$N6Z-T;m}v~*DqE28&KVjB77<+!a}y`p0ZE|NNMXdC;?PID++j1PYSyPr$ z#dvNjRrtMaDWM^x2q+s9j*Q$$tX7JY<;AfvR+>Ze@!Y~xo!5&jfw z%T)E}mMSPBYDTtYo2UmAOUe2*C+!HARfTS;CW`N(rw`%>Rfm3x*cG^lpXc#|${Rn$ zM?tBqTHJ=>i=YarqOM^ZirbDOqzZ~#>LA4=-7TyG(!?0DYS^zi zuzW}reg{>>eu{{3ww;kdaoZUgBUn}~yQR9Os##af%<5&f^IITQ{U!yr$8U*rQayBw zq>JXhTPA(9cnhVE0`6kC>Xu3ub?nydzs1r?G2e3Oq>^{&c?+hKT6;^Ti&lAyri%tD zdw-mjZA`84~-H(G?WpK~!<Te6QzwsaSbeV)KYtMwEv zI;|&h(V2T1AEnk4xhR#M%0*+pCv#DodpZ}TC7v)xCRjtmYvKO-fByYTRJo&K`Fx0TyZ6%WmoKsy>Gop0 ze~vNBH=lY?{}4TY9`g?*I@0%JoJ0F?lF8G9Z-aNNgOL}6^zfSco2VHae>3M;pGXyF zTwr0i)fFrHuY!L1PTO1TnL!_Zn)){;OWaADJn607z1_9l4-?1OL3$CEeP{RS`;mIw z6{$U&?!4{qJu)?KLX!A<1SQ3sV{ci#h`!*>j%QI3ic!XBBeOM#PoCLxXVF!}a= z1gz}F>=Jf|p7isO`k;{Odam;N zT+g?lSbI`8#Wi+9kBfLM&15fQeTleZ4GQ&@t$I~KT*WRHu`XiXe;%uqu^5YJbG;g! zD}E{d^N_B%t)$c?H?3ON+|=f}xCO-z*T?B%0M7Yr8K+`(U*?un@+KX&Io3#C%SDH| zDHlGt&-D>5DsIohQDK{NU#=b<%f|Tf!pEq#xwBLB9M+fC@TZ{8EZE`P+bOw1R^d=? 
zZcD!AxjP^AoZI^;djDPM#>>d?Acc$u=%VC{N@n)&rKBjKISk=+f-^V^W??*nV(m5{k^_O9ONy4Q=eMnjw{ zoTPIu3o3QeMWfVN7mZq{T~zkzyo=V{CY+Q!*#pi9Y?s*7xGj+y+XqoI|KeX&^P^HE zDqGawDQ*cQ9@~`9KzxJU&0LMA<{#%aP_yv6*b^@6YSeVTi#Y;S9^>m|N{H>Hb$oH& zOL5|x+_J^aORh`ROGWQL$T{2jT&vcZWB!719rYKr42bY9Vy%hlkvr3Svbb69amX37 z=(T8HPUn=4;WFA|i)vV$4!|8QsEA707}P1PfA9H^@uv4TQM3Eo@J3%owe8odR>FUZ zFYxUb@q>0vFTRd%KaYx}ujA`A-$AN4!Hzi`do}QM>-+t;xc&Qn{P}Vf?InD2w!nEE z?3-a14YgQHzNH?s{W4xE`8>G97~K~jTn|oCJvB~S;Y?8vSI54|HgFMrMg`rv+zK*N zk0L|!Y#Q73s}ih;eu}=W|MF4DBT?=jNAGzQw_=MpxUSTX{GazkQmOVuBQ)8IS=a4HOy-35$I{we+i0+ zYcD2^qsFYqDbG%=)ivCYhE;FBz0V`7wLBd2>5Mz*+n-H73iB^A--t!Z^}rG&BC>_o z$L`+`9b=y{lIK4Q6pZQXY4#b(CuJ`*awaOHr20uEUqFP-J&1@BV=$8O!rb>@@&RLK zz`)7H&`X}?3FCzY8A5 zBtZn+%Z2E!XI^EW{vN%7cSLkE;vEp*Bi0*>SIHIjjZy^JKicpG*d^?nL&*tdW7J8Y zg`|68AYxriQRIjXTttsXbPqmpWR!o1{yz-Q_;}Et{4=y=L=NLKJjZ0s{LHm1sKaK; zx$JDw+L2VbqGemo_@g1TKnMKFZq)DKoa2N*hu5`?W9P@KLQJ5*M8uu{bHD z=*&lf5s!}oR^}-pa?xPKiEzz(-SigF0k&r*{$yu~GFCwct&JFe&u?`+b zLVg`s3M6Ek$8qKsvNmhn%6y3>P-FNlbz8s2B!ybjU0$H1mMp4{F}$)e&k6`>Q9^RC0BN8o8~$(Br4-3^i`9-cTdA)g5a5w)#Vj zKd%l^Q^?gLYUD%JC2Bfl^@$pLUY(++pnj={rDR-ElU(yHK zM_NT=?S#s&C10}lY{&T}(i-aHs}NG9H(@*P&!}fSL}^KXIZp6WFS^DD)hw5jZsYAc zWW7*PI#NB6&a2~oPSliP?--}cb?E5n`aBq=jK~N(f_U*W&D3MnF6mqXs<%@vBi(#B2qTc>3u@X}Cd4X9=f@ckpv+Rx204oldjwHyH#{(OX7l-5`Us^E}S zSn>_EpY6OyTX=zB!O9&U6i1ad)t$#y<+DqTE8 zag7`}!iYc+dEhnmoX^G`2XDfI_r*2jZ!Upe-6i6h9`c)CNBUaaL~$)f^Ff^3Lw*4J zb1B=IPEaqsDS#4f+h50d^v+32l)i)Q8hxilznez-^{+m9_nRH%%9e6vJJ;L~$w%uv z7yHZB*Kbl_yF`P>`$=kS_5C9<+hhHrC7#DDikNBXjtaj^IcCILxc~cUmn9>k&{MBV zOG)2jJ^L_rO8fH?IH~_E=A2O+WI-6ATkynVNWP7F?t+YhPb}}evI7j8fm=GIP;9VUeaw?W4eeNb8w?n+N(sks+8)GJDQbo! 
z`$q$L6VxiCxLkz;?$W6O@HpZbvUlp|En}QbKCZk6uy>76VscD09AdW=_iFW{r5ymw z1E?|Rz4wV$_GXITfdgHcSMc;oFx}AmE^x6^dKP+uy8={PW49UJAz?H3lq5TDh3VYP z+ey9(w4Xw@4})W59(D5qHR&uQixn8|d>_r%XH1R%p z;P2xHS>=~8&Ut* zkIRLe>f}SjHFDxNV~=f8*8&-+_oHMPp^`i>$eV{~WiVz8F@8Y`n%wajGBZ!_KOBh7=Xt_Ubv?-G9!oXEB?%bGn=M z)pNd|gZ(jqo9;Yja8q0pQ*cEJTTR)`HD6QL*`jB752LlM(I{yy!%@VZUi#y%L-KQL z%v`>1ifiOg9i<0lhIFR5YjQO60Lw#Lav?RX88i!Lo?UyYQXW?e(&qnB#;${MTfX!+;k zoEK%?x2t!@Yq(rg57&6PDEhJvTyL;lk}=46JG{2_aM1|`;Z`j(~bdF$f(D_E5~cIWk5h>Uhf_6{2p;wsyOehOFo;< z1w(p3NaKpyQDzl;`j+dtP%n%V0DUJu&f+`Vs*g&Gcfr%c;1RVj@K8N`)irq852U)0 z9>lhqJIBR6bsIQqp8DEX<%92Y-5dUO84(90*weQ(5A@51SEojyn>7$sv zgpS}mR9QEwbkcd zPS@z3Z&trbn_bqrU)x>O_F03A)>3Qn(VttBkJd74^U-gu(M4&AwQ_dBeposqn{wf4 zj_A895;vH=d+&Ms=N60-ZeHLSFTUzDb}wry`z<^nW!#^Q$TNkCe>>J#pB=zrLByjlPWk>d(p(k1cZ6KLy^1-zL^vi+cSx5ql)2vww0y z-DP`dEL7Hch;BJnZd&uPbW>b2*0e5EAYfNJ?VhDrWDO1_%@%uJ$o@YL+?SK*L8R3m zB@vf7IpD+A&Vp;y1NH1>5|b@%D8;@Lb|%L;#l7L@?{6JB4?f3lOe3GrUhoKyKF()} zrj+Jf?|+^??2x9rt^AlJxM|mx;-+@klH8O_+t5{P(vCR`vMs5Vmgw%~+)~{X*UT)O zq4s5^*5FX;Z0Ef`g@0o2xT7}GBle6~<8uQMImaq8*2HA19|t0j$WkBsk}Fy}+dNuo zt2;z--)L?{dp_D*QSQ+OS|@jXdN4Q&Y>D-&b^hDIo6;$(L+y6fnMZ3YJ^j@>IqLL~ z{CC^xwSW}a`n?q?v0dX~bp0oFw!VIo0^241r5DETUB206$(OCh;Ct3wr+8jb`Q1{P zORqPwT+_!=O2`i2ByTz^g)C>^n>rjNygrLJnNlSIPIj*0jPfDcns3Wq<*{3XKMh?3ElR8^pze$1ZlKzhOlhoLj=RZbAl$dPcVYT&$ zL}m*L<@pgu57N!i?fDWtCC(&=&Y$Qh<{U_UK1EOI*!dMbwX4pz=qVmQ|DvZ?*5Zt3 z5@}6rkDZ^<_eEO+%RaEpna?uYr0+$$m#wC7Y<6H&Q+kkVyBo2a^V#lB{QXJ%;AUfF zgs(;WGHx|qcD{#F{NwJ=yKi>i?tZ)bGXC7(eZ44RSmRbT!oejil7 zh#$1aq|in#U&ptf@BR|JezEF1oYKQ>b=VI?Jr?!?FQZ3?hir#en@N0SI2&r zqx@a$DdwF(W)#k1ro|bzV+fQGTh5)>sfBg)oF)VY?i5FbFxCe>)xkLBhc!XpJ<^as zqK`ManD6M}1NXk9U101p<2?=R6`=-teRl1;urb6|xYeom>&(wZj1BsTwNeinGy`W^ zpM@U7{?}t9uYr+DHw9y_ytjAh&Tx#{7*Eeb#y#VVK4AZx^&;z~(IVtKt@^Q0vzbl^ z_t;^Mgn`mOHZ2Zbn8U5GcwY^+@uov}(%krT`Z{Y5{_F8H4k-@p8A*8QG>++Yb-ZGi5!>%}V-7(2X$ z^(OoZJR~aM(pX?->Xkok1AaL5_to@X0jgXcgy-w`!gxoc#jIoJwYju;K42}vk{OzF zJ@qKnKJ{hYU=Ofdj2bi(;}45o@2MC!jB6s1phxcpI5`vxrBh7(J?x!X*e`fZjG0E> 
zh^@{Cq=frT`fZ4PdK(tsXW2x6G*BTs7NuphcFncc5{b0LBKf=H88{+wb;S0tosO<0 z4^wQTTo)zUxeNa)Kj5NEKjFgP??+tJ=x1E`wIA|Pp`UV5*zd=D)LK91qO`;hGBTO3 z2bX+JPsEmzgnt;R=LSx>570Xkq}VpSn3qmGzZ{x*^NVghtAH$}Rs(wbw`Pnu(zqE5 z$GQnDK4Gw$37Hd=FGhW7+sm4EJt0YClo}N;2DYK39m^~=ufGyskLlzgelOc|R z6p=GPmI%=%->vJ9dP#|GNfDU|j2wB;QUUsl_kDm?V4eCT{$Qn&-m!yypNGz1MV-!* z=s3tVlf8CDiLp)lp2))_zm#6!)N_uQ5w|lyO3*VL2q#rF+7suHF8p6n=-G+DM6Z=*DQ&hMhHH2 zD?2VJA~rhz*C;x?MoBGsjr_mp^f6;%j)iZ+%#?1YR#`E2t^XdV(6ICxC}=qJZChJ{ z4j|sb-zgWE?n}h(2XcAvu{}9xYIxgTOnw8F1nhrCtM`OhyYLL(w$h~D-JB!}4QwMO z^?XH&TltSnuZ{$L{qjW{xi3E71Z3=SrTpI(ky@GwG5dm=-n!m}-6Gc5F7^aP_zhH> zZXIj$kZ^sYv zgZtO{Tk)_y53n6)DL-{}o?vT^K9o3@Hczq@Z~L+t$1@nG ziB_^Y2dgLSN2XhQ%U#JODD9dxznRp($9)!&4OURtzaBwB?8x&OPb0g7D&H|Yob*7h zaRjB+f!=TTGiqo%&v;yb}7V*ZAW%ed_zNPRL9J;Wwys+WmY^Kx|t9E`@^`69(U8<+}(=5Z^zHrE%j-K4|^u* zePq~w%8;(Vzx|hV<2`Q5|KGd6#^1Q(|NhVHzI^OAp+?}CwsKF2Eg}Hy@IDHiT)V53 zq6heso*V7(HR7E0blFc<2lXJpUeXB*R8;irWTsi;`{;eT;i#XRR0YX9)YjE8p=#)2 z_i4O-(NBL~Q@vIbmS~(Omq6WB&ndNTJJ6HvtC8onp{Fpn9W{-^wxy=D&wkW2=Jutg zaLoP~pHMTdO)D8RRD1>uIU*y2AdtSWZw`*U#;dkras^M41q|4u-cSEKa)iJ4N za@l>Dj568QuLv!#ZRPiDkMW(g0#&-0i7(&Rb7Iwp{dQD-VP;*v@B;(f$#c z?Y`MTPXj(+-|XR`gxNc-zxDY0W*9{)qgoZ{5TRWazb z-QDo5pT!U6#$VdH1(93-OP!pvKE~D0lDUZfo$ax27^&^kIb>w;)*H^br4tUvkSJBQ zxRtpli;BqM`uiw%?TZuOc;tjmx(z$s*}p#G+&7=1H*2KEH4@HY_T1uMu94C$&8XJu z>C3)(Xnk*5f4UZ7iR`Gun$^ayD+T(E-%RU{Tf1LQ6i-b;q{kO0mS`Y-9^3J(3z6ol zoV6B2q&58*U(Jwe)}{1cbIFXxaEB<(CFi2FM6%Q@#Kx$^T$TxC-`MhN&Jt;@gHz|Y z%@vs;`2X~lbx&?=KOO9Eq#bq@CSH_83^#JdJj+7c8obf=yB;IG3q`o==^x&~*W(cwt*5qJN`1YbF?iJo=T@*K3sb0TvoQ7Dw$n^e zS$CSLudd?FIQ#7L@9FbH9O1l=a^$@AKtw zbE)O~hY_KD8(E;O)hcr>aV~i;W`dU~sVK3&CsdcaAACG-`j-1@?8dlbi9g>G4bHkY zWY^MPLbg-B?kV5sTebRjeGf%pUb$~AuS93L8*HVth7a3!ed@ow9knB8 zw|djZ5ayhB;Vfsyg>=fyN$M{`b3Iw*C5n$1p?ROiiOv>JWuI0Br6redxBMUCvP*6f zYdQ-qq~$ERkk+gP?96_Z=u^Iy46D6Ac-?q6(7R}`TB4LMN2g!Wc7tl9#O=QH+}dpv zx_DlltaWv5_vI?&k!2 zi$z=V`?hPxt9b2_rTw{Zp)BXhg|udE5PzDUbg#?K)vhulU@xM!)hCgU_I~&;YXg7n 
zl-|hqSkn7lRJ3^?rz@WhYi&Ed*Aa(d#bw*lw^P2^?mvhVYIEOCoR1hEQL(MkBg$%X z6^v&M9{S^1L^b{SSw%hl`B_Fi!jq88=F;-4r5<5^ z7E_I|Jgccjn4ZJvEXRp4p7l6U#;EcRF8D%tf?C1(0Nid#b86$ zech53q;J<|1x>$OUz=55ugeC~S69uH{*Ao)`pRynpv%Kjd(}6Rt*>@}4cX$h6tT0b zzR(!eYEP`3==G6~q`=;i9gBQ}wegAl{Gau`P)2;qnLT}N&4*a-_ScRa3j5k=ErnC& zTav^+kiT8s&Rz zq4qXb`Kwsv(M&r1x?3EvqnWduw{1POFDA_9}Cmx5yXPgtbkTjkKnk&vyQg zG;<{)agvv9mf2?MKq!R@JMZ_cpkc*)m(W^dkC&^kts*maGzX1#r84RX~AL zEQOc8+ZI(c^@OkmywvP`=yoI?fZ$|yv-)n5|9bN%< zi1H2&J*}aMCLcZh?KJm2*X{J=zo28b47=7Gj7be9dhH%?UWnukR5Yl%FelQ z4%tCJrgm$7y}55k-6LDtSvTSuJFH#L{vLh}g}HSiH{i2)b(Q>rv*8Wa&vxI%ORRs3 z6YDK>*LZQIF__e{j{$M8_rWj%{GYDH;%{{Jm*X(OG)9?BmfI}!5aoL^~u%wMYU3aQ~oc?$=Unw{0{YZW5$^UW$btm_S zXxE(OUq^ZS>HZR-=Dhk4T~4qc4$*}?cfJU1%S$=DO0+F1d>C!MCuxoJFRAMklk+@K zww=f?>2vN?dvd+R*w28e)zI_ZSsmvk@wNS(*6ORKd>Qgxd#=8m%khM!`lNk5R)rrn zTH@!fI{^N9{cJ=a1>n5%F@} zqdXjqbffM^E8S>^qM2^Y{b;8j?NBtdC+gv7Y0n-UkEZtQ#XQ>D4_oof{X16u@yB?o zUwe&vc#VEUS2VXD2bR&^p4j`*;G05L?5yY9??;pSjU0dYx@dFX-t2qlJ?G$Xw7TEu zk4Lln^O7j>6Z!IJwEZS}aV#4D&(oJ4`fzIdn;8G0(};fkSe{n&!yZpF`eN#|qbKU} zG^8*3^t7Zm@_6U6C-R}wmVWp;jp>KDJgw=8t=a*jrSGZN=chgHkuOh!+=y%9|8;SH z#og6%R<69xsu7E}+PuFE4*8K|T_+!&KT4^wsnuPsF$2^!y5n`DT!@PLd19X8pjx4g zF~&N*BMh}mtm=5CkklTpTjiQPU%XUfQtP@QC;Dplaq45C!cn`Ar@C3b`I^R<57*V| zg`H6ks>rzz*{Q28mk&elIFb?NLz&n0aw6@=pq=CL#+GQ4W5z`(2)hN|ohg=!$6{cFS`@~R%sP?M5YA)IJ z#7i|IwZth^m(4YjF&64u5hI2--YUMS^+h#>pte4SDavX=AeN=NaV~k-$LTRHUrbcI zQ=3`c$`^Orw|zAB1FwvpDdj{oAf+N-og6$gZ+);q&OQR|yoA*elNMy28NT>6wRv|`k9qm#Gm zRilTul75WX)BR1lm?~q`7v|lItZ&n9B{-p`nox}w54Gpft1bPRp1`)#;jNk{VO4oo zN2k>sC9^o%-7F9)hysymOd_KNBSM0TiqzRkj?3{Z)QSNB@2`*H=hQd+a#v{Gf6 z+F}P-15TKY4vW4(Y3Y z=!f-LPyBs-*AMxaKJ3?nxxVa&d|03M#GmThez^PkxF_yhU%RoF`rM7UM&H-zdFgev zS9_UNJP?{%rw$i}TCc_yirR5U>*C#hhO_Ss+pX`#F|~(x%q}i^HM+*6c7Mn18k0v` zYYb|AnoZQb_KEG3|I=elN&d^Yg9S$D11 z`5c`t6}@_NNNQZzOQ~`pog#J8E3E!$t>RO@l{QhkKd#N0YhU_P9Wp3%ua*ed|XN??dE$~9+*;IE$;I^GO+{PYWpV>fHMRI>;X!e^|nC zPLH?qyJptjmGGx-#Y(@tKq+ZBJS36(DRkr&W-nh#BNGVT!aL^KckkBG-BxxBp 
z&b}6MqotN|qx6xw)N<~zEVZB;agCOgf5UAQ@;|6loo@%Np(0JCR++VD!(9MqJ}$6r zBUN%sF5*{4C$?M7yq}f(oh?49#=o5NR$o;VTV|t&<;!~w`}6FU@8fddMxM)w8*eE` zZp1ZmCfzYc3D)~yTjoT&i9oqf9*Kr~N#r@obc6t@}NtSZe>U*9dH`;SZeyDv+Bn0_c^S$V(Js)Q0 zeat%*vpLad=c;H{&Xj1vTgefpadomFgzt7=?mpXnvHK?4JAu`jO(S~LkU_&$>KWhou~@Tbz_MqNsm8*z>F)k&RvAGKZ?6N=hn5r!&W3rDS2f`y{? zScJkgI>-9ZSk(H~g%H%9vL0k-$veo5;#0no?pDj|H=L%tja~1SSO=Dwh#Aqh`y1mX zQ(Bj?i+i#m1_Q0EjZXYxOGNNIMn=pYdQW!0Y~mKQ-o)?r*u`O8)#?T04G${Rw^QJj z)E53e48Ag3^kv(vr}5qEMQ?VRBpiK=|BVqDuew14xR_YJmkYFn;7 z7KM9qytBuRvUVQ*7<5=y(q-+G&_Vg47reoLdliu&YUpt8wTcvV{V&wbdrzNk1JVe!Di#H+VSn%)3EbQDlms@V4 zEyoXUFT(tI7|irvd0j4htVlHSx$tMS^G7&U(U zlCC_a$~oM(F}0L`4vSUgu`jt%>vC@PYycC>nPds-EdzaIH;%(5^3(jNE3m%Z+XIk)FGg*>(QJ^O$^&=2pp zztFD_`~E~f+|l3Yi8F_->SKlG)) z<;Gt6V{XJX{#u=%CcjLr%PSLowWH1&&*`f;EDW_%ES=*!vG&jTT}X%W0GwEvC*VUk zbZg3oR_y^x*3{;WD9#yNV_4=X_>eefK&q9;;6j?_Ik+&Ec@Qq7{XLIk=OJlRn|Tzz z@yzoqTu9423>VUMc^Xb!@;F@d{X0O(oLbL*Pt?`c@9sod?L4;Znjv;>^E?w5)?Wkb z^F>tKg|^I7@gW_`WAULK%5(9dVKJ6@Fg`rbPFm|q?bUfSuF<)6>l&3>aQ5?XoFl4@ z0%#0s$ut}DczomVMHVy?wXCRnF{)`EkZ)Y%3Av~r&LeW6&GU?0IKDWKBtY#lqegj3 zuCX0LoaQn0!=2_i^+TQJLEQxEu{sKKBTOJk6t5g>C-x=#3;ANCbdS@62BvR zS7*oD$0y2a6M@`+vgh>L!`Ar{b+z*4c&Qw4wI$=*|>p^EcIf=5Q0tw=- z1C_m6?uKzc0!_tjuD0xNq5@xq6n(b)Yqaq9MNlL=hWz34pxUn5bwwNewg*X`A= zShL(6%@Lf(c&JN+&B!yKHXbeY9{OPoS5Eo2lB7H}IOPlRT>4;?hC9KBH^Iket9mIq zr(mnM$e-q`Ez6p@qC(4M$ndtTBx9Q`v^v#YpvXiqo+ue{_S?(QIsUlYL}9>)%!YzbZB0-`+958<`UL( zq-VQh66os1D4{NdQzTeAQN20k8_lWOrM==V&K8>}%2nEN&8U29`FcLxq$F$~w{(`; zxu>{wbfT+v^d00$RwL|7aW(bRJiltxrDs@;aMfy)zWMQkzjD>Tl@?G-j#ul=QzWGQ z?j?JRd^2>tN0R;wRSVjxMGzg~t}MHovwu{0Xt@cTl@s$G{zXt+KNj6+ZuvIjnE2h? 
z<$DtR#LvU)YpK7ml;X919u)TDHft%IBD<3J-0`0Bg`{0A@qIjRX0L_sBb}@akvGY= z-n`EBlog(tO?IiwaQg~XKiT*G6cou!zl<5m=+k;x9qGLqXl<)XjO}KxN3EwE?UuP1 zbpq8Vw$;bcOD5=tEq%gT=dLU7_~f74`A!XlpTnzWCwjiAXPMc8#n@IQhaCE7m3Ei&CL}`tFk!FG`S>cCxrD`Irp(J#*>F07{y=}fkuZ};W*fP59 z{;|;2%8yk$=i64lOgL(Vqjt$xH{u$;*YR89RXew+RihStwTEn^FQRe?ZQmlh#(B(Q z`|zd~-G_3>;=9lec?2%3xyRtbIp$Hg@RlBj3u*3=xNwenEH1pcN8`frSsv+LwWnAd zc|yqRqwt2{dYupO)0N#atv6=flYvdp*v*vlb2>^7BnQqv=`MmQh3JGInWs zg1$BmqA@-SsechWKF)Ef*1+6Wwf1fQPE)(I5sUdH=CWoTY4^Wn?lL}zB(;xfKgL+G zy3wJ$|6Wn$FL$U@y}X5A#Ps*8ezx;Xkpv_jUA?VeoUM~r^FE{tZT4`Jj?OI3)HIwl zw2yL2)K|J;Y4KZE@*0~g%dWi<5ohuw*3i%h)DqR$UaeQuxYfy=^Qs~7{gfzTj1}*c zV{P6KBW|C4^^|;bwv)g zdl0$fpWn%A^#j$vWnGM|6!F3E2x|6t>L+;)yf~4s$JbOcPKw#jE&0CteasWh?ekr+ zZ&1&cYkBJ|x2&`tcIVIi{?EQb)KE59<&&>Qcw_LL7BaA4KUWH`-K( zxv`cy&W*T62g+X=J-9VrOOLBv)-v91Z{{&}VsiLaoWyLWKHnE^r-%$^yeChaDkx^w z)>xZxtCf5jF`vvn+}^usZKv(j#@IR0Ya(*nX)agbR{piD5xZjlNvH zZp7v4_9M<$zZ-F?9o$&^+QW@@NV~W(m)ggTxJEnGX(dHV)GkkptHvz^wTCh)Tv*yu z?MD$@Xwx%WAI5&hh6`tzgMt6n4{bl=!-vax@3D*!7wSA?#ECOzl=x7NXPme&=NTz3 zoO#BI3+He~iwkYEtG*RI&4_W0<9No53-M4!jT37>6Vj=H4`t-IMwa_+GA+i89Us== zj2lhibnHfJWBu7+CKAKveH!K9Tr+C4NesO>7ZDe}o?(UQ?74cGgzWXuehdX0i z-dZHrlDN#%4N(tb8|)Za6Ya4R^b-7l3N^yoDM5kiP23G`r?Qlr))>y3+4b#?_T|O3 zqNnoY!q}HL7s^r|T}b=#>Oxt{vkU1Id6yog87o5oQwf8zBPTADw#7vVv?2n|BE zGWpcxSCfG|y`^6x!9sL$TNo7_(yDXYUEt>)jH=_7;3l7h2%Z8Ox_Q?UoyLrcqb=>v4sPoAx@BI zO1AH1DHnv2^*;8ifm2)~6DEwac@3FW&qFpw9cyh}2SN zoHu2>5^ zveUYj$^xxjbHY^H%V!gwTHVle$h|Ps_K-$lsjZMk*`LuCD4(c;`h3l@n>vqQ#8TeH z?wjbIb9S6sR9vwe`9Xj>8#u~G= zx?kgw&p~z|QDxn?^q}>MJ}5SpwMRM2n^wVK|L)!8^s=Oy#a=N9ax;ibxcS%1{)m5B zDKRKX=h->ZG5gIOSE9ai>PqjLcKRri!@Py4S@P>C zueYL6_3&1NRpG6Omb{k7v1YKuc*AJEN(^Die@qR*E|cst=L!{K&)E@XB?q-XXBSBx zHc?E~7Olil-!)@B_%_FMj7R*-h(mG_uhW60@unjF*u=bz@MiFI@KS4iEXh+Zo!jSxK%?rV5hJl8s^N>i^TmcPDRK3Auk>)m&{|%#2lJJI=)|~Qt#}Z9-2s8tk6KBB~662O)YdW zNS#X?NrR8?DPX6ZGAG1dd56LqZ&L9mUgPtwVd3_aL}6A+r2N2X45yO5`m7ecmo@$! 
zQYaL)4?+@%9I&tIf%kauFYu@P_?|`Rw7gB_biPS4q|ob~;EVC|gCZ_(a8>oXDG z^X)o?iPq+okY2SFddaoMH$A0s*e2oR-KQs^OSxgs`GWh7OOKVL(N(M@(UKlj6i2tz zDlvp5t)rFYlmKB(-yHB&+*qPfbCNUM9%JlMad=cMXh+JpVjK`#lM@-MPwZik2s6p zoJ~-<*5d(awDa~~O%K)XR>du6B@1C3TE9+1^~AlI^hycu?~#A~I(*J? zSLEbnk7#KR9i;iH)DiL~h|utI@T=IZu6@B-{gQ6f&uaDeYGnf*b1n?E+}C*(af=>U zH<3xyL+->&wH5Lw+b6%M_H<4??ICGThRikpZIblmT(3}yW2$NBTrd6;IgLh^%{dzG z5i>W`)FqQv65R`P|18yu$_+?Yq)2ntQ=?UCvi*6uz4~k!;dIKBGpcsmC5lsD)?0;? z$`@0+zkhPfu#kR@=1C4|#TB*kzwuP2FIZL1`4pc&_wb5@$r%iLJxx!fQ7emwZ1uYJl{VpJ?9PZd-s_^rv?uI|iSFTLw|(!<6VIK_ zF@Nrp=`}}XWfMB>#mB}Q?e@N zTl##u0&Ailnq9S$q{lH2)!wKu)H-KD7;3l7g1pY^TK3~99s5$^elEH9dP~lEl&0G< zf1;szM8;il0Ab}j^=DzRA4asAoNOwRP!q@FDF|1&fWB-)A$es=C_Q9LSQRoRT2om= zYgf`1hIE^Bw!<>du^z-OlFH1~#_fa0%ITBCAbzr5LFA;~yagyu9o~nm5?dLK3?CqStJ++v4Td8rR&BnNICG|PCdg`-(m6;*OHe-Cw zndzAy3oufiG5w`_3t1~;% z#`;(3`M7yW^ds+gv(`MfqqV9Jos~Ixsfr7pd?}5J)r;?s^mEgQTVl_m1|hjxuR?O1 zBkU8OeHY_Mzs@TkJ*=ZmbmOS7?UQ8v9EV|mBelXei4@0i0E9{VHjY+M$`o3_~ zDk7UF`8`JHUAr2W+8%35SZXU|Uu$bB4;6IPN3G#QFMd@@!5q)AcIOz%8O}54+~L)( z-Z>&Z1o_F1bM847=Tp4P?Hv(b`pd9?oDp&EnpxOR$zYz~WBm(`a}MIMcwTUvBTaL8 zYki10lDu{QJf)}ps<`gOd^{t{*{~w|dtM6>|)Hxb;rEVB?4^>CUJe8VguX;R};k?N}#Xh7q zU(W2I8}epBdaY#q_rSy>A`7|?R$pC9#J>7P*JCpCZm=Qeq1)ee;pJ^#+1gLn<&kJ+ z=B-w9R=V|g{f98|D7c$MuARr?%~5qT30!XPpf4SHg>zi}Jnj)3+C(U7U&q+)e|V<@ zJzbqWDV%xxZpb3Uw)Yrik99sD;T8)ni%peeVwqSM@-DZ~oZII)?dtO)@uLUtx9~;0 zeiT;c)ewQ<%3F`#}}2D=Xk+%-tdJ(|x6fzPRJ1j^F0d^V68KV+`oBjP>y+*Z#~K zWx@YCk8@AoEZi1tpxbnnx&R+aZ^=3&@OfyOXR&^W5wPpxHlW_}^muGY`k3S37!wFs zz>ENUyoK8$lSJ9_mfFMEA2G8vx0Gb5$BMX)nar$XZt?vXeWxW}SoIFhWQP<=ad zq+ee@Q?xH4FNqcGm;-4w2W=vQnJwd4r9}UxwOAcvxGkDsUb&^hO38C|oMnr4o?$y8 zN{127VjXc(khgdyiHZ zVSX|%v;L%4INWBqAM=6TtUWgx5mfKA*4sKn|17*a-<&(WE{%rjY3ZdtF;$}W?3L7i zr>zi1gv0f<#}fbKM9-?H1{lEt&5>6=P6Jl;ZUDaoD+JX+C2b)HA$*D-Hdzw&a}$vWs0 z)gj0HGVCJ$mIs4-u9soqt6yo*44@B@8gv#Ts*N!S#pA!Wx^gh%7W zz`sV15q+vYa~H>uEna#fdnoq{jENa?aBZpsJS*r?Sjg#Z33d;Tk?=y=>#&%-5bSyl z$gxAzygPCVL_2Sksk*9z`(#@DwELZ9W>sZ_mG?YwT=t4O|47bz%P8=%YO{1SyHG~M 
zJ`d}}JdlfC8vzhf=6-TEk0q4X=+lLHm;HXA@yafv%o{=u35J8{-1jjuD)T-IPFCk~ zi;walh;?vVz-3Fgwd|4R+$BFBtYONZ$9jNMeQQdyAfFB{Wrcc&TnXiukYDV%2cv(r zB_tP5#W5a5EqrxO--jl7Hrcy7MsXH1?pYPi7^7B4MKnX_!dYvDT}|FN&UvjsP{d0u zGAX-Mt-Xbz7TttDqFdCJG9(POBL~kyi(r9ky=dX6o#MgMYOh^5YMl}-47FS2P7?i7 z*bK7Aw?+NbBdxgyK@U%-LXzgR$*jB|l2hY@9-=&I}^t-eQQ~m-Zd?h%9|9TIiI1*=*KY( z-ax6!Sw4o*W1S1TVm->9jnPKdNd;x~QmxK`Ds7u+tJR4^t^E;owX8ZawK(;2j)o{O zu`eeEdJ)^_#Bi<&&57DIk}KJNGo0>`Z9?4Ve3kSISNiPh-B-KMcVF)Qvim;%yQVjD zN-f{YdNZTvA)VaRXrZIgh~C{zR76cJ5&E{QyE%_UQ7tC`ZL)kl<%~qNuE*=>|1tCl z8Bc%S{Sd$Y8q&k=HRB*HYx6ohb^_=xqEoHa=B~r|eD@!*8hVWgZyr|xsC z98h?zIVcT+~s>5 zV`9Cu-JiG)t15r*I-Ex__f+cgtocVlJI~R`FmtSt@ywqU*_thUJ(tK}%bH?mLu|`! z(JoAWOIC`>D$S1hNcx&y|}+y zc%z&bvXSr>*)6dHeq@p){Kao}U)#MAMVx%|!s|$F^+&FgsiaJF78}7X&#@|UXRsw} z==yQAWz=3z7fl3roc4!&q1MVc*Nq_W^gj*j_A+KS_lAx+ejaQ2w-)K4$`Us|y>mdV zuCzK1?x689-^}ADK|k-|+_mD6hj2IM;(6rKILE_&6@B6HF?7S*KHX&hmeT%sw1`J7$0rN9sS4>*=B2ma}vn@7Z^f>F(+?wgz3n{V@*) zyUyQ-+_zQGxSl_F?=L=RRA-zi@7fZhaqx-wSZi(7>v)v;>z-a`PAd^*7yb6!>FfD2 z-aK~n{`jeb8-d zXXKk3lI-N!uhKeXi@U6z(}A<7eCmtbpM^)`im6k5g7ZWVzmxXeI1gyM$9*5moLl%rXbG|Zfvmchr z_m*tr$#-9=Ko{|53Nd8oTxER>VmH*WaOI>s99u>ksTJFE-g;P;?Uh|l zwU-sQ4o}6Z@YuCtbt^GvUbl=Y5i}~b_ruHdX;z`A9npBjgS#7^n4a*|PI1wSPb}I= zXlkdZ#Kk#}=e;S{PDQwCr}#usJvmiEQ#(aXk34gUJ&{78s?{#tvv(4T+7XRNukZ9m zcxtD(?KRRwP1i?^qqPjAygr#cZ4Qs` zq%o;=dr!hqD=k12&=Us~lG=SlEDl*Wo(Q@y)y}brOkTvkiN7a@+2KJ7TkX6Lc~^*7 zf@@zYWVQ1i^wW@Y!NqtWSG^|JV{yLxlnPf@+S9`fA7tC!@et?<1Sk0%n9m18QX z&aJg>3jJ>PY2f7j8^^2&7u%>TNsn(QB;s3GE2riXmMXB1c`Tl+e&Mkj*k0*f(kiOQ z+NlL_$_Cj|=sZ=g_jqd>B{$8u+njqV=eWL!nkL^wZJ!?kv9&VssdLp!0{xcPE>?AkMTJrqlayhyjNNeMVvJAm9!=4YV-Y8d_ZymxKHjnu@ItK z-jpZK)yMCaC3eW0Mx>TC>(hHeQR~;VLQ=~e->g)X>)GRJ2u(L=U0p2``QcampC&m;4olDbx&+Kh;@8zcX}pLfz@ zb~Hw{6@1j}P+_8ip8BR1i%&Lb<;oCkwNBm(L+utGN!#8Ho=_p-lM;?=&F1|wS6$Yu z9d$9p19&@u`&yNAI*#I1_yFU+&xJu;!!-gjTkvU!t$BQ1Zo4D~`DvbP_Hn=Ex?ih3 z7ST?}JadZe$xXOFct*^^gZPst6Cp8(_O-nLCXJ<)HE*HMO+42!t|dL zK+cn0bpp~WS5(Azf((u;tK~sdv}8Z&%pV?DJE*I%fcc 
zi||ohXSr0t`8jS8;?H<|NvuQ07g-PN&m6iBk4*a<2YpT)y(>U@Qq;Bb>|liepFC>~ z1pHzmIUb(_1lN8_`2gelSRP4Y3@-kYF_!PuV@=udyDXe;+EI1f!6$SE)ge5Zo9b2` zo6Kv)-0^|$9t8Gm+C@7!4}E&iO2aO=FtXM-T=Lk2F)HpE!^>D*o_HyB#htGqP3e2< z)xV6$+Qs0@bBoj=N$GvDi}US6vr4*?e&!-_YUJUOI}-CUR&iE%_2@kAKY7lde06+@ ztZ~2TzDb8dpStlL3kmhC<;H&BcL#i$i|dem&ilxQ)jq_7@occjaEkkEzPJJ>XzZap zdo?UE{db*lu`_Vy{*ZwM?;Ufj`sFUp9goRR?X)Dr^O@! zLuQ?0)e$vv?fIcCKf`yv<2)@CZS1Fq`>MF^p`NEL9?OrmCi?p@Jb7AAt_Y`5E*(Lg za#pX0KUuv=5h-WIfwv}`2HhtnpZeXPp@}!V`x@$ZA~E2EvvcYAeG?vSNK;M|d@mn*vei;+jvE%xSWW?Y1` z6;~D;2(IiSXk!srX&zromCm@m(kuIg8vV|9p{aB4s-kulvypckT(3Y3b3ojrm+--DZbeVMj6p)N7G%^E|L) z#FKbq(c~gmO)J0~URfFAX{EYM+Q~j|lTmu_{^lx`*%N+HGB%`4V~M znv{RrgCoMK?7#eea_*m(`UUE()^{SH6+OGWIo{~)jBn4NQylAJj!n)Vr;_Y!(P{Wx zJ#mxQJrMu$!5jNijF$Soy*NxQ1>T?OH`3G{{bi}AX3<)8XqdeDN|e5m`h4X*h^qd? zcc?(a&9ff{Kg_fLpJVSMe>FrNo`sy^J@r_N`}ltyv`%f+O^sNsv6O zXaDS2A)2$E922HvJUZ7J?Ysj;60X)Aqw#)l;>K zKUK<}XsgZncOp3FTBA_AWfru5u8p6R{FY>}pi~2y0-aiTplP?GPE$gFLbL<&CCrgilc2@eL z+JT2x+yNq6I^R|CtdR?oW+FQ+PjNl^De1G?xNYy?e<&jaX7a8-VC=Y)!rtH;e&*Gf|S4I+a^Jc@d zI5CH3u3C>&WboF`P5RCe*}r!)&beUvv2Tz3(v$iytD?z3a%aZ{Evc~9Nek-Fx> zF^0?lBn!|%-F?3ikKNO6Zp1imbkp;^lTNKu*C_CI+1WUFME(gcG2oMWyhRXHm&}(n zht~y;tCB)`b?o~+vp`Nb)rLHKGSxrx*@0<|GIvhX>sSr%A|C6R3D!c^2q5}GjV8PS z*V;%lY(w7Z_lzm4dpc=Tvw?_}tER%VzME$pZ)3lSwHRN7_25u}jXLqJmF{}ii2VsU zKF<0jiYdH{d2rQPXNSeh3FIyP60KvF$Es88ipbw_ReBvyT0G<69EZKTCzma&1)u{x z^xucQ#^#ZyhHQCkM#htnS5M8!vydUzX$JLnh@uh=_P)ZAHdR@IXM!go($XCAb z{URjUu}--2gCC8gxqP_eb$L7^^2Gwr20p1mRr;h<=o#O?z{7)|o{{|+(%_Q>B;Zw8 zn71(oz541IM~}B2&Vo~~LyK}#fe~reJR=&TC7|{2$IwGmV)w6J)wk|(zRPU+eE$8a zU>-?WR%Jb#SST0f)6h)p_FS1ha8C6|-JT(}wir#`VRLY7OFoG*5tLVghm)^-bBbELkOmy5mD0+CZ9)aPaKn|H z{+q+?bGO8q@ju`5RP3X)LrWW}lYIGb6$tZQ`)&nD%iiWWAoH1OuUHB8Bb76tndreE zVO92DH<0$d@Ja+pkXrwVv5?eGEu7O12#*sY_Vb>TQlY5rA$h`5TOl9Pl2nbVtuLZp z@-{e`1I{@o2g&pDoJK)iN;u>kMNb*3m7UC@8U2t7>e91nIi2EWPF62Ii}@xZ=#2KS7^%U!EqGLTC?7)x&$1bX5Ac+WY54h`ZyRS?YC&bP1b;;zV$14>Yb<)Wi%(dp^XD?|a>kOZOn6h&`if(!5r-yx#EL2($ 
zMV+`|jEq&%mK{}_H;i-6(Llaa3n8kdHYKsHKE8L)e3QJfi)MGMA@NVGUkeFI?emy9 z-Ydx}cCX@3J;r$%l0p^Go50qILyvLx&{~>FwG|p>^s40*vgf-9`&dC;dR9Hgf;@`- z`|qu?*K;l^xTHCF9Nc*xYqWyqob#C>il_Bt@PC|Z!tM7+nr`zvQ?=4D&lx|@*>=t` zJ4mYMAWc6GJLX%gG57JFos(u8 zZ;x9~`BJ6#9dLH;(@K=y^`bqYbM@cWwW8`Wjun!1N)*L&a(_Qw=tLs=%yVNE~hZW^pQTjA>|y~6q6ccoHOC)cszq~r0kY@WO}l{e;nf>9^ zev0De9joUt`<2vXh1B-avBFc^V|Ik4wt|m}lu(JTf}SKuePk`V-l`BrPu^GXNIZWt z?Dq4-`jv(ItcK@<)+Jx}Nz8>WyI-@BE0F9GXLWC(^%|8j?EB$mE`!59i+B4cCe*6+ zRNxohn~(`_#^P<&1bMdt{t#)Xtvev@k5vH43HIMQ?}4E#FLxeW=0|z+U-6CI8NC_F z%9BHa^i4jM{j$0cAZ*1RCLO$8s~U^js5-GP9CIMc}vt~q1wMILO$M&b448i}o!Q*UDt`Xc2c1)?w>5s9 zVj@F$=FD-eKO>~PA;+sH)vv<~Var@#!>Lqke0RZm$Y?tk?Q`wRKe?MNwl>tG`!URTWb7grin}uePQtq^Jo+t^QtZ56Kgj z+6oEO3Vjrw%qP3w#P15Kb1wZUyawJ@$d}xn?!J#dzl!Qb+zr~c+Z8W_s8)a9b6zSq zr}?0k2;NKEOEVh1xG`_5c3IcqRKKZl9bS*w7B4GiTePJAi39OQAMb!v(ih*!=n>ddd@`U z3=p{_w$HA>WIiI2i?ACKI@mciM>hCMgCqzrM)#~qW>l~|+$9u}ip7~891&J!|K$U4g3ZZ;N5`q>e+*qmeDu$|AL7?v|KX9l zb)WD*-~Gq#s~bZn9>FQ-G>4pbUAD(_wJtSIwe!EvhzR*m7h!Q~&f(@5$Fcu9?7W^1 z9B^tu^Hy*X|;Zls?o5r^kTXEr@}qC3XCtwb(o&5htw z$3LR?UW*`e60ZuA+i%FO8MBZp@62w)-*#SiD)x9^q=#&?t0&%?=K(zszaMlg_1kXF z!s{hCZkSe`wx2q`(<~=6imH9};Qc=Ks89N1*X1v@@x3yuskH~yxlX6{W11(^U$?9N zHur>h(VhEM)Slf|H~G}D-vnaTUd3x*|JP%!Rqp-7S?vL*I>dn^Jhgo^q|ns%(Tze= z+fOSBQEg9sC``5WS)Zb;9hO@C{k-iFmziVg=ZZC?wVAabsfo&auoaZmOSOlxB`P(V zaMWhiP$zSH)(2jWEzE9>Nva^ zPIb1v@F$AL^pG#(MaXJ7S$%2S^9V()Zo6g04W~r(knVRO58Gai5nqnUfzu8NTkRI> zpmXQDVQIK`T$`Cg1yfe{_%ggR`c0M(`KC{XH?}HeRI|+4c~+2MJ#4$rqS+H|wV7W| zEXNzKR}Cd{t<&3ZaE!ADazD1!TRq0BidOVVAyq?a?zDRpqYLvRWgICG&heS;^36A4ODe{yL_IU)+vG#)zd-w;t%S?hzS;>5dJ=_giT2utH|dQ@|)ivCY|^d@*yL37^E+{5RanCX#tw3FtbSEjA- zvXV=Wk#S>?le6@`sC?$-G3M&xJ0ae9_-r3R$VDMzU^yA z83nJ4&vwKgwVd>LqHsb`OWcxECC+pm)>1fXr+DlR*biowO;%^*{Qo9;@*-?``V1Fc z_>j>izYiw9s|D8`j}aE9VtCEOoQMOw8syC5Lkm+a61}b7z9>m&s@31CO{9#jd1D$v zWZm#iF|V|uC;Fg{H*co%+CyiI$a#+2M?a8#@^Co)-c}u2vnKgbtG`#vDN%`6cjv}l7KQq#lN(CE}wc+=X+5UI<055!S8^{e$k)o1?=%5fyld0*yWczkH_ 
zjml5FgyJ02v(P_p%?Q-2c8-CYJWl%D#`F5hJvB+K2FbUN&?9;7m;N><%oR9uOLp7& zga&DQgFre#98s&kSA|5cMq)Ql-b?19TnrvJZ=djLcE0cDT_?iFFC~8=tCm0Lj84VV zhw+Q3P0vwNnd$z3{jNJQmgwf@o3ogEG2eapO%6IcN_5)ePT2WgN#=-f+W0JT9CCk) zhGl2%v~4%ugJ3`>(Q`EGAo30di0ia z&5#AkjbhfS*ZR71%*Da<Qi^idw#0f;U;xv>M}oMqH&_{^}BAb zwjQe|j&_`BG3-P{cPPth%z>r6frIRd{7D?T$RdfL*LG1CvihJ73! z+fB&Zzxr$|M))Rb=)4GRp1F@?<~V0r)cg?$s!rE#mRoM%bX z``FX}rtxKV^-CV5TK&CRPN+FuE+p4u0I=B+X>q#n{CEVUKVph%voNh$(Q zgqRaM&&!`Zx#i%JA|e&EwPw_^A0;=*b$dxT?wl^+kg=TA1D_1FtjGd<7UT2AvbdSS zty@nwffDml{>&+L0}L6TWXHU1?gi(0BMfqOaCJ<3du~ zW9EgWwqmA6`<1U+aXv!4gY$|C`jS@lnLEkoXZ#q?NsN%F?D{xRF2i?n%3RNxub2_d zIQQ;fL&x<*^=`;kgVrnP=hU>n6?-Wqg_H5T1Ju<4E3^bf6Geb=)kx81jIg#T?;%d*{c0 zdqbn+T&>TTxR2tk^Pn*i@k}e6Ib=>b$JSqj9roR`(WvlS@=gUlOy(dLQvadHiV_Nf^+g_iL?=| zd|6!KQGLz$9`#Vzy?FgvY$xyExGbo~M3(gXK}&kH%7~-8_Ff$J8~9YOVn5(T>bY8K zR|(aE^7WGny3(&|d+4})T zabEL6GyAI`h_{DuZ+hI|XRR)$Js&wh1&s89@$h?&K7DX*ZudmZR|KfKkuOUS>F9|s zfAB2y1~w?~t@fB}ZV#}Rqeh_DGm&)dBQoMKm%>|FQ$4`e6Vbxrv(s~(?96p&W(u(z zp5+T;R=^o_@GwO=tRWfY)hWcICS& zVDy~pc^sf8b`KwNzN#yINzvBMG|!&#ywFy)xn{zWr0VDCI~P6YPWU%5hr)KwB@xBU zEXio518Y8)c}jrh0o@l@7yst?_|o)&)?34Dj!eGW%^|-HP0x*L^1rdX`#Q0Q<=2cX zv;VMzcp%(4PK7r&QI=b_IuWIZm3;NEz7gmZCplg-o<}Msqsdk z=JWF|WCFjq{|-cw`md(<6&F&TnnT}+VGB4xiq=Oa$OOP`qm{pJq5%ysT5F}WJXd+8DM;N>FTj5>=prPLa##3|LOQ=13 zH({x*@T+wCqP_In*d?(BupJfjw{S&qmHd3upc;Rpgy(m)A@&HoCTbIT&yzw-UxcH)h6VaBe>MaDOJVc~z?CUBpglNCT7 z7yF$&F~f)T>Z-@4V%1o%~;Hyi=MVeBdFmc<;V3Yh(CxE7BVK>UPObk<~}v zbCbq9OT_Y;fDiieT!dGx@VzM|pB%S;&WswIb&6XG0=SN6boAG-EI zXx&#sH9_y2B$7qprmUdt*?%x<=SmYQx8l4x7$XusFnhRZs6H8Ro9 zKKCc_+sHj|z5J^6*2CMNTDjY{WUa59AE&0s)BX2mOUa#uPlo+?j|czS+Juv`-yhxU2(LcouezyCME?D2kn7-Tn z*YMx7-S;uh&*Jyr;!izB+GAylt78 zv&Cx@J;v5kvNd~^5-w}})3E-}iz9vNv0AU(2}SMSW7k2ns?Qyk9+IGusjZL%={T~- zsLSVx@$@<)%st?~SLx6Lnt@)sK;g;P^Xx5z(o>eJWjI$o*;%!cEGt7sJ*P%JqWZqm zsc|$&)MKlyRDP=KR*J>~&Wp$y!TNjFhmd-$df|~}$;iB;>KXUpJl^>hyf}v*K~e)O_5!d0tzAk&VM0QZ|@hj5A0!*h40{Ty5K z%OoJND9m{uoZ}UTd-xgi@oAhl$z%1w$zx&2FJTwK>jh}) 
zH{oyRP298?Xm0La^yrV~LoGTaUvj4QQr3FaA_NZ$j8==>ztz&nbXV)k;J4@Zp51R(Z7C>Gb4R3+Y&! z`^K9X`%UP%ITtE)j=YLdiYutjt%EPlES~h7j|vWn??fqO$@NBR1^xL9^sIH%>s_+f z;gPX}<}}W`f4;ra^(=JrMabBruul%T!ap7noSHbRz&(hu+zU)6(ruk>^3Jk;KOAz* z_HE@1@6hpf!Y|Pxt(Clyrq$cKy-#O)MQ}Av-Hm=d+}-1p+Ee{^#N)ne)&$7Ad>&md zJSq#jZqf^5T%`^@CX|eAKN&LmpNHH(8=@G#7=duJ>WcF2og>5pAks*-gcpa1F}n>a zwtIGZ;-2p2=dA_jsPe{)mvh2k2k_E5N4XdDp1gC)k0lwa|GDv4Mecht`)~JpNAxZ@ zh(GF-Yk$>ct<^ls$xJQc!5+VKL}QmeeXq~UMgK{aMpo&MX=m*_iBhi>dm!@8-3I4u z2+RAX>wIMRdiesSS1V|A`5p>Z_C*(N5^?G0t!R|wxo{IE^+pe=$=3nAmR_UFYGK+X zcztzYVwKZBqaCv}d+-+QUfG5kue>Vs&I`MLAk?0If-rkT9OhcjVU38t=3CO0)aN}< zn@5!v_Z?)-4;~3CzQ;;?6CCU-@wZ+2bV`l2;Z+Ev;$8Tu%vz-!N*dJWJs&bvGHb#e zpBJuTH)n%P^0u&NuP!{bPWvhhwLR>su+)wfK+lHS+}?V@LZzZ#$|^6qDZc8^G(uF{ zW8Di&ZN+-jdjC22#_MPmG}S+~*chU*WIos@TW##`*Q`%#TTtt-YN`f7PG63@^(8tky+r5SI>##wBF*K+AS7V z)_U8GC-p>o!L*rZ{P&^vfBLZYEBHM}l1Ikf`+G5`83}P9JsNBar}w;W_atIGHLWJqmgO{-%V_>m4?YYYx;aBM6U^Q(SK zJ7i_Kw5+;0R*9prsJ$O@#aUKWMziQ@T>4q9G?haJgrT;FEC@?&#awC4pT~+P&aG47 z3fh_%wb%`IKre@Ak#}v@`oH41MxwUItP87hMx+6z{(G&SpL47Sr{v-H#I@Kq&6;diKuCvk10(@$b z&z`)hBF|V@AyxDjWTvW7|jAGsr^Z4BP-r>iAkmqT5RL)uJIZL&(@^j3? zqu|pg!>>PBJOXO)gVZ>&N=5}q`--CTP2ITx9>}(N%P!;Bf_d! zYoa9!N~K0lx;&asXn!0|4Ib|P9PjG<5N`;5ABg$xws-XjpL{%SSL974k2k6nFU-&5 z^`ftCY{b}_W&JDObG;w>)u|&i{`6|u0bhidRC_Kx=k@&O2O(pe9X%alCOt+={sreW zSwo1d?}y=EI8RmPV?Nx2ybXxl=Nx!F56%7Iz7RRP49;*r@t>ndJx(OV6EYUb)WUb? zbq2C^>go&6rD6W_h~9fGj+vU(mD}fIoc}yU9&ZZ#IOw7tI^i&WAM2AfA9=N)$4Hf@ zlXYEtAbuZfM<-V|;o)F2k~ zB5Zw+*+0}5a*a5xfG2&{*t{3ytK1|PblPy!{eUn3Jbj@zJ!CR(rKbkO##d&^&R75Z z=Y8za!uKCHmY}xY@O-TEpZCX}5xv?vfpf(9&+{I<2*02AA;)>>qoKJuLAE(>t)B%x z?^I?sQ4cw|3B>w)>+`vt|6Cb^5pBg{rgG%K5uVyUI!b72$5r8!N};LkroC zrdqN_`syMftL+gV71q41SVKB7q81Rhc(|WN26XMoxq6airsCsvG%{^fqC z_I)1lN^uqa+*8sjBuiSKTe`%8Qcu*x;l&fR+|~Ipd;{cGRm5xeNJ3Ie#QjnH!p3-! 
zsMW68pfy!_G=_QWI=`Lcj25!iZ(QPFAGkSv7Canpbb03sIp?hX$#CxE9buj(|NHK4 zoG1ULRNu{c$tzuz^=aqy^|LF^B>HNJq`WTPIg7U1d^_YKVn)?5EFq}X>9{B6E)=zB zbZTa1b!6nd($f`+TBqF;hT2o?p6nX^$~>uivEI=y{LLv8gr;^{3G2Z?lf5l*$B$xN z|KS67A2iY?CnuwrccP#4xQ_Z*3#Oj7B5dq|$v~^_`_u}Zg?8h#D(e$gBcao{xt?_} zu|CxZ&fY5$4!;-!x2lK&X7$kOkt|oc10m0{FNYOdKO0x+0PVMa44EK~{n22zKOR>3 z-H;6a4lnkjXfMORe6ecPf42Kur;!2$(tc4(pM*iOig#VH9m^ z>B0Pd-WtxK=ja2qbf`g~-RT+{Q@?3F7g%{{S4A7wAm<)5`>#F6n(tr73~-iD#8`E| z)_kA$1jp0&HQ!D>w6)p?y+Mo-JJm8{c+hAtG6#4+g{-VLI)~1q)^OpJe_Ox5I%Wr} zJ=>^%}@=Mz^*_Xr{Lhi8Vth-*jQUWgg744`XhLK4MR~>pDt9 z-U7{gLPXcvs1n;}UFufpR(ew(aIL;xoL)8#D))^dwQ6J@n^Ga-sCe3Yj~Z;;;c{EokAp(t-l z?O{%Ck0oi;YK5ovFz$2)x9((wtX9Zsmow}}Tr*!;a$w9WEo_<7rSz_~ZW_7T zmflE8wZCF7dA>wQd7 zmZzn5uG6ditz)={bbc@F>uti#_GD@^`t@Yrru4$yw>iB~580%i*wlYHZnJvzWNOp; zVdm|Yxy|c^KedUyFt4|n{d#hhP3_l9wD09->9=dP~;Al2%<7!s)cn zqW47R9t3xZO%wgQjGxF&%v0O;NXRHFTeBjTpWCuK zj`cIMMY1YR?h0!9%DN!Bo9FUZpWD|UBD36JYq>kK z%(EscxKx`nvFx+-Q=Oixu-i-DsMP~lz7=QH^0E%qXyETztpBR4GSOH2 zW9V(=Jf3~HXHJ!v-vny+1A= zAR)Y@aPu5_7Cpg3y!oEIH)_1)%*^!&FM0Kp(8KusCG=Q7`b+4sp7fW{V?F6Dq5D>) zzl0vbzX=IF+>e`((8E2s2?;&ilim`#?_>0r&_nn)A)$x+aT5}HxF5h_0!Uq`W3`J8hg?b ztMJ4H*hSxM?2#vWvZY7=96b7DnB$Bvd7>0+dGyI{-$vAHVfARKgXi%!hX!7YPV5kk zz#MJ6PqK_swDg56^?#PWeDY7!mrrlBFRpmnX%aG`-@LT$_vL1#b-yn+E3Nx|=`XE^ z<0rNL^>JxEjD9oHdb}_FrS*7U`b+EazTAYg9*U*(lh#A%Hzlpd`f?M}daN%uA+5*y zaud?J?=$!HTjw5oqTiIX9^wRjxd~}K)|Z=*)?#k+^AX1TwOyjLdC_ zvBD2O{TwI!`y=q|3*9~5Uln)CBaC=JJ;pGurrSG)aTRVILtR#sG74Hejk=6@^+I{- zYER9bs>+u?){iB>-H2=Eqi!C=ceVR@{8eKXb+yVooaPI>Rf57=)%)mEESdYPZguXsexPhG3JnYXebNJ67JV?*FnH zVKsu!7ZrEZR{M6MtG3p=5>>VPcSoyd;h6QPm(^oP)fQ8HQ+ph@QkM!%ZM`)VWwmIp z7jXllex|CentH638E4I0U-`UR+Rq~5uFB~3v=8lirq%bdqM`#gU;pHO)U*njZgBGg z__VTmeGlf+K1Qsvdg^O~loB_%sxo{1D4qJE-sjS4Q+t)Kr8~saEVz$`%(~yk%yAa5 z?bVyx_fpNamzNcr|Mk5aN#DQiMj3lMoLORJUJV(`)YV2$oc12Lc*`4gyit*K?hG`q zl5u^f_CdV$!mF#7gLXtit<52wQ=rcFgC;s72U>bDe%;;!>h!>g2Se0iJtz2{s*Kd| z$d^ObkL1UJ0{$A+a9dt;c^c@vR)M{1?ZuRaD(tKu%ONVQJz^|xV^qjnTUBPfEpiEv z6xJD3cvm8; 
H_n&zar2Vy{>G!QtJ65Kda(ipvkbPcbYkLQpniagl&5K-ZbCvHW zw#rJrdPc4YD{}qJz6lx28X)ARoHtO=>TS`es;)UoDcnHEuOG?T@SZtun6~m_>65M1 zi>KHl*+upx`?X!!A+^1^!~RM(bv@@~A-9iTluqG;{_Xa7pD2yKNB(J_D_i#m$557B zb0M9=RY@1scCZ0Y2Mch~ba!)~e{0u*__gFuE2UG$FVtV9N~=K}QLDdKdkR-2RcKb^ ziZ?<|`A(Q>x7upa$y!m{?5=42tI6k_$1ZJjXbxOh%Q6 zhNMqzdgmG6!XItbk)!8NrZCaLY5&?hRQ2&Ny=fKZ)?P2?x0TWw-urCCd^TOuddh6O zu&gM3spOh@@^fS=&GH^aO+P~=Lm{+)sJMl=XX-~jbJ|tT|b6H zQR-@xWfZ#_VH)jJoY09eM!|caxJI;$qF0Y*^uEUUnNHVJSD(_A^njeR= zn4dqKY-JXgP8mMhQ4@_yV%xJtG8f=goQCAfZ>dc=PkJ-FoP1SZwf9kb`!Y{%W6sDa zGa^Y-d(}6`C62w@!&STL^e6C*IRS5fXIJ-2Ik|4xl|PRjTtx5lb%pi&adPa?$t~xq z?ATk+w0RMllgNBSo|p#l{+1KjpN13hn>>$Yw~SYVM&ZxvXW=TTmClj$j`r=8uk^Lr z<<9R!#f3uBt|w z>#}P4^{YklqZWjHU06MatJa}( zPI^~5G<6CH53e2eD9_htXTz9)}EV?Gb>eA{@~H)8EL zQ%^rOv%Il<>-)qFPO`-_b0Tx`u}p2efeO8<;B(G*zAKzn2iy7tcNpPW{Lb5|EqubO ze^@p)?aJ5B*HVYCT@Op2yN$xb;OD*Isyl*_o<@3h@^{wg5$O}oj_$`SuIKqk;XKxF z=G4je+CuG3aFE+)tY##%ofbNYyAqFs64a3Sc4|<1RMM}UJK~M)y?GS9;oecc!K#sN zp_j44%ivk+hIU%qG`on=Js8djh>bF%E&4?=womb4$Xk2w@Way|J`2f6+p~pQuK0Ee z*sZ%8*Wl~mHkNN(HCwp-I{2ISq_*U@q6Zb3Zs!ixspaOTB!@jEd&~3grXuMc9{MiMUeW#6{@P}26h=pXV8Tv$ur!G&~+ zZ?WB{kPo4D8c)J^R9tH7C`DiG6z5Ppr5bOHYgJR$-eW)RRgH3WT&x=H>bO}w+VQwr zHP*4XTQ$z%xLiHTRdKs&v@xz%O}SpHO3$bT!Fv{;2-CP>HJ7fk2*=JOJ{qz`{}6GO zwsR-#2>)lF@NYYb5)ap%MCq%mW?lbAegM%CVu^Km0(WB7oOZ^om8SNpe$S?p7LPUBTeY`-<@lGoPR*F71k*sPCi=~V?Hx7mMgwyo*?VG1u}#a3n<qtK*Z+y1y;wKx?!mb3os&%^c8o|C9Cg={Go}snwn; zYn-QZ6YtFsjlaK2=W)LAWbnk=&lr!zF~L@oWej@GuG;j54J4=eh2nBulxZppcr^KVXTZO4!PuHSR&x_Lq>cA@Fqh-v; z>9q8Sc|#&wq=(oV_F$hvYcE>X`q%?~y&nBC<|FSdwXGrgaEL}*Pq+`(Iv4a}`u(~d zQ1iJpbL4!eeML-)sv_&9`q205djBZa)WyKNmK>qw*K5&J1~S$(+OPGTph6Mzc$m`K zUcHIT6j99jE0uf9N72tI{q_4xZf!mZi5cr)thE7$DXilY)ym(5UVhvlx$|$=A*V-7 zt>^WUGXHiRGBrDX9<&9rhF^FQGeHj0`kq`zYi-Z)C(=`G>C61UW=Y@RkT?7*TFm9xzM+3kzaoBFIrX&{^7pmTZ_&f zj~eamlAEtjENQIoN$!)LZ|uuqD(iYP=7FtW$@6d5B4fq(V{M=s;}uMas*wH!lnoi3TKEk!L`xOO;-v92$38r$^J=zY}B;z+w? 
z4*4ecxy17J-|AgIgV#}8yRQBoed2v#y)<1b|5NHMypwNP!8uMmFNWA1ww)8-mIxNL z4n{iD_0+PCQnS~eiqK1JM)ql~OrTLy&;Aq?>etgg{WiJC_ZvSRJa%k-3-{+$-qx?i zwEFnlWaG8;;Shy&^L2{WRkj}aJ4D}4`9?8&wRqV1_4;uhqp@zJBNgQiZRyEAjnzSmuHe)4bX$9TNaEV$;Si;^Uai4bzBwz76}!GhnB+*ghia!1zBOC-IL0V< zK8&lS$A>$Y2p7^R@*zBo%Xd$ncZU9xobf$T6B1VU?Z^ z8Q5#>+I1AGN3WjxxbSH?Uq3CEf#kxcgFdU41@>B4sn<+8sX zY&kPHHuvnl^Mo+7GV7!O%T&!(Xgaw1F1S#ylcrQNI?q{Z9f#kQYI>+u&lfZ+=8M&o z97I-;)OIfBt(4JQ96dce*~-|M*4%MKG;Z5&YDqJFi%PzvO=SnR#da-w$|~N{&!wl+ z%EPIS^3DDfWy_4ucV4ab`jnlOWLG<^T!Hw3M8dd>v4)DMsh#G*^YZ+bn<}EI_H2k3 zz6z_hEpjV*YL|C$Tlp`FEgYS?k=LsWIFkoN^SOQBvY-cK!Mdc1`qc>`n)Ol2Nh zr+P#!Gv=w#Ca1@L0L=`-nAoG*_hvuRFbY~uO*M7Fjnv0Wa=Xle- zZZAwl*7Fp9v^Z2~`9nw#R%o4-)acaCM=8YK+!gN4ahWrQzVkYsLG;GG2`=yN?0b!P zddATUbJWHCBuZx_y~e0Blzw>l?!C~6i1bC{jHc)4#xtD0s5hZ`n9bhy49uGttIm*m z&Fb=usUPxq2GtYuCT4Yd+SE@^Oi`z2T)nuRXJ9?idTWXK8JcUX8Q~{`WISVYVa;Qh zXAvLBQ=xu{`=?v|aNXl{MY6RURC~&fUlFy8HhdJPM?}ExHAN%2Q%%m}yNIZA7nJ%6 z&xT0nDLqldTW#J<(JgJ_vb=7$Zl5e#Yw*Pp-F0e{%PXgvMmYTfgdliCswT zs%wcHDE5%?GvbBh^=*r1%%c&X2UY5mXa3+vyFbRy&tmkI8s`|sx3NNq2T=o(Y_~cc zH%IJ+R5^w}2Tz`bS4VA2qHy@g^zBWnJA^KkhU&sMQl3iozf$?3;1`+Mr{mTCfiZ4U6e-Zp4SC>4n ztQuIS6I(2h@&}2iK59BkkSwdkV&DG|ovo!h#?))+jxqIGx{9gSQYD`{W8c+u6|1kM z3$^P0ei6Fu$5=uC6@JC{!&=?fbaf-R8fk9N+_-)5uyj>g;Se9ug;a^oyJc>)Z$htg zb7tyedB?Xz>^pxW{>|X}W|TW1Pr_dHn9C*Bz8B(>p%J$)o`2SS>;CKASG&)5U+(@A zR^iLI3-v|(_kZm^3p??*65nP{|NGq!f%tz)Xp7c=gl*$ZAFBMaE2xvN!w4A<=!KKA z>_>C08S(HhgEjeE%oJ54>*r`n-#0@`6yN+zv3w}s$9(*<`&0avxJvG?zl%Rn=cRs@ zgq9JuMDxwoAvE{cz8!Q?pOHUqfHg4REl~H2lIslXl>`9Xw`ABUivtl zap`;N%j6l~c8ffvwS8vPuZo#TTGcai=oUzYgdd~3m8Xa4Zs+eIy7jz%Vf1>Q$11?5 zs+ql!*7g|_t%_MnTGcbd&A(40@;I&TcNYAd*X7&l@1KgZmFK77Y@d%)aH{9(b(|jF zAI_g%#H?|G=-B1WQ7Tr|A(~rfdP#Hptj=jx&)_eyvan`Z=jm*S=Qz5t$);WhN;aht2?-ihp|){1wE3$5v! 
z#!4I{Zw6BXaLe6bjaD?y;wCGN$8`5rKS=XSaHW{`&S z7&}!<&USwd>O>1UTZI>Y44#ma%>PNQ)r^EUk+YW3);027oLzqwC)nRa%h}cY_`O9s zGHdM9cunF?@P=K=otVRI()@k&iPut*xeC1iwU_Zbvm+lz%)LTdTezmYNWD!)RSj}Z z&YR{ynqBSvp!8GB02H&1=~>7nYnFN8)E`Z=?zK{_CSj;uGkT4hH*|T0lXwu-9M;ef zu3BoUQePsi(@1#>%?Hu1dm(dFz9rkZk~X^MRY>$$SB079gqQcQYO&DHaHN`Q`Z&}} z#_wS+AI6{ZHP2cs-u03{R81Ls@N@8on-R!jEq!dky$_c1GIk668}-mWOABJiGKVRey{lzo-}$tT>Rdb-Q@Ur=q%>3T5BDb zBy1Rn_@6IAhxXzuaw|W&(iR*aDTrupEM&h?@**9W>=*isGw<~Do5{Qhm;Pj*5oQf6 zxY~=SBVu0-R6D-!V};{|;lutC+&kNS{SP05ePQM7A%)mQEcV;5(1)yL-Z7~}r3T~F z&m_vo9+Led>z|6F)t>Fy2flq2eigQuG4o3DN5lTWBQM7=Yv=G3e_zooUOjmc5fMe~ zxVh1m$wj`=mRDk@-?zPt#eO!|2@!_2J>8r}TTkjZ&bBBP*=QFZbmO6Te*4qg#Rf#ip< z9`uTD`tr<73ZoG(0>FIeUk%TJh?2tGwt)a*HK~>-04?U~s zmuf_iS$rP-OFfWML}oEO&~3W57{fUC`&gNwNRCPkSLW2GFE4Ii`E%-efx3PUQtP9= zUPr9QL4W$y!qsy%xcXnl+5h*V33hjKZ`_b$yV^p z<@9~is@^LDyn+{GEppchjjdei3c3#?KPs#5SBx61jEr&rhRmQEnq$W1_7S6bGjl1e zA3LTRiAu_h8*8#9!ZzB}k!D>V$DJtN03IMG1aHG7$7`r&y}TO9{W*LrvgB&GOKy0|Mn&(~!$Xa@Ff)ZWT(kQ*4<2&!W&3J95AQT@ z$KD$<#kW(;91qS&ww#iVD^%~Hq)aAt?O2k#Th3sXIO#uLh9t0RYb2jrfp=r?o2#l~ z-tx>5X+;y%P|O={WNdMQTS0TweihVM=ZrgV$849>ylGRVJErHZ6{F3_!Q(-LkT+LH zQ#nd`yRU{W7U+ISA78{ec@&xw-I+dFMGwb}?W*}sid)WXN8KY@y#DlPSbf{_38s{} zBgq@KHQG4ud)Lg!_RQ$%II%aGIZl2Dw?oHD2sQovIp}9ii3)nWij}dU8vRY3gUn8% z7DSCdL|uu?ke-J{JB{gW&h7e}-Cyptc?|$L#~#+jT3-(%e^;W_mU++>G6dD!160j7 z-URn-lCRw7tLdjQL2Ib(S8l4HIo>g6Me%*i3hS}bGev_|c^9H3YgRMv zv|m-8_!!-(-ZSTpoV_`IK6}2JH3?nnL?B7ja;epE>>qMx^;40YVkIBOn@^RyowqUi z%-pMxskbpRxihahADEwKw_3Jk_C_nEf1a$2KG_nZAE*;%*Gc~rDNmq#AlQf`?7yR-yeN? 
zg5Mu~JmK$;jh~EHlNkX0@uwLA{n3>{a1+QE;rI1l^_&OXgtj@0x6Roh_vf@S82Tfl zdG|9MZlVulK->iGm?6<0eLsVu556)idY~Q3!03TH&Cu}UEi*WJV2&9cJG1 zW{C8}>|=rOYmR4_^cnAU87RGav7e#R4^~tAPRYPjeag)6J>?74K2z(-4JOK$vD}Kgx=QMid z)VB4mdcCQiSJBqIihsBJ zuEtfG-Q^=C=3GB}(e|~Y7wedY^P=wSD>s%jkQZlPpLnsR`oV=HJ$cxZa-;TIaT;Zx zH7B&*t4_Fm*4>EaUU@?5v-X76XZ6i(w`;|nvZq%Rfa*6`X_z5E!rT~fK^pbO~~{%+^1?gXmEs%GBUSPf+B2t`{h z^#|*2Z;Gngdyy|ybAM74)jDq|3PbH2YrlF$MYg<<)Xotrt%)$y-Vbd%R`#rxulide z>bKelLv^iur@g-S)H$rD>bX}Uj;O^y#=9EpjCw`_f@2lPzn@m`bc|qH%d?t(ha*M=F4Z*v@0{m#SyjBll`-p zWil$+cRJ>~HYZ&pP&=i57P-CnPSg|5le+3TnO5AarajGWcW71U*JiV84%9v$syk6d zvzj-KjB)i^ZFPRQ=&G&D84^XcqFkN(E&6KN@mAdu5KXnv)~$zn`Kiq`R-e_9SDg7K zZ(|Jgy|2w7Rma7d)ImyRc54* zvb3B7p)$i#d{@gUAzn#sR->q^C3`w+sZy`AdZkrn@2SUXt8?c>SMB&}TQygpJde~( zYc|*8@wPDczMl@>`o)LWx?X;GH=`c!K~!p}m!`8A!Sm=l-Zc7^7eG8Hii3D(CF2!o zDHO9fAI3L*)ueuQ@)eE5W%uS-`MEoM8}sGZlTZ_~$K8AB0=3gu?x;tcud5Re&)1{s z=W4p+R$Rg%^Zs34#%-oMN$=_!cOC5SAI1z(pVt$4?ej{(AS-?(_Hx8eeYs@+|Pb2^xReeH*lXh(E0v!+tN$c7F_N|2rss6YUl{pU3y# z1y{b^{YU)XM(Nvu&Uewb?}AhRy^+&r!S!!OU2JZFTNS-wXEqwn+CiJ;VLbDC+zT(u=uCFY%YDvS1-bT zpYJ}3_Eof4!H>c>C$fLsPF&T?xlYH=|7nOHd>kv}H$nZ=-EVil3#>oHpMD>I`d$3# zKnr|9c+)?sxHb_v8P68sB~#Km8_tdKkZXhmG@?XR9RQsvcj(*Rsa@Al`X zn)-4Msu8Z5qtn;NN3lxp#Ji4nV#T)ll9D^lkco8u9GaMWjjW%11MYNPELv@TrM_;l zVET5g1=DxytcSk3Y8Lfxw9?Q;ybDJhnaU2QeEa{|d$%n+ZX;W?p3hhG4^YFRZq}n) zmb-n9-R-u$xA%Ff!Qx%4Dwe8vlfHh(%-|G|iOf|i5i_u4S%ZTDNma%S0D-t60MKr* zoqIt&6b^k;A6M8I3N1N*25ld$u+XU zs0D#-Mots?;ZkLZ{Mp`K6w2ff5g+d3blb0>$UZA*-P#C7UePQwf!O{>8^wOEpn-f> z7FiEu&EX|sZu)8Zz44e=zD3eIqu9;Dz7YC}oRflPeAQ6i_DDZh5qg1K^uCo&M7Ffd zq}D%Iq)19-gzI%4r>`4za=d5s`3>y^xPeSU+WoFW{7Vl-S`D`C%tXB!^3Ik>Z10=7 zZ2Wd+vk~{qlWxX2-fZu;W2$i=B3tTJ>?q9bTs~s4gZWsrlle%+j%HyUva?y3tsTxn zI%KD_F!S^d*BRSm^bPGh)+YArYoGR*@2TC{Lc2USj|@Ify`R@_iMO>zuXk1`7OfDc z#Z@1ngc)F7DjHGLw9Y5r4fUN7pPMo=k9_)P_SxjQ`-xCUP4VISX>vyWJxWxId#!ly ztQ2~+G^54^Cn&H}$@#*{>F4{9ChFzA)ik}7_GkiX6#92R=ZHbAtEdIpcbDjTB}P{D zHg(mCY!NcnXp2%-+59$!UU@x)kh_kFtXq4exMvOW8(kDZiu5hmo;GgWTE8maZL8OZ 
z*Y|kz<@z7T!6E%Hj*Bh*aS9HO(=XYawfZL;agTn|=_TqRw%hTCs{ce{TcU|yvl@G3 zfaAxO(FOf!cuJXbR(oah_$V7SOIvd!*^bc7#A2JRiHXCu;EmP0#Akabe#?@lJRQ$o zu{~y8Gw$>3-D}$``ply5W4@%9#ny9B__{xyrdEzqfu4v_!QEG{&%O&%X=l4PI@&WQ zoK`GYe&IQL|9;q>*#N_82LQ zOCQnL>ib8^Y!CH|o(23xo@+ex^VE}UADgxRV0%WWEk-5SeqWIko*4)IU!#(t|7-L< z_O(|8;lw7++U}?I%yzO+*~r@{QJsN$x}Kd1uUtfJu)nGjyYH1!fg0w$zAmco*dv?( z-cRA}Bvr8YUg_F19;oHKv+)H7sCXIbZw7k(7|FduG3|KkW7GcsqtN(@)n=7(@Qb3qa9cqY8(oVzJ{4 z+W0SO#``&aJ?)P7$Ti0hXR|P3S7GTUMIW;ttiP`(s+jEvhqxOGXR2Hu@Ycg#>5H#H z_Vbvv$e)c{eo~@cW_xIbW_eXh&61bpy>EqQ_3)4e$ih6VA+k`52FXL%(lA+wTN)?} zZ(T!Wp|u(;3+WgQM{i^4gZB9wIFov3zhEKzts=gLx8`z(6Ry410(!u2?LsaGI~7rD z_H45Ge$EBnO3XI4qu$@w7az~Fq6+fqWV@f5XMpookrBjs9*=CE8%@83c_>O+b$4m~Xj;*|vG-UY+-J zirJxS`fW;~f!gCACVQ;Nm_TcIo&@(r_x5l(-u;wXdFth9&z$E)@vRhPt&buhd8@r( ztV>~5-{w-9h0;oK7E&waSxCpM0!ACGAnenZWfzNdaI+30hklK)MwgLcKh@hQ_8_N` zyd!C_vIrUMbIWDkgtD16Ron@McQN{^mT1ok#a1Wv>e!kl_TO=;0sH$G_8V&!w^sJg z(VRLf8s11Ka<5mx9nWzgyVF-w)y`4Tvqt|LEWCnZMc%=w)qM`TN>|yMlmeNnwQz$sY9wtt*@m2~*O(d==T=Lag z-^Ew(>MsRpDB(pQmpz>Y^lAXj=qc-Ji^lf-uwQEgneFs;&0jAzEi*R#U@On-HvR6= z^J>2$E52&uh%BU{3;YDbJkA(_I*&v05!*Or1m<=eGX}kmbMg^O95e>YH8HJ3K4yu0 z@-$CbnFVb<&Kl9*Wi?_xrpIX`aK=^GlsR3G^YVLJY-WiA^HDuc%*Sct$Pt*>->>7$ ze9Sry9f4NIsrh(q96JKDj&t+z7zgJg&Ew<|cqNX`M=No5KGs$oo{zkY(?=j8>KWnP z;AHAD&d={($phqL9m*5rq0jROSvWthJ;(Tt`#kILZdX5D)IC}H^=68)U-V>=5e?0v8WN&?mtjt zWa{UoOzZXK65&v725U0vU7ZKh%Y2`ibo|DCBXffse&1`)eXr`V2mNoy^}NSN)V=TV z@jIM<{5ih~du=f(W)io@p|6G5gPTo$v~`2`exWbjpYEyY z!p$zAhO?JFbqKA^*gGD_$O3Hx>^ROjW9)Ha1+O(9^F3vWEwqm3#qd(=6YGCZ-%0Jl zI6|HlbnpV~ewiqg(6cAjN9f0XI(Yf+kx{}6kTEJ4>*)AAw$XighHOZr8Uj?{arwMO zdWHS2t7*pEzpZQcV38nWG5T0lF$%7&qSX|5gU4SX-n%L(k&jKDG4e5agak##l%Aiz z%!ik=u8bqBVy{JrNO?;ev-jL6LoXuF)H`;#B@XvK;j{rfoQO=+v`}vvtFpn{yr`|^ zo!p8$y>L?)*3(O)i{8RI>1OHaG#{}FD8FCi#&n~v`-sjjwD5`>BoS0&vjPg}A*2o( zJYq@k^@Y)aHR+dN#9ObYbpWl~PsdL)yfTTvZFhJZ8Z!`=I=pL)x;T#|;HN>e!>58C z_ce$A2P+oQ5uT^OoJ8js`XI&$`V#Y3aC019eZw1V&~dmCNO2n_Z3gIL2EZr0BFu_M 
ze372}eSXlT?)^npM@3uDj2EHRc4ypStzCYMT`x^m`n4daDNZnA#|^81s}?pRva+Bd zQPl^H41Wqe#$CBludnQ$L1JQeyHdB@*-YS0#{OAA+=!I`i;X@;W)v0-a|#^-Z^}>C zp>dB_{EX8(>m7EhVI?9rG(Zk6Et?eS6?8tv zH|AHyb0D1gJ-LjM6WO{$cn>vlz!tha9<$ffO6=}-7T(8e8~J(M5Lq_`4bp{Kbsy7Z zH{vglavLk^BAkLzqDDq~9<3?H=G8Rno-@P0atA=dh z(@<1JYMzCH97A)!dOo#ZaV|mW(R7csM@)F}8842zCB_&>PIr-e@(bz^BQWVzHafdGWb4NBvt3ICPtsRfQWsApia-6L`4PQ%JKAwwhTR)1I))yFs+~X_geFWNGwz-y< zC~Sdtd^S)$GvtD;SNE-D3$u|=VdL3rwrh1ICEJ)i0;B9Vd&@T8S`&#a5c6y^QP}2M zVxq7eWp|0k_B2+O=xo=rsoCphoCRes+2&hLBC*|LF`1?HnilLcV$Tx0DBJ_uLyZwD z+igqHTj5btzMXavPze#Qiy~drR^VmPSX~RL!A9aOrWaOOhOzZsZ&bo*c0W+n@Y1L$ z_M=)kjm}Yz+{Jh-`s8Vr3GKZSFcG3fs}Ej<{?y*BCL_=C3UxvE8$# z=+olV97Y8(7HUJEcAdne6)ZP=#(i0wv_f4QvLNw#RdP7bS}M4FJym+4I#&~);am+) zj-t=V$;5f|n{6?gIKfw+lZ?{_r&!gPrT7UQ&QhD%i$*I>5wp~D+M9f_^|Kt`S-%zM zpGXxE_!G;i`#kySyZ8+8W-qR!IK@`8MQ3M^=Hh5kgVEF0MCGJbzlstcG4fof=V-Sz zoO+igrzdI+*!DuN6W1_s4|k51Y|(P`W{acOq+1v&-P+QhYugMN&e6PWPnNnETBi2D znBUg*QLn4ns~(-rZ}pKJDbp8bTQqL%o2sL^`_b3+{VDjE+k;bZu(TKBd1&p)DR|hn zH{-ckw@2goXzkUhc%VHSkH55c<2hKjhvWG;-d>L4t+l74kbCT{-p?F3>ePfqT1rY*=J{8f6UuH?#ibViEX~sArjj?)`pq5HT}GY z4riGyZnMOFqo_;r9dz7r6pxM9>7ritt;ORwU9QNw!*0Bus3AYD;T-4aeU}hBBja_xjbefAyn)5^6vFW-{FMrMc??s~mU# z#QW(|2p>~hqY6v>$Yvsbw;tyhdf4kRaL*-V8>>?x;jZWFyC1Q`9J$$)JY$ayceY~` z6jF&>mXN{3?&`JOlBCl~tw#}E-#wO|o&&SA`aQfgtB6Wfcof_bjow(t*=Nh%Y+2=X ze6{6lxoIutIwx^0vxdE8$JafVe>ToW%-+@rw)2`XgLB_#)C$V+om%-^_ol{4q0&2Q zd$;tL#`TMS)i}h~{~C$ye%d(1b$@OgzTWR^rFY8{9M{XG$C#mC)xKjpj!SP+*9*Nj zN$z{RN`9-~{UBv#$=LRJeEPhhmcOM>qt0^P&u#13Y_0iI@V4cT?el2=SvmhXNXOO; zv~qKbwRVihJbw1qJ?im%?DM$C^H+Q1$E zV~#D`r7t|Lr}@4x`^a{W56l`9uX#cpk=NDlp}|Zw+o#hz*#4H9_OtORNFXj~k^xKYpYfkxhN9N3OW~*p^+dc8t~9b&H2~4n`sO z%#nU>I2UY>(@g3yB0AgqG*>o$zErTMZ1+e*ZISTOHEw+I8pJ(RxFXo%w%i|#lAj8A zYZMjr!Z~$b&=>QbpW7(l2C7TzMci`YMsnrLm#@qU<0V(WHy5?I=M#NJ9Y10mWU(%d zrk|q1eO%A&x9$dE_$6=cR3UEW)$sh*s*!Q3B|cZE;>WE=7zg-fcpdxPqysmt$EkmB zl$G~~wVLIZ=riT$5^Ad2Zx~aDt=0!^Pck(`i#9@Mtf6YP$Qpu9ue8*2HBF1B);%qP z(q@vnUc-@c#JkaZ&g6<79NVIu*0rG8cfFsFbsuazA4@HI3J$jYu<_g+qlHhw)m%%T 
zf`g*P$D<$C^5gkgT7mH#)K+2?dTT{SA@^7ry>+2AVLQf_5RL5^%Rw}@_eJ+=NgyuU zY&%69wugNG?7p7D2C?64m)V8hhBjhNwYd4KjF|e>M#_6E4!>cm zj}u6l_KjG>%@= zP_wWdqgRN=c9a$&9@`N*gIH`wY6xPo#miB+i|nx1tzH2{XFGoN6Pa!PswWcLJ*&Nr zXiG%&^MB_W8D&N7ho46I&g2wzDY(%TwJ*3wS3#ZkTB8#Btwo<#AF~BO;0?vm7_ML- z>xinRd0rwO?`P8WdBh!git&cdd2M5SYj^gdg2~^7i;qCPe-WLpRv$zu?pg2jvbpZ* z<;U_@q)KXDE54>r;&ZWvrP2m1?OgrP;+55$r!fwpX07!Wb1afp&$PWMn&(`sdx)ji z8>HN$KlrUaN+4x=Gi;C1EoyJ_7+$D*#$zAWP2>4FMt7Z(t1XQ)o~NG|Cv)96el)gq z=O|uU-8u@nNB8P=E_E#1d9I>FgZZaraf5@$0_quH-`I}ToCXP-r-(GQA<%w)=I=+#dGYR-XeI(01tWp? z+6qP%bZ@8XjTdGk;v?+myC@9`dKghyPOSAfJ>;(N_7r;k%4oivGJ81b zuQ-*4bAC^MdcEn!M)b~do^h^01spS4n`bdFLeGE7u*T79?G}~YR(nM$_GlPtDUP2e zFNeCPPi=NF_NcK!mC{rD`^DLhXAisHhodHB``jco7s1zqzn%ENxX$biNB?}%>1xfx zF)Ju)U34D;&JII2V>Ho6yzYZHgW@Z44Oy!fTZCLqNxb)ymH#YN?({l{-1q1reyfi+ zNtu2e+p;3oEWX-*y`PKax;!-p+w1leoXywucn%J)`%(O~YjzZJ&zjb+Wv*Se_s2%+ z@gXwX`(meT{Pox>8+9x8%Eld`sX4Q3v-L7@*y3bD9ouD(=~&IiUb5Ya4YPZ_FLuo4 zrj9MMvGQ#Q=a%gr+rgOhrR4~ihw&Fi?<^;=hY~$5wxEc${M_1j?6KrePw|~)Ob~fS zGXv=J8;iqV*e~3>D#sD6)SAZd=!wppPIaBojwVk*b^1MOtIgO8zb9!TAqoRcz z7izJWjV=w^7OHSxsD@jGi<(BbS_W8?WD3W(PN=|JMHW$=UDDzP4XId~(&Z z-9q?x83MAV4~_Q=9;yQzEbW-Qfa0Zsc74C zxxQ`DMrq(RR#Z9ZlM{&|>`BBI{RzR=)sqNW% zJjfVdJr+b^>oJAz*!qsBouV!gkcC5RYxU2G#RG zB({6TpPKCT$K7=7u*dZo_Ruc{-Z^faLR^9qEOEcmUunXdMbNQvde{$F@zzSoFHuvg zgKfS96N&8}$)(@8tUj)9 z0^CE7dXz2RU3oKkS8+e>iPbM&8$F!ejCxgmk`kF{oCuB6gGAxBO@E;j_Db>AfSzJ2 zjnR?pkq$~7?RqThLRljxFJw^}^*og<+>Tut-cG$xP|MXgxP zuit~H<@X00W85&Mm1sz^w)cJqD6%s+4~o;Qc%cD$>a9tjo`2LM)@Zo;Gl47@s&-IGqN^h*l8@W(o*D1BVG&ui$C91dnm*=Lt;xz= zU9QKp7PL^uNe-+7RBEYb7c?<0m|IvsMQ*2zJLp{)Wms#a%=^g#tW;nrDVN%1>;fQf zin+~@dAyShZ+MuJLj-Di9$_Ufb>#2shaPSt(05lfCqc6}<;nQ$M#6L~q{P z$f>m;UTMNx!5B5j6y{$$PS7Q=Zoaq3n}n4!k>|houGEWZ6~8rouH+i)03+^RmtKOL zKI90i4?c|QJ+Wft{N5bYruhxQmtt!N`nZ2swL4gI?p__W5j+8{+{1$1npX`Q3O^nz z3%3&MBND_3h%4dAytKP?bM)$+%`xt0RjvU@Bv!BQdaI=ez6<0B`V(F}{A$M*~uLIo(CBrRwpQCQ@oM ztEbrA&);z8OuRkwK5VJ{!zUvG!A=x*Jw2U z+OE<_9$*odyG~>A>s_gl$m_dSBRM#{t2L67?Om^tTyR%x9Ol}t*+}$5yJ{o(Sle|Q 
ziC^!^jl*qs?M7nHclE|0*SmfrQR`j7Y}|I&FdMP&JO<(+&&l6Sc6bjJo^ND(VSXfb z%~5B|O3e7aE-EiU@5!`BaX!_Xp}qqr9kfVZSpbckiGpfOoPxTro+xSnNbiY=UMsn$ z*zeY!7@~xhy{H^nUBl7La!Xk?-z{ZIluAOk5!8Lq%Px73=tX_)2D#!ME8F|zTWy1m z;lt&BJThf#Jo0k=oQ{LhW6-X@QGM@OpZaylb;x$z%BaVLXl#%7n6f!J&ZElaB6~e@ z+}RQ@YxNO_ZT_kw65Bnit$1?qA)IP{=$e`kSf9 z@#@Z9A+}P~JJ&bbzMSg56yxao2(K-=xt8|2mG56>M3N!fTsg%v)Jl6xKU4l&G}Ve| zDd((_7AI@f5J|s!NO_MH;5Tgbz85LeMzJlDzivI$-pBoX%va!e{6i~rJU3jyY;#Ym5ru8u*)$@sg%-*~|pEO~FSA$=K@q3kW&wcE||dRfkd=}%>`M{KumKFjLg+KH>Rd+_q`^(U@4nrt)A zR}q74!OA;LMJ%?NXQzn4c7DHFmQ-Y&i()U>jyeTJJhpppV#^xmyc16BDO=u~me&*g zc_H%7cJ%2W;<6oc7KmtUGf(~ygKdqKcg}}sY`5-W%aT>kIQmmS750Pe^5(g$9%Y zfc;>5O!l9(U&xpGeZjaN@|Z8Vi_VsrNXXt(yS*|}uY@d9iK+WfsNm+~OUo%n?;Cw) znMvFY6t5Fg^s3NMj5S`!z!+gCLo@1yV&SxZ4&gV$SHCqL75#13UvI72o>jti##)!Y zHO95(Sy;bi7{2$p&D`}zIe%vB81Gv2k2q0~VS+b>vF9B{=MxJ4HL|{mz|lCjvsU2x zmGO;RW3{xEnyKYnwQBC8jLc#?AMsf(bD+|eZBKOiv!$)jF^8xsbj~6A`SV)zah|Le zjGXyUTY+{)?xs7dam$L|Kh$d#)+pmP`!X&{iPBj!^1a8Z@f%(32r1HsWqX{(Q~B&$ zT&!u}xkb$8plIvXVvfUJv#8_Hms;#p$Xl9w96w8oJra2t5skwxnp9UQvbEyNdMl{4 z{ys&oPcL`H+MjMM79PsXjFg8hk7S%gEIpQy$lD%GHW#hOla08?Bhq<7^nBQ6+FW9= z&05u@&o;|`kv`it{#3~f5!hzvBGP7CFfuF=QP>teJ4z}FV(F<)v$J1pv-LP}*yiak zBCtJ1Z!s@9fAj8`FX=I{#s0w$mMcUJZ+}+mdV19nZ~o$}?TzKUqO|yW=Z7#QWCT!! z8t)${fv?CLcdZ=tW-Rv1HsytWaV(EbDyWlJ%s6GS&{fR-I@P$Ww{#T}woWxFZKSG` zI#S#tef)+}h#UxRnDdIx*v%ExnniYS9tN)v;Y~a24g0-w@WW8a$rlXqMrcnT*camV z(9n6COK!+j8IAc0Rryc%A{({UAlZm}R+er>aRsr(`06E! 
z)Y-22Ppa`H23yCqr@Eg9vO3l6pN5fo;pZU4HmK&wj>*JRCCR8%?#}u&s#Z!&cDS#5*5k?-JZ)^1S4tE8B zE8O1YckX^4=p*8M&_cZvZc6JPttZwyR8V{MNW6Lqy?hgOhF$- z_=L6hnec$?iy_EL&Q}po| z)9Z3~#n=OGNGC)(oV`KB$F%3EEd3>k=c9xY9!N83`*_HkRp zxo@iUdhf&1_`xlT!ZTi@dSjA=Xfn&oUBguRy=(Lpo^VT7?HjKl#J%Nps;IEnobl25 z(d2ZGKh1A+dj_P)%q`pE71w!HRbKyo)MfT`B=XW%AB(;8<;S6~`}!mCm$AS|?Dbe- z90!agMk2Pc#yIq~SY#afcC0dvhvQtt z#bV>okB!yFaddwycPd`yvEDc?>R50j>anrnNS@YW$&vVVtT_&MD;6EeL5WqzqLx@T z8@G*hvk~{i!nzokF)!PUh?lh4X2rLp&$g`|Qq3U|*sfI}3FVf;zu{|cjV%4783RHjgoj3F^pW2yYXe-pz zAx5~KsyDoX8@CC(v1;P}3QNRlkOO~V0&^8Pb$wyg0625`bozZ~wV#;#YZ3LLr8V5b zXYiK{yxv#EC+e_qjso;CqL?8K$F<(Y-R1rB-maM5I?!WQ1ox}CGPpagt7pX;t5*uD zQPpdOy&(5Ep`lzoE&Z+a)IaYnhI&=C2<4$OX>-u9Heyd-yv0MFGTry z5ZgWev}#0%&34^m$U@uFFLQ4+3q4!2a7@{@dSpyL*EGl&#*eTI|UY=WYcp3U0IS1V4_N9bM zjGIdv58O%7KOW_VLT2z0y=mtC=Io)(dp~cu-{3pz;i~`*XaZy#u)h?UG5G8sEUNAA zO{*zJW){A)5kTBu-JzQ3v?v+U)1qX^Q;VXLFw)jb8Y%Zk z9KY4YG^EIuUK88H(x~$EiO!bjYzr4_(mI}tHEABtMUnPV*zIZaTIwMZb;2bZs&cvDS~Lz#ZM=qF=}Lmil)jV$sjzP?!399CFd`*_f^V&qmy1 zA9Qws9x~f3D?s{e^K?ED*y=SrDYNBXjY>jDmu=aH#m+!JidI*%7i{&KnUvWcvsaWg zifS9cKC!L$nzH(krQg{Hwt8(&%53wrI1$($qrEkmh2l4(9tQVz!?(lz>bM&ncN5Rw ziH+Ugi~E0Zm$_HtzO(r1Hnh7~xSvxzw;rG(8c|mNe$_U0-a+s0BlN~{+w`#)_)^9_ zqFCH=eq&?(a@r5o%pz`gzA<{LIYhz{n|b^Vl&TWgsSGPbSX z^832fe_4pFe#}BTMt{;r!keZqP4D;Bc;J=7$5umwmz84oo}wlPx(z-SYJOrb*x+Ox zDuyB!5|ybp_6rg8`Puj8H9ehv_j^rOHqy`^{S@XNC~DZ8FH~Gbs-tf0`^YIdf9c8V z$?)!GEYwL;dAVr!d5L>(PYThGb+hK%hM(tFSf^Mk+}9C8e7jiKC|fD_bTFf zAyQ_m&TWz|+d2Jni+GW1;9WjZxgZMLHg~5VvAnz)d%;$nB_mz7`qe|qY!9s(#{T}+ z+xsd1^vd>z9-MtPqg=k59UIx^cUqc~-JY5XAaz+OWSSHPt2!4_2# zdRqHt8kUQz_@(s|fA8U`jIUGh^H49QS%e+>%|!Ks&CVYE(%RJ)WKZLpX!K7SCPmM^pn_i(Q<3NYrP31A^a%vwucO1-mqFY!%&fNCj!27o|s7N4N(8TDzYw zN-aDZ?5V%$T9<{x$HpOc-B?MZt&oCN0_{gOqtuIf1XAwNP-P_Gn>@AX!actClQzYT z1Imgq*A~dHd@%aglitzNhXuoa!Hvl-;*6|$#rlABKejtma(dz`P(A**KLhn$Sv*75 zK^^N8ciKV^sr%sa?mdWT5>^rT9j-0W#EZceJL)B+eXX|qAnUO4vLz$C2OwR|- zGb@>B$N2ks_@nI~<%cbDLv`jRucxQ5tL!D)V>3T_qi`C#$^NrFx6H_KnWMas7-6Z| 
zU$*G?ILk~_wm?5VGnF^;^UPHq&M}#-JoIJeD+}ouuY(ye*fyL&MRxL-?-&Bh^Wm{Ihs=uVnw$=<;7&(Lx`+EFznghJ0diB$u zaX*LK)cUx4W+Mj=!0!=31rBy6`}sRWua`qqe*4RxekK)lN6-xsE)Ub};}myhy|H~= zoVUl`X{7F8i&h_Js9w0TU#OnxAGMZBFE^+`z&^NQ&6azsmel&VQ>}zGItq^J+(@sN zJLhdlv5(8wA2mBP2=r(~`eFOr2FC6oSfkU|yF06A!RkTfPh9_OQ|sdnwgnN~i@@qG zG*FY}r$Qog=r4odwguGTzc-n9ZF8DI5m^kcsDTEEBHT%|5_;cK>YLp*p-lN#CT>I*l8Qhe&@i5m4#uFDaJnX9M-S8N=aioGC@CtoV-I3PZkGE{pw;IEtWoM4`91i|lER~T zY?A-hbYx$yl-Y5QhkM6?8D4rQB@Zz_+w-Dpl((rbqaF=?c=YaP!20;97q_;hE(00| z=deC8KX%W|afFtAf>(WMIsqQdYx^@Y)*8yF@y?x(IRf=3w6LlN@Vk)7fX@6NqzidJ zx3lo)XJ6TGtRcriEDg^JvAcGjuMGj1*fE~(a;nD4={s$_P>wURYT7tMrKak4Zo-D^ zIWBQ9yd~U@t>&IKQ%x5y`i+%Lyics_1#CMca_;kJKnQ-iLp z;bVz~zaK{q{+6up`z3V z#733~Y&}x~ym$5+nlrjiNttc`Z9Mdi*^=V2-7~9V&~%&_w<8YP|BT~ z2c~CVnO1%c1d$Nf;!53dQ6EZnbSRAa~{}|3iRxS&B7;> zx1*xy5hna66~$YV7Sy_8moQR4vetjz=uz+QXvc#Hin^{tJ@5)j>@?tY9*?24vj8iF zIIu(}&+K1fx8AosLL7@6El{vaQnCyFFXi4g>|wncmu{y7l%7a_o;{B$aYcoM%QEUiMp~AFD1|>&x3oQv5Bj zlIK#-4(`12JD`*!3|@DHw}-JQRm4gh#0p;VJI|Txv=??o;KQJX?rp+6 zYigU?_0(1jYgUreJ-5+tUk7zCk;>*R&i!s*%rEY5RLaa+&P_=l>z27mEnVyqePQ=k zVr=~E!0SoxLqfIw(!5Gp+Y4moq0=z4wBQ-sLkFCjH9Vh2Y`*8E(+^w+irVXTE#$UiP$%_u zFLGqGM(~Kf5{m~}YT1-Mc5kWBMy}j_d;AHxW8@X^CuFP@V_Y=BgVhmzU^vKV zKCscoJ_J@X^fOM7y*nWXIAM1>9$;g!|9)x?PCE+7b)9w;oP$%1!eh$-;jZ%2$iXRB zqGO+G+`TgTG^6g=c)RKpYv0kYrgyVWfe-Hpr$tVO{ps{#%PH}Z-#i`usU#2Yi{WK5 zyivC2g)n-J%a&f`@&3mtZLVXVO5dH*&SqHH9>0|l3^X9lPTzct(0XW57D=;y6DmTF zblG|p26noc(fAPZm`Rr{a#Xmr5=``Tv0`N+VI(Bw$1?~z6*b7VNL>jFS(3TL8hHa^lV^yqrT!_TBM)a!Lk%-Hx+OgPPOIq)n zV9jeaB*&R;J>F|oxu?KitJ*ycAIMQ*gl;Wk2)l^f;W$qEtA0<%<62elNN(WE9jY20 z$;+Xt;_-a6RmUTF*se+*$wl!sN3P?xs(Di?yonap6i& zl|Li3%;Bo^abvKZFBug(tyi^=8-aP%`$*jSHtx~5ZPomE4si0aRMn5fZ*LAAx%TH( z`Q!SHyI~Gj?T_RIdvEZ0{H1|YA=AV3s-jUkZ>^>aRSno#f|oy18rMz@jGK+RVtXtq z&a2d?4#uH-G=~~;zx#PCa(ilF9DZHpK0noP8Xn;H)u$d#!$mn2aT@&7tif6q=BFx7 zGYU9eFu#+1Bp-u;%c8adAeb?sEi zDSF?YIvL3YZjyqp=ciJ}A#a^p$>yLv)sm06ypcK^aZd$;uJVBU&}_@ejSP!U6t-(` z_^HO37;N)*Vu-}H-UVQOTBkLM!j|W3mS^3w#*;nbeo}ppN)~2jex2jU7M@&r=`O2B 
z>$f*#p^cJ_IZj4?gtNppGXp^kwpp<^>9buwXPqS*#mCCiQ3a!Yp`z3V^w*(s#$ea&lxD91{rsfxh3eQ#TZRF_K3s2 zGtO|I0$#AwNit>wJ-?Jvg`T{yd(1o@?;lIh!EQ8iQob9GdkuL`tw$H|Y;f<&!p)7~ z;k})sdMF(;P{v+Ixq-QV+^&q$JEQfr{r27BxXvR@9mQ+Y2e|Lf@8w@wy51S8_snTO zP(6TsMEZICBkTL_FQhYHpNZ=2^_`6*JS5~Z+>*z1te-J}f2^Bt_;CK7=Vc}3k=3lb zw`iSG-&+oQ^b$Rt`V{V-@9E>vyK)-~UzPi2h}yIipV*4kQ+;N#ag{*#g_PTO<|*j< z4r+c^f@X$5?+d$k1D45O0#?&~Chom+3*CPyAK60t+Nf)1jkK{xmqq)Pc^a9tm8-2t z9<=F6OA`Akgja~VDO(tZEzPseg5%=CtZ&}7g|H`h&j>q`km3! zS%0Udn`5V^?yYXsDLp69(ycb9rkjZmlYWV67SBoHz5;mj#lBux?g4psc=&joCZ@@^ zP@bH9KLF*KVIZf7yPA3OKcY?Q-f%r&ufX36$1YR-B8G0{9P9IG#4t0;S#jDP(ZxMB zWC)Na@b}FgeaxoCMqP+-K%_R*BrL7g1(XRzqKITTX7_=y&z^Ja=l z<1~5mm4ZLg^0>+0p44&Z6@Z$NES!QrZQqeCMP?6sC3kY#xJNk7FS2K4_bxg|H8t*j z;07PuxQwVMN+0=p-0XoYx#EQ!ynTVU6ytk7T$`YTRSu1g*f`qbIX&fiT2d}|eye## zP8BsUkO9Ryxcf=}#m9F&uNu+!J>TkTX(@-yx3X=mkZPxX3vta#??&^wDh@H)-4#qs#NBCeg+W5xM=Zk0M;2>Scn`uA2q z2Ycpkr(AbDR=JSmPoM|APAxv#c>fhKlOi91s;wJI)P?P5NT+s$m8Vpp?~uqWWVMbIL7S(@4|XZNYL+u2yfu4g0B?&ssc2lid_T$ymc zA-g|Izak&c-xSLoJNhNry(xZ6Hd5=?WFzkJdl*}na~*rWrl-tZfalZ6CcL(Yx{Yhw z`NHn~LwgPGH&%z_l?Zrg2^p&h&Yo}CL+<>?USuT|+yqhb^URb-J=Q|KJ$Dd4701~+ z2F3HX*tw0z2n7%0q&>OT_NmPhDSOaTKeu>HlXWQLw1!f98ag>!Lvo9Z^D~M)^}1Jl zB^m#wIhEPYfAoY~OR?xi^_jPIEY&HvmiqM+@2gLowK(c?PHPC-^Ik+~t#x#*?Xgwl zB0mdJR^^%<%l1$T`XM{_bKs)3gX8$wwv*#{Xe~qjK4X!K7QNWnQ;tEo#l%l0q3G-&G1#_PCF;-XAM&E>cjaVa`dvDC2=lvkvT*V}HOdBC&oyBO ztlZU;)w@G?`Q%~Vch^rAa>=l?>o9N4FYgM<>fQETLV1X37ag$F&-tyl9HV#>lhbXQXr z=Js7qd5AyH8!GWO)SlRYIZo&no@GOAW8~ykxi2(ArH!e2bH97WBQI838{sVJoXo4tKAl;N08?+hv6gdmLH=c=sA-Vf}7|SWWI@+ug?5Z@mAA^YZ;W zsEy-NLtOPCG#({CdpZjMJU*iq|oJIG$lwaE}Mlrl^f*XrG0`k`wl_(d}?HjPn zeqRy1qVn>A5(yp?Zqc}y<^@%Wy_%>UE8J&`xxtKM1S7U`je7Biv|QKcD9asstcaC>Y=vKz}HYRv2+^s{d2E^`R`$D&_FQEU% z`g1$^gZX`_v0Miu^2TkOS$*Pe7v3AxTBrHgd7TdKiH5b!>km-X2#5`#Ly$>BKM)(> z24?J~DbCRsO^sOLM?+JNPnO@Li*{SxigP)aw(jue30~}h=Z6tT>=7RuzM~YUgqRL1 z2yzgM?>Gi~q8EO{D5gK&TT(hEe(B+qIBe@U6K^5mbo_6+qCZk(OUlp91ON2ww{~x~ zpFQ$SSO!vLJUD~*#c>gg<;c9BybIjxS^SvZsGhXha`YqomND6iHzU-SoI)Zze~);l 
z_~A+nWK&+7oYJ?WYV3|LO&>Yiiw+UaJeP!0>t5mD=nVomVjYfLuMIRm-?T2RFKAjMhYZ-Lkn|--8&l#Bxz05^M zy3AS0Sw`qBXRc^s^|-pW_}b**%a1Ef?W0D6YlnZ)< zxN+4hPW1iqRrtje#koaQ_Q0usWm^A>N$YFp9@Jq|zO*Rxw>Il~Yw8H_HQ;fmS4yG& z^~8V9>^_`)B_RC6az4noW~>kP<+qdfmxq9i1Lj&kR(O*D8VY$aSo<73f*9UWmYYaHFSnM+fS1ZlJ7Nskd&kRbn5yT z&#ORx9pj8KrXTm~=xRonHM#on{JGkmIv1eqHS6ay`d#u@WaK963(Xk_$FIh{;RmxR_ z7GthMb@>tcTv=>gLQ!$^rDfg}`ht|%_HfHlK&3w<8j=B>hAiQEmkuB|Y`I&5QxA9_ z%y+c5PY|o_Jy}KtYy8}I!gwSvTNUBD`}yu4ky3HCou%gHO+kdgT9zTsH(`3dxKtN6a1q>-brwx2T^|4=^vl)ThA@6+)y&kLW9k9qF& zbbK(sIFf_=PT-Hz9n1!f?R}lk8rj$7eRkup4`mwi$G+SUklo|<27zqEJ(&;g;vm0< z2=A5opYdud+-QRv2~pu+-W~hIvK{mH0QYdpc%AK%W5rz$xZ4vs8A=CgYLN$u@mY^X zoZ8RfK8pN2Sv$Mgs|NmgJNx;lwRGg8Eu|+LanH)pNfoo3Y}?zHRsALcTV(2%w=?Hq zEpKnmL*fmA(1>{`CD%8<88;6xJF`Z)VB6jvoz=ITj1v37*7sh$@_fvkQ4;Ihsk6qg zzP&mNqrR6q4PXML9 zSvh8%Fy7{RVFg3%JQk^(Pt_IiRYWceUFB*Z_4TpeGm50dw&z=2oJ1~Y5!jYdoNFhQ zzP=xKZT7O!+u6-V+%x;SS?4UX&6#6TXS=3>^J2-oxnzIX9y5p3owx;~&wiDAL$RkD zx#xVHQlI@Ov^ioVjq_{t#mv2;t`*%rP9mI{51&A z#bSp0M~zWCUzk|Z?Bf(S;vrv;_h&J;ctf?H$8BnT+`)gvEioU=i}n3Ic;^ug4^!;p z(ko*?Il@^>rH>oL0oNA0%q1|w*A!M@g4o&Rk zIAoddHV^KhkM(c)x?%rluT$zBuV3?Mh&-w5OQxo*i)e&+h_)uqJA|+nRrOfG76T_p z-5Z1t0Z-}Nbjk#Ozdfx3kZ27wy?w8)ChN{~8Y)3`skFL?4*A&E8Rl92t)nz^N#N7f$ z20b)5THK{TKOWkyy1y3fzTzk54MqERKm8xO_RH7!|16pRb^1E~aib<`;y<&fS~v1u zCxQA1%q1~!uZRw#M*?;Gl}lNm%pHn-vUhIRu;;PSDL7chPUCnuCbk;MO}(Es5_dZ` z8^^_Z>^2U6JGL9i17g2%c+1#u9P&JN9EZCdTaM$QjXg(VukE~z!{3fw$MVp|w&U>E zW8abZ+dE_9xY&xFM{=+hTaUwEi@ispm))dss9Ul7NDh{JO(U_Fv41vpn;XbR+!J-_ zqAf;OY}roSh?V-J; ztWnJNmf1J9>orlNanFG*?gwYK>D;KG)`2&5W}H{;!@~|x-}%&fd_L*k6>O>W zR{6dCsm9w!n{(%uFJr{dt@;wD;r&JRc~2n0+nD{M-o})@`?2ek`bK}x`r!KFewgZ1 zFzUkbnloyeF(>d)V<%L&lF8{(u6jqlrJGanvp&On=8BxM?fF)hXCN1}g>1|GY|DVk zqTJ6zs|U&+`-4d*xVC)UWmVW`wx#duGL+r#R>ras_sF15_Bh^bb7q;;+2)Nd5!fCx zs?^-DdB^-b*9zO;o&DwPk5)nb&DnpR{qF3)&i>=<+q1t}Wa`ziJrwa?D&)T3|Cm$g zvwo-u$F9z!(CLttN*_0WvN8U~M)tpKY`^WAr>l=v6l055l>0|_ol;-F{%XDX-P!NY z{%q|h#u+jJJu*}0M0zbawXUMZ5}pcfbVO9lca4!}_M573A8Id`t@hAT>YK+sb4HEB 
z@mBeY^HslxBkD4?;6u!gtzqZ);nGh zLtKrwg^&$+F6e=7Rg&kPdD3}C@L}3*=({xxcJtZCthUqY_t~~+^~DXf{Pucz?aXN@ z_4VwSS*Dl8+2%@9MMPV3wTCzI$d)|%?jHW;cc(Kq-!EA$0;Ko^V$t>&>@P>U>A`$#U9i2F z4%2=BTdQn6=6h;wwtjotg;npM{_^SByMP_vqSfck zf*agS%X+6?>%T@v!4Yn7>i2o(Toqbg!#r<@@`%tU?8) zKCbp?bXo>_|Eh?7ORMkUjn;-OdBjdN_r>sTXTLPGl;;{Yk8;u7nh{yoz$)5ndQ?N3 z+D3C9bUUA;K9$9`>~2a;Vc(9OIr5)Wr|vf?hD&GJb0s zNagJxgFe?#{Q6E;l`o28gL}`QbUfzriPnq(DC@Sh8>B%)WGAg=NdX2x7Fa; zh!v(41w#9*7HJ4v7I97n4uiNQ8cFA{<6rA21*$Xz9&L|~h%^N7NhXc-!g z7;Ni!IcxORV&FWiWn7zubd0uQw2Pf!ymf>(XpZ@g{vKPuJNo>iwENC7-Y~c|PaXZ+ zK6{6g8-2AA{g}uk^qsOign46?9vM`7vmTGF$`>!G@QnV2Wj49j?>T~Ae&=TzJ~5f; z>-UxML&?jNlRYT?k}*f=&sE{B7rZ0lFOQ*mg8RR)Bsgc0H&Suz{_#5oyMKl8EC=4X z#~F?+owN@pGFpB%h5d<)jC&|!bas!0h>X%&-y&-htNc@Y-TG^@XkVqu(LSWe7%h<^ z<8xyx>-opGzVp`D@hFjTsZnBMg1fbFzbvZXGDqcjoY=V3II)pI#oH?zo7fK4oz=)b zPM5mlQ6i&TqeRAb8D&H)L`&<3t|+0+*!1^sPn5zar$5$UGambZsk1 zJ9>8}*{_3ddU8(IHF0E>^n6RbtavAfJ)5uo2a^m#nmv#Bz`S?(V0Tub*TXSU*xrYR zS6p;x#Ust`*CC{=K0Pr@0U00Hw{1jqAB)GF-@8Lt+I|&ZHM>`bkhFc$Yewpdh`!P7 zCXySr^g-FS{YS*AyfbSHxo3Rzw0oMpeBMwy^6_ABEX7D@B@%ZtS769EY{BheUq}t(WbYy|4N~g^b$* z?>_D;qs9VzJ$>~M%Mt4tb*?2pCyet!JybZVK0Gs{_}VkRx|v#z4radIzf|?U#aZsZWg#6iuCxw}$$U@e zm@lbI*uJ(Lte>=fY5FpDZVflhd!3Y5;DGL9`xp56=hI%s#Yg|He_z2f$o_gyuP4@e z#ac>zzUQ9VTh4!<$4nWZ6yfa#)Fxh!YXA4qK@0 z=w>7C8GXtpv@>+VyN~;rduXudY~2nP-!3kpN@-ux>eB)2YdgEW8j2KqMm~S-v~!v* z#q~U8W3{uGjksr)b+fzFh-*HDs^?3rJw8Vk54BH{g^?#q%Mm?BhN$0Z`}+J3ehqJt z@B7`E_kl{Te(heU;*8mk78j!G^O1SC(aNVn+3!5maq4*j=y>;>ucr)VW^EV~TOS zv|m{B{XDiBqj$EC(FpW2@~lc3pEYffrFRSJmNv;po@<+Yyjo-6giFyzW3iXoDj$7a zo8{xKX}f&Xr8dk*Y_(-R=3JX*;kDW}3+WhbT>P^iCX0+$T8{aSTAb~bc_i+YU}yHN zQGuQ7w@J09+-r^6k?y6mXGF?~B1ei!);(WyJlPhjf|J46t&W|(Y3bhDZ21jP*ct1e zv6ixX>QP^sbak^yiaqnf+11uS``Y>;RFP?nVMp_iV6XLz~12*Q|#_c9)+_1w8dC!nL(guB(^BfeMne$2*zNnZZV;((t z6(p=t4hpq4>ObDvxRoSUW%P z<*w`(vbq^-7AGm6n5IMQlOxyLuLfyH3M)0&X3r6Dz8YF6$3g(hUn&Dr}&6XWg3+>ztjK%|S?M&Z$9Nb%dBJekbMwxHz5bD_`705XL+wtktJ zN7fv60B3U$i{|AaB6%q0v}g?@w3a!oy6)O76>?6HSy*1LYR!<{NFO*Ji0yI4E<%gr 
z!4`TOUO}7VQ=_bB_;OIz@_RW*T*F)0y)4wZ22`RP%DE|V{FcS_e3cSsncq>;-p=kQ zQPwjpT7>O9ixTH>_CtxZl@-XqSj!2haj4N0@3T<%v-ODGt)7zawM>AP{@Qz?hzXS_ z%NX6W-`R7%M5QmJpQR;I9d98^geo{x&@kKU=lk`ej12ia!>gUenm!{IdiYADHkMaX zUyHZ3^bs%miG97S!NVqXKi&D+$4F7xhZ14sc}WE$Lxu`<`j>T#s2r<35uy@TK}%Pa zN=vwGS)ssoJz7%Lge}qeccQZ`IqAAy;&fclH(@o!={VtPS&PO_$yL!qqq*3M=0@|e z7441YqmBkgW3NYxqq%6K$?+VlN1LPh*p5a=bFvn#o{EQMWOE8WXi*UDj>cy+d>Y)c zZ$2J>Et($9#WEUQkG499V>DlpYzVU;*u{qLhH9J$J!To$uID`B zb&sUaw$5s(&J&O}+Zsc8(uDNc792ktr#rVm1hxfTK6;1<=oX)>#wwf^D6B()VILzm$RF zcNe^nRQYji*yf*PArf2MEX|j>Vl|kg$hOV3>1QMFj0}6h7B2+6G4BFCJ!;0d^Q9fL ztdltG1>5cHpMH**-{T3eoRc{08C#-eox~vyTjFU?;*dI9qAc^2IkTobiNhYSbu9`% z6t9=+`B$IxA>VA7Db3(qeTIlUv&}j~MEY#6?aTsnWvRelzKf7C^T%aT_4BsGEqT&> ztm87FIeaM3RZ()-=AWw~65H#Kx=3@bid5OobF(?pUuU0}wO(3pWuD*FOPB8C7RR6M zGB>Q}yydkM$ILht;c;AiTk%(^VDU$*sZ=4qvRy^KIvuBSZAcD?0c-M=35u$QYW3+b3O zSnP^>!?$5SR^DON9Wg-i?m6^uTFr59h&f5NUU80EUDy8clT4({w$8eHouMA1_yf>LQCT48Y+?QJ zQUcyW^xG^mP(0(GHA2hEPW2dJzs7I-NA!)ir~S-KSv~rBIky{L@$x0=VEiR^eXkJX zaBZ+f{xhRD)Yo1C0c(_l0v+x3lNktyEI4yVCI9>2>{ILI!?Q2#zn|N0O3?%35U(k| zJ}NiLmHPX$Z_oa8_P_1F|2X?k`!D`>h#z`4{Qjw7;spu3E{h6p-+91le`7k}%`RGh zJo~S+zn%U4?03e)pNx~g?)v&qXMa5V#=ZiLZ@cjSY~KO>_x2m@o^klAef!PXe;d!g z-~HYHoc%BR8a@AZ=huC6cBo&iynQld^DfLsIrQC9F7TDVjQqgb+bQFAJKM*Y%kduC zuJLi25z>bjf^j49m$n+90p3j>?Pn$d56?a`#D`~JPwjKFNDpm&d~RPnvELW=|L|{k zTliD^=G=bj+**8rpLlA2e`Hj@wEuoF{lstV&ll7G{lfm=Q~UF0XJ6TGoZR`!{@+*j zllU9fBzArF^y$#ga!%Oe$JR%$a6-iZZbE!8X@ai}8Q%PFr~k(Ef^scBuu;LfcyRU? 
zTUCEGIsgBS%l-P{UvY)QC%CcM#vUR(65K_zf8_G?#I{VHPqm!bYw$N%?mkNCr<471 zIfV`TRZjoyY^gm@VTpe3l7>6F@utHg)7tPd_PsibSmAcs<1R0R9MZX*5w`yJB%~R2 zT8~U__vZ5P^H{t7%G;BptOLs>BnwvO5E#l+}_zertkEmFz(1tAoGS^AM-8O zBHMHO?#A>1PfhmGi1wP?>_Ly`#E8l$kZnCC(agd&#&XQEkd7JC_*io6*e)$qw0ES- zwzXtxgl%i3Mq0OKYLsH_)Rb#$sHVNNmTLN~HPs`mS=$Vh)*9xVa>V595PB`Mi0O{o1>g*nlal+R zGQ^R{@L3&Q@eZRuf0{rFuK z(qY@udnl%d-ApBas2kAH!!C}F+E-F)zm-z^Iz&xl9lVMU{n~5uDX;$4F#NVi?y|&z z{hSmU?zu%M^?Fepl~#xMj_)2FmuGd0_Rg|JbM2mmGG8@$D5zG%s>(xvgmTqop_CPs z*OP+5Rh!ovuIem=cGYJg9itWE+JJsA+v3H?<$*NWE+hZw$dNAFxnHH{e?4=c?uAx= zactO@?17r^I=i5zeK`A|NBg;ClRY!{;A55ECsQu@UoDRe>(O7)MsMw(BOCoYtL*)| zWsXr7{I^}P1J^g(Wmfw5c#9O+9NEZ$DOi?T-%r-RLN(;avEw&Ey;7=|aZkHy=msk&PP8*xwrE z|L%zIzA>)wt!C_gZ=Uc6A+;x#7gh20t<9>E+J`CA_4i5UdaBx<*{mU(++Rh76@#}7 zR3r393!JQ|s6NoNUtUx(JQ#pC@bFbDL`E%b!i!a0)Ea7tpvhDT#6N-$o&}{^lAw z(j!pnyQj8{?Ee^r2oHa=S;1)t*stj7`P-!5Vj2PV3ZoL~)8FlEO6+@Bbyy>u07gAb zgxWvMJGkzoQ06)2uYk@Qp*6%^1dnC({$E>@Pc1Hec=rFvX{tt$JpRSx0jC_`q4nsI zKTqR^lTSKd=g%f#y4m>i$JtPg9O&shu|M0m>9P=WUEXi~oz9g^djDecS;sxG)emZl zd=lvB;>5p9k%UUhoEqk#zh>+&wvIkb6m(qSY^9Fcjp=b6wL7Vdp%NE%v&a7b#cJe! zXVL`^+mt0$jr8A4WBFS~y1dBW%xAln^%vQ?-c0G=ZLA&%Iq^C< z)!6^tMjmx{{rSj$Snn>Y{`Fglf0%S%SQcK@w||&6cqXIve~dSscKJUh^Ga&}+h*{E z$^4aYuK+JN!Z(NP_f={mru7Oi)CM5t-ybPpKh?8K;05&&V_TBosXAgJ7?EiLoPINzo_mhpdNm-qc z)B5hU{oMJT4WNQrul*x*Xuq7h-}<_(t^IV!a;fUa%}S_A#=kv2>Wl`f0N5VhZ`Y$8 z+K<4+ zarGW*++*uO)VTN6i)7$!?_%iDO1()Q?YbT%6QyJul-_pH9q7(zYxQy&INSAfdNiCW9kt5V>hbhFy02bOk9eq_Pmgw}hDMKes2)&{ zR@V#asW0mZ)d+{{4b^Ce>k;*6_tz`xF%Q)px{u3xQ9b>&dQv^g;flKqyt*P!Prp>` z>1i)3`t%6Ps=N#Y>?W3~JvG9isy^O=1CIi4G$Egg*D7)AKX$qLZ~tf2UEl5h{Ca*D z(4n5NK9OJcyc@u8_r4oYf7k!QY3#Nr^04o=D1P;$&Hs(f{GCZ6DysI3^r4@vpEdE&0!o;T#$n+|`TwX_@^*D=0Fit`S~y{F2NVvBdRv5KEq?Jv%Yzq9e%+i%jU z?^5iifgRar#?y1-40{lKA-$ik_1D&WIi%k0=Lg6*VerqP z&Gh#dpHB1n*iP4ge$UBy&guJ!1~A|$QC9lu_(?Oq(n#r$7U9pZ<+^!nec5+%g%sF6 znnwDyjqGbXb#r6%aB>{4d-U81K&ou<_B>{lcR)hkaCV|!Mo1ZFgq|3e*g0};drrHP z8e1O)a>*Cxy`C~6*`yy|Z%azcR}XqM0qNe@4E3K-CKa}>8J-(OKR1hY=k((e0Gi(? 
zNEY@(;EVe0d^&qi*o zKsMqYD^YyaTC4QE!%PjWh|UVFcw|v%Wq8r>O4V^BIff^KSP19(qrM~j@%P3lvS_$< zG)@sWFyp;Ey!02RhC3MVECy6iLKKNp(9to2r-?a;yw1b91TFV9k;#nnhS(DH;*<~r z!tX~Eub{@d*XZbDPggx^CCd=+X{*_zJ>H|aDOdUaeXY;4ZC9O1%Ewx>2P06o>_s+q zZBO#Cmi8taac+;Y(TcswN1{E;Myl=I2&~#3X5$rmnT<5Jr}=2Kx7kQ*_Bb1}w%6HM ztv%01++*(&&kzVSv;&bfznzGLpSs29CzRTVBIt6L5MIIP? z+W!7k^lmv)WqW5Fqwc_Ot&Qu*cD8?+x@c{tv*r4*BxIv6B_$iLm85LMJu9Dj19?op zH~vMm-ax@yjU@>N(_s@Y4vvs#`z>v738|Ee=Md&`!4!)(j!W=rz=?B}Dd zNk=}ay88IOdEB$YsVA=Q11Fl748mva$a;tTFDAMt1o!n1_Hx$DSKrJU~^JVrLQ5u+C8biW{15CHrrD z-3eTlT13=xD0cTsR1~-J$b4RhYIC^);eTL19y&G7SDhz4|LEJ`HU|x5#)9r0qJ{`E z@XT=H^kGkKO@G6(fO5GFDNei25Jas^t2?3;_gG*`&6pj^rPj#S%j;St8}+c($wsfW zQZ`o6TG>c*t(K2gsx9VPZv^rot(c8}NNZ+e*IG3jtJS*MhNwoZ>*A{u3(Jnw|PZdl$uAte!4{wWw-Mi<#5mZQJ-@j%bDRGDx}7CnKh1E zzb(o*4Ubq7-tmQ5TgA+5V}U+yn0jn}jIK{ge~7#nnwcY1qvVW@ewA>9B66O)zTKdo%^@Dsq0M zPP>QFQa+aU?tHR(daE>_<*0AMUKBaf+0RxoRNO}9Yn+X^Pa;OGlexr18&yYb>XBGt&7-<(v}M$njh8Q3%kkVJQM78<3;sTo_4Z0USjJncDL`!j zGURSIWxscWb%nI_1f#${a>`%q`P*4DQan3d5==uBhT#N z2o33{j9Pim)+n4k+|}pnVHDzWy^O-;dK!hdTyLXr*Vbb;cDr7)5%;X;+CHJ~|I%y= z@6g*rhkAi+sg;asr*{0J6npx`ajHG`9y%N!w$L>`?tXW3+(weJe%weo(r>-pAXiH- ztsPnQy4yl-M}8D?&&*J==i`NHsEd%l%F)qj0#a;ib`@SQsXv=MLHDkedlsmqgZ;sM z#JN52dp6(M45C8%R^ZQbY#<{yQ{2H9Y@@okru@B)k5^73hktE$NI9=_`BsifyWUl! z(zBj9qT@7eS>dDf-1_<%g}&4f<8bfOAfq^GHOwgFRs)ShUe{0~Ihgy3qmWw-Hww8& z1L`&3(zndhcuS=GkzXS28FBiKrKW7p*R)h@ADM4)Gu5VO@|wt);ud>UTE*k4ni_m# zgA=UpW1 z+Uy7cGFLC!=Jwra7(Iu zsYWqN&!_phuyKu#CU4Qx+_;a~;7cbsTZ1#7IPZzw3BEGVcqz_1ZlHj?swC@bT3d=y ztxw~|`&r+9qUx(>xZm0uLS&CTCthDDHJov-?`-BTCQXZbLi~(HoT{(O8{^gWZJZ)< z5FBCE==vFF#ac(!;&#&ac=sv#ZWBF=y5b?K;bASX4&yV?`gT#?dc3cEZ+Yq0sbhzw z2U~k;`l`c2Xxn~acAs0nF&}Z=ugu47Z&zsj&JkQ}`=#0Ft>2oDxb$mBAi|4U`n}n> z^kC9r!)LeDk3hxf+~ z`5dgp71^k5+>wpA7MJ8>;%+9yE!jxxaZNUA9rt8omAEJ$>7U^qz;`B7v6H>nW%$v4 zBi|X1HlZ^Wd2nQiw{+Gs^=1vOexaI5h(P(&14^bgYy%{#By)xfEuvZAxi2 z&YBcwV|s>gE|nuJkMG6!eJ{&Zklp8Ym1HCC$${x|U(2je$t>dxs=q$3>iH52`+9z! 
zNGDA#M!5$Xn!nyj%<9+r>-Je_`gdlRV+ecZl{4Hk%9}65EthutUoKJoY>OOH^>zFUOYr&R#;P71f{XpFK6?s?E zGgBUSL#KT(se)$XO=6MVAJ{~W&?iP0w0lOXm4IF!at}IZy6EA#mEA*&bI#W95y!5j zubtlhkY3@jUy=$fb$w0o&$0KXh+&sQD|*ORWC*F z+^Tzy>h&wrX7S$YRx2g4XQj2PHx{*%7fPO2`_n&*u3u6|jfvZZ9QyjXA?H1F#BX)} zIw`VUTKM(ZQ+X~#WxGV*n%zdlk2ev@;R)ra+N-AN)D))Ao#1zWqWx>T~eRTP^JzX7_Q=9GCTp3Yc5-*-;z5 zhXU6nTbw+3e)ff}bIj=X0+}U6&rMz!RYl~Dh{t>ECpebxN5|J15xhp@xp9qLCt`g3 zN!<@eQPVCbMUY28Rvr4*^ZCfNDfA{*zw#W^&DqyypWAQkSRe!V*7%I%5xGWCeGe_5 zlmn-EXB@^Q7rCRyc5*~5t4{Zo^;@SM3QhHR`YmSf<-FpR5iPIJO`AMC`|Rvf`>$?f zYpPm)5qDge6g-uW#CK+W?u282HSpM0K63u?b%hrkaOw`bub0-s{-8$X+Ey(q6?G`E zICz!iRSNZb4l}raVPk}`${i!zRp_tHWX-~x$C?_3R{qPM>g`a&=W&U@S&CRO zt|d`*@a*ijV$PS8G!@wKrXC1>r}k3#O58~vOt^$jz0CH7R0#}?w;@5n|idUY#jIEsVAImNMDjGlvI za+3KYQ0FZ3(bjXC*|>GiGaswXiH^baNG>01J*PSbcb;<{hdEX^crFUdvdWuN{YfDCa$ji|w5F zC=S+g=Hsxpa_S>ESkJjzjhCMtTV{jyWW|{8Mb!~*KEhdHR5b0O!E7g6>>cBrVSERJlyD2^`)Lp3 zGpp%{)8%f6--v$)VHobA*bW)m93l`SBhJ$qWMx>=|X6uu6 z+090+WqS-(k^OAEc5P%M?pZt3&^%Wjf9pwGVagrC^2$O~vp~YJ6RxQ-!~M>v)P-$R z>^QAfjfE`QJmSbk zY$K8ph;_u0kGG6yvJuxJo@~rzM3jwJyr^uXWvr2nS9|H%SoyYOsXzDF5PJ77%$i?W zhW`uu9epd@{a8N%jkAD?=)LfVcLR_I(VS~{I?VI`D58q<6gV%B9OZ{;ToutEtklgE z+hA;=fv^{&pp5Jn@=M5D>BfNiLoxsDDvHK2dsYcOa;_GSraS|!xnB;awr(bw!A%E# z2O<6qGK9Itn#6c&Petf*vK%4EDDT6?Z7gn=alen>AK{hQahWBazxAoevXbKKDdXt( zYhwlwgJ|9-CBmIi^fPL@)kfN|Y&iV}Iq?@(6g!usu3v?u+_M__tv>!C<)vlaw@THX z#THj(6^}wZc9oCfr)Ysuh}&9X6bEZsWEA$gmKlZKYN1icJz9!BgL`c7*^uAh9nc!@ z0jDKVQv=| zctw8nwR)7IrGH;AYndI@<5s3eA@|7qQggmHZ{hN%Dy1HZoE5f3##L*ZGOVIk^QfsBR;pY7rGJ9`%yhoJjQx*ePnB$`T7}w=IbjPsa=2hi0%5! 
z#@t@N*~n|_I~%iI|JjIp^aCvXq4S$^q_Y-L1EO;)D%u?dc1Jzp-tv3{%sXZED zoNZfbiEO>Ntwpkt*R@PGYSBX3NVS&A##(BzY`k?XHv+ZQg88WHS~4HE)uJOX;pvn& zg0d0!XyM|QAxiwh-V^&O6mgb%#(3<7Gd%E_UJ0^^$Wr-zg7`0P(}>!-dEP5Z4Yz6G z1@6}qpPDHB+TQ3=&IeCd#m8>`-n*i=KM!Aqs|oA<+$0Qd-1+@fn!0NhM;t+khW4v4|Fwl?`#OKUU& zvAp7)jn-PTY(!*g=GHD7u~@@wq}EzyBer%R8*z{Rp_RQc&+xg~S6ycPdqC;PUIlaM z8G%+yS2o^K`m*s_>C8slBfUk7{a`w*REO-LLG8u%@e~JmU4x=(9P)^pS&&~+(5r8r zi)%ARK7PxbMx&vhcws@I9m}hd(7B57yRmtAYtjOG@$o~Z19kQvY<^MG8K;OlUg0~d z>}bmxMb{97W^ZjI`BYuCmh|1;ILF}f((B~4TTThDjQbbeqvt16%3U)@`o&)2xM zf_S0!KF1;dGp^E-A=IqWe|4#E`#(zldSV!T?81x~+vP5edNhc`cDaj_jaTPV^}Y~$ z#dd8MD7%Nnj_3EU-9^go-F%lR8?D`i%0^u7Qsv{dyIA>{WtS@(tKJ36##-AY%g4lh zH^?_;BjsC4YE8C4obSSA_i>L+X50k78CeX!izj-|2Wp9xHyFXbUfD0yO2nfuP=ij4 zQ^Jmh{=O#cYT&g$>}@D&-2A-7TcfFYYZEyUc<{PCbey)?Od;q zKy$sHjkInTvQd|IBO7mNSF-Vzb|)LpJ>g=P^6`GTUhp1$M|%hP!tHMEZNxH#%U2cS zz8_B%MJ?u|>4vZu0&f(aD0TuBKBH@9L!QHxkjm`x1XIwU3eC)5H zuO&OBpoSU64LyGUw}K*GL%`V}-F>x)Eg*Ne=L#=_drhB)SG;4WzUz^aHXZ8v4uqqUfrD4?RC0h<}N%p*hOa5iFVr}GiF?Dz=e*3M_6)_y=X z)_s0LKK{~=7=gIvXJq5<@k8p!)z5T-W1JSf2<2=2X2N$y2d7(fy8<`nTO(pr(k%Vf z@agCE+-LuJdcb=mgq|uTrH-?E=#U;;(8CSsxQDgu!1*n@$`d_p<~$llum_P*(EI=7 ziWzIR_l>h!D{b-7&ci6=o_W&G59fpJnx<5b2(j2APr1~jBQTemHXC!PiL>$YWt%g| zc8^R`gWsA}#$BX)zU69WiZ1o6tbiuPN}g{I1G(uaeIKw&6AB;C-NaFp( zxFN3AQSv*9aWkuCCj89`zePxK7F~NA0dB&2WU}I~UByQcxy&c_OH~gDdbl-7u_i#v zZ75_}(^uFMh2}@B0_%=Cas_AeyBhTr&&^BK+~Md^8un!r`oO87M&3QBc+W8}SiKr; z>hYmMnvv6nG{<|06B@XaOhpTQ#GU>M&M@}8txrKudn0FEouJ^ZoFa+FN8rpoN5#$J zcSZ~MqCS$uT%dCyl3k~Tm4GS{g#^2wQ|`S%{DoakNMU@AL0xmRo|Y^2P!VT%+=j>+ zHbV0T7-&MP#Ag6iL>}{>+s*ITMZria`h9M6v2%=gNL#Bkr|dQa5|5rJM#N)|G{itt(T>ktNWRzl0zCC~ciJAQD zy^%W8pO57>BEJTU@nv?8xRa2LL$3qcEFX(+)Q!YycPhs8v+QJyz=QnrhW1f-^-jnb z%;iqWC{*sGWaAy)X~{?T)4OFSCL6ItCixlxUQyY}$?h5VWy(&^D8%_rP&Vq;PEj^) z*-6Srn(s7a@dlJ~`%txE=gy!STcS`f|5OMlB?-8TP-O-U)*on;_$9ktW8?D~S9fMWx z^yVWXlHJ+~9)a#V#ra6w{mqxpvTHpe54yG!oj;mx74pY&dnY@)kMsR#R1YDaHeyES z$QWtV#d9ZoM8B~n=R4*3sLP%7e8lyg_A$7=6F&m0-KozYH#wfmX!=65a*Q+*@$~8A$V%h?HMHcrP;P0 
zf~OasP4-by7jt2D*54aY)KJybaIy;dM|cnM3K`tZrJw_E1|F;81OTd?G!<>2b89*Q z=Q>cIgsRT?n7uc8khVA_+-KnR$&g=7{aJl`OSCqrJ-HmExMz*?RPl&vr?T?1wOqZH z^X*2BHrE{4{iJ5e#;G+=HdfJ0*+}b}D;u?Fwrr$U^W`JfnlT@3w7<4Tvlj0Ow+iAM z>YlHum)WA0#9!P{)f8h*E3E5s4t|-|YdQY;vhDMekF%V=Y{WgQfNKb+k8ukJ?ne78 zw2u5n2JH2hJFnuaV2vtn`O)0^uzquAbe*9t4(FVnOnF%aHAGD4H7t01OgVmtkKyOA z_9V(F)>XMFKF%lTAqtG}>ku(=^cF=@ zM&yo@&v@kunJ>lsk~U7vUYc(}NsrG4DPs=StYk;~B0#?3H~Ki&u7`)7WN&u@oR z;^2(_E8|{A6CUw$KCxqTZFhvcG^x>?;pZ&qBYRTrStIcVc5Vzd$nR0rNewab=937DG1q5u$Q@hWfXB9-U9}HMWVS?RyEglyd1$3$ zGzSss$&(t+5!+S+>oas)a?)DdtT|$D*{)gSQP_v|{J7D_xj0(%QGGwo;*aBI=`D{! z?$HhO6p(qsyzw2N9$H+HY;hV(aX%;Nwc0lthmdllw;VyHb2itM; zLPWMb^Fcku-J2d7lu)*7IZ>Tv@>52dJ66fDX9e+wq>fK@wvoK*vyL@oRh0>8dJZZ# zTZAl^;w`y3&wdllo5_xxn=SF|HuBM$``(DgDn8Y{x1{SkCpvj9yVolJs@0S#$`LI# z&N8j*$x(Ky)}x~^Ydt#(bE$_%;?8%6Mj}&>kHXy6^J6)H9T<&>SYX>ujN)c#M@He& z&WyqzB*{mfzrU%s#~f9*`TChiZ1?D8W?1}u1S$@o$;yuS z9$Jj;*y4OEYS1;WR9#svSxXV;hfouy*!TZnG5*`Ba^cqgIYl+LoIxLad=&i(NBkqxCog`D|vW4b^PC?C<&bE*`TlKC1>9WOX zO20=3x7N3}>??0fAST;9$sq#UV(UV zo&EjncV~aHU*Denbr&apI{V|+8B^te_eDpxk!wm&Mrfe37m=`Uxg_-)60 z#lCTro=h3oN4w^iT$09>S>y+%WjtbeVA`*)eR^OrUgUibkHle1-cg%}TkU%82OurB zq#5Ch;{f@%-SaHjTVfwGGL%I>E8};a-0qSo&VyIF!V<^cti?R=^e23c z$Sa}ky5FmH*}G%Ll0VBatyh!xWDgZmV+-HwcB*`fjW6jjABg$#_uSwGDY6#W0mNC0 zms5=%PVg$I;+CQ35=yN5QE;`)`}N8fXT@`vI3J2)z6*4{f^Hqn zDd?eg8L!V^cCciLKJ+c^FI(IU@x(khWGH*~{~*0D zJNzp67udh;@f#t@$YUa=dum+o>F?Xgck4MfwX|(}z4GO~pL@S=Wv++9JlnpP!Z9PQ zUu{bnjp~x+TB8iB>DDr?rd{M%O?fHjYWmA{pr+rhlMICUI#MGXT4(oZ{|D3V6?U+& zlhwD2MeESF^Te-v_Ko=M-hCr}w{OpeUmY56{tWdR?$E+s4eo3?cE@I^aprq+$Dl7M z9Qvk62<*DQ=7g7H{Wh&vQ|{u>_v|s-M^pafwdFhR>`!=q&U5?wvndz${WJU3cLQI^ zr0+-e`*yJRfU2iEJBI|V-YKirsrN{ zoSXik3)x>l*x%bLn~P9tH~rYcO?mc+ip85WJ>82ajKuLeXcpmmKmcJ0(8h$+7t0NEtE=& zVoe6Dwk=0l;*~NIg_Fxr6vk6B7KK~OU=-38$)a95oiy_86nVq0)aId~yrAL>cBcLn zsw*B=6)c86>xR9-T7$iaJr)|bC)RD^uB}i$@tnlB)X^u2gQPQ_GlTlbpGaZFh zOF|UV7D-9#S5J+w2`yGdl8avXhD{9pjTh)}>V}nL(Fm;Xgx5@P=7DDD*DvSNv!CSW 
zz3ja3W#go+JnOUKxl&OhS9{mvktdb6lg>MH?prOfy>q)oA~}{{vhG@w;i#3qEaP!# z``17e?tG0zq3mBnQKA3ewO~q;Ptph=#ISqa}C`VDWxuA>$8WDnjddS zam(ED-?H`7v!qy_`sQ5olIu0Jnyb->EnKH$!cTG`wgjU2@J^#ej`E|l>T7SIN?x#~ zWpX*$CLXT_l45Qj_FGZxC#b!XUmiVFj)ES%B;YDek{*Pt>-%$aV`(hXtlgVBeuwspB$BExwvtcNL$9soi}BKK)&&Y*OsrT zo7koouaBoQ7_{e|oFp_)rhh+1B;!_2b`N45pm(nf{%1?x1jm^&ANt2s%R#TcY?-Mz zxqfQyy;gTA=g_X-r6he=n3v}N|F6C*VX-cGbvIbu&s4U%R(Y86l_qUh&R6NOPrRTR<|sY^NoIV!kU z@E|{kduz7ZJNeTVI+O{v@FAo6TK@`8($u@^UP#~Za(jOtfP5h|`g45Ak6f~P$5TsA zFQt|ljnqe!k*F1rWGD(D$ygLZErU@=xr|0()G{1}v_;0L{gUmwLzxbVL%ZiTD zUunfhA=DZn3MpUXNn?ya%QZ?A-jc?N!b;jC?i`meN{tmYs-%tLMwKvfjTbedS|dgw zZPA#tbM&_4vD}S-#e>_~$n@DlfqOsOT6gqneDvaC@^0VCj%~3^kWY=e9`}>&g&J2l z$@cHu%iOZ`kQ6m0r8c473hwXa8{wbcAGMCRmf7ApZILL-=3FNG{FanXzdPfk$X0h{ z+y>~MG16Mj4(jE3q6+D<_b#m_J1x{N$BjG7+Tr-p{cC%__zq~grvMEC&FIlnME~G9 z;_Xj()lj(+o;G{mky(iR%ws`#Cil>xMWF$DZy}SvY;lrB1qZx7WbZA`!u@x|*(bIb zt-Viv`i}i-(R=*H?-WCdrC)ws8+zF!Ysk5_jK*5lrtzq%ZKH8=Z5)l2YwKvNT$@K@ z?br6v==*nrX!NIci)j4$ZW51{?>5nBx*J8K)VozQV#{vkw~Lj#SAEj-8h4FUoVPqmbg(Vkwhb)*w&NxViOSEOz)jw|qydjV&xF z+_BUbv5;|SIR%lLo*^uhaB9*RJfcp0HI_KWydF#MNjv9r|0x6~%{w`>!3SsIj&jC! zwoX>qvH0Nh1dLlmHVLTLzGa+hK|G&M)cst~?GU`qfKjTK8#%W9bn`evuUyqyrI%96 z>W|yGsR8EtBuXa}E$N#mq$Pb6hlR?DT7xVKW3JDlP)dClhXbFR>ccpUR0Fcsmr+>k2E&4uLiM;9F7h_3z3B9`$zVpedtU27s%Cf-I#O3nKyqglN;_9iJ z%O0uS1CPR^!G5j1EWP79m(gMOwO2Lf2vOIJuQVf~kDT;=OFd<`^wUVWBvu-z`TSP6 zg)GB7%oxPSd@k`c&YpXCyN6lAh6iU5{qmb5Dc@*NZ7KlR2wL}y~u0>)o=2|8S ztJXqMNL#cNcYN$b*li!C3>0L(Z~2;g4_nl>$17FH+0uLtwITX-HoOR^9)BaYu*EN? zr$T*aqrW%#d8l{qsOP_zQp*f+o|O;CB*aY%wEc`vu?NcNC`3Z{C=Iyvr4Ld z94EyV&hr~TJAf3+C`g(IFO9N>v^-DIsP#NXBeu-5-~5*Ix~^5d#$O_qS~m)3u9c%u za;+VO5ht7F+-#9aYI{_uelNQxbp4j^xZc?&jncbE)wXsor7|x`y#qFzz{|sy_}p(> zBpSF=jyg7oc;=ec8r7|3w0ACAWGPNBCh3e?)wT3SA+1Yy6e8w+?q7~VN|GIyNs}<1 zT0K#ttXEeQ(w5c79RR22Gnr@96+GM?;B$ua$l<`f0(h38J8Y+mdcJ?trfRFudMGWI zQC4LxCsAv#mZK=7TF#=7w&)f6ZeB`7+`WquY5uaQm5U|y^vaukm#b+%PTET2Ve^+! 
zy~pv?%c=f&<;$wx2%71lFRyyjE?;K#rmq>oIOTX#R-dXr-8@S&)%lDV!TQUv-n@dh zbpHCQH^ThpUj%h{0rJ;ly)m#OLR#^*t2b>=4^F9DV=CmAC@G4C^GSNW)|*rQ)^=FgB_RA`J(kH9@x}O6*Qcl!XDd%@>^2x8Z&C&gI zcL(p&?EClB5N!GWBkYr&_s3B|;6ae}{Ycm|t>=B*ptu*m!=Ei>59f7%kql2NAheKfaBd(`$0vT}8aPMc(Et^}%HVE=CC z%@#E3bs#`;(|HwtvxNdzDcf>I`P(w(it|UvS7Z!zuGkpr`HGG~;EIo-o@;~{gj{39 zAe8IhAEnd}-U$0vf3l6zzI(L$y=W;|M4!kn_;uSYcxsVflT~E1{mHRN+X{Y0n|u0(i`Jt<{5#$G@2ew*Jq{-;J!`k^>$DMitPFh(uKx~p zeS&YbGq=o{zcr9%-&;Fvkq&RnTmrmlmlEPlzm_0xgnbg0_KQ-N98F#y>>8i_tagnS z{|j~SA#G~#mA-u3vuC1G!Z0|8GHsqKgSHGPCs$QUmKwl08@=?-jZLl{q-wih~uH;XQ zLALj+zGvRLd%`CGNxYM}fi=@3kGN%s_XFUg!hLOcd2w3qk(2x`W;;J_CJ6y|QK8Z% z=PIkuWE4HCL}LQ_>Q;}$1o)!QCW{p(M4 zSwH*ZU|srCo$Ggh`bqzL)2{aiZ-n{&;Z1$}{^E^X??2uMOZ$^I#=iZ_8*9G5`BT@^ z*3$mxjWypN{i(y=O8cib{d|A*rd{v95eW1B*&AWs{+-UedICDyeJ(r77xIgm^w*Nz zfOzr$NYww^DJ$dK-*O{%wC(6QU|Z%!T#2mgld&~A{docG$v)IvvT-WI*$Aigec!OB zY@vH`13>dg+AZhw{n9(%J!cV&15sqYeu9?`>YcmwjF9^i=2>OE=j2p>qn&G7O9T7G zMaOA{C-5{T_-5LBp>%MH*6O5)J?qVjo--isW2v7RZ#aOf?LC6$;f>5Uq!F*U!k+4@ zEikuXTcss^jZ6fb(DCjt{Wx?Lh^xAc68=oaE!t z_PH-M2P+tw`&j%?HzMzvLD@Qf+l)c?;1*erym8c)Z;xe7LbIYO8Z<`x{A({%_qBhW zpA}j;KVSFo{UQ@dy^|jLeNMl3=PhG&t)qM^W4aTq*T1_!zFBFXdbxe3>XTRNNVZsd zv=T9Y>NUrlLN>SAw6ql2CadH4LmmQdv2xGvSviR^bbYT?lv4p8H7`fkpzfNMzIjML zd&s@1U1CVVqTflwq9;kiF2SUIHJ!pPTV6h^&vqmZ_&ZSGR2jjAUtzL)_&KwGXG*BEyB3yJpXopaP!u$~4; z8M{An6V!sx)0lc+V7psh8|2&4nmgQc4rd?h`E9i>NNLOb@!!Vj=BH;fN`9Z7?I`40 z8lsSDX^BGGB2Cl=xX<$_YZ&Wn%XgGQwz$c6Bl-h7P|ry$(aF!GOLlL+Emw2?m%UC) zo=3^iT$eA;NgTF!);}eUmMGgTQqR@$Ml$}l{Ci@tUGlhFJRzj}v^Cu;=@#F6>RoPe z_x8lg7tdb#`s5Q;yKkPf{CxC@q9tFkrsCnVCsmv8o>c6fa8gR=n_h|S_h_>Z$oU6) z|DI>}aI-J@X{?tcIu>fYKCf7AAN2DjKlk;HhU2h!yKEk4&W*(rO&UHP<3vqkc=l?Y zFQzqsSO;QDI0@o(ncJ)VQt+Q<5k6&KL|JK1+aGbrd?Cl4yGq%mMaXuZLl8BRHTxzO zZ!ISw4z;$AqOjKOr#QsizKTa#vcIBG=k{3?N^QSIAtn1R3L)8laR{^zqY!HQF$yWS zFQYK#wqq1ZZd=A-=#6gJp0OCrj)+3ZZPzG_W!p9mZEpKU;UHTRQn24Pjzj-Bdgkg} zgdRVg*7UJwNGKhxC4tx&B5bttET^ePsd_i zAuG@_d%69JTOe3p@QV3T&8rfq2E@$aW(da6Q+F|~8QeGU$fnOJB0p3$x7J#*a5(qJ 
z<~rL#(`S#K+pu}Yu%&Nvs=ax_X*Nomuyq&D^B4QzjRKB!++`=5r>@48F3i<&LQJapFXEV z_GS4T6Ny-#bK=m}&p~mRV{Fj*iD!O}iW}P;mFKKDyz(3tg|y|IW~ ztfM1cwu$~)tqp&~EwwnjQPVk$KDzRTsXwA$z7A)Lt#6(Vsj`hayOKUz#fi$xAqv}w z8-t|HHZBi`2yC$@r+Td3vYV(!SKm(gSPNRF6oI#qj}ZAqD} zUuF?0vL)p*qUJq6ewj<`Iop)c?%ikqOeOZ2t!IW3X|i3e=i<$y{M=^G8)DztdgeWm zCfhvM-kU3=UDuO|LGsF|B5&Awo}WmQ?OZqKr=}&Jx4&%7@x9JZ939)T-i`PC*Y$9` z5ti%a_@j8AFF6Bj_nj+w3lp(LSj()&c=X{drcWit{%fo{+V&z6W48M^&40raJZdE% zC&Ka?>R~E%rcAso~}U4b9nYyZ5H=6y|cC#-e)mv)$M7Zm_<; zhCK{5RKJ{R@9GL^8S#EJ(7SB9`E0*owWarzPoSZX_E#O(X8zZqNt1-BPW^n!v;0k` zQZKOOZpA;dZIRhumQKCMYW!>&B{_muU(O^N2iv!&){gr2P@@zthPP)t!JC4&XyC)? z`5G1{PQ|E|+eZWae%JYJ%6jbOpY6UZVv6vayc7PT+GaZkv43j5X&?8HkNIMf9`Nv8 zXCJTw_sI{=wyy-a=gn-*B-i*e&wkjYTGkKGcI}+E@4%oyj}U&}-iIwCrB$D=)cYnL zQPr zn+q>Qt6YhG=vxH zKmXP+F5IxN+)25Yn?ja((LJ9xge($LQuw&KSJ zsL$y~V^374*hZsoWUF;xScr9Kv^uf{t_$w{i8vU?%JK=T4D3H~HIwH^I z$1X{WU^JF-+{^6R`hiHBO+(}6zGb~XmwCl(*!CKHZLwYP zJ-oC1v!O3)Vt266xRdYID{r9!4XVBntQiZXpQd=OWlZNX9#s879IC%3paP2%d$5HH z=K0d3i?DB>o7U~Q^x;Ow=-akcBIl8|YP=CO}{CT9xe-doJHYze6L~=2~C! zweUqL@I_0TU4z>+m?w?2_4n#R3VrBVA;)O^xtbNOzAr@)j3?%qUe!$~n z(HGEyuwAU41jLVU9mM1wb)dLws
  • KK=)ku6$;A14IgeX*3Dzy^pM6lcpKNWs z&ugQD>?I2Y+vyvA$#=3Y@lv1V{E;G(y^@A|#ui@UglpUPaVo`5fcJ?lx&d^M+XWpC zJ=OPeJx|PfRz8-VVJ+MB3-puS?t5vF*_Xl@+}`oo3to!k!mox%sm(UQT}otx_OC|X z#zbCp?>Zn2>xm6?Ea%{tq7z`hTF&AxWu7g2G_r&s|NYzq1*}EOdVVdM2|IFsFYLZa z%*1Q)d7-+j&07UZSS|gdfz1Q&G34W!^a;1CdZit-kVR{w0H30jhCL@uBS{v!O(}q@ zCq|CElUW8e3typCZP5Yb$3Z(=dV%#1zYnZp+gTXBxs@?ldSkIr<~jS8>_erb>(93O zZgwwCx)D8ruf#UT@KD%%a@QgumbpP?_bZVxoOb)=15_;b1HQNH+1H|zk;m~&=vX{p zH$nq;t8Mjz%FAi(!fM4$>VD4NNM9`8Mch)+TdLT(EOVS^{q^p8S~mg~FlNdkw@OKC zXj?QJ=)fMg`JF%omE`;98Cg5FbD+LpZ{sQM6`$9aO1Y89hm$V)I2I1JNdF?}fq(qo)4RuwpTOTg>-I z*&8gfW!dTOMB7-RvxpxchsL6BK*Q#Z1_jMzK(3)>pGCHn?cNnA*dqAwzghNr*e#e7 z=DzgrMx=zhM(Xc>F8dq2jFub+yiIZ<=VnX4)T_}?0hWiwUcmS8X;3Gf~Y&M+2DfGj~|pIBBfe3+P9iys2Hx$3=EFSbS6K?@oIFVk71tjmS10oM@~Pi&G73Rg0eozCid_K*c7z$Z4_dKGw+Iy=Z2P1aTaT z$5*2_wq5LAw7i~0un$2m=+0`fR?s74Mr-W_%Q*{_5KZb|QK}g%-`hMk@HyEe78DTm zgucY@E&Cz9hlQZ6CFY?$fyczA9YCkADh4Buq<|N`Y@RfgV`X0VnJyBSC zfg2YVs~L8`#e0dkJI+^#0a`3Xt&555L~sRq&zkp?D8h?qu|#k#v3TF!OmSA|^ZuEB zEBeWz74&=9@$l`!r;q5bwLjJz8|@YBd7Qi~y}@l1%N)O*^fxRa8&~l1DY64>qrq-u zvBuuXO~#K6{6rP5JJad+y{tK#y#WfafcmB4Cy^`537*xOk7czWZv?qK*yAm`D`?(} zjQP@p8f85IVavdhHmqv2`?(*0va`eXWEy3%8Bx&s~#J0+MDh?Ysj1W zqpXfc_dbKFZ%^UP@~McV5hL&3;Vz@Uv@+(6^BZbJ`1V?3oZd8XW_Xa^YDB}AIwMuS zRAC9>Oo}`wf67?D@cDRC#;sS46=^P!H+_F;R-YVRsrTI7lc&#@F1#H6JkZmhZ%^L{ zKl3<>H+AGy`O++_6ZK}{Y^gp(UszmKy7jt5QT3fzw6p54S zCPkuXHKRC$vhGqO5P|(W zrMgs+2*_K22Q3am>srOphqo6sWg^Fr*Tsq(NnJN94omB5#nIQgTXFPZPs2mV9NQ=) zL`cfIU6EL+u2&oi^R(j-^15JgDDV|(Es8jdyslUzie_!a=`h4Ukud?U1iVVINXX)0 z-LgooAwqx*9c10b;pBDC;!x&w(c*AYR3MUPdh;P}6shi7B!=e4M@m=ZN}Shqi{cYd zG~X@~0XHq;=r7fci$h~wxi|!koW{}5>(a%cux?!(LaJ*Qg~7UaarE=LcyTCNH!qUD z*42wcfQ1F?Cr*#xB#M}iW@E&S1UoqLGI3ug^6dUxsy!jsGLmQWx_?nTo7V-5LrHZ5 z<1koPFb<)vI~a$B>?f`L6ODlkCuDj>;-tEUaVUtdr@Du6IIN2pjlld<%^ZzHgPgIh zVieAD-NiVxvMysBl6qSrJ@|T)Hf{uZ4-|I5a^1&RWUUJsj{%zid6V%-$dVX=!@84k zBjW9~D1=nEG7=%LYZ-~7buZ%(w2DX^{dwKYI2^t}8il|+0de$MmopAwUbiz6NAtz< zx}K3}50W`m*8PmbgAY9V+=;rOQ6pHcD;kGZ)*X#QVqMZm1l&eP-O^|b)-{d9iS+ZP 
zI{=Zq(z>ZpJW6#{BN3o0aSJ~RBh_V%Lx_$IBQ`Xz>l!x})_sjaz?le{4tZVJNGz=z z8;QWWvXL06?rbDVtal7Ek9+W1w>EMdsjh7tiq_JK^zx;;xN)P@`OrTV^Hxftd`bHwK?r$Up>jFn%T+5CXDO;$tkm?G@jY8|QMADD6smr>> zQRA4`HI76n>mJ8pY3+hI`eohZI3(C8T6H4|L#rj@Mt^h^Uy)K>=ExD@#zk!0E7f(5 z8%K0LajFX)HwM;?jzmCZn>hM)-RVfI>(Cn)sLsr~)p4W1y{){ibu1F=UdN)aE_NhJ zY}}7^wc|#R*WHdnL0#@R+M1UfiJ*18qeg)GTyeCuE_fXMR5v^lBQ_gCt8}DV5|Q(v z)l*{;Vl&WL*F0{%%ev>$NXvE6<4{>QJrV&?i#RD`-Swyuq`K^J2&ry+BnI-5v5RY! z*2o#ry68tmFm(*qM&Yl9R0kmeH=PP&`P!X5>kA-@6cTJkuA>^Lmt#>lA3Q=Y76A5ydgw zuY>icFZITu@ZE|y1bx>cihlX-MI4UasIK3|h)3kR8F5JZu0|YvtjE|40KUr+HHNi! zJ7V#o@9>oGe#DK2?}Ef3Xe|PLHzW=ve^(?9h3}3;A@E(2IQo3IBnlzDYZ8fo6H=U= z!go=kMzHVQlt|3AcU9sr*WO);MBMi-OC)Zp(6&^4Jr;TSU6(k_ZSTIs;io%cV|lF+ z8O-!(-1)7g(dg^%&cw|z>gq1POB08QXjuMkO(fFVYZZ~0``*2Y!`<>OP8|CByE&1V zPrs`ZhYrcQ+`SsCqu@i@_byM|c!3Q4xb|*OBy!5dT7TCk5`VlFf&UIw3&!q2iVEbh zxclA}iksK1Q^e-Be8=~a*g^-w zhUtG56!}1~?-8@@ed#IDI2KLuXrgpC-QYxw3bhUTdxTd&9_4PJ<=xm5=>w~UT3&@d znO>gh9Sfolct0E!1^YPyD!AQnCzKF1>8Eri{XnfwP_eujem317Ii2WO-nB)v9+eR+ z@6eu0Uy$R6cUWyNL6?+{r`Hyr$tvePGRrK2I^N!e*65$X3$fgeWu&aL*ejWMNAhup zo#bYR`>VZl@CpGW(?a1|EZ|$AVv$JL(csO4j0nPs@ewyUU(ifb^a32%a2y;)Ry*xRVb&WL+8w$VgA6v_L zE%JR^o&clHv*~mf7lTb0JVC~dW`4qq!^uyeQ7GjJH4Z28?bP`RH);&^2{;O=JR!&7 zz+y=!=qQ~0gdK%Zo}1%zO^LL8LXRA0I>ASwlqdW+oVf=e3MY98;t+BVLKI4}O_PTq z5^GfL)6zo`HKyEy5s5MPaKs_aJs@#7xrZbQW$rZ4|fL z&OJPFBb$4GqHxwdL~)4fL5iYJ4^tfd+5;7bwdA3SMJqj6BhW^y+uQ>dHKybti$b6W zEslQV9mL!N7d3*?Ll=cpd+_3rN)KNY(iRV(-ShXm-TdcNe4&U^!;9%rm10i)cazuG z^Ud~>K4J&R9Pqh`R#S(4s8Px&4CL$JeJzg)d1e3S)zTy87q2cnN^Ar2W4HUt{^QQn z#l#=v0ejq_E)g`30&(n^8MvQf$>0v=82A7Exs>w0{AuG=7`)vPF}r#n8oftE(dP~2 za(>}AhlcRyOdb2N?hIw$fdJhD-e`2m8)4s`!>mHY#xA5EkO5@09_>CAZho@+TK4|hB zB6KA`_`W?3jIXiVwSS3S1v9Mm7khiJbr1DMf6wTlC9r0-D!^G2=e07eW`Oo!!!m;g z;vUi(%};eX5Nm6nfwaf8BG-FJD}{aQf|AYkft_T_zvp*sPd@M$8d_t7;5B3pvxW0R z4^{iv*}oQa@&cJynwPb2bS;$G8@7Jq%PG$7fmUfDCAK}|Two8a}&PF4)@X3$kOXG&tO!-S*<1Uf5jF)Q@7UeUsIiYK=x+JQWpx%|T zrQejQU)wz4YzTjR-)*B)$&Nay 
z*(jtf>(IXbO089%39O?l>Cev$5!Cfe;Z1q@%;Akvo=Lpvm*);|`sKOAn|^t2@u&aG z^_}h07qm0#o#$xxnVe|w>l689^KAX*jr!e=J1{k~hc{}RF9~lP!k_Y1?9uBcI~4EZ z(UZ`2I-YZ=#|wHT_i0W=em~!RERh*VG2Up{w;ni4Z2vA6*?jCD$!HM2XrZE&HQ+Ob z{~h;+dinyt4(9op`w6TSq*lmqu+hVst&dY59g)AG|=m?1lUw=GQ`l{A3IJ?RvVu zc9BVwh2oj??Lv6l=0z@NiTzV*;I7uD+J1&wr#R=Hog!}kYlN;>3OHInxgy1$ndFK^ zZ!n^E|5(X;{+VqpbzYVq$4w-*bEzDImP>6sVkyzol@N4)%EB>6&Z1eryy6RrCP%p zS8?J|4TN5v2`AFIrA5$HagN5dAQLH38{a$vC z6SV=`eRAz>&#m=gP3}kGQ(p6RF%Ela-Hbyo*VQQGdfkme?pc@IPhiiyGkJEn29{*b z>Zc&>bFmcoO0O5qE7_U)YAR9FuybrW~Wx` zerx}FB$##xGFzZep;3D1P`k6Ob7ZZfBW27cBH=0}9vX&}a~{~|msHd4=|}S~!hGROmT!6bV*7{5%X=er7~YA8M7{fV z{|KpmH$@AyGP9lPAEke&|M8E>Kkean`G2Qf5>btZ-QVTEhz@DBK)l;+@9}pZX6R4S zWB8Y`C%3!?PU+xmvHiPH(f%R>-QLey-mN~)9+`}{Zz-2^X^++>M4hjOXw-VOL?b@6 znxgU7R$DY~xf-MK>eU*J*s_|rJ8AW(mM^K@*s^9K>My)=&~neDzRtVpROGRDz_P=B zgsdaG23Fr1-i1Pzy`7$B?mn0Jl1E%A&*8N9ZEiVQcba-NSSj|%Z2aC;u7>C}KVL1; zX!F$+kA}!=T5ZvI^VJxQHeapLX!F$^jaIMrXv7w6z`d`oSlB{^`yX3)OObVj{~}M^ zu7UFyk}ox%iXz%Syyk?6v%QCP{8iSbhSu|d) z@1hZtK8#1I^<^~XQ~ER-zt*?Wi0k?|8ae6fXrx-7Mz{8%*se9bj;oa(Vd(67iA{LG+&9I_9JR(XDy|P;<|B(5>>HfLAYzTW7F&)ed z^qie~E#+2Twn#D0Fe9v%ls#Nac=XCHS3opgy+Wc9%M~;RPghtp*8UY3jb5+NXvCHk z?6-0m&14%lZX&SVH(u_ukTiJCa8f>&eM0XF@D7x|Rg9O4Kb~|l>PtUNzsHV(-?zP5 z%%1hV?ZU77Uoh!;qnN0zZxr)8>}a>bTkLO$>ANkF8({H5Grr-G0zM0~`;ZBZImGvd}{+)+%y06E%Z2csX^`l7Er@P;a zOyM;(?7)4yE;;ebldYe(RFJQ8so4IaH)Vo!_Q?kS4Zrb}YSJvnffaKix*hS3%jw2$ zkL5z@Y)ce9@#CFZ_-`W+eiF+Er(~=s5BmG&iE9|=#vg@e`qIQUFBaz*oIvJ1ZY13a z|F?TX5`|ltv7YZ>`M2bN5)qmeRS9}}t2;mPa9ey-2^kg7P=Ti{CdcoQWrf~@-|aQT zNG06|fhDp}Lihz+oW0Qh9?s#V8pI;;hIIREgX5?;gzVow-XRYccftGlhBGAQ9aYVa zr8n)dRq_#(?t~6}h>!BSz7g@{+r!Ue;ls6{VKRe z`si$tXYNQqz#8a(w~aeRtp~k4La<=dfXc& z>3M(3X&>;WJ>Mt1sn`36H$uJ7_#>?CLlH>zKIM&)?>pYKpW5gA@ydNL0%2*N^v2n@ zkFK2@)CUjG+DE%DCN+i2rjzqNStsxOPbEqAy>|z$4R5;n+VQ46Ut8YP zbD2%+#~%e6P}i3??RuqnBkWrRv?-Q#z|j;_Kz!l1*aGzhTkfa)GuwUK=D(rN`Q=2M zH18EYRCt=OPogTowZxwgdHP&_!TRj8JjuyF?*1mX_y6|}QK>lXe6&98bUy?-gaN6ytg 
zep8>26Te9#Ie~UAjjd4^L>jIoCkhruvVynF24@A@UA_Ez6aJmdZp-(4wdP9bAVLW{ zyp_hie1oiMy_4TVVea*4KOG@wQ1e|-KmBBicfguT=Yt-a8VSPuTuN-JFJ@6&Uac~+ zWi}~=r=tDzJca1bmM`B)}BV`)Z#f8NrUVnqN=abV`ayV+pi}U zeyF^}j&uu#WeiE<^yv#qM~nP&^>Y_#^P&-vZN2J_|GhskHygG?@46-qdrRH@QlwrZ z*YFm>Qt`y7N99S~rs>EL`(8mxY+Iy=-|*as-e8W~Bj}{WHn*4X`+6>xCCuuhAU@l& zA6q1FTpwz#sMb$6BaqpSykwn~p1ED;K+=nmc(sH_^S6adev)7EIdX1GEZ>ObP?A~GTF~@Qme{bs$K z?fDAy8uJoL)Qpx`rQVJj)3RQVLo4-u6w(&C7wn_>2fKCnm-JFVN z-w<;_^bpa$`G{@1w{pIqkgQY-7IAQ#Ir zQuL?hU2Ognx!i}1uWV!Hn)*-Pg}E}gW7#}+oH0^UYu9Uq)YLgcQI--BYwc!TpQLPA zlYX8p_J-|#dxrDrt>K?We`>x*^{AGIQOG^=;x8wqeB}F}Hz!K0`92wqMu}VA1ESI6 z<*ejxi+uUXRoRQ$OX9`pWh8Hnwhv|G*!KHP<9Hf#3aq)BDH%9xk;f~~U z>GzG2*ezO*ma@tms~TU?AZzCm@4J@y_{rq8NeGBdB2Ix75sL(A29oz!$a4?Ake1K- zqh3!`eb!@5_j0OogvhP$EiC_aU-z9FG|E@wrxDoq7R|tK{LVzASXyUE%5pox%QAWz zbuQcEaObi=4tITRjO)WwYbUxV_1cO?Y*~ALYpz^t5i6|3h*?i1O1u=8h?Ln_&sm8& z-_gCKv_=^-Yx^wnxUn5-=6;T+eZBdP9DiiuVDD|wOx#Ujb>A8?_)x(gweKzWAGLNq z{TBTvFT7iUn;3RV@Ls?}+20qe_BSTZY7RH%2J0!Vf(a#HKMKyUeKBj)WlJ)$AH`aY zWjm5(Ja*YAB|erbZD@ARaQ$gD2+JC(S9C0=TJbErDzW^yrTc-tCE~$-eT&1EeXn<# zUx+5azSZ+m4|lA*@<+;Xdn1}3wU2ZsjMkayq}$sAi{vD9UD;|0sC|jM7I-&%90&UQ zVBta9p<^||RvgP%O6wA|dThCtZ+!j5Ng{DBr3|7f8Ke=_nHUYS-D;%{$nk7eR!yB05d_td*`+258G z%y0aBOr%))6!O)!WpzU%N7jVO`=hIW9O9C87>B;BJ;vcv1JJ@(D`U)D6ML?G#*Mw! 
zPUEnbv{y8Gt=*y#TeKhT7uZhF0WIHB&#_fsy2aa*D4k95aqB*Std=YJL1f&%gR6(< zD|vWVOKtfglmJrWslp7Bx0 zJrdzB38e(2-Orm3HInX*(Kz$nGa7B{?i$^XsP$XUV!k^^k15~1i$t`-B{nqXp0_TH~ zy0{UQ5-<)i#=BB;lp{>B#W-%pd2Z@)m9d*Kvp!!6J>C_w6|SGoCQORlHhf;mhfCN61pp0_qRo^lts_EcUiO-)yf)_6oDKAP|L9@yps_3LdM1O>#r z(%CkiXYH64E|n*aUTwPNL_uFZPZo^#*rG?I;%O9gb3l&w3~8pT{jJXgw{oYgEEo&Bx7=vEnS z(SGa&wII)(Jv2y*o+Pa$f3*8V&ibEBuh3tMm1V27K-&6ZIP7@!03Cn0JBF9AnFWFu zjbT}!ZZwelGLg$sUC0jn=HDk;zY+Ct+%Lvaj*@&%d(QkMfs?P7Kk#Yna=$wiGWNr9 z*xWD2A?CVL_tV2*=lkur9xU(2P z^4qbvL$Qt8BS@d^9LIAbUA9XneXkWq z47Tw8pstw44B}A6$@o6s{HXD%Co)a~)vEuQ`{`@T^t}wQB zY$0wm@fu1I^8F-I8oVtX&I#K%3x^17w^%mxoLovQYRkXpX_qa06nFCa?sM^tz83%f z$#ka$b7gt&3ONK8Un_3xU_|{MGsMU7f~M^j(Ocopnwt@w)p*|CUBePfP01wfFMFJ(UvgiE%Bv3po=r3S=4IOttaO#7Y1j#&sR}6(PMxgDnbUxqtAlre;!Q#i(vX+2Gjq2F#WHB z>3{+}P!blcx&^uB$(m+`7@@!=#qgUI}il5TjV zY#u+#Hz|2YIB9vvc%tPk#!6 zT=V;L3w9Z9OCb~JX39N3mA|pX_&5@_Nw!K2;We82G~z|M<h*}S zM&R6u{J)xTbic+pBam=*1n(Z@OoX+8dHhLM+@)CJ$i%|yM5oe|d-)ev2j~IiHDc_j z<_(Rbx7<s z#GI8nhB*(aeKh+gO7|%CXq|)T7xpUXwzFxSLXxmAV!di^A@XX@M2=PS!TaE^lTh0h zc(4JXBj9<$o5;`+(7zFw^wS_;2%fVD48}~kE>Sf>xvdrgBdY!!#VZR~E6H|)_XvEB znZbLhsUI^>w5mQ6DO5fo*8NUKhM3VO(*Cgf+wRXnHo<;$cvaAY{}%ke?*5nTqv(_W zY_9hLajnDm;PZhzz9n-W_Vb6?yDDWA{Xbqvz931MTvuSR-Th8m(EJ(8ubU`N)Ai zUgG9`7%z7<{z&OVeO%m9Qmb*S?9H)9N?wgTTJGjO7%g=*`be33KIuE0P0}6*{^OO9 z5%%>G{vmYFQa?sZ!df3j$^$+AfIW7wBDWfVDxIM z(MPT``2DVBx4##8A1!eSd91w6amPzr!XGbdbL_E__9zx5yny2+Eg_HR_#AsA$8W@j ze<>%(Q{nnpk3o$*dX>$4FnZQ&^s(~}KOtimqgR{8QWE+|-M)l9Uh?MLA1`|~{z&P& z7s*4s6mhe3mID{Y_JTFSu@cmK9_gXi+jJU(F8ya??W%n}8Mzjp>c{AHxvxK?*XUDy z8M{(65{>MMb6K}HvT`2|aQe-8GJ4ONV;^dj945XpANwI!!g!BMo&}NT*^SoNxM_lu z*~PSLGTU#g9Ip3ayry36%Xryb@5fjTo%%6eGv?TblE_18#(eCDl)gi0#z(Q8@nRja zXvcaGR5RE3htWmzejG*;>OPFuLE4AIWFh4eG4H`4Wg_+DkWw-4#i8Wla2n!}`-83t z)zBj|A@lVxet*DyEM#iy?fmhhU&0?L_irW-E3C0`*|)fxQ)Y0h^hJ8MaQgrr0gO~L zf|8dYCYPJp#<N~G*okl##J2yP{5UQ)z}EjMEP>z$N0(Rd>*acBRHRoq2RFO3~d zl_0gw#Hs1-5BtG3^#`$JR2)4Na(UzRQ2j_ZVh`1Wbn5~u^l-h%Z`6+O#rlog@qN+V 
z`nl*YttK+62lb8JQOG^o)L)BIOR*iRGl|MJN<)%9Tg8DF(|fjwz*doP0vm-?)_xg> z8>!96Pqxa}xSfPJY!$DrpWvaeAx0ZQpUt46Fy`XHfK1UA?(&F06qsz&Ef@%)Oa(@~x0o;qkzi~O4 z@5B0ZAQo=Sj~dtK1F^msi1p<_tltmB`f4B+UaCWO1@so;m?PI)i9K@tmDnTKV~IU- zeU{iG*K3JAa{ZRrBiD0@?Y+Kn4<5cVy_JqNj5oo3JlQ2}RCkM_jr**Kry#O{N>Yex zXia4=#QAGX@cX}$UuU}`u{H4Cy;?OMHcg2HAHut7RFNq6Qx!A3O8<$V=`Xzh5jU=c z_{D_yvHbT_`5A-BF^)vTS$+gRb!^C-7=wnj5Iwd_iIk3&P9lz!PGXLc&N> zX>9(|39DCk7tW!BX#G%b^4QZW6 zKcB#SWOFR7UKWKl_jbhbFX6$_3hVkPLAL|W!+97ng|N6+iL7kM@MalD_S z9(EqK^d)*ua_b;+7Unpk`Mw3+pVN$O9?2>GbMY>HEI)seis0~`p{51=X`f0CGPK-L zc3=8P{tngWH6rTaUF+`&vb4wa1Y?G;Lpk%g@IYTUgH(>-1V*}yeEJs@CPXtW`bHMI+bQx6z0#>zP@(SY`TZm}b(z+SK=$kXLgz<<;0;w*vyCTlE=q z*+-(;Ze_>(S}g9jf_5$c57|g(A71|Pdd0XE7unI)SD=3mZ~MmU=wFI#>w9x=Mdmb` zj@PE1PZD=w+Oc1VwP(-#FLyZpSt|taP7d~cM9QtX`}LezDYobvX7E$*aIdx9bRktz zBM0@&486j4CQ)<2&chndM-oWae~w7GXRi3Izdj*l-sfPubsoKU(x>~Q^E?W-mV;5q zJ#ykNACw2SG2AD0w$v8O+N0FR-ZR2pu#J}iBC&0md%rm+I@^7FwpxxJx2TE6*89Xq zx@`T=ccjcV<}^p@Y@<$5q|dfoi*Ym0e3nRr_}M>}Q&fJ})k`~ZKG?=-B_gohqK#-x zA~OhYTECV&wJqOqzh|rW(2-r#ZzH0T7EaRr6i%}WQ^>a>Jop&9Os_d9srJqb^2X^C?_Xs(6@8N)?@zafC!7@P)Uvz0>tkGogUrP2E;$V5^U|nY^YPa<5OTX4`?MJ^RZ%2ABlGL-> z^j?r%zt8EQ|1T!JV%P9B!hb2<>@Luc& z^Y2HF8L!&FIN>9|%ceo{+4HxB{mvvjL!|Q!D&ng)h~Cr7QEW%H%>O=A(CyvFYr0rj zZ^TxBb@-!6m)5jF@lG`KRT$l4p^lR!?8g18(Uz&RB!Ovk+k0V?3G2xmmK)BxsAqt@ zO^sn&_D`*5gu0cu18O_l-%n>BcvJA2s;yV>&VUkT26;4kBlkg0|1c5*)ci`IL_UNFE-cr%){i8ZIqV&vmS{uf(oG%%iqC{J3L}(9|;NYZun^{p`dg+O+m;cH=(Eu`?QH zyKzi)E{s(kTdXhkf>s#s;rGxWE#}^wPJU>315g_lFM*LdYPsHu#qW>u)ew}g2cUc^ zH@V|E_GJ*pg`Di*14ASNXFZJOlily9|Hh51N1pt0P)qseI>=8CDMk*TNQy|?-vT5W z+Z>f=mT{PEXPU$GD?jrbst0p>YFsb!6Nma;#>irhyL6^H38(A4 zG~{QxLvdh!<{Q_G{7g6wb>EqBTrUo#1M4&AxG_IyvJ+c-Ok2yB!8JZhGn z_UA?+&m+=tDEXR>(~f1PX`LTGW`t2EcSgtC@jOrFL}6Q^MV!Qm#kRzZ z-F<2_<{L#=C?(p6-LA%r-S1MiC9;j@S>JJMe|?u#{hDXr#`R*{j<}TF2p;5lq2tDh zT-1~|8j0)elc3gNyOwVmH>NnB1INS`vlQcFAOc$;jXKK{lP!E0$ZLI&ybj#;)fYSC z=C90+j6&MtDPXjo_VSi*7@cH`NF;9kYTap#%=ggIyOviXahCgwXY&2&)E`Sm7j7Qm 
zjj8_8ppGTDVxgfnDXO*Dcv5?*dNV^>=9RL=)#a6s>mw(WUiMCTeJvKq}##X$36{O}?{_eD!tE1pHt z@QMjMkvPrid#KQg7xMdly^5-opo(Z0-rGW58|;;F^_g1n>>PuJzm*?Inyqd|>8C*7 zdUn^AZ@8yZZ`#g`ke-JR5(C+3=?QHEw(Jq~I;wr$iX`<*VojAd8Xknp@Pt|9yr%A@ zx0fjU*gQ4m2rH;Y@z?W97@m5+rmtGYYvtGU;hWW(wmx!MpCfv83;Wu$vZ-sh@_J|x zpXZjev#9pGoqWnz%MWsU$@X&15(aE}%=_yPdM*Y<_h0Onz9Z@3xSvO$oICP&S<`!+ zsh>urJmx#WIP`L-7>PXJNyZ`X+iAx2VtpqX*@vZ_Y8?LZPBsp|-07l`>zyzfu|?00VmwozJ!^x2jkIWO*#KHCu*gjj6j^#zgGF8eQ{cnTj8qVd>|)h`=~6tnk`b8Po} ztK!BtURKt;U2$X7y<^#96pdP*z2fjnuS^uumVJUJs2lOq-F10apiz)#tS5`!Q=sCf zS223&z^`*AKl*yjTlt-OAN&10x(e(gJf8T3XbWHd=pSP#K^96S=OK-8-qe~4`|&Wc zzUa*s>E}s~tAOW5o6nL|IM#mIWB+O!vvTH*EOH7Xan%0jRS-C63;f&*9xvNBtY0C47}pEKXus@zx#Ighut^3zew97 z;rEmE{iDoR|7aeB7nb*=usTt-+7k6l_ad*SoFqiSF-OSb?zMP8^FsK8msViKUrP_; z^DKHW`)yr}@$SP~wZ63hz41)Ag!3Ts5b%1MO>=5)W75FbSK>|>b8WvL%fj&-2RDeU znRC!c8l`(S$vm?j`ZNk;9a%TleE@VWWp_HCR<~vx;>9>rc0esZzV(1TT1Q}6smoYE zdLz`ahKb{kg0qK}s@{A=&dz?hv%YcwK5fOa$as!pmvQXg`qm)y3*PX3BrvOIaD7y|mpe zC}#v~6(?I{W}z~&XHR(#)<*MQGz>Txy;`^7SXL^|p|6e7pl8|Si?Hx^elCvYa2uMH z&)33d^&8;4A1gcakv)y)H#WPzS0C)$qY=%m+J~|q(ZVm-c($k@qIe#j8qk4F$n%9| z<@$MjAVJUMWb;gHIPm#o+OP3e;Ql$Wo*w#8Et2r#hkXPzdi-0*(`g6mMPCcU*$1Zy zcqw25!#8W0Q&>o_b3JHaSI27y@R?ZZ7l6(Sv59U>y~;g>W8< z!tlstSf=Y4f7*ti{(4j})ckO(&9j7>@{N|WHu_E^H4@L37)flLY%8`cqP2{YqE5ZL zZB2JnJflV~;~RR;L{tZNGU1)I+2A$eJYtYWjy2G4%|3;Lj0aexK=(z_3L6h%v!EI% z;(N+FUlhbKG^+{u-8wTD@^`osp*0Krq)}rCn17Evs->CniOMWd#vG_lP1V*sIivK+ zA)9}r?mv9FHXr>mO~fui8&)-U97ai1vfy*`F}L~jM)!$% zuOlTe`2@#}cJ4DAhg+N~)>{L=Vl6 z(q}rZKe&RX#pEB2cvSWL;&u2TbC)<6xBYRc)d`Dt$@d=OZ4}HcXQIk)3 zENbac z+=I7Mgab8IkfWy7%UB+?^}u8NKZi_SO1DSZ@~zEmN|$W0}%cxv&}b?Bj@}tG9y|?brj;?WOS?; zqw)GUiT-}Nj|enmC5}Y?LB@zYsK*aE3EpE&douRY{N*HXq#q{ku^vLKF6z;n)W5K% zush+NCnC_uD?IMH*-FjG_LpL(;a>NTT_dJ#av~Yew%g&PgqlBo`L3uKeIa?$H>Qdl z>>KWJA`*^072X=0Lp48CGZ!_#e1tXX(G$BxhFhn|;Eu=`SD9p$70uqp?rF`jyAxZ& zi;j2iYJFiH@6S^&CT$UYZk=nBXtcSuiALME`r@&dZVyDG!f%e%I|ip*-O;G^>W@Zj z(GEQKrb-sdpA(bap#B_>s#)Blk3ZZ$MDCNN3Rj}{bMQKhuvJx8(q4~WW#vpoOZzWn 
zeL>5?vb3(OL}!$Da66*i_N-^~KhsvK#~Hm&`gR4(J$x2uQ^lLyU*Gp45%n-m=bEX4 z8)8~{78Z&p+IK#Tvw$pYt(jJ!e#`(p$2Ynv@)yo+0ylja#^BAF1Zpy_ESv z#;(2QGslD4_2AB<=rG053e#5(Cof2s{WW#=6nxhG9rnWA!~^^d5Rj8;sd_|=dIcXs zQ_+6oJr7$24vvk~utr9$&l)>Qj%%by$*$3SWu5f><&l(Ib`E~S*5ASnm@g z`-0eY{R-UZUceI&JKyPLw|IHp{~}u)Zx2;{fm)F*{J7-{XK|y>Ey4J?9cvLX^FdFH zo_Fv$S8Pk9^cq>@%xT>(eMv39T@tAabOy;$$SU9Bim*y+zUpgd+D|k z5X-2Q3bXXeN~OKIR^%LJ6>p~FC3;IQ>z%B>XGvQ6=k=|~D|m&Q)puftS;j_M*iUS= zv~eHxPJW(=On7h|9&@kN1qu5}dezVKm{p`yqt+-q3CIn#u2RzRUZ=+*yU*mN2k7Hw zChSw>gji;2nWFAJc)KRL-p1OPT6zHO(pPT;yEyJ!zWSi`@h&g+CfhrjYc!7~OBnK{ z+7KhsBgXoc0GC#+{Q|v3yb{2WXsSjEl_i$OW09xT({$!(@ zEE^wfth;JXD|W_P@ockuBf5unlAem=WEqi;?P#`fE!DS*99bKl=h%3H0nHv;9=lI& zokdIcb}S2*9%W2=4mgv2z@ky+l(hN?#y~GpoK*EATR2C{*u%;`-jSMzon4-!dnLYQ zmGH#O41C)i6xhS7Fz0d8x4y|>m6;Y>g*^4Uu|SH9q_WNJ7_WH78djR87_t{A!d!csJIKoQ?b`mNW97dGaaw7A<%1ppRbf2 z(vIUu*b>X%Y9j*MJSLY%I)zKkgQZ8|(kzY_eU7VCK%Z<$Hl@_KFP zN50B(Tew%|LGN60WpE?Wk|zKsYiP-n{eCgKELxhN|U_ z9-O~y`c!XPL9<^Pdx8@g>0orW_1P!m$nL=I@~q1O)6)QLxa}+2va-DPAMLx(4pds# zy?a@U6-+A{n&x7%Gj8QhR|^H&(QM&?c`Z`-R^%a4Be&!A%S*z-ojKeafjtJTXt|Y= zsOlO<9O3A=&b zcO5hTJ=h*&PJS+C@4~Wr5cwaaGx~erG5+m)oNZ@Kvc4VBXM8xZvLXg5S&W)jJu9~$ zdyMyuj20H5THE@bRsWek&6<}*K#D#4IxPfdaNF+Au_vU)6Dq13z{8>Mo6#eNSJqHl z!B*RYm~2aIN{in+WHnN02kTWKO4AlBA9}H1mP3g^NF# zv?j-ps3WEvwQg&)2;Eqt`0KYC!*89}v}4wNjWd@y`}$l$SU*2m*KUnAQm+xcwz<8= z)uWr(D}URXlxgL%-KI&rEzzfYv!rRp_oCKBO*XzCrKUSX57sr|_};W= z%2B%X>$&7Qp*{d|s5}u+OAx1v zJ2_cA3T4b;bXE*G2M=uvI(F*5ELS43t!4M;Uuf%Wyl&kjrYrJnJUSR!~x8ubfzabNq4Jtg&0 zF1$HHyf|$^1hyV?Kw5G8KM~k+H)gvwKT$g)`@xp|U|Y}Mp?cymzwz@*zOkL}8_~QY zD%e zTg$A)YQ(hC{7?Oq;hO(H$DL>Vs#xNEv|gb^vaRQ4bPp_RBW}*fAGWzyEQ&+KVw*6N z*K8bi>7y8h+#?Uvk?3=Z^z=}o&SHxfei1K3mfNLMJ}+q_%lJxYBX`oz-pA|9pz2HU zM^}nkvA}!e50caTqx0B3>X2~0+2XBeyykc#GIeFDWk!l@Yi;he(^}RNS10#7-!mPl z#&MP0O-PYip+<|*6H7SWnqwKmSC`ZnG1{fZiO>y1iCf7;u&ij(h_kU2NrD?r{$>C*pAdB z#AG{0ZxD@bI;})W1d-SRF;*uKiR~B-Ks2`bX=oI0N9Y)ik8N%Dj~eYbt;Vsk?a^jD 
zh2i}NoIRO?(?hGYiBNNDPIXLX5BbrKk8`mtyEGDVMwYvL(tm&39~B^-@CW9B^K#5soTC8e4VUT=!jly%?Iw>_Y7d?i9X zG{AW0zB;foEWN}5YrI95ktb}^JOaV9PSl)o?$fdMx%QY?DfV!Xy8>$$_0Zrfa96@j z^;6jmFrH6EyI#xhY}JsKD1YWkxo6z*r?@12ST*G?8*R@d^t20)!>o7dvDoDziC5%H{w<1RNL9L8&h@}-W4tdE+8Ho-*QEtVI!(Ov;#`Yk$g2p1}R#G%_ zZAC>Rwpa>&76L6Gw*EUZDYNCf!(RL(UABCMMKfRdE^#DU)Q-*(vBk(d_ixf=tM;|u z-buP_LEm$4CSA5XYs8I!Z;D1q6Y6o{#nCAGQM)#0fGsNA@bxT@yM(06HhL!{4%_nd zA2mO~iP+^h4z~Hb(NSYb?@GrZjMepg^=tkvb?oRO^*zSpdp@Ilu~mdfou607j+=>S zy{~uAA~k>g7IhrAa6;l+)p01%dkkL(;9J*mBZ)lGawW1w+;Z;8jvLpw-7G3TO0Kcp zaz>-)8>82#65d$Y@+JMpyuORq zMqq9B#yPM{&3l725mmFc6%C{6%c882K&x-IutUwINaHXk^0(0TdS;2^nGM~-!y5VR z6ft4u$H*~nMYfC=uOQushx7TQc`jvbV9$6he@2x>%PM;z8t74I;gpMvtn+ZGX-Bkh zZmvT>sYmN$U7?oC!|Wtzp@!UH+=T}3HTGti#a!dY>dEa$mRZYDK&MwfV!wYetrDym zeOKwtBY+u9uWzIp2n6;eLr^nSs&@1g)MG*zygny(C4@mUkinEmUan(axOL zhW$uZ?K}Bpq1f^*$HX|jS4LmCllHF%@{2t@mX%lUExq$!Q}@yAHQgjNp1knbt?Rx< z>bDxrcNbiy@3}7$p^h6`rLbk?vKL&zjEMBmAT8>o(|?};j&}b`GWaZ7X+Le`cJ*t# zk~Tb1`m!f$3_YK6$gm^holZmu9|ZAQ_HUnTvP;qbI4gOM;x^;{iC7bOY2*2HqCc9( z0n6NvXF1yGWYwI=UW!+rpG$qFBRL(smj6bNa}7Ceq_`XYUh2-m{)SIr+(?&^<3@@T z82n1FMTSsy3pVPw(XOLMj`)6BDbI(FbB-A`I-IBH66kheo=j&U z+}!6h4mI~ljYNIgCqJ$?%Ra5K_)9*o=pNKQv1r5=tw&!O@+`2U-iwCD`ShyG-%hOq zK4TZ=+1l>kJ(fO|G{MKH_%lb+jth@xU&g{dOi=BM&8<^KT{CbMZw7nZ`>uJV`BwVk zrzM9cjlnC*MT#wJh2Nw#gSzVQq4rQAHMYqrs3qO%MSmK@TNbt;;*{Vg8$_)2trzz! 
zv+QqGa=f%E+3u4g@13W;52c*_9C|dD!%;n|<#ZHsj~x5UFXfeOt@XV5K_s?I`_+6$ zJVXy-rIBN28!v@KV%s8peo{wtwo5s$5qGuXPWT)N+n5tIsk4nZKa)0F>=FC3Fum3# z5!oIuWI{(sMeJFaqhSj)XxB0eGLDPTVNYcP#-W#Igeasfnv7=!a0HdME+^mTmM>{r zv3(||$}`E`xE4BZ<#(7*%c)K8x+7b&|8#yj`E6fFU#_P->3oc_Iq??c{uBCs)LY3@ z>^u2CMYs3t|1{lqU1wK#h#q8x--xEh+{2&WFGcV<;f5Y^GCw6hBZY!A1k}G-L|1wBo;`%`_7Ujjumk)KUkBN zHG$P|D?ciG*yCPLQhhgB1bod@cPgB>k?({G`W8&zRYPieV!)rY@2uVuw-@#!TjXmW zD(LpE?Z*#Nt0$7!)!2e%;nf_+k2|2$XLTRTdQLg(t!#k>O1YrDxlAoEo?o_hwU2^>9Exm1fNJRmq>}NT@px%tx7)D0qi)RaU(+p zci4TXS!I?ZW&4x!EVoE7^*wFNKDp&OAQd0~))G~%Db#-kS)zF(b{JL$tg+rbwx&Dx z&3V&(5DW5|NcDT!SK*C7cAd?exHN*BY26@Zk!t8zJiJR7DMFV#0~%-XoQ;$d?>7_h z3H$bOZj<=+bjg3`*>l)Gu*+?abUT2iB3h*9~< zXl-Hp@u;@8y=mj_wkNkeJ?*tDDs}7=HzJP@GOKuz$+Q31O|_=w3s<^`vLeck^{I7| zeRURmv#?7&c|n?&(r;_jq@GLPn8!By7>Ay(J^6$ixq2J;*m*VGPEiuizM?vYx13;y zek-d6XKz3K#MkZMInosaD^%qWD@m>2c*MxPb$N^)F~xd4VCQ=j?c-~+`o#t79^=f8 zoGZ^cE~9~d@~V_!eRwG_-DB~KIM>fO~?a>jb!R=#TK6#M@HWneG5G_NQ0b9I(s{z5D1L~j!J8JBxdf99)~;kSC7VB_h*mn$(p}= zWFOLMul?cU`oTR-^HWlv>>7xxjBQ$N>Q5gz#<{;p@yk^{pm*Y8y#n0nCBYQHB2aQ8!yl5oicsbAGO(RE& zn&WyqabG;@&^=1K>v-Gl>3G&S?&R^Vaj11XY$P`0Wg{`?ceY0(Gu}29cj=bj*dCPi zIp#5&(S6C|c_XoV<9*}$nBsxsF!N|$9xpsh56XDrxL%}qV>D)b+=kv#wk`1*#woRu zP-(-oP-66mEvzBF62b}*_G}IR9G}uA^2;8oDJRdlH6~qqYU~tC|BIC(BGq`E^NrZX zTDt=|;PkR_cebK+%tnuzqqQ9J zC_J36mMbR4A&!tk#FOd@e11`E$_3kFK`X1^#c^=y?p_?4-dooqohPW$}>ROaHO=>Dx)DWY+5$^O$*9 zYIrvewTW@h`(AGHUP|9yO~2!3j9SyhaK!zyJ$p5VMriOZJoZOh??6H40z0imBQtIU9dj+aEAFX-i`;dAW9=)dK0w1k zzu0bQqqoQ{LJej}A}ZDQ&xei?6jA9fud`(xb>g1disyoXom1m=r_#d8vREK#hVEpx zZYK_7Z?nyoR(b-5!Bq=yQIX10-9}$Bg5Ck=bav1uOT`&v%AlSS>b0r9v2n|4MdHmQ zO0sno>ud4XVnMu^I1kRf7b&yMp{JF5Jjo`{C_WZyNfVJdyNwE}k79+X)rH;3`eFg8 zB|QlFv(1d!(Xp1xm1GlOr`nH#RoQFj-N=`yYmfH?`R1{OFVKTHbHWOMennQkk40cT zc^vPQ0(LNH*8My^6Zu2UfM;Dd+}p4$^EV$8DD~FC2PH_GZO_1*!)uGRt64FyJMKl# zTBHh8{Z5XMb8?9fZ$6)Jkc_UDT~n1O5A_4?|cq*Eg3#OgVo zN1sNr%>nHeNM`R#bva*3%SCUt*iVLuA6hNNwvsq%rG;1PuuDH}5moYWmjA_VoYN 
zy`IOHW7gOl!^c0kG!lCFbdR_A3-?ja?LLeDu*UH9!`1Yy-m{vboz0gv1p>51P=TY5#we=WRtGx^BX zCw3yU_uOT?%$?2Y9tm|+mOc``YJKVF)9;xxhd)I~<#J#Ce|gj5Ncw@*fF2#P7rHJ+ z_QFGY;M3D{6TG%~d#Nv;NbSLv^YsT=FIT&N$$$SKI^?4dvSI0YiSUDr;QMI|csELA z&zJty?$1L1cUC!Swj4d%UJDtKUe8+PYyC@>+9<@H zdGnty&JWx5c#=0?h{$$(ylGrdo{mS2?8{obYFr=6=u3W{9NCNIc-LWiupAFNbYIrv zWg~l5#?!`O*Kx2s7I4UZFm9%|0Y>)(8Lah<$8o(V<9Xwdx5fKL_DAD^;}DnPh2zlY z@x*bsOYz2$=;{l@O^o^dghTaV9ru5}qtjz+KJ&C!T0J{@`@ zVHNOX-|{^@S!_`+UgN2c65GPN;HYVV5l7W_upsy6)F)JERgI;OA8`-1eavdXBdU%F z_F=|u>w}#K;QSjI|1*(s&21Px`<5Qq$A2yQqDT5{_h-23deJ?`eL7{ZjGk@iQDn|+ z$MqTO{9eKsO^&g);rfjA-R{#S4tvS+soWuIL$;5XSz6e;?xswz?ULm)Zc7jBSMh(&};kJgravY%o&9nQM&RYC-%>g1MKMEhDjPe<>1tC%cwL1bZ|Axsa%g zk=gBdzez0TsB9^zY=0>di_!NrV8O!Au*ygu9RhLH0QjQW6ALe-2&Jkmx!Ng*l*L;iMqIYDI{bm~%O(O!^ zh&UB#vwgavN7VX-ghT)BtNa^{4X-(KP2<)zMp{-5jvLvCyi7_DTh!tJFIitIa&$P| z>B?Yb;%GF~Fk7!&9E+Y}rPsn^&m0xb9b3#{RE{Pw*s`|pdS&D|uF@7lO*+i$dgbK! zKGcNe~9A#vj=D?`U2tyhkY#H`;> zi9}1b+8E2dtnWC+b_Z%)<|d_|ZH|`qLD*}NwpwgvCdbBBE9+|wg5`?RaZ($7Qqn5X zthpRF4y`${T$4E#b=+BSUOl9)=^Q!EwVKaym?QZLj~`AHuvzxkjE)>HD-e}>ebl)s z-@oA3j?}PqWu!RHj?8?ePG@^O)x&_#Rdo|iu919ws?Kg4m+Lybaad)Y-YBFk(V3LD zi^>5fUC|)e`B>$M*F8*ae}y2>x)n;Gn9E#mr0Puib1FX?L+qt;CEp`EQ(w-2ra=!; zZAVu_R8Dd~E=Jn-;>e?n3l$!oO**ufWAMt73Vwep($y~&IFTSadTOfP37bP}HoX>l zsA*=YjkrV&e-|qey{P^2Ku_08SxN9VA)^u&houe}XkuNWhL%S8EwwuJ)$8Z78r4Gt zU5*|?&*H45dWo-{;9sE%EKW%liG+^`byt9(JUSP@5zx>JXdFw0V)&qLMdsm;PJWaq zJ=2P2k){jbP5z#CzlK3($nEZP`T5v1lZ>DE^orD$R%*0vuwYx}G~q7q-{1vauZe$g z#;+%_`~aU&pAoiS+^T+>8{7I};|1PCfwhEJ%$`j;5^ow=bT4Y^q2A!7P_}7Vu6avu zN_)OnqiwO*naf4Hy|%Kj))2|WJ8utN_B=6kT#zBWeS-Dd&!t4ui(91F!Zm(F?&ZFT z9y+AQ7M3o~QJRHikxWu#i+4G36I`<)eJN|rIP~1|+BbX37Ik~^k|?YsU%K8B#2#BD zz%Q$hyuArhEiLs?E5E*vGzz&# z4ykiA!i7jg4;4x?+h3l}`As*dF9c`WQSw^vA>)wC`5uMbv%384QtC*y%8j{C%$p;` zVjHg~iNv;r_en1z;)TkfEnia)vP~ZA+Oui*J|^lO@#MPCp7G_H`f_Z=lPi~#-x!%3 zi1Ok_=c#xFZIR4+9EfY;m8MIf4$WrE4XEiOs(d+lB9OlX?{&lrs9rIMy8mSsl;;fN z=7CTAweB}EfjuI!OO&~C8L#p1fRiTlD>4Fnqf|?yVDIfSTJH(gGg^<U?MLH!vAkc6!{4``jq63Z-;G1A_rr16^?o@Ld%mBJ 
zM5gBBj)nbpEdJVlJhB%Yef-$#{XBY{E&Dysc&Hni_Td%^w1(LJdH3D!kGpSof8G5c z|Fuxk+lt8LMc$<)r;~k~k1{q#s}B>lUSD>1!Ylnc|C#ngTlNZp5KPo|MQrw)<{G#Eo$2 z#zY(%at-@#RK)dZ+l`Aj{L*t4hZJM?Q-0aP>!m(>+PR)qK2g~2^RdNoGV0XIQL%;1 zh}+k^;Sf<5F3z&xDA;bX$L*E`(b&RzKs6&urF{=1WwvEBz$$m7%a%Js{w^h=DaqFM z9}}d4oVyh7KK>ko!uxJ92>WD?=g;H`JKB9Bqx?j2V({OOrn?}BJ|0c^=I0_w`{?l$ z#CiKh&Y*wZ{eSY`f9?Jx|HYqLzW;7I$NppY2l?*L^7Z#kU(e6NAIT`*$*>zX&GK zzmZ>PTfY5XzWw9wZ_?*)n!fAf)V`k8v;5H%B|O;}Q|4^qs~fR$&qNxq`fysfli#mN z&%QpDl7gruUbw;zjn`vd3l84Gd^6oagfxD-`@LWxTkDzpd^%-qoyZFPz5M=5Y0u@W zkLB0d?kl13h5Y}|0uL{`=)5)3`C$(z|{CHgOmCUy-SeOr{$_Xp7k6 z^nmOq-lt9bUaJQIF%|5XXTm?dmHSLa-AV-!>dT4WpUT&$BK3R|&p5OE{!8J0|Mz`g z>V5~U-9BzT1FO#qc3Ugo^!wayX{UgD);;&Yu^v&WvG4YtRtm$&`{Vi-A~iR%3fuX1 zF-a}fcb~lR`(Am|?|bA8`#0uGZ{lJO+D8w4*I$W2tE<9O+jIXDwVvOI?rHBeGWN8V z25v?6-p$ugQS~!^jm4`w`#Yy(~V2`LE7g96(}$-Hy^4d zqh1tVve3+`c6vBhEY-!qD;e02!L`0ymN=Qd}*Tabg%X48AjJn z2`d0=8vAySymEAD2C-&vQta;&sv6wOsB|Tyz12byQl<24eZ%NHdNij)x%cnk@npwa zYKX#Gw|L?ZON%H9X>LE*V<>AnV~(?AQH>d4vbaX!l@?hP?y^l4g;v{K zaY(sM7KKvVY;j1*ri;VKZN4a!WD`aq*IppA*p&*qMEC%QPZ*!!5oA+)us`4>;^8PyWDeM{K$$D$CKSzdl7U`nioA z5*b94aT_J|R`qHl$Z&t$mb{^MwZv~p>wJm`Koay#HM@5Qa*frBtO6UiNXcXF;4QMq zBV(suJL(bA^wUggw!xyU5zH*{=FZ%qMXdAqM(z|{TE5I4ZLw~~JM4p(wRtSmh z@0Z41-$~ENdHXJ7*H_jEE_KA5j=nAet*S4KSoo%I9U&*dk8icD)hlf+wj(WH9fu{( zO0Fl*Xun+6v|#h$RRl|%Xo)IxndOC&C7Sfn_RilDO|^FDLGs`%{k)#EkddsJlD<^w z8@6+lqejgVOH?||QKDFSxvafKwk7V?SzfDQKQS-x;aYaFM9X6zBQ?sjW~bTZW=;=! z1NHEGGECV+wk2iF1o4Q7uOVRmkS?UpIvI&d>t!S+t(%dU zrS%h!&0XEk370Ik!}xT7`2vUM9h>e~8^M$E0_Xso&Q9F106*U^YA z*0-P4O)H!2dIZ&L{6u2A9$}3_T=Mb8VQGXmj()`6&ADNVQ@TcYBP+?r<&_<0>@A+ep~ zEM+J}@o--TLmXy#&nyaQ%jt-j5BgS!-rB$G%KC-1t1-Ig(jH5s4w>iuF&pSwEo$Srl7usKcv96>Y#Wu}=*BV|!DrYDfub#nZ#FiQ6%F1ifz;AoewWi5)7(m#LbpM+O6IHC?gDvh~V~Kz_)DONIrrA3er8w+`&OPU}UY>WfeQ1a>-9 zynGZ5ipp}BYmaf4?~?ZJSd?z8=x0-{B+L%} z*G`dMqbCmArBh}UR-8@E(Xibzm-aa%8r!%yC=u8$*Uq)h8>zE}Upd|M^ro(NBvS?3 z82X;E1@eV$%JTK@yKfwe{bjp!1Jip1slu)|<#~P*IA!y1*@>Gh_+uWqh4mbXpM6~{KOyX)Qer|R;0;~{T! 
zTisrNWY+cdr(AkGyb<=(`y8I{gY4rD{Xtwyhj@umUlslfBTYw!-Sm5rDZ8HTlPmkUdnA&mtrE%aY!XQ-Z4$|^w$1OW5BJMkE2o`EWCqgP z9+5#654|CBH^s9qC!a+R9rbk}KcTPgkLpK#QR8Z|7jVxWktVe0C33%TFSxyTTJ_>; z+Cy3+OgQaBKfqQ}?)3Bk{YLdka0&XReGGW_1#*GfO5h19q@kMBJE4cjGka&Hj2BJL zB#zr7O;-}Xd_DC9l@cJ6J-kX(ZiL>0j1&KD9~Y#C?`&a(yq7Wc^amB_V0B=ffdB11 z!P?T?sH@2y#=N~0N{IOOt~ia>B>!ToT;p`5x9l=dUfc)2!tj=bO)VU(3Fw z=SRpctUrCh2?*P)IquuYxx+m0bf_Ls{1+KWeKWjlA@0-J>Cr09IAqNp)0J!fKF!T)t3 zzxr2Jj)Cgg$bHBASKJ}t!)ez6iRy{S>6`;rYWk+2l?p5fXryDwVbV7Rp|yH6W*RGA z*TPLuP_pzdr=uPeXkz-VJ@rfK`?(-Mjx;9+61OA`*wHN`!&{IlTYMQ(Z8B7*!w$-t z0x@IUf_^JDMw^aKJbX2+9J?INC6bv68VOKOeGGs0xuoG%b&mDf5x^>52rs{72)HwN0-Y@Yr z-4VNWjru}%Mh^>TiSn{t;K{T3?&e9^SLfE;@zKFGT|b>n+V-<0DY0$Q(fo$qZuADa z6*i{UR>I%(P~>@(?OcCbqnhkDTcWV-(eLB>w50#XqH}*3i@LsljO)i-ACJRr*?-3M zs@$JOBCqXVQ)Ix)MAlo@5 zyVa&I*Pa!RNlZ%1>|_dW8=L2Tzo zq}U^clxSLDwo^ZQLTb5Wz(15rhSeuPtXgAuQSRX@3<2TXSw)h3T=7892*CI+9UyMVIJe_iEY>y`$ z?tL01b+(EUfBGaITjU_*-oLmxJH+XdqhyP?FwZrJe8i2GDD#L-9A3<6iF{*=(P-U` z6!D20Q)w$kA#Ks;v>BOgawD90-sN@F$S<--uBM2dauQhzc)9)VL!L(enFv)ga1Ryd zAX}8M&@>(NB$}iFCeXm#2NU=rY_>JHGMvB~D*d}RN)>~e?!PEF#H1)XN zJSENJda^~@qkB`YhG@i=)#A5GxGLCg)1h9xASzp+Z?!Mt`jA$_-2R9|94YbSHCy$P zJ!QYdjlHyQqL8+%FWNo0M+z^~z1VZ8B7&U)a*4k~q*-s>ZK23rfGzUpUJDhAy;*Xs zcb{(QKRLtQ)5{{XawGB8iz}qq!X19&r@5XNs#>wo86H*pb@uadk?>{tlWGGx7yQCxnQKPDJu?;?|GM~{`{H+W z%hF%VFAJ|XrM*9yIH(?6&%%Gcc)xnOP&F!>C#&_Scg45b&M#?&_{Yo5CiD8Gd*bBd zb^DS0zSeN+DN&s=lfGESfVI1N#(Z>oe*Sidrr)1RFQ_kH73p6VsWnb@T}y4qITQMj z3~HyPCqEZm*2rj<-CaiLr!roO*)@wl7&X!>ia296x0d(Jw~RgXN$CYuId!hMSvK9| zsQW%tr>b*Srw@Oevh5A2?!u&|O4Yias;7 zYDtf^U2#%td~jZrcGa8gyri$S_`0LQ+Uv3$?=Evn`JmoW()~5^+`1h@K3w}Y{83cv zulMuF)4VSuN=4NJuihM@Ie-p%aP6bFS>MBqnwj_U8AsnvuXvbt6Qx4W^h2|qvJ%SM z&wPyp&?n`4?JOVJG{vx=P27(~pB^|`OKef7o#WO*{=W2+wKv4GSwlO&W(V!O`smxv zlPKJS)7naVjK=i{-!5~D(Cxg=#L{-OBk`skW#psmTzlL7%||_*vf=n9=ov4AkE%EA zDX*NXs^Fz`?nxHlaf!yhD{;N;-MQzNh)X$@B6mAK-LLsC<>q1C(Rt?nU|P4O-*i~@ zsdwcDJe{elne***nEJC#gW+=b=nv%%I#pSe!_QX*(3!^9yZ+cC*z1u!&tnmy)rPlT 
zs_R<*x8``&LHi!+BQw9=AVTxwcs;7}#L>P!YaO-k?H0B9$r@Q_?OUK)#ePN9ZgGAc z-iZi<`o@{)gg)ESzUSJlyjnr26&lvUJv==U^vH^Kv~N)Lse6~G;waW#@x8n(9^1xt zZ?X0es`9Bdk>l?#(4sx+uc;tev<=N&t^`^5ArmyN)gX9COD{;f@5(xs2T$aKYDe{} z5=CmA>~|&3zc2sQTL9mc$gAE$YOSM#w52`r^BZD+pN5S!rqA!ojIMo7bZTEy*!DGl zj@NsTJayH-nvIlON*)jG`^&q^y_JAJBLUQ-KezVepUO=CYob-Dc(=4Ix0FD${Qi=$ zKQ6SsY4E$t%BxPAYNyHXE|%aAmw)%;SDSDPJA6h`Nv%6~J{NkMk*$_HZ)C^z`_Gqq z^Z&6_E`DG7qwaG0X2+}b!lr$*|pBZ4Day&E`bdoIj*l8~M{)#6Yq$x58e=Uv=M=YFxj@ z=x`+6uOlMN8M{5({Fh>t|59GQ{pa%EZ}A|ucuT+8<`~=j*J8Om>IK{J8byPDD6wbG zTpOY5t+KzCd3MW+%t$Ife_Smin`50$i6W{$m#8vtZLA^QCek2|tgXI{3@s+pZ7+J)oU^an7&6Bi_4&pT3A1CdUP{^P zTzwkb?LU>;$Dhite=0Lc8R9n^{>_Hx+VH=W`rDt=VnfZjig+9Qi4DJ{49!C7g>#*l z{8Z-t)_(s}`5W2EUrP<(x~#Vu&4+sO*W+jM^li`2{I$$Gt%Ux$%)oDc1{=Gj->U5q z)pl9^zm_N`*UwbM+gK+2%q?Xg#?6yqY7sT2Tp~&j-dJ+z*rH3% zg?LMgep?x8JeeuO*!uk8vb$Qj>|e|8zEw@z%tM2r-{Q%bcv4lA|CXA~f_Pi&Q(Ju- z8L&QiGVEI)js2f4(a*P?tKY1T;v6h4F%Q%uK3T^;e=ae1-YNHrcAFWm8rm)KTV|;9 z%s+G)>#g?F?#uP-$WZgfY;2(w8UFLv8^6A-r&mjaeS&_A3?m}LOv_wPG~Vje*X_`o z_2bssZ6)#ccDa}A*I}8nMr~}DEBZH!{LLccg@}@&{?W4P(;i%r7&@vE3hQ=}^6k)- znP;JoEsG+)KbBnKTP?6rN-@jY2x&!&v*jzi5Tx_z$J;_1-oozmIUC1_WJtk=fTtWTkae z^V?$8a$ZN)tB1M`NtRSEP1aY)-<3DK=HDXWHSYRKwfyjZ@oHcjrYabRF zwO?NEbLz~0eZN^UDyx>Kws3x3_^BWDVulC5H<$CO7Ky4xSvcrLhWZ+>-mR??n1*Q}IReEP5y&x>zT1g3YrMzK<^Buguohy0Kg%#&nnCG+sHqIrm8E1QRp z70yE(Tj@OfYgRm3ir&gcBcAaBe%2pr4_#*GUM+?<^sZzA``L6HYL*9~ozRy5k&VZ- zG0Q{HHt5}>SdRb>l-xS966oz?kR94`>^Oahwc)e`dROAJ{hU9pmD`#?6w(nJO|3w+ zGF4LEmfE>$hDW|73Wio|{GwDDHLq>8#eQ_k?Ve4{%rEB5{5nCKN{pMFq3Dyw<@H>v0Q z^v5DoYyCW*pGu#;D!TTva2xgOJm>8!o#k|9CNUeyX1ywRnoS<_l9`Tt%lK=ye|nx; z({8)4{P_CJgI?Yj#mYQ1zx`U_HJ|%-_|5O0x6gd<`9{rukowHZI+ADX0aEy7P;ev$ zg05%u(pGpyC&!Q#gZM?s4`Z%vORp_N53;i)9~wUSwu!LM*HySOUYI&z435g z&*xysER6Rh!_rO;J2&*?{Z~DC`_*$r_WFk+x%MX1ilp*7dplHh(JqkBi;PS&GqN3Z zf8Lvtn^$iiiKQ$>Idz+9(XFe9pDz^^)gH3(?6dKR?aOG_NZ;do(<}GK?#$Rl{1K74 ztrmbJaO>xUHLn>Xv^n6vQd(zi$_54Sk%)jsIxjNPT zv^p&hR?jcID}SRnOEy~i5;1NgJL2EMktg*>`Kk2P$D-fbpQwuL$MRplKI@}&uNKT$ 
zL3Gu>0s?0$AkcjaDZk@!iU4FN$k4R}&4T9RKWt(1WjbPkZzEO9Eb{S37Cc{y;}$+& zwvh+Omty20@}(GikSsZR50iyFqRSWvK`>xkBS{6T?c%zdz+ z@mdZN^o(}DzBKfWu!5g~WMdGEY@Q142qFp9&{G--dXaSV0WVufy zu2nH>DYON8cc~duU69_5_V0MuY!IPwRs;m-5$i!s>RGAw>-4qGS})I89^paO1J$0W z$E8QL>r87eI4f2{(iXK? zJ*Ydg>;IWY>NRCHZ$C4NXaqT{tx5@(yV5gjt;iTFG;&Xh$kBTU`%}@7r{$M+GwMux z)t&Whnb#fA{d5S9XLN~g{9+S0V&kBDBTN>3eD%Y_=AV`Fh#q=*+_SPB)6;0_dOeOt zJfr7+`i(wAw^s9EU+UE_NC4eRa>jn=$doyY=oM(3eu4l!qL=t^MRl@_y5&NeGv~`y zGk#b0oMU6>y;8d~%)-NKE%!)IVlC-WotddS=C%L2?)5*z2Yyg_ecgDWTZP^kB4PN| zTuo2P*(w+I9-iG$pc{Y=R9br?tO170y-3QXg@mH5?DqrZq(=>XkI^o z2%YI4|9(LlXncQw1U=Iae(eVx+Q;%<^92OxGS-j7h>Bup1@y@1;qjiAm|OMW8hxws zsVq!2V#VY&(m%ovn+H{0Sy_k9Q_aRN%HbryVvVl!KV@> zSfo=0RXl2L*6zND+uK}Yc-;f6RVD4*mJedOnCa~4;;_v5#RXvFgRk1p~rR1%l|wq$#osflVho)d8kc?qcM9O zk48K*2mEv&U57@DE4-opqX=i{!(PE_G(m(eV~jWq?c(p_fjGRshbUOPy0jz276UJL zh|NYtiV1aUROcyT^~iFA{;b}8s|inz&2RFw6oS}+>-cSTRcUoSk=HH?>~M|GdMh5vS*%xcgXUl0{n z+wapNnrU&A_GG=fyd*l?Gd<63M6|R6^J6)}@to5=fA2ZoBUtBr&(YTg^!#JzD|mgZ zXVuO#^A{=nVmdeylR(=%Y;)TyW_TF0&HX&g>$V|Nj*;!i!{4(lc@kiI@({_ z{`vWLq`SeW$Axh)&5SKc_J!p~VNzQ0&bT7ntt-X;xqw#*4j%Gun#vK08 z=Ht9Z63)=(1LF|3Rxb+ah;9(sXibgtnn%7vAEC=!X0|e=evz$Qo+O?3GI|X~w;E26}F2}CkI$QS%?b!k^TYf~It?kIfKBH^Va`n0xjd(^^@k+ALZ;BoMY&zj7 z52TZvoQGG`^8T_SbYHCd$8uNiygx$ca;#!MYPYZ;j1KeMmQT}zaO+2>+Ku%H>Sc?1 zN9tpj@$02P^Q41EteABqkidV8;e2KW@(tAAYrq+=0k!FYw-tL>%CmZsC1I~eS;#Yb z=C5Dq6ZD7|@a`e7p~oyqv=s50L3^QRG{R34K!+Z+!qY@h2EiVcjC$1Rolw zYal?6=opcUUa@*t`bU0!d0OKL4?F^NSp%Dm0Vy=yLIa6kol-u=c#@uQMxqP!bN9DYG3Hy0KK5Czp-&#YL0Rf?On2Q z)Vpx?-aEZ@W8tOKM;}ZascA@6SYWKt&%k4L(x#}^s=odYB!BWR`o{f z*lLwjRZ!iiU}>XHGTj?*RL~hH^}R)o$|38nf){DFM>jvok2X6u|BfBEEa;pUODxXR(#|9Xx|PG zzuI&4_Ol}dU%xv-@bbewZ>wMKImY_w5uRc(FTdUM_w(aDZy&!t;%oQwJy)yWA90kw zocCZOjk^yr@c6z+YVYPIn*xujq1gLaV}L)U&%{VrXxTR~~ zos0XhZ_krw>HG6g_hN%g3C6KQrUcvAA`hF`BM~-vuiQ8hM zJXB(*OvD;nWg?!Bz4D|R#b(jyee4#EcqX=E%|Yj2-jw?6nQw`0pp}C?lD{i8sAuI? 
z(fY!=s#3ZIslHO+!QHDyMZ0JEjD9EL_1>l8H5Rn7*J5i10_U0D<6HlX1kPk6P{xUQ z;-8o7A@@D+Z9n#qXGX?K`mn(f^S@P}TvLho+3~%Uq zJv$ENSoJ&(KO&>1521PyW4wwT*RJ*ch_=cdi5Qh3(~r=*66@4e^0?OIX27%z%HE%C zwLY%BaTzXcg6eLfkwIc0%J~IS`;nyg50DUf9qL zFuK22x6K~;mY5M*eX8DE)Y^94A@#Y?&*e}0yU5*M+MOh;sdusF^Fw(u-Rz`0gWr^O z0R6dDW-WZ*mOi!Z!fIz9WnZ*Y<5_91cA(vtUy{r;=KuQ7Z3wsG;S zyl1qWnL-w5%hutdmUw6`bFlG^(c~V-s@XlqSWWNoJTxD!y>*|}O*o#>QNHoZ&ESYn zfi^u`>#vuOei-+014jMNPUO7tS5$UnfplVbr-UtzmHQj0^&N=3S~^hphDY)C(ldJDh`-?9Lh&DD8*{z`Zcw%$tr_j32mGRXI6{e~pw~6iD>4Ng3Ock&vDVyt z{IRy>%dxNdS+evqBMW(E=J?MTbQ3zZ-QIly8dN#t$dkw8=4l{pg~n?hNYFDH=Nlm` zEnh@_hs9muXw{1QZmr&~@x7MQ{cMWd8^$)nv&ZlIyz9%k$~?4AseJ=lVN^DCUuI-` zTh&truUxVpb>G~w=ihQo?In5g=)2NS>LbevBq+T-uxjm@`HmFv9;WHy^;%D~?O)2V zv=w={`?e%chThiP4*R$*%9C+xt1^*Cb+SDCYql<0iryARBc8F9#76a2v1RrAs0N=w z!!kp4pV7xsL494K8&zZDD%kso^Y!aJp3BNVZ>VRqtgTr{!y;#Gsfc@x=-u0uR>n$bBWw^JCEg_u{O4vi`p-y9oyQOC%$jX?`oxo z*MN9EELP8s%i__a@{o#Tw0=>~b-XIp>1BzIwJM{?IDTe5EJH?1Z)L-N4J!)qc>}(6 zJ9ncagTJ3xL_4=^_od5Py(@c_HJ4vqey?>t*-Gs(_+r=rjHT2)ky?d+S};Cd&YjvT z3wj51d(V6Qo4ViF7cHj7P+z|Fm9*E~TT~Q?mdUrh*BX~=Da-zA_Cgg$kDLC{VOk=z z>+>Z;hJHq6pguh3YIQ?*q(}9Bl|eYF!D-+A$MQ>2M@A3yGd2#@e z7SQ#Kqpp8_wtL3~$7H+aJ$wDgm+^?T$d~H8HMy0H$E;1hbVsaFmQ=m9%0iy8X8u+T zs|9^1N~jrfPXc1I{~VAFC|M-)#cgLK9!}`&(;wtk9rTRUWU|*t82;Tt3kRw>hYy=rP`G@Z?78b zkuf9FVC=|#Y?TwZM2-QHH>|iZ>9M>t3C{gYp4EFeo*8|<@$;B)#C|~AXti6K_hyin zH+WcczMY^w9!1t!(7j!Ox!llGEtAr!#sJ$^E4X2 zpU2UNXXd%z{ANBwmobx9i{TAb^s|hqa&V5uT#?fF&u97+x)-y>wQ3vFMIjxTALJ6c z3v<2q^vKtEA85@Fbx)=C@v2%Hd&A~(SNPls!iUxhW@8amRJF7AgE0qsT`~#V%|hak zGaAYqpPRC$cO{yAY~)bwq_V`iSF*mUtu=mSW%C(G-OK;6a8-`T4$pUmuSO_xulB>p z^EjA3`?Payv)Dg!h8#~5Y$0hR?{0Mj1+E?Z0*`12uUP$I^kw{*hl;JtMO@B;JY;Nb9wN3l z6Y(Kioh!@MmfsS6Wb1D$&pK!J*kSdsof-Lx=w9f3Ni^aaU*qRv@F~!oHTqlaevI%~ z^c?iA+}%(sB3>E;XK1}`Ty=~%1aCip?9h6aTx;~P!(FSo>-f#3VpdNxY6r1Aujuxzwb_h9JFlOV-LUhy zq1HO|uCMm2=>85>_;mWSWR1->8sAhYqT1!-MszSofu z$+V;SP3dt@j#ka+Wz5hNQ2jIpj%PH7Z~P)6IO3_GO#|ARj+ae+7!$3_Lqsd{5YK3B zv{b!TMg^P~E&A{utg 
zYKQC*F4#+`*2q77joS1Q=g(!Yt#19)Ifwe@qu#gBsUdE@sku|cOd&qi$*+S3H>Y}77x0N zLA+WFZ|FlgNE~jQWua}*GdWCLdm}6dErjmnKyj_gu?(~i8fOtefF7{~WHwrZP&J~S zSJ}R4d4v)lpR8&1Hk9{=a z8Qt*H33LE@%zk?H1*p*L^EnQ6Yk#AVj*KyO`BS-(;_YPBzpcd1S}d)1y}#74?@Rs7 z;xXHvwtCiTULxJj@uYZP`P;fTHgdJ1qSaBz!jb+fTG0wb?D42^>3>>r8ITG{|zp1g18r#-X=1ZbSL zfB-$BB|ct7YyYYNu|}f(Hj1=VozOlH_3rXz)>%u7Zx^o`SL=1-d0()!jrE;8A6`F{ zcE53wOm`5}xJ|9M*Lzx<>#KQB%YJOlWM7#*Mtdn~eVs?=wLC_szG$Nr__~++-6dw! z4GoGN)!#ptXjN-(K4{war2S#S(|m~HtLmU4?Z;()XhvH1{j?F#8{&Gc#q#=6z9I1Us0n(Yn)AH7}2D%E~` zSM;Opp`7oXK8Bo)sE)*lWO#9z#6z=q8$WvGS47It^_Zz%RpZMjyrz+@x|xC1V!e2@ zz4$c)S+7F(@ofh3GXBlM(|dI_K90vb6F+B2Gm5Wofx8!fXUNjx^LVrtzh_`=@Wa6&#n|S2C z{3jl{&yO-NAIhKNWmw1RwMv65oh3J|byZn#c`McB zBG=+aXqG!%pOx43w9{U9>soq2VzS7ae;8LUrS0oso*WPBX`U=ikE1c;Vz<4K zKN7?7ZCs5#bB9aJ%wh&Z>$=t5}98gyE^WRp) zln1B>BjU7{Z`_q#yS(>eiMi|gtgM>W9PB$~sB*u*tZRNW?S1I0N1BhUPgwFT&9%>8 zcO_U}xYX*wm(q6@S(bOM&c;*J`}X#d&F5rr?#IH?=61hT(`bu~x-(NeKYqRYQDYi4 z2P@7NZ9Wg#-cLE-5A0*ojrX=2ftGcmd3lz5nAtC2(V8*8jC+5y4^7t5BK`PHzu6hD z9abNUmw88Lr+7fsVfD_Z@@pnClB}9H%iV2bjQKh6 z{4$2xgQ~bgyM!XgbA)CYOGPqTCykW3VOS*9u3p7Dbq8wfiL~#EcGrDoPfE|s>xTB6 zelprdqMv14_3ZNATGeffKhQd;_F%le^s8l$cD>_Tmf>ybFU>J||9Xo;tc6qTvX5)g zIAR%{S@8CpygM%2q_qZ%mepO(;$z9eWO3^WOznY+vGN~kM?HmjqZLG=yHs{3*A&lbtj zx6%sR84#^o5Rq7C{pS)(%JQjt^1Nt&{qpwmOcIMvO}oq#?z@4Ht?Wp+&(+L1%At( z^U#;sbsp+|_MIt1)35Cscl$q)ns+37&+Cu9>^@I|b@rc$`%rb@wz7<>2bmJB_y6S~ zkE#!u=xx7Z9_q4sk%xM{x{)czy84lcJgSc5;*a-b=J=w%x{}xSzWS1nyyHw6J2mx+P^jmdl94jrEYjMK*D*}tgy92-~AbI`hbQhHI(4o0Mojr_eVZ>S!ErM=O{l&GWx47dIdvOx1Pk+VXw9{dc82vr_+tXu_=rMI? 
zv=X|!zjyDfR$MFlvs{r_5%om+5n4~*#?4?TQtEJV+*&8nE3&fV&S3sZNA*y$}YYQSzIyQ`N=NeTHP_OIMfMx9Qc5T<@ zvX|$496bBkHf#Ukcw|=bZ=rtv0FFdk&}H8-9$t3k2{oZPWBQ)x*H26bx>zdvR!{^vn9N-LP9abk2-(KPi-3g>#)ccQ5-V0ZZ3UrO^`JNRBP@Q;wa~Y?$@4>$(8_N2w zByXLFdSQs(g8IKF1ZU`SZB-R_y%oQG8P&V@kzJqW!P!w~W5F2pwPbTg|?C%!vXp=B>fJ;{B(YSHS#=fCZH z@odC*t(S;o=`FoXt-CotpXwjDJSjcBwa%lU213d>Mw8@VVQ*N2X9LieHatE#cr z7hrT^MV#v@l=vzm@_D^nW2U*Lm!x$T`rC{l%052^QM%ZVQ54dzbACO%8ap$`w3}gG z+n<*CH~+QXDxLqjAEm!<-&98;N~h~cMRBhq8O6OH=_sTlGsqsDU|XTv-F4R1!56yR zedo>Fe{7H%dTmUIJrotxTWz$zv%V#J?K>h2sP? zytlgN&A-=8Z-i^Qd(FcA?$JLMtMQle^T+a2PZ527^t+1%kp5ibi{-=0{>P*L@#ud( z`d^R!<>?CYyWmR=CbUv=d$a1 z(eH5?-#-;OHR>Auxlw2IRRvc*{d4IB-5)M2?bbch5Ad4PAika%f4)6C{(NT}f4H0* zf4;gl{`?#KmHfKS(bW};V{6N3Cv-hSkD&7<^x8K~V}C9yDrbH7H}LoCBRHPvE55;wNgC;> zYPR*oYW>X&M{E-Gp?_-bRy*dbf;d@j7OLOhUF@`W`PG#Z?PQ#{vaNM9-i`V4v{?G*1}XG+xYX}xv|j(C5x4Ur z8gDs&qOsciibm?^Uo_&G`H3f~_ra>tR(G4o!k*#79D(+;(P~{)3Hjztsk` zk2Y<$jzX`^R*o}T_RX7YNBSX6;QvFvd6OFNJ)N?0^dhv)uV~xe&%bEIGxHN~QY*&F z`{s8>qTx?28EEBbkJ0NJC3PQx_9ECh_9OD!w213*h}OD(oT3r?ag0WsnORhUZP6KA zbo=J0vUm6B4-PYcf^T=UhwbfngK zY>z5QL-468_L6(o_Id=bY9E|?TVd6@GqwOJ;-la;_Sfu0w4H2HZ0$%Ma&Kqyu$j@a zLwTrcJC%z&vSWD?tnFMj?zX0%Ip$kC8Qu2Yjz%M%v9o^ml{^tzS26uY7QWERf4oZS z)x%&xn}^UUY7`2qtWo@ztFTcR<5k)yv=}?is6%6HCfuQWkz!QeMcG5z1C6tTAV81U zKdLyoA?jV3$uCRYUAHyAo?H`Xe7F_;Y2l{zO`UetD>W|+IpLDK&QHF&+#O%{6kBr5 zCErga2dp<{)&7{zVEd7t%^Z%5q<^M>9zmDARQG9*#x`nxF40G0pMl%!MigqVJ5fkS zbPFr7UFkdWEqV#n&L{0WsdxCdy77}@8+7xS?oOMZbI&e&T5S6=>OQD?QoLHR=ce6W zy6sG79<}Ogu z;+dJ~Hv^e*(A@b3Dl}qt!5bPqm%xE$%p9l0`$ndAdX@O-;2x6=DH4e zw!WDe`xJ@+Z)zdG9U)^a?_f6Wks@?Nm^=YYZsrJ_Hx3oy3bvt{%t463jrEg6A{j~J+7Z-V+ zJ8|)7RokKR#YxNGmENv5UHHrzMFk#OAU+;FrZp(7@YUMu+>K(`nNMY&yt zdAR#NIZuW?pPeVc)~DwpFMWO<@+c)tkL_qrF9{y0)ra%k;jD zoA~JdpiSG|DjpSy+JnVG^W~T*2de$?|rc)KVNPP)$SYH zt)^P<|Dmj~+fEDg-b$yDG*9&E^ZeYq&iAg!{mH~%p6_L8|Hm>q%NaN?v2LeUz1Mnc zs(MbAMzdIUNB0fSOGP!B$Yq|BokQ>a6;sXl*fZbY<>`qzua0+zOZ5nze!fTW^!y|4 
zzOg`$;1eB;_-clD#S0_;UJ=BYca7t`V~XooOB-kLK|M-D+|i@>MDFp8iu(Z>;1o&@U%EE9JWp=IK4Be-1b>k-~< zWZ6c5c~V@95c8zik0A487)O|y_^$Ug1fGk(j?nY4TLd4C*+=-%h-WeYzsR3RAG){V>D790ppj8M7|^{H)2Oz^ zMC`N%I$m*&Yg1G-PJ5tjoo%nW8repo{-{WsRzfr4YOtX3Q8Y-<*q9j<=x9YbN@w<0 zoTCw=qF;Iu`k}01_AAzLt&50kX$!QEW7jL*QLVd<+g8M*@Z%$BdKkK0QIBd_Y;;U( zpwvhG<5Ccy@d$AdFKvJxiF8@%uY0xHEx<>fP-pk^l?aUnPn_>(PEgJE)>s8OyPt$tdRrfv>OW=>-h zYO7aF@G$CHn=^5nw&$VtdYXrQZMH;9(a)x6#51$aZKl-q2CJGKz=1dK_Hn z2!9##>P>iL(o|0y>V@qS_TV?`aS8$b4UBGRpgpe1ivrJ}##=?hQM~ zb=p^_NOXe9x~ILp%=MvUKlbQ*%uf;La^H#0t;}a{x_9$wd2vV_t$cL%^izW@It8Hf z3YK$Z%8GTDUOn5Q({8eTwil>trmgcbvg67YExDe!N!#bg3=Wb+=VNU0$R}2g9_P}Y zJ^Hcq(;L&9EqJi`9)g2TIjS~m8L#C_b+g&;_J+^-meoiCe)D`@Uv$|r{nz@G*I7NifmBaBD$}%C4y}9q zbm21JPm)HSM^@XyMRm;9N4Bxk`if37TdW4}Fo=(=kmYn+JtZeCwcedtU)PgGobT)U z4%>$!>}uZJv+L7|Y*VJYnVW<3$JcW~HTFwQ5!E>)j;^QDkVHBYng@YYEOo$3)ic z-9A->=U3sdyR>_sw9Z@J_-p8vBEMlbbpcKdPlsPpu)s)pfR`PoNNT>r%o z{f;@W@7}3*V<^kguJT%wR=liH*Z;G}7)_Vdd)l+%Q{x)9POqHkWfK-JIdSBlY9u>e5*Y&GE zp8a93?)CC|)cbm_b*GZ5{+{iRtLybG(7oS!y-D6@d9RkL5}s|^#+`ot*tf>m_fXy2 z?p1Xh@we9@M_g6kke{`#AN9O_s-7dho^{X>e|u%L=jvH29dXwf+N-A{&h@0D=Pa)o z-jkwtmRjaWR@Hv(RokAkXAO4DT|2`R;r-}pPZfuG#L#_?Y_-g1a$tHazTVX$xRxJB zR|D3VvctbGo>u*<*C6G0fA{E*kN)~qFP={AAfcWIU1E;&>pTgT`F9@bGC$8m-Ou0i zr69l0MLwJVXG^!O59H!+>j!!0t-g?lxvW2AqEes8$9$e@-E&tEnpbJ{kIWwK>mzyS z%lb(s>bAa;hu!Kgd6>)kOdjg8ev^r+JoMVWtvp$-*MBnQ(5?xsO4nUodC24XQ6B!d zzLbgI>Q9-d+xk=<_PTzRi94!qW#YH`S03u9K9-5TuAgNhQ(w!(T<(#~#%=YvJmgXR zE*E`S-^)X${uhnd*9W5!&(sh7>W0+Mpq$L|YAt-BTAAOT+=|1Btpw60=$VsWQEgW3 zad|Q<5=S0RC&!{NA}X4U74+KvqqugBcI`x>)st({2|Xwayba zlc-)TciKd?CZcY{Y=v@?jAW_^&gdX(=_TLV*T`sZ(In|LHV^xpM(4@( zu*OHr($9!!#4|I-Z>BIapmEv=0<`)^uU>^KG;aJrfF2pQWrU))E9CWL0gil&ErZrG z;6D~G^QL%FD&m%t+LGpJ;iY>Db+=kQy*?ki)U02-{@Hv|*3BNbw>;3r8w(iUrG0ylBuPXQBOH8Sq@>==) zVCWaL2np*enA*)#Z$_VwFMG~*P3?Jl>u!(WrOhMG^=?kf9O=1w&YGUP@}gHo<2+>c zo0G_SW^VG0pAUy4o&ehBRzGi}qnU=Sh+;Xf^N`!T&%@la3waW3?M5bYvnzS1BfFD_ zzO+lxsJ-2aMm%HJ{OlR_3+g|Q;S7zQwctRb<|zE3k#hqKXq*h=Jula&Vp*LB93#QgDP~ouJ1@*V~Jq 
zk!U;mS+exnk%c^?J^osQmO#hz-n&mggGP@&IMDb}2MKy+w6XHqZU3xzT&;HMwk%Z> z&+uXXL974%aN+u;aP{!B^$eF*3#*jXgWIcSqrDY&uJznht?k*~tD;Z!CcSbmjg@wf zsUO~#>ZC{CYInWXyq}dir%-(Mpvj->byeLpBi+(EgwOjmcVBspxYwOvYK@nM^mEu6 zI^Gr|jt-p_hGqxBy>RWG5w};*LeHQY} z*r4Uak}-LQuV10TfNKsk6PRywyXOFYRO|@68h^_Ac_o(naDRL_7l|AsO z$7+X@ZnKV(xYjwf15Rgpbi%XQGVR5DQ(9#2^>$}lOuxSUetuI{7ivw|Zxx>Vb##-D)-USoXEANiOwz9FTFsUw-jzQKRri0ryJ)M2 zZYtN)sWGjL$F$^^f)F=@dw=j-ubZ!Zi+x7oar}D{&KCdYpC2F<{vEokpKtrvJF3}} zp&x@RrwM%c*x`COLWX5-$JhI9y?#6hdg?|L`#pKkG_ov@+CME zG2BABTG!o=C~hOuk%%K-vWFv*e3`CCELrmO5lt5IOvK|K=}>cl#>O?EK(FWh-Xj4j zv|lBOM$C>-XesnI@d)V9hgM;tMxh{)Z|% zx>FQM+CpXXC=ptbA=>gVNbMfJ$MqhiYZqn>D6FNBNE`Rg8C+gwD(#?@{VWF@&sY$? z@rzjCNE`uec3^2;qH~9ay0t!;$V=;#hr4gR@?>b%Ef2G|ewozRwXw61x` z&H6@T_SQKX@r?EMv)Wi`XoQW1H#Ev3!ymfN=Dfxa?$B|z7lr0;S7`?{&WeHnU1pO} z{iWJvJ&EA=Y9I1LsgIJ4MzzJ?`q2VtoYeyXdc@k1<-WY^Ipby;oh~`@HSs94=Q5u$ z_PpAfh^5w3@4H%_RhXA?^lrgCw_Y>;p6eX__|5kW`qeTH-=2rR-w42QW)}HH9!P;j zfVO@djgpsbeHeW_iqZJ}C`Tin(E`8GXSAV_V+{th$68x&uO0^vI_g&$k5m1%Rlb2rBeP5!~d7dANT++LF4&PsT?Y{8Qjoj9k ziYu2_rB(GkCXd&UtS$7IJL4TDp7UJyO-P4*UXvW(USWB2rnlUgalQ5r>c!5NmoxVt z%gfWYmCVL(d;fsVUf>Is{22>Ry9AEksK9Z?^Z7-4NP!oJF8g!Tk6zKy5&Uc887)UY z-qDC>^nrMa%*-}N;UX`oO0Z5rd0Je?y5Z>V@>gFYh!;UOlQcBH$gXe6dXD(qsnR5` z-0jg{FSl1bIN~g+70oG%(+%E!x6QPkS;nFBFYxakOW0J=gg7w8!*~SjYVPo8~PsEEv5PosDJTc*aZeO?=eU?9z1)8XZG4IgWVP zJZX9loQFN~L)qxfgXbY{J$xqeegu#w#X3UB#O))9JoFY}hTCDPo{B%l#4%(prU2yBdln|GZC0yq_{1CAeyo&ROcUN$qZ!=hctiY}M;b-!e+nfV|d(#-l#pJLmv% zYF$|U^SP`!Ylhi5`}IGvo*4(e@r!-o$ee{Pd$6vxtY@a5eL5n+dJMgM&mM9=#?gpp zbii-y8Ea^7IlbBq0(5Ixqi{x+Hwr6GYv?uT5pBU@YbV{ai_g(H(I=Pn(mCt>v~c-& zS-*c*R@IdU)!5~W!K$u#>K+c=6F6_B?+b@_WnYo}igFgcVNutJ-x|2;w(R$%wR*ky z=kg=lXyHdY?}~KuZEakhxNx!TU~ID7yU1eEfhNmy7g;P3N#%6jeUrQB)a;M^WV@ z9z~UvcobD$;!#wYi9(6>0BABa&hmo*J!0`&?f8AETg~3Uqx*;TdYslfEj2XNa-Wwv zn(jDwSH{~?8|?czvw~K8LN71s4qKgqovXx?ligoV-CI_Zx19b^X4Gtt!0XLrjp6Rn z!e>PTK9&loo$K36O<#LbweIkT^6T!=PZy*Qmo z;1L_azlHimhj7GVL7TRF+5Cr)G#jF^#h@dGn?`J 
z)EI642`+ju+9wuZ#x~Ac<5kA^v{-GsmuY-GqmEwc@vOIc-d?w?9M9+ut&H~sZIrfp z6+M?4@>=ima7TKXhd$QZX#8HUqY=;OJvNj3S{2)C&yCG?uJy}0kNY09ay-*Rd6r?_ zYtdTL_i8lanO^s^!$jcFD60#9Xr!$J0~$A$AV80dAvUGe!d3T}vna}F-2dWkrx`4K>OM`&j=}BxnD3v)=}YOK$3~reS7wIvQ5C=E zUw?0zC#rqkm(i;yW301I+&)~Sv+v;jR9g4uZ`v^5|9Wv#`-^S1V9CYf*2+(%&+EIx z$}nYt=Udpvhx5F8Z#8RMj(8*DH@s52$4?sWdynIC9?tg<68rTN9MAL@-}psha3q$1 zwh`Ix%#V&p`yP%)JTn4lto*S~w=B=|%|%m;z~f?%zbm#+zR(hFG=5sg&^t@qj%e3@ z`FY(;{MyOZBP(&BA}#CLNO*cjagA?110tVkJ9*hS>MV$~>HXh)vVE*{rL%)N4oM;=HRi;T)8QO{K-KJbyiGKcFk&-FZMPo2K9OW^;79J?Vqq@N$8$p zTj`zbna5bvvyncsRbI;EYjwb*y*h0ti(oquNbmJ=%5208$1`yV-w=y-JEiK0C6@#N+B6_4jv66(M^+<@h*+z~ ziH*KSc)s!22q7Xu>v8;JwjM)Kob>ujnK`x_UEmFE^Ts<;>=7bj#~#Hun%v{>7hmon zctxCh{Jmn(o_|zS+9UbJsUzNgk?V-NZ!Fs*ct*E-+vrw=7LshE@mt6+iq`YxxE{^l zLY{TBe+vml*+9M=eYTJulv#_$`~cl>}rvx56hI%E#Z! zmhvTO+0-qtTefuz^mR6tFTpZfyA}F6o6C}-&-SvAXR<;6ERd`Z8lCfj1J(M{cHf2f z2!I96%ICm?W@K+*LHE`pqWf4U0d)6g+xHQVn34U^uh86F2UO^GUrBTyvp1^7japID z?X%IS@mUM~3Ei$nM7J;_8=!5_*w`NwXhn;Q^ud8vyr{Sy6lg_@isC_mX2tB_LE|EH z5TMI_JJEA*yRRo2FEZAq|DdCNLDB77?;DCo(;E}o2dn$R?-IPxnYi6|zfoJ6sgYzo zQ&w#czSz2}s$nQ{Bx3bDamJd@8fR3)_iTwy;OSL*<(T&UiA{Eovub^hc$_!uxq6;J z8*$&hQer(N)nhbGx1I&M#@plkS=7?n`s!82Q>v3D&+i4|(aU^H7(*JP&o` z>od{!Vu3sv`dHx>*vnWV4}UM#$dh0ki{#>OW0gGgZ7h?Cyo_~jgS(7{G9~C^rD*g% zmWoC^6Kna!SX7gs%ZSRa`VV(#PRs-vRH&*F=meeDXu=yRjL0Yq9B4%318=A*`^w5c zT+UaK`$zR)L{vpfpph{Y7|^KoCip|!jXzN%6Bl>U253Wyh_XO|M#og(Kw~2)P@qDK zikHBEMny;Phc53TinHZQl!(ZO)<8!$B1O&R(Tzz_SaGowJqJA!L$PjC*N=5#SbLOo z2bgZaSB(9+c)OR6j_{)91y%H<7ZvJ`L(APY;-r~7zoM~ikL^Yc^u@Bq((A!&ee%rM zFy2(`Z1)1d`TdrFI%uUFBCXY|fb-_S2;??JrU4Fa@zm?(r;&7mF8rH6`YQJhxN3g{7S#Iva) z|FXmi^*x{?Ut(#XPs&Z4AIc7!4o7B4mq1+S{ZXclCam<;J`g!7GE;4b`^WyJ~%1BdecU=d$#e?lDxQsa6C(6|VEW z|GjBz%j%Cf%a_#o<>3cIzqL29_k*7oLDntmSK3)$n%6S2Gp{TSZOocb8eC z_p$13K--!UlKt1Ce}D8ZkN)k^e=qd+Wh}pxpBZW42mM&IP_qN+|EX}-+GiAR>j>@3 z{g0uNi+h~xvAr(takBp*wBN-&O7=*}Bjx^MV2_J;RDb^|P;T*#lKanra*KDA+{^vls#XCyw{}w2>ct^?o*Fd?&+e>asOhS|Ag$jLzJS^ 
zACC*qPnQf-@ps*=_Ctvd{o<@+XkMcL+I?x4PE#v-RNfigx`*(*+ozoT(`DpzmPczX zszKb95v;L&tt5EG%Jk|nRPVNG*Q0izYUiV9{AfOm=-oz(^?`?jpSY0NAf;f(bb=V#^D>k^;U{k%_# zWqDJ6|NF~4@X`?Qnx@3?xAcvFEVu3Lz8993NBZpTC-)@Sn@?Hz{d~(pp3xBI;nE+> z@L@EdZ8k(y85{Q)o7|7#ufb;CViqq)FH4X2MmGz;AN?%knVILW71(m3s9iP z;^KHInq}FKo8zU}#?{e?XEYGs!PyDhNe{Tl6M3Fl!y_&EFHc3;bF9W$w+u>l-JP`5 z8efcUWM%bDP5$eZ4nG{7_|xeiogb`k(zX-2{Jk>OdUv2S;=|wmefxXQUTr=nhO72X zYJaUqU89Xxle%h~avD$hm6g?7%!ZEg5`XOr@{sp^MWzf}Uy_G?-Ph#F;pL0+ByLvUzp6qM4sA1~p)P&d zEn2qbIGohq0H4G^!(P?&sRpI{De8UvlIuy4&~jH!JIOcSzVAz4zc2NTe=7Yo&qc3?)_d^oAAMh1 zS?_$Y@RL;Y-~Uv4`9&Fb+v;mSJ7zPuBic{K#w>VJ+MwE#dR8ZI-(7TYZa%=9H%ERf z{HVyx_cB~P-s6CmYDb?+i#1bp>v}zjs}r4eIqR8FMO{AJw)Nau?V#;vw1jWa7Gisw ze!vxOs@Bz4YJ4@O$MMvmp1aNhdyV^udz{XK=4XrG2%XV7zV(Y-;Rq#efIg&=UeO=; zQ1GEmj-|%tqc1i37U=sLpCwH{BeIZZW{m$#K_j7KOYPk!pg|*N0T|G0u|ae@*Rex9 z+BUX`Mm(d_#2o9JuXPWMddW9l{dkFf?XlmJBA4zuv2)Tbsr9B9J1^~TlO?s}m3XS& z9{x7tQ7?_4-wFgS%$)F_3+O&HPV+&4#_2f-&`7NY0~)ErU_j$_7bNHz zo!!Qas?FPOW+wWqoV53$Rt!C2srqiBJ-+M5{NNj? zzio#zvlrTW_S!gR`N1X)p>RsjGp-G2l@cb)CMr2QDYB(X#ALi1U)m>_)NXg z^1*bT^2Me0p|xyPXteg{v7(Jd>3i|F=;dPjFs?7*d`9#52I_BZ;LN;$Hv50Qzq4#} zuiIJ3GvnnyPK*mQcT_-y#*YR_&@-cez4oqwYXx3=YIGLK<2C`Eo!7nb_WTA;I_Iao zhR+;$J}+&3_jT8S_I-HdbK3Ety+GRYV%c$p)%vdVpm=_EXm8{7>9TsFGdda{`{=wb zZB@JNoU~uO-tM4#<9#^RF;Ru-)uUgWc)l-k-n(dw=CI#L)VDG9(x`_v*VC-h<)?+C z$7qP7TKiL>9(F-v`l9^Y7rWqPYqTr2zWnB~iuY5|yLu8)dZ@P$q}O%7ouXrpUg*8m zZ!s>e`wl$)6IQmxf__@m^N#R0J&Lyn>JgeBvD^D=+`YY2kLBmL_Idxo!7J`_|6?e3 z&-cOF9HZAgU-4?({UeCJ_qP}^W2_zgimGbl^0|15yW%&Vm;e4){#I)c^&GtZzShv> zwd8H|-Dj5wVAS@dR6g<42*oeL>bd*ITqFL9MZBW0p1*%wHpcLb)Ov)7*sVwLjpnZN z*F2PM@Y!e8V`apWp!&y@a6VJ%;2Wra-3iXrVW7>U^l@iaWv@^VOqfgXsCpcaIwSHs-!X|8HfujShO` z1^K$Wmb&H*w~(Uc75V7vyyF)5%e*8Xzt3B4Bf~PU$(LZ1_vA~l&Wmn~zs{TTWjT^p zWl7cNU0KL8d6|DcM*anj&%Qu{#^qWdKtWorYI~11SkR1Y4lHPN?gkDtGE)Ns+OVSY zG7zD})&s2SB<~kv7*vycfOj~^N-Wb9?Mf( zd+wh43wM7#hVvPF$v05{%n{C5b7<4UUe~=Ng@qK01t)c1`Lemd45BWW@8r?_-%Y^-U=I zh|6C|!#z0ZJ`v@r9{u*RwD3(i15?kQ*`?Eo8jXZ9bB%;f+-Y{#+o`PDt=euqC#Rdm 
zwI=;T;p}n7?RAm&UEyc%fp&WROOZ{hP&ys&%h%IlsA{Cq_dC70CzTi{5IT;*x69?Ctz?IXi26%YX>4^782YPoJ+gb&h3pd^YkIGVdJnfwD%3FHw5t+;g z#lwe=_{L;A4R#oA^!_E@5C%@)HyM|&94D{JeKJoC5_cdyKD#Jgt6 zs-O7e$~}f}J~`&^lVOf{`{kS??mk)Qh_hc_+H;S{RC^T9+;xw;M>ad+?Umn-c>86% zJ@?2Qc!Z`ppifr3$K5+m2FE{NhVx9`%(wn4;c#Y^5qdPI&e}K7K4eYrj?IAs(Iy0v8;*R3SBj`TVPXDy9IWux#go@ ztI6F$npU&B75cWOmnBDE^UFe>sR{bm0;&B$ukZQy?i)~{k##;Wpcz#?u%J&bdsm}- zaeX#`HzWNmJgtJRaU<(?KTWO;|q1(MY(XETFqR}pBT+Ivw zs9u(7XD*`qF}fZ`tDw2nFHoV{=P*9g5#7&ml`Yx^UDmOdXF{S|xWvu0=-WzFy!YB7 z#@EIeTj*ATs7e@k(8#(M7|?d6C0cW1>q)c++Rntpw=1qzL~Eemm-7fZ2NU0>xH=GR zfo4>Fz=B5Bc))oTj)#ZnL3N_ zS*5mrslH+Oq@3;0Szf)vSECD`n+h7v%W_`&{*r0vr9Y3;c#U_95+0ZH-s1nHAbGqr zy2MZmbO!%v`ESHK-b*aM)5MMI_m|U|caMC|XEwfy1?wFO_mA2QANGCyO9Z{iYUkL` z@LQZZjIWfop1VG~qb+Kaqo4hG*!$KYQ-+7EN2V;zy5ym*txq2Ao^{HUpjxkJ#NN6^ zBhJJxdR<&M7}(BQ>jqcdS*EqQdW((6sq}gu!t=uMOX2OovH3%_`IQsb)`y;NGE+nQ zt@v3YW~~%xOsn3ZEoXF>QwdwzYvJAHE|?z8-lt$Q_VGx)J+Vehw87H%{?QXsk(EKu zjIOt(-IM9z(a(~k*N!aY8STLqEboiAc_2ne>*gMw0)B5UUhPHk7RttdDt~%a5MGu) z-(5V2yq=#mdHd+KtMrwhN9Ij`yeoP9=W_3n^7%K8Emz#&*N^>d_}|M`G7GY7z_D49 zB@MGE7f-V+3voLOvvBJy%|h#EaTc=0r%`3`@n>*Td>N5rVb`zYc;+=dqJN&(W5i^B zJq{7!GP3dWYi*RkxhwHP8y&q-sQ5?g2E5b~gI-@l@#Edk_5Zy*b{pC8JkPRx*X3xM zmgno5M7^EJ@0GC~$(MuqznuNI7!%|;VuxHL1AyGQ#tlK_l+|bG}D;Uo4B>s?mS#dffB(oW&#V z-t)T0@SW-K_n+}_o|*Z4>mNz2lLG@vnQ?TgcO_$}P})tCKIq+A8Ja zFRj)s(6?4CUxu~S%fj!iVixj@)%3TPSVw5AZ3G3HYY9PxKEJF;*L=i#)WLv`@|9@J zNSi{tpnbj*-MUzNLA#*wb^;{m8T;U87eI%`?fM1*+Rg&&erN5+@zNj=pz*uOL4q+&WA7rmQBvxzzfeTxyfgOO+8TE|rxxrH*Nlhg!X5Ph_w0v-QO@T8U;qDZ2Nx?2FXtg8Sl2 z&hS99q2i?xx+^CVmt5y{2+n?e1jm_n(r4BGGS{TnPc|){;f`d`5sJMh;qKp~aJIDF zFFHdSC{`aj?p-gh1}YR(Xy5x;$TOpZ-eWh{vwMak{Q`yaH~pqZplA$#-lP0GXzS4| z&HDS&X@5&Y&qMdUq4$V@2#p^Jkf3L50M?|g`6{0Ku|x}RN{pzp4=+s-$d;#GrB%Er zzKXdOzk96y_c-GIwnPDHk20}WCE~SPvl?3{g4SAl>nl6wUPor5PAt+H9pD>uL2c7} ztG4(Pu5j!74IQ)h-@{m1^X}Jcti5~$H%1ph_SYOZ&u9?e`rC6jW8;x(DJllQu%nXe6Fm z{_91F))Y%o0Xf45O9s_lN~*$rELEd=E0^LZk5j+$^*YI?8?x>Tch#>|<9c6yXZQcK 
z-H(%Ip4UwrNT(A^bswqrt$4~>@4(XCbs9hEgU5TXOSyFlS}T{HQg5w{C-1kVCv^Kp z1W!wSnVHu57D;QJRux-MnctTz=)1yGJEA@LO4I7?lXai9C+|-sQvX=ub*~q5TVMNH z9o8G{ZSTE}H8Yv<0(cc-YVZ7yabVrf-fzq%|NevXOh59if4m51Vg@K9cK5d^p!KnL zq_ihPKL%OIGh^gGHjD*yKbG?D7Z9P>W36a(X1U+qhdqr0X&I|!wQd{BW#a06m2Iq- ziQi(uJk&l`%)=%^%hYHvJ@=D6JdZ$K#-e%sypC1#aQj#`4|^Ny<{__R;b`0zD@P;6 z$07Jx=$ZJ#H^y#pU&ekm>_wmgni#jn|$^Z_PnuV9#x>;R^1}+kI#kL>^@_sqCG{Es@bu8 z%#oeTlVUvs^Kknanuood!O^Jw439=U6OH)Id}1N!E#^7cQ2$vCXDA45f4d_~LyeO%(=4<(|~%A$1Q z`|{uUh+lP4oj}q4LhaRmRk&(Tpyi}ZUH{acwU1@(i2I4=W#on5XGKbOEP8OOyHIVr z9-C~sg-PRT84caQq!;VzPD$%pUgf4y(8y}OX|=yrm3}Ca+D-~-w}5Kfss+})gTnb@ z(8XsLZIS-n7g_DSS=TJX5u_2thRxD=+N<*UP^6rFqHZNj{d=)msrP`^k|;FPG`;oP3?7osFNW zOu93|a-SO9GzO|zYiGNq*J{tRE82sXk4>hX(hljh`p!c--C9{!kz^5C#9;43jfQr^ zcu47C|LDPGygOT0(tT&Lqq3+LPpchojHYa!M>{=i5j?ctv069VoJSYbXPRNH)$@7= z&-cZKc=TJ9OuPAPHl)UlIz_G#Me8iH;8V#hWXGSCUp_6>3)&-3cvq0VFJAJO@@sx% z`fP7@o}J0d$m+D4Ry#`1d`nITt#xwm*X-om6*KAQW6=zq!1`4FWYG;IR;;eP%oaDn ztwnaWc@IB7jez6K=%OKt1S3vz#b^yJM(Vpuj2NXyJ({m(_58hc4FrFkgY%5;@vUEN z8IDjYV9=&7W4-jM$$}3BAKK)&X8-b~S?hH^{#Ng^BUN|P;Qgte2a>DuL^5^%4x3CoS&Tg z+*3cq>)lt)@a4;#sXgtWZFy zPQqU!u&nj_j)ATblGRm={?T6;WusbOsAuE!YcbNAFJt?z0TR=Zs4 z`A9Fjw(S#7skJtxNYa<5SIZTz5Fh%+EPb|ci|mxFfXb~cBOt1Vo8B>RF|UVZ?h#@w zT#pww%fRy23^3#DuOtc@m?eol5 zkE)Q-`qt{#(MbDM%xL8JY602 zuKUQ;s=2MtET^JcuQkrPcU@H#tryzc{;WjtTIYS|!s|X>qI_QlU`e{A=I;79=FPmxHr4yYZ0$w`P#@H%hPK1+@F`Z_NJ^>+vk7JTl0>S zoiStXZ?F4ScvY+ z7WQ9#Ln@+u=+fR-PvV`~96@fL<`(FCp6Ir+tUXn}EK5(8kKR08KK9lV=1b6f%3H`# zbD@0XJx_ZJNqSG5FGcgz`Pg{!d}KU*7IGg6WFgN)3jPrS5doBXoOdt4AG)m9#bec4 zMLbScOhGR~mpe;hBM1xt2$8t;{9ACzo1x@B1&mv%wpJR1nmC_e^& zXsd@tkJ+|f8jn}sVb_^V`O5f}-7|iX6{&ZnI`+1ltkwCnwhn6BozLC79u@qt<+4_$ zmv^hydr^NVwIdICmz=p7l=T+t&7f2UberkBBCq&b?s9JY^n$A*cb^k}I)kgzEI!=o zopQ@QyepFG9>~{4<_`|FKHc5=WTE}`>LK?*YVPZll!s>Z@wRmqoUs?^3U-4!n9Wka z6>hwSrW5$~QRulZXQ%JH@3FjR=?KAh-u4Jf?+`O0zP6%of&bj@F(PI=DE_k^&NH*0 zZ~e1rI1?8_$Ckl6c6%6o-*V*3vb8L?kYH_j@+H_>rhEz3mMb5>w`}<`j4WT46uo84 
zLY}dl{uUDp30-Dg-aP>Zbexq%qvqLQ+6i4E?`3t-t&g_)v=!P;iq!lix_z z&MKqZ8*jg9D>TDeg9VMZ(jY<4SZCjCw#BNx?|2cNHva8R4WC!IdW5#hRBuz#%{caq zytSb5$8TBQles3p$Lj_o{;E-Uz2>#YKk9w29?$!It{%br#jX*8$GclS-=+7D(WZ#E zx%zhazt%Ov(4KVN4ka|52lA*JHM^n8_jiSJJ2CFdzePQ=d)D{I`u-7rtsW|x(`g59 z>FXujD`L;{k>)mXudie79d)ch-86@_4>uevvPlCNlMy3SI zN=F{w1rH-hq{*sr8y$}p<5V0v-4VR4H04XMtVHFbuPasg_}fZWz69j`suty=-zH8TRmw8kzOJNYB44Yt z<;iia5|=53;*uAab(MV7Z6z;Lf?9(cRr>O!*j57bB-mC8Gm-ZyiJ3BtD~*{F9IHe| zOVn2?qY=;SJMybwvg-*NTdxEKs_LZfbgt_PPs*CM&iv|LI={FBENFB^6dY)5{Sy>u zLyM?(f&y)55iuJm(EH17knNr+uXzayw4!CzEkT7Y?-hv2M?iu`*G0jBMpr?>fj%!$ z-J4=K>+zEVo-ps{s9P@tpzTv2`b_HssBCyIY89#^{u zv@jjZs20UhuBit6FG>K*NYQmu%p)qw+Tc)X#iJA>o; zu~s7MtH&Bc&q@?+g3`8#sv{UsuyTAlXi*hKS_K79Z!@TlB3tvc{F2UmDu0UCn25?H zt%K6aj7S2k<^H|6Sr=1-q~D+!l|Hbb?3IhOBVa^T@n{PaU5~8UfdvIKszL`2G^!p4 ze`r06Gup=&HQPQtimkQLGH5+7IEwV{iX`8cDDao^E6zg4Rn%x9^hhO*lY8~lX!TU8 z`yct1-Hg!3C5HZViKumZxNbJE+#qf_#|C<9l_IG5y}H$N>z4Z3L)DL4`019>`R~=U zsvFcy#j=-O^%cb?t}cX$n;%_V?R6elA`_(g{Q?n)XYJ3k@`DD*p^<0 z-&>Ow*85a6o90LCeQC+((iYp>Led)T$bWXR!|yJ?o0q9_`>~Awv+{ngjZe*pY!+EP zY@e;-Qg=C7Y**zXJE+&a=le$ATfL^;Ug#uz%~92^nKb0vb^3Q`&nQ7((RtQHPJE$>X}~oOU-oah^v>Lm0$CE*GHtYBY#F)mXYVPGS*t{ zJo7zKC$w6q%4fbWM%Z!9(Xqw4gHZLm?y}MAW3u^nx&6ixIcLU%vE}@Y z_wAOxV@*r_`wq@CeaN?ddmP{hB~OGhT6dR-lR4>^34;p-7uxr~SIq%LD2ULR@$nxQ zq=Bxr+`BJ8fiBU;7B5XAK+ z5$ZdHJ{6u3f^OONh#Z!^Y^x*nQd;XlJx8w!QP0_4GaB)LI zy&RdoXqoxuwRbNw&X=HO&Uv`Y%sLNsoq6Zsjx+OY{Fb?AA}=%hT-=uV=ix3Z1DU8) z4l*&REaYLXD-W5twK9>1II3J^qK_&YnfRm1M<#x&jAWuxIf=&XD=X26XDTm#6&F^u zp|SN9P@taG6nH|}o3~wc_8LcUpu&r+lz;<$TGq_8Y8KU={**!-Qa!~G#XqXj!AL;; z>l$!|a!Mkqr`D$^;!ynS1@sU!wmJX`RA}C{0C+>!YuZ@1xcMKE@6#4&baoF86g=Nt z9`4YH%pKm)m^>Zs(AcaT6lg{B&%xmj&58)L78gHHh2o#b(^t@n;ve(F9~w~|fH$%98AazgS+$-TZzoj=lkWMw*E%FlhdJ%Ihmx+To|GGX6mv%52n^a;B$=o|Jd*iP@j zrS?ozPa(TskHPUw&+(04bp?*-HuPMtdd=xQY5HD|Mm#erexpI}LyydTubu!C8rSC_ zK#%k_Hse#Vf;yw2n?R0yheknlbBK1=>TZDL{YdjWu0~d1?8pA1-~P+izz5%KU{jp(!)B1pqG7Zu)3qww|XeK?*O1-|j~+i=9ALHEYR z%V!_KU5;BcZa=Qkh-b!~*+*<^n{#lvE4JvvHV`5oB{?eA4( 
z({{&9FS$7-BB%z@%6L{1_2u3i{K57^E@CxBmE2*(3rRd?$9O5dL<#s_Qd%w+5|n~ zx2S=tAGG>)Y+3 z5?S}z=yjo=E>)@8@AExXzt?Uc?Gd%|vGjDW*R%DSGdhU=zPya{{nvH;IXy<7AD7zq zy`eARBQ374>?%6ei#nED-{Hz@X>G2}k2*ZrL#FrMbca8@JveJ!R&PsNH62;&Yl?Qs z+8ng=(msZ2`Ru&><`F%1WFqcQhqaXj*NmPaFE z1)_15Rw5c}X+@&3dMgu+c*Y9h)v2M{{5f0{-^&MkMygnyErv(z+Vl2|W_#{_@omq$ z%_FZkp!K*{4BSgyIa{J)c=q#cHadpmnfb&w@m{@+%Klu7wlrHAmUA%=do)+`(EGWZ zhuzQhJnXG4$VQf}h{o(~Ni^aaTjOVA$P}RQ76l||MU1c?AV4Fn1sKpc+W-PI&JutC zjhgrHhqj2oYtF$L8aKN^fF7CEMB+M0pjwtzPM())>XC1WI-r_OdS?_~^R<$vmuk{o z;Z`fec24h0+icZg-PWc%HC~pV_vM$~mz|eO^@I7ZUld8+UV6vg*DuRxf4J1DYn8d4 zU!9k2%hwAEe9TX)nSSdtZR9+CMlbo;>U-*mm;LW$_wop1&Wbu}Y~|S- z+7t5A#g=M>Jvk$--5>S+;p?G#{(jyC-hQ3|j%Pds-}vR+a3n{7HapQr@m`tG5o~$% z_zKIiwc8jXueD?El!srvS1#V#yX9ezyk8#ro_EZXVCy~ekhk777kTM@Gm+QcIS;w_ z-kI2={4)=~_x{n?eRL3wcqV%AiyDX$pm8w*2vBnTRtfa#VGy7owMthqlXC_8g?rrqhK7}WRfw$?|Q_wzkOWzDar z;CRG}@o%Aic?TS^sn9kicctH!w$sm^?_u|S?B%QW5YLPO9z|=S%4F)fDXk~TdY<8g zG|(}}--^!CZ&ZfoyfA$QgZ1ZBG5g%;oU3XLF2~(Ba*W09ai&%Tq&ZqX^(@Qq>%0VtwC*GBQ)J^1e z={R$%+dGv#ymsNKJVuuEt;#>;$P37d(MRsitp?I!GD&(d1kiot$$nqXS^k}>G)_Sc}EEkOTkRbN8ZlFdzSSQjGMyK#Z$A0&>-JrHBid^-g||8bp)cx?a~W~bM{%)>-ox>)svdGb-oej^-790TW{mm)@ZD2 zfk@q3Agi=zEDTb_$3U$Q_pMK~wQn+vty3QUzV*tJVQby;kgw61itaCJWZ@r@t&scXqt4{O^9qrQcYB?CtSX&JWRA|dxUQw-z_W-mA>TPx5 z3*APbQ9T!9QE3G<%6`Hh+9Jxjb1JIW8jiPZq$SWen+O8*h%F?CuGy}3Zy))JxER`Y zk?QVCy=MQh>{6Xy#d_-U{awl6>zyZ-_2zq{EcJUIS(IyQeg0$VVciq1oksJ${Je0m zj8gsHr)A%jw#epdx3*2HEhiuOv?sLgw)j#c)Ls|O7*#l*eD#O5W;8zpp$uo*&EIE_xluvi_}6d0%*FKcs%I-7(CS`sTI8798R3 zqi?G8>TVb13bhUOrew8fz5k!qJt-|#+^K&ryEm^jJ$AJX`4R!t+(>3lE9?Mip6SMut(Q znl%^EAORF^+K0Xz@mj5``>uZ|vsHWBtR4(Z=pJ_ULW-5N6HvSNY}Ob&=pOF#qA_|W zAp+B1hiJ`=F7d5jr2vjlVs+?VpI_56ubc>(pvVN>lWk8AZ!OnIPqSp|^*9T8M$i5A z8=Zl!t>)5CdiM*6&^2=Nqq(?CKO2o4uY2?@^o-8=`sZeoylW6x!8L+1RCzydU*Ayn ztm$lo+_%HGiqP{P*$r=_HhrEDO2@ud5G6MV5SsWUCF~c=0P*%IaEf(2NKIEa*PwcC|iAuWC*o$$h-qABC735iyR?Bj`Ev>3FSUe>Xwg7-h#}j1M#} z4g&#tB>tj4F71&;P<7!&sS)Z9#3SERd4Ve0(hBi>73*W!Q!@Xx*K1PkjVeo9)Thr( 
zy}|D1`5x)@ZMHIZMjz0tCl{@HSMt)AQ~KnmS8(#skj6`Mh8tEbngU0^(SqZdQR5p% zk6PoLEr1&oZuLcVo0WhkwDI;F-w}835eLJ6lH}QGy3G~ z=b9C1+A|w#VW}EVTU^}pTk4&kl)Z-e^0r0O^g^$dRw>VD9a6+wv8FvuBco@0!>k&c zhqu(|Ow?$fjb($j9&Y(%bbI?56ODLgM)}Po`WWgzZ{Q5|oh5LEM$8F#LwlR=r5hkX z{q-Hr&{l7WYD3&iq7Bd^GiVtB=@u$g?~Z(j4}!lE;>uA6-iZG)#nzOdX1WmX3mT}nxofC)Q+F0^alND zTx_R|dw#Xrq*HUMIO$$%jbGK$`4Jr9NN06MS0^Ux$w}KwLSTrmWY(E0%?#btbyqOx zcY7Ip>Wj$!@ta|c>6w|vH_SL$#;VPq{-()3722j#VCeLD zj8@aChIvq@bQ*8vN94Rg8kAflDc1Im+X^;a!zZI8fQ zwkZ-(J7DVV1+6{ct=sL_%4?X*c1Iwt+eAxS+aoZSZHh!B>b68d%r~gN&4%-g?dDr- zI61tJ-G)0{mvbDO8gI$3NfB?`N9Z~oU)x{rwa_VYzj@LlV(aBGb+=5Loy?x~EWWKQ z%h{bN$+{2dnbudCvMl=`R}#eBPp?U%5#^Qf&Y02q=F(9bSd4~2H&o~m|@3l^siagrTx0) zIB$FPVH^MJ>D6T)j(ouFBt7dRvL)?(NVWtP9~0+Wu!GPR+dLeNNA)1GK$}eYeghex zSdSwSb5{RclWA!&;;b&c18oxB&blHCw8^yWlT0g$B+zR$nsvYBjodYvmi=}MiPnAc z>lnFZpWITSWnC)E?&SGYW(;)3m&VkymiYs#Tt4f9WGo|u*pvXFXGN#C7_;mDvqabH&~?5o8i&VDo5660YF~KV z+yDg{Hzz=V9+?Z<7+9k`_exh=wW@Ew&BsNGNU)~!T)r=-dLEZZt-jE9cR8V_*WKz% zeDi#D+DjRoUBZtSnc!ooiElaSW;u^O{9K~k59Osmy>w?g&DCrDY}|Q7tBEtW+HKo! 
zuCr&4%j|epbf})UQvFtM4Qs!%W&DsAU8uDq)lcpot@U_bZ+i^!-|oTnOCtb2HM z!YHb*e006{lHZ(wv@}p z`_R0OmZYEe(THd4f}cHL{zLl+(W~7cKw~ru?$EVo?jy{o*7Xr+9Fp#}zJ)$4@ z754F~5<363!Q;y5I^ER3q?2F=`-Wm8PkFFZZJ!11RNs=E*{GeUR?fxnY zzb{2L)z^IPgp-|@Wc7|JORe!u;qtcdQB_89-+UXD&*`;{r$!#13x}7N$c_Dz?=B-- z?}oF;r(GKFi-f;?9aG(ZWa0O!M46V>et7h{^w|AHn#Tr7*7@yhjN0ds^PIMM;w&fK z!T3Ybbj_Xlk@X;nT-e*ZD2XDs99 z7xy9!lqd_@x8Ey*1Q7}%bf$m&`vqyB@%;f3^h`fs8_11qtG;m2>3sPUj|iC2ZoIu> z;)gijJ^HS+Lc1a?@yEj`?=N<=z793-#~;SgT8>_C^ThDXEBY3USN~=sfu6s2CNo0t z%x!x9%2U0vou0d2{?qfWF;+}AvxinAI#2z7TdadE&*^3TPytC| zGJ9qbJ@=oAj>~dM(>;6RONlQnxf7E1KHh4h&iHVoh>vBO5AOZDca%po18vw_|9>mw zb$oC;Dc14B?c`|jMV<_!_#+p+-xZjLzZbt`N-&CV^6*FTPab+7ALU^miJ!8iA->AQ zCH~68?BlaM>~;K>hdYYz^3d1uUmh;;VKibNKSm>-i7%-UwObl=#-XnKX_e>@YA%WCIacf@Md&Gr@(J;1t)U9w(_ZjptcEyU}#iXy!P zxBs8LciXb-#_hf9`g@AJfHPuulAT;Sw!7Qkw!0nMoz7hiNu^U;r&g70*-yXmThPDB zd0Mq532>Ec4GozpSrY(3oCt#8mJIfDPnhDlD9Al}62G?-RoY)^yq(k0wrPu!p*4k;@BhzuREbJQd@?lz9UxCkvwW*O-~}5DtyY%DJpn%8NWS z7mcSx9NI0?^VqA&+Fq5uZ}|f%ysoWcluBYeC4cJcIz|k^tu0=H{@H$0fe)4KyG>|% zW-zBcRC?h3Y%`}mxu@24tp`tQ?m$&`P=rT^XH-7-)f00Pj%9gl@aR<1Q>FTYQG;}} zbA7&kJ=7py+%)j{_2rcOePe3YdBYsI4}$nl=MGqyxyrXHb^wb*&;z~u_|#nA5oz=P~$1~Dk|!|orbAB z8;AEcdpDAjr9B*n{kXjxNypNjj>EoYZ^vQ3-5!smWp1y>;hx&_Y`jZ*pN(;kKZx@K zvilm3@6n@pAZ!}9_wZR5xAypX`0B_!3(Hz;o`q|SZ;*s&T+12hCyjZ2P8c+v;@|rHny_i4VvVe8?i1&+RwkF|KGgukl;^_OpvW8^+IfedpZ@E)SB44Sd!# z@4sIB(eQs^bKXPqDgRbBikpAbUHFS&r&Ul&x-@=z@fYLk9}ism?j%a}rN*x=zBDd> zaaiRqnjjpTvm_lWr+0|%n?!OtaCH(kNdqzqYk(JI#U{znF#EKY!|Au!`+pVjG^CS8Rju z!tmo&Th6!Wl#eW-1266N?|usCZSv-D*80M%d#PpIE7#m;d-?99^?3swSl04>@&0*m z+vC2;UwKL47yI+8T{dCq#rJaInC+6~kqslW!HsP!**@DPNOXx~{jmGxX2pKq6?m-w z6O)6fm7?>_Q={?9aMAG;Yuj@J8r!AQYtHY8_-;)0fXdJ4^5a9CwwM2!3DsqEhVzI8 z=*$ZEL_&;x6&YA>uf`zj;PDZs)Wk)N{|A`-&GU%srK=;7h*c+NABUaYuyhgWQSt4KH$tsuL=itNZxU^~@69#*5xkBVN6wUuCP zu_IJqQOl5r0n6&W8e?$OD~qo|f_bqQ``X+4QJkuA$ou+g^FD5=&uiC?_Xd0bO{kgPfim>~le2Sx9fh?Kb#+bF{ zL>%~#*Jf+UpG3&fjjhLHTXUiu);ppyDb5pVb@zBAeWQ#9)DQ&_-O}@k=wfJV6wLizHVvoPn2|Ib_nYUTWa?K((y(QN 
zN77LIayHg=znzVB>H*jN`dAw7f=*7kQd4v;nOut-ve#uTuE@r@j61S1?ukp{;t|CF z8n-fdQL`5wjXJkDHN%%h$LSfsEDBD|5a!`OErU1`zcP$jIM*_eS-93Rlv%j8GMIVz z>MY$f!GfOO_Y7jqqbSO$H$k%FEiaYl@lbvLTwc7oJZL}l7XG47 z6kotoUfIRAs~$JjM!q&KJuv8SOZJ#E?x=ZOn{A{LhkMm8&3^sO{!FDrjx-5dq%ZW2 z|8D8YWKiE1$seMFH}7~7$92wSsY6qZZA>&%S4p~VWyWMvY*D67Cn6@EFY}060k8ezwM?qQ@~r2y#xT+@#O>s?!)e)* z#pNkQg?7an+je_VS%Gb;)^~&Xc#rKCv$1dOCiC&G?KVeX<;0rGtW$QQBk0-Qt>$Cj zx0}tTXKlBejdN``oQ?C?Zg~W5vJ{H@i)YWqT<;|7m0s?)M^d8bBOB+j-Fi0ezF#`KC+mVlT zJsXmZcbaLy&mGH_jHPCjubc8xe6%ss+@v{@;mRags&Gtv@J<|h6@HFPlRv03vjnBV}I*rQ1Yh3ar+EWO#Mq$?2 z!?!$si8_tr_#})PTlga_BJybYUTdZC@!@2RiiduyDws=-_F7j#?wRp8iK=;;rE`)Y zjZ2=7!rZbZ@oOPIX!P3?{oULoc^U;@<1~(p=DDzFERkVEr#j6^qp)Z!*rsu4_AKi- zH5=C{acnjX+i`9-=6tImS<<-2TEtliVb_>9Z(-1Q%6#S1b^H62Z=|ytIfwI%{PN~_ ztM}y$FQzX`98Kz!fNhHG9J7>&R<7+?J*|AqeGADCnLx4;rRz<}4Ytloa&|iouLngI z`ym z$){G&bP69d3-N=y?o1MBwpBv=o3y3jJR=5X?)bU^r^kpV<`*05L?zK z-yW^6e74-5E*aUBEF~u!<5IG+G47E(S($S4cF)(cCmIWsc%3h>Bj1GJRV?bxp||wn z?$!OJl=VVfN>MMwmUYgzPi2*t5|phs1$QkA*;v;lFdOSqLbEY0B{&=79toEnT;9Ro z14VXF;}Y6Zs(bZ#xstsQZ#x6&g+I?v#KO}VMlaN*y!JwDk)(W!dy0%^YwA*lvoYQ# zec8|a5c4vurCwO3F3p+B+JFLE@`@%4I-Idtt- z;-sX!WI$)**M~l#a7#G@s(kvn#9OA`&&;hp*&;g4(q)U&%ZS(I3JxmwcuP&9Z&= z+}ma2yUiPB<3HvtvvJqnG#l4(Z<~#M?k{Ugm|s^n$G-H|+4Dcmn`cu|JaO&Wrrv%W z4acK_Y&w?F!WfJmVU4)sTkBZa1C$6gd%p8%WGp^KE7@45(M&!zzZv=TuyeY!9Y8k5 zJ<*W%@J|oB6+F(KueE>GSfG5!)Bcm~kUrePkYwmaf_|Ql`O)|>+053+@t*60C*g)t z_PQ>2ve_8-SZCRGq>m1vd%m6Pr(i^9<(8V!n+v#2@_XTbu8@D%jAEI@aKJ80;?K*uXbO*UVyE~;V+FvX^Zm&IW;a+~UI^Mn< zU$<;+U#@F5#y#t*$q75I#xA{2zSN~)u zmHuMj)|$=vnk{$llceiaQBJdaWP54NdabsvYE7T2GNNC*Uc*$!V*MU-DG|NgX;~}T zxSPZ4nzzlm1%otI8cL@lZdSduojRv25ir;2zHb(qm*{x2^`C7Yd}F!JXM?as&8vks zzs4wKZGEY);WhE;3(nbge2tQ+UC5?jX*aSl?vX;-6ZnK}eq$pqhU|eMi=i>YkCbi< zYbS2KmUuq<_;Zo@zPy*>rBwAoY+3PNr|mi}YuVa+yDVl?FqhSAd`nr*#<)k;Wg(;b z&b)utv7Y(ew~SNiRYtPcIxc0j7vdfnlxO{5Z$G;D-Nk>}{s8VjXo^y_uhDp1y0b0O zR9f@TLZ|qo9oe&7YEL%CJ=!G;ubU!!zLjQbd>^g*`%wR)wVCGud#&(NGJ7GmNY4km zo?+Lga-D5)&SP1(^dftyey~i6sC<-1oR8Cg^-p%A 
za(#{F{LGfGyDiai9%9b_tuFOZ{GQ{>mj?Hxy~O(1b^^S}+P*eZPWG=govojDQ?>M!tunJmhw^OxTr0Dz%$n9_ z;hJl87M@eJJ`4Z0Rmj44%xYxeT(>HD*lPP-tWF-j+A3vXSz4_uOs7~iMH8HY!)-bi zwN=nB=QY9_i~jqEdoK6P%lF+L_II_OnfCI2wW_yah>ECLMizWndz8hPU=qQV` zg|kF5lK54@jccxP+4zIe#>&&^R?fef$KjoxJusv%8vQ;9 zJyNl~HzD4p(C0mB{YCSBd+BhV8l}uT%0rM@d15fVhJ)Kq{gisP#?Jx$l_K{2&qE-S zJ>VS&JY^gYYOQXKnj}+8E$W5XBH_{&KQr{R)V_OjnjiA_>L4@sZ5@bK^cL!k{luxK zznw)VnSNOkR#gbZLBuoaR_LWkCv&-HIU6EaZUCZhJ?Ge>T%&u)NQmrt^+p-10{xFe zxxVIMd!Q52F2tf$|csg`&Gf8})5`v?<8*sB@o|M*I0Ks)`nm!g#8d zkD{kofl(N@t;8q_)~v`V-0N0m6!xVR8il#VN-1W*i$Anq-s=Y+<2(Nrn6d;K=^M(K z5Vk+gmm^{H28i-a%XN!_+{43h-ebB-qhwaAI11l#l}BN2k%GB)5@pk=#?O#rV*BEs zqvV}@cErw4_Jf$*ty7$~7nq#bzBPIycEO@}DP>W-ZIL2THO`h4nHr^ClDSd%iu}ai zoz!~G zg4`mR;^YTw{Qv!IHrM*TaDMJj(Q)_AHZqb%`KlSW}~(KPu0suIaj_jQ+& zEMhz2;?T=O)dP9wNSO25+25AE^A#E;ne_^e!#Q8!QJ7owFy7P2?rU5-(~ahcaB18+ z^UcP)ekPoab?wY}1kM+h-{YiqdZwI@dF{+OA8UOkosTWshDcL1?y(*6-9MRU6&iy&(B2+%9qsJDrRuNP1r9Be&2@{aP)4|Ob=81wPqUsxHciQ+lOIWli7 zP+b)dU3ZY;AwM^s;C1sLlf|cUE(ZfW?a2~x*ECuiUf6n~YxixWHZvYW-7CGmn;AV5 z@6*ohJg-)bh96zeEqkXi-X96F#$$d+Q981qTffKzwUk#EA6;fbmluD!`0nESi%&1UvR_|Z ze7lR1uP(m4_{_cn#uvN3{M!CKSU5jH($L7_xE+1X|i^&ys0*ldSC;D{z92$D}b?k}RU9Szq^1d`Hz!xq_T2_6x&#LLGW7?yCBdb7sNR2$cmAe(Ed@CQPvE5qB_xhH~+^cTAJuCdl&4p%W~52YT@*7u+2rbtaIc>!Xko=!jI zO?q;_bc5`tneH^yc=u68ADOo%$*;`nx2^{}F7JKt+-3$nR5 zWqI%Jq0N*$2KJczPcNnHW#GW|VJGz3B&O%?+#0kv-RZZq(!xE~QWGuHU0AgJ6qg0q~QdLI3a@jK!^Zk$DwjO?+D z`bPT%sKLW=E1SE_5AEm6U9q?zD(2U?W-)v73BsjOxHKNOsM*-37I!4Jr9~c#y;$t= zcxBPYVy-RZ)Z(9yj;TjDA06@-V<{*eWh`EKoNTPy9w{61(qm;~+~d&{>z6(5p0DSg zr0BKpoIkd_@(<<_ugyn#E^Y70kuYeieU+cHI;KzTybDZ^EqfJoO>iJZXYiXt#SM0DJuFCCXLgRsa!16ldCLT)03nATDN&L zCC*Lpz)?dh(Vvv-)H%3&-)3yF9E^pe#Ll z7QWk0{_-%doeYk|HqQ3sWhG?YG@jy*m4)E!_-Bg-TF)E3V&SP(Owwhy|1@wN-lg?2 zB~EHTH`;=OFtPJK1ACt4-v#ES`Vn7<=hZmo29VAcG69h=KpQN$Yw9!Kij?uAKE-x^Hg(snMf zCvjTXMZmEhU2&>@v=zK>a9KxcoqAT$&#%yqcxe4HR#T_B_pWOVlbb2j(nN7k@cMaq zWpq(X-%c62&fc_DcB$ZJm#sOqL!u^+y;lq@^0mk-*$vi`-q2_nm4q=ETqdxQ?Q?!? 
zbG|v8twZNS^JEVW-nU2lAKT1d9_%i0t?L7vppvMEQYsRA{1x}1dVC307tAceFHy}v z_j!-T+PT86*5B{q90ba+xP9z#eW%&-ZROHB;w$GE^ojrDy?>+a32KLr`VO`-A^6#%CL})?3%@%}yPe$kdtAkAR z)#?K0#(4DPzEug)?6Ak>g$kVyU>Vre(!Zdc zR(ue<_6Vy0je^GR&9zCI-b3ZzO%Le@&wD#j(GC$~y`O8R4^QjyC$fZxittQcL+{Z+ z-HEAnt#2%c^y;8HJvSQYfb`U|C-fWhJau2rXPI$po!==LHJx>2o^Z*cApa%TmX$Bv z%9$w_HyW?!AM&uCn#;?h;8?Ej99TVHk%f6JSD1(EcrGyuZ^P-ht} z{N`sZS(tA>i^;^ieO8l)6`Y>gFE>qemXnE9r!(ZHy*_UQwzadMJiN!wit@0|&yun* z9Y1Tz!^&yq@+>M3Uwu}Ug=On3D+}N9tSbxCDQ97&x}1~rwoX>$w12h}Pa=8FE4kI} zwI)DpKT*AJkmwlh{gofs=rnTnOUE3q&ggmhECVLDIeV%*mh$nwpM@Tmjx6avU3#)8 zI3`_LI3;~q@R!n=g=vrVdTy41N*G>GI^{c60BEEltFMCLzF9!JSbE2e9-9Xosz9-& z?Qp2_>zfg$Gen3zH27IvKONL#sz$S?Zd4~uaJR3Xx#;$f-F&8Es&~Dqi0GvR4eDDf zAb5>@|9r@Iw#(ktfv4@?lOOc=YV_Vp6{bNMeD?}SH~Zj)M3@R`A5BCyM`deUQ6C9 z8IQ~sbGtW+vd2bOFQ2GwWVbNCuZOJ=jN5tJ=*)k zWRdr#TJ50MfqNCS?%gu4KL5T3)iuU>)@xta;Qx4dxgwGeKVJN62WP2_@6{u}U;Q=! z4qABc&;z93clzxd_dJ{h_QaIzJ=kmN?(>b!^>-ayui^D`-y!tH0&cN-Z3W+BPun$J zTHVVPI?43Th8p^wxe)Q=2m0yw(dg*4Mp(ATHY3)j??nrT)H^GnygJ-3a39<*GY+X& zVt{gO@pQzze9ikb!Rd*k2AkEhTY#Ro-P|cBfY*miPW!3@sh2yz@KdB07Q6NNh-qIbn_ z-tnzxw?L1Zo||6qvb+Y!*ATu1?xU-+rq$4`+6JV)ya~Hs&a0l9#`DfP zS;3xOA>4Mx;<;VN7x#5JaCp3p(?=CF#q4dCXP>RBx95zqRA}_||I=A#+$;of&usKf z+}&$IR8K{X%XvnfRBvJ2!w0QuE#+5DXV;pq-_EKd&yVc|+>a044bV*2B>dj#Z)Izy z8nV&I31PTXe{*>Q93Ee8t3&Gxp+#wMi!n@ z&yj~g^&XiRsO0e7c{Y~1UL+eAF^ZozHIwCjcv^3B9=yt-j>GO*fXo%0M`muBNY?)RO*3wJcuGB09*$CP zm50Ogidpd2DxdP>=QXT#t|Z$(ui^JPp^<#{dax{vb-h>?j(I&<9-g&&vpj5^6He>V z@^G!!tBt^y9c?^+7teS#OV*60m0NjOR4!vwdc(}w)b)t@c&MuyfhE@ius)^UF>5}{ddN&n z**dybPnkELQg4}u<9JnLo}8anPkC*R!6N7Noe;yPlU8)x+jdp?cX&45gkn3(LGpHqUF!F)izHGv}Ba zuP^I)v*vQV8n>?ZJr4!bdf+@7MxB!_Yk>1KWQj}l#(8|4*CS_Q$~{-us%Ord(R#ge zE`>3U(I?f(7@mH)lap4-g&Vf@q$qh}`Ss@^^iAiVCnVS}X5DXWJDx*-2xl(53c zO*<@3*yw#k-<%#el)Gi@ku{f7ViWC6ugx3bXRi)7w&})k%GcVPYNQjPzn{(< z)z1t{dzAP5KneS0s>!Ls=!MAZ;eHqWp26;iMxLIoFn#k}a=x8sOIUF`iKtbe*WL@0 zwO+n0_2~VM;n#F0%Q9^1Ru;x}-OIx^)y*tCOMT0Wik7&Rx}7)AsRho$Gq(*{c$T&! 
z3)3lHR6h94Av@gq(!f2-t32rNx9*Yi5=Msla7~Nx$A4w0p3LN_lw%+v$TTmcLXI1m}Sdv zw~%?5mliY+mn>`+hAj)6hi~0NXW^3t&w?)tp9O#J0rK$3L*!x5>rHuadGS)k!(`2; zc%Uo{QxBDeW$wYU@GL!C7N%3;{QmPl^`O$|kvSPRKjrV8o#1MW=bs>JJjOTei@av$ z`Nc=p3GSaS{?mSceDUx0->XA}d1HUiZiQYf$Dk=ksqxE;KV5ux@%_c87hl=0FD|~_ z_4U{G1>b&dzZv&n{?@+z?BdUc@$=o^{rAOR>}%-$V&~U?z4)Vj^M%cI53Q&CTgf#s z3O#ncZ_#P*mrTS9iF^Y>sQCC4cckEwFQZzoYxch~Dg*>8f zbp72QeYbX3g}d~ZZjJh>QehwO9jd9;+D|+C1G+r&dM>-%J=~+?J?~OHJpKnFrw80#;~a`= z$u=zn0vQBurqdZ?zl;Fkjp1jlpW8ZfI^D8Hm&ON9=Gc+6RH+d$d*4QC%U&2Q{M?fL zBDaWrd2JN6&BS+YUN=k*kA6697M_zWA`-rSY}_q*-IkStxA%?551O)i(DKqG*RAFc zO|s(q!Fx87X$5z&Qf(ZEL%_bW75AtCyR}?_Ht6j#jg`AG`-em#;p|dh8Lc`Skh_EQx4)p25njJN4w?U{RIsLvjiua}zI!m0nc_kEg;E!HGCKEvn|De80w;=TTN|j#EDir^e-)jHaMx&7=>1x2WJIR1cPXX|>Se z{Cw=0w_s`hdGb&$~fUk>7LCvy|_1Q4%5l`EoBg*EpVk!mP1}Z}Lli zs?C{|XZQD=1xow2#wCxUy+aQVM74NvW!Wh|(}~)n2!qCaeh5o>~7Y|*t`-*ZzGx&GtUUF)pze0&uJ8YB2Q z&ff7ls#hiC?qy1BPDh%I%#k#%V9K}JTUI8^ODv^43sZy?Yemjy73R`C%GSyjXe=@* z=|4XuMLW(#%a-<>i=L%+or{tP?aS9XNw>!F{1ax4J$%dKm#EV?j!(jk-<54hBo zb5RnZQ~9f@)zdhhf5NP>hi~~dND+Cw-JwSYePr3?Qg1@NX=^3DbaZQ+>S~l16}F`e zMPulZ5m7l_w~Fj!+a&3d#$&QInwF{TjmEx|&CwL}$Znp@N>VhA$p>fHND4W(po#vevb9*oRr&gU)?wrN@K5qQ=yp^C;nvZ#_CQLo6Cm4BJ zEez5vjq9sA+7fEjN8?>e#b^q8q$W>VBpn*Z@ktmpw(v)`aOwm0_DYb(%tz`3`#Vs~ zq3Y))r|*(?`Hz*Jf(Vqvs@< zis;C-wMP1T`Ge>=lujM-++W(jXzF`xoH#q)H{6Y!#+q%+_VMyf8jI~5jd^ZcN8>+c zdq>l9nr$9U)zo&6#(vDUkETVoe>C3OH;l%;^c|xq=hwsZGklB7M(-@0|+iciPg4PrSHotgT1ZO!75Av+S`3f}t@# z;uI#uSsIImWLwiM3Z@!#E-Iv9=fXeLz;jViYv{RXSZeUOD2dSUe9e|7YaGu%Vb<8g zw>*A{I*n_49Pis*`uKiyK@V@wgY@;!5>}1lR!JB&wycgKw%SUyz>yEoxa2}qT+lgq=-6=@-HGO6^9q6hXH`x7rQ^q*TEgHGSYw;?#@ z9nO2>v=)A9J(yF=_gW8)-v9d5;TKrRlQyD z+-CC3xc1cWP1mMh*EO#UUe#}Wj>g&+%*T zWrpGvuPyPSkMjlBP{#T2&!u&txgRQS)h$PVwSAR&`Bc zM(J{!JddKTmiJLO_vynZs+RgP3UiA-$=Y9Ng8sXQAmDit|v+_NTGTD?Wva?M6l(6gq}HFkm6 zm9?E<4}5V=W7!KG!}vk7dfx;WzvrVgO5>^PFlGRLBDKIZd87$bNiSmM!xLp=S=a-qZ- zKUfAL5{s~DEc#MsI;b~^*G=KnczMY7%FoL>P_+U6T)hzu8pp|uFlMZ3ek?5NHOg~q 
zJSI=sURyM26ipiU$lLj8omcyvo1#d09Ve$+dyPvyAH`QupfQ4<`Fs?$8rLIq+3u%@ z3Ouqy@n`kYYK@{uW3fE9MSj^UE{Zgs78#DFYP=?h9~yf!K+%%w5TeeVBwa<$uPqv( z*5QGTZ|qN;!wJkKo3ys`6l=Hwh}++#OJ+x{)PGMbLl z?apY*w(QbqI&QODL{)kAQZb*#aeNX+jV=6% z=ZmP(xK;%lEjhxaF@Ft&N#h>gE5hH}J?(*|C`scZi~WAENbvGv3O(X3C{~faXs->r zN1IQqo{T-i^43jn%p_dE21i^UFz_F4RRm`i_zJiT)g4R6lf2>5SIN~mv{?iJ>k zBBJ?tDV#&p?S)>6Tf+U@AzvTOZ*=pO@%|H&zDU^RE^R+lB7CajBP5`%jO$&aOUqTz ztDam1S&K`fk-F9hI;jA28Q?7Mmyp%DwE8ix!lmlg-?2kp^KA6e^Y!w)#T&`P>7^HErDxAx;GslCA{tW%4UckV0w(zx^{qh?%tn^8FBjYi??@mAw#oXTG9&BoC% zs!m68ud&4&YR3`Py-VHTiDM$;Id&23u3uWTG>wbmqEY3};vrrhx@xhPV_!=xOz-Ye zUcT-eRA>C+Fg!D!b7PKp6#lah&)9h_@l16&az?6csO@&tifo+|M!_dizp-DeNLKeW zY84(H_5_S^oS$ZV=Xukd9nLa8Nr$@(U8E|A9dfLOXUHgZuViMhro077UjHg^oilQR zvUubM=N74s^T};`p2D+dangG?_p$QeI`MgE{JDImzhn_^LT?KoH`pGs$Uu+duGZJR z*m4%*Fp-IgKI_u#G;YoE(lU=vO&>XFQa(L{%XzY_8MZ{D`c`9Jjz$<1J8N9Zch7DdU<9xrE#Ucx*p8iiWb^9F1|g zA00)(_I`B~1MAR$ ze&+l2QIt&g^P{lV`~6WkxAFnw@GonK#^GLigyZ>yag@D}Ckd+2>xn)5B(@~F&kkz?<{9LF1Sgk+M)F_Hh_g%H+* zeoXNzk(Ygj=A5<7SkFu2?<4!gYA!3&nafjKmluILu;p0QCU1TO|xkHZ9 zgC5JHY^uj&{FmIg_cRp0d*I8mX#~Shrdv_(P6@8YJT41E1UK^X4Wd%}evNa!XZiFk z{L6J2O+nAP<*k?2N8>3GKo;jjgU0O$VH5>h5yU9`%Lro>=F=jOan!hfpiemGBY z{Sb9cBbZUR>j-BQ&eJ2HaTLv8ja)`Z<7jyw_lNU(jF`7i>RN_!V`zyhZcoI`ll!Lq zSN6;ITg$tnk@0foTx#8pio(4<)g=@!u8M(NF1@u_{l#HV%1xDX{>jBsjHZN@1*J4Vr>3eVEhBOCLw4l*0#^h7#-XRYqD?gO2MRSVyqwR?1^T=>Z#zP3Ca`Q)B^PDF(-4w#sS123Tas{(7?pfiumDNgW-1fRr^A{$K>s~nz+tdQZ?X)x>jdNS)UOj7G z^3=80&&E|p1-ZCh9l8Z(;h6jEJeindnnxX@W>_-qd2?MxEm^b6v(?gNji=aXMLW~Z z18tGca%LmrHP-7*)80+-LLB{qc*+Xs-)fBWmVzjG(72pul&o)I+`|V+B==mY)!*~2 z@M~Pov{fPnv4xrsEBes|zkEeG<89%hR)F3?5jO{ZE3zSeW$XBopV=7qNKtK7%ImQ7 zYU%4Tvz+w@3-h2!zvZ1Ywyf?Cww@2IrjfeFd-i(|4A~crA6AyPK3aS^4>G{BZhL>jE*!q50-CxV<&g)0ps*bb=SjPMx zvJmIntMYajJLev4^_PjM)Z|p7zh30GA|Z3WW$%5Le9gwVN2-+doZc<8rdeQB z$Ld_tYj^xbXwC^DyVWn(q?gV;JeQTuKXIO~Mz-XwuU0nJt`}>xsa6poS`?MiGuH>&G8{8o!Cf24Z^mj)U5z4z<{H;(6mc#56UzttG$YXni+sqxs_ zMCs=i?ya@U#=BhGY>a!>K5osm#v1eHEesk@nXl|iPnEYvw9h+YNaA2JUOn9oA6vCy 
z&n?Vy)3^tUcL8BObYkh75uNh6zy7-GZZWsbADBgMkBMH`i-I>7Z#y9PK8yWIuV;X- zyu8XC|MnTxmB&3$h+!WZMbEn=@5WA+$jtTM(|&w7r6T6jJF@MwBR(kd=da@Z(!q1y zc_42vhg;vzZH+^SJCF-{;6J_i#pv#_B;}r4k0n^z`PSK;V#np(WzX++yeN+m^Y+A6 z1k=iI@iJ}&1#!YKPoL=Y7pXgp{1IhBO$2yjbMvhgk@DI4P+NsE&(Nt8ydZr*+2 zI&zHSsi@be$TAN;-kRI$_?s*&YmsRdt~zqan^B2uv*uAE-z*Hv$T$nrG;+?uGL5YB zuuLQGEIjkbJPXgIo#S{v;W~29!&d8OiR|<6-5&Yp;a=MrJ7?%tbI58;Ns(pe=&OKokEAf$DMmbA{$PxADX{r&-vqo z&YpcEkf}50ZA4DVdSLOg3@2h#lKH;-`p^@nb!W7`#++q3ciY$Jna$RFEIsMSF=%b| zn#%Ou-yTkq=OGt|RQp@E;%obyfk`gwp=B(7G}v#t*4is>+o`E7SF3y=2~-o%Ka%&h zSO?^|KeOdK31DK+9UfU9ptC0J^IrBBP^(;KDkvA_xPQXGka!!fqXl8#3zTW?jAVuUmE}RP6OI`El6>E5yX~x ziBG&&_3E95JU1@!zWPi1^@GV<3tZ8pQSd#F`?7-H&&h(Lyh>ZVg=P_phgT?gaRDtU zoQlKo+~k$JE<`t85Bk9V=GuM}N#x+e2kT45X;hs2rZgW40w~zb4!og8A->MZ5lcoMt;mQM0tIqcFGFUPW_yA%Q*5 zy>}k`p2fj#nOY#r?rE%{5+C&Jq6Aap6QdKDt_IG6G`3UiCR%C4>FqFNwIdo-@)xGrs<;HS9s zfQhs)OYZ-+tifsA9wm{>)fk1jMRIFhpkgv3SM$)`XUWZPEP4Lh(g600TRa{meakf% zg}G1eZ{N$cz?Ub`csrK3h;z9Tzb$Pzh1a8GbGa6yFtZ@Wh9D-KP!2E8X-w}++5>w|&g%Ix- zG4`+6GWzU?87-?*c$_2izKZ9iBDhN38g+!;bHi7!p9@}9q;rQTcxWnbD)!V-N&467 zo+9b2tEtcpD z4)t!#$48}_-$JJTCu4SNK0ih-y^{9I)`EH=Dm%T7S{5wAT1|0a52&t2R%4*UaY5~U z)4fR4BcqPHG)O9yqkeNfHu}Ib5GBOw8WERANXVn=L;EcZ@*$t&wQk}%YZ*O@EsIGM z(R+8GtTI@N)r_8l-z9PjPbF}~8VFjPq|fiN^jiCt-AMlKPWznfJ>6~lpltl>mMR}B zTC}#0%Er9D&&tNCos)avQP{TkY1tGk_i@=6*Yn(cjhK5pX9C%eP4gJ}*q3>Ze4O(nVK_diW*2EvpE6=O)NK)M>8XkHB@O2cBrssO(zQ zTm_etQ*ILx+tv38u(0G_d?x*QRpHY}ens>f$*MV&^~ zK53MEMcZ8A(VSB2=qdYi`+RwqsE4jK5P*+ zP0~R!OVk~yXJ#DLOX;`^+9+$r&dy(>iLqtkXMxLEYhCLc$64W;!jF^+VaQuUVd$}} zf{|xog+Vcc#$&6TWdpZpSxUiZ3YJnbnt~oFl0~?-8p-=JpD@?q9z0q=wY4R|ds-?V)rl3cj^5jUqMx%C1S=`6wF4k(^WxNHRJcmZfT$BY@5BbZ(US>}5rTJgpD*xf#Y~p;=)!}9k z@1>EwSjQfRjr)iXY{zz9JbBzK^UFCQtKs|IiHH+n$uzn`WhgbGeXni`qDI{(aT!Af zxbd{6n{l#M*9JyuDL2d{!OV%cIF90_g^i-S$Kw5Z{rFy+JId3ZW|K!zQzU*}U+)GT zOPfBPuBpvG2may{Mqyw2j8T~P_>@sJ)jnqw&UK$O4*R^CY!v1epQc@bzuiEtvIUBw z8jaC=0jkOGTWs;zVkFL9=#!#Th;nl$DmKuaMYo{XFyiyN#-(s8F4oD0{~?z0leSUX 
zc$&cl9_Eml^xy7<%cg*6B@Mv7OF!A?Qx8!&3T}RD(&HIkRU3=$IILGOqBuit9-o@*i zsL^=reqZ#O2&cyUwGbwad)7b^6*s$iXLZlF(sGTmX1$T3chG!h@z}Bpn<78%Xwngs z`*Ot85%Ki=rnUM>Lxtaui;2JVc29pK=kG@I(u%*mTJ~-cuYWbgEgMb!w176{qvEA# z2@rgZ>)y^QSB_<5vgs-jzSFXmvB{=Gdw_UN&VN~wCkHUjfj#wZ{6 z?HQ$f8rCyTBk&&2Naa(Ym}9!X&c}N!qm_?+E8~@q`B+9QAA7Orqu0AcRCRCDu~u#? zC4k^ylxEygP?1N?TpSdblKRQC-SuE$4fe zhd(L=MpVk)Xxz6dS@vLyy5*{krl4n4^Hx);Vbs{NLh&mjYBa7# zn9-{tY#Q@dL6|h|;lBJ#$#3oXS~fx>@hxw4P$^mdjS>f2;aJ-_%V4Fm7M@^UscED;qf>%4lM zj^Wzsa3VCrt9t!3ZmE&|vYG+&m8~5ye|-!8yNf?ue7iykd6Je=pq~c zTJ({Pb8mE#O-~)YWaDayG_q+rJr2pI&4f&k=Y(F(M{aN8-3FMqSSrLE)Dd--gU|z805uSBI`YZ*0A( z%5tj^K{2}}Rn~FZ@gju&+Cuj39DXZ%k#()kwxDxKL+kT#k{fiZkd13C_nD8g+#Ap5 z=H-Q3o%~tNWo64MJQo<-S>8LS zl*)*R_-LNziGWFbi6w=wLz=`_}Tb{jp5l3tN!6xqofH zxUEJ9B&5V&w+F9!c1CoRn^wyHI$|E3RC;9!lHb2~(H`cJt%Se*NRGa|v0nUjo+gWi z_{dU_dviw`g3h(J0I}I{8ZYEydmnZao`1Tw$gJ;H4ZCmhW!|@mx6-*& z*B@b0XT_Un)CUutP%m*~u`9XVh`5+IDCV_lzau^G^)r>Z{utGJsnU}oDhEwIww40= z4#M3B0juYgAl}K4JU4Y${W8H$(_fZ;X%J)Jk%M3Mc#nWx?v{EauYFQP-B#VHk{y@s z5VC$TJFFX%1()m>rr#0#`1R$fwP!0C$d))((YVIab3r$sB2IJ)bRrVR@#8^SlpXH1 zQ3b>D{G^sw*M@$HD){F9Hfkn4s~NvL(R{UQ(6}xy(NWGe_APlDN5N_GHjbXTJdVQL zBCoPxbRBW;L1$HuxX9nNz?7_LB-Xll7i*}FGE)Dwlf+&U)kVT4d#c>FSNYHVdL%aY zy51du{4Z~VQS}vxV=kS)HEX#hi=#BuJDa|+7`w>oINYbm@;GX?WqlkCb6psPxy7o= zQmZeaI?=bll4aHywHFq=E8A5?+F^AnkF$eBp|9*zguDWJQLK6#4o<;15sQP!xzNw% zm1B1V+dcJSvlx<&EU0sB{%v_S>F_8UCLJGzuh@W57?0VAQ8W}AG74jDV@Ba57fD=p z&#dv>zfG;hIEw0g)i|tk8#W4aOBA7q#IwCIy*p~IN7?U^A@O%>=sAcFjr7cpgIuY& z5$E-Jp9Nb)P4aVtkB0h7k?c()>UoN1CRcyo@L}COQ-8ng+Llrr&SZS(HIx^r-Pk!R*CnxvNIFv#A+(t&(i543u%^8>Ptu5pz?{Lh5 zj-_F0VaMUV%>s|4WZOcIqhe{nN8_Gb_;L7;d4O?Ltb2%Y_}4wiIQ&ZwGY*C6H3d%H#{AGe=u z>n4`hFy&*d?fQtjPh`K>YoPMyTC(oiYmVzJQP-rb)llWneqLjhk1=!aAUmNk|2Baz zY24!-OYREGN+(#(YVKL?gUrw#NZJ8v^qc(;E`B(`^h^PsLhnG5`S4y+{TU*!+t)h| z5$`i%4e+Z}^g?OGNz+midUxzC>!TIdIYJ8zt+7TbPnPeL_OGkp$8%1QduAUWQHm;Q zvc@g`MqA1){7e3i!rZbx@#~^>(74C8MRQ6tXk6O9Y>aERF&k&T1d5*;_eh>>Y8^}M z`CfXbQD@k_{=t1ZFG$dl*W>s)_UVOBg(RnyI<@S#_)a4drfS32cCs_w;q_8oqV1?# 
z0+BpjN_y{nTO?5uy=LiJ;7PJI#?Mlad#H=|9HL5AMdNb*(H3+Ia|?eY4YlT6y|XAp z@z1+<3ZgX1>IvSG$Gw^)$Spk7x8r0?{2V1&OMZ{S+_D~9GF|8MTi|M)HEI>2WRFUy z$M!46cZo_tRsQLTd3AbPlLdGkdZr&AGiVM(4aGwB$0CB80W&gU5S>f?i;}q>J&>NM z0-^_&W~%YN`5rnBd5rkrkiUvscVPj?Q;T!sA=7`4yt;T3kD<0^d03`(5%IIglWeVV zgjD8vc2TM5QRBM&XL;Q%DvExL#=O*@(G;xf*Jvu{`Zpf`s7in>8SK&DJiV2kYE&$k zwLZe5QBj@8ileb^$C9HcS;m^9FwbMrQF!ZEbrjAzmK}w284--a++t}I1DDm2Zg}lZ z-h+c>= zD&8%fS@*i5xbc3)aq&_qsnD1uje@W7wrZE?^%q`^`Pl|x(s)X>O!llx6rGc0(p)u8 zYk{(7m#@p3m&QFFMv->Cm*EzJzn;KN`QE<9%lY@lo_ppe`<`#3mh;S(tUY{KTW4jE zv}fG|Pkhz5oc)JYZhK=$TC8!;s?@9evz=wTKjD6__sL^7fmX(N)U(Z`1S3 zUxD*ZD{!elM&X7lF;ymPmy~{5m-eSu6ZhB`dEb10vTb>??d|QBDW8IC)1_Az$1GPi z4QrMy8|QJ$HwOFC;^yOBx18B{PqD1oG#s= z9j!DcjcfgPh%vRgc{AmeJ>pJIGBdIG-7V#Am$$64R+4&DB6hMdbg(L~73R$roZKOM zc=3y69A6rLGiBAiTi$GB!!qZ9r1MVTv}=yK@#M`-bsf|0Kb$IIWssRn+r~q-Uv|@%Ln(+8WS_=$FG7^H2`obj4y)G4laZ-mq_zFL< z6-ENvsom18c8LChA8Y2X_w~xP!{f!Wdj~Cf*7efBBjX(vGpxq-fkxsZWTGdp`S?Ja zcjj!BOF9a`Go;eJjaQQRK8{s>*|E+*$I1Z5Wv6|GADdiz@9!S_LhSv_R+ZR?*dI&L zV`&OJ>?yAm_Srx57un!7O8*msNT48}X6b5)=FV)_8_&-qV1WgX_FcD=lQd=)maxD*9gB`pKm6smY@69^3ijuQn02 zyfz7GhqK(Xw{q!}J$d|jv@!x3*R(t<*HZTHc^Kv%Bn#7)1t~upQ5zWMQrM%6T|W z$-iV`pXX=taMk&nEG#7-GtKYhH;^qcX9zQlhhO=@X^tu%-|_rg7XDN6b9uPyGleWHM5g2& zZ_Dpx;h*OJ^04r7b;%EA;hE?7MrIb~xJU2Km-)vmE-$l&x8*0tQIffjpXWETW>x1u zvv92CN3(Fv^QU=u{G?y`)hs;A{A(7bQ}VN_nNk*l-0~^kDX**Xf#tWcA?!)%5O?2B zWZQGpb7-#&b~`jxY*7Cm;|Gv)HsphcmYugGN529Tv5VWKwbdeq>O1SeuTn?t z=d;v}Afsfzyn0;uy=4sz@kY;VrkEcZ#z#hr*JeTkH(bdhU)nsWr|84RSv>HmwZVd2 zbkg@t+J&mvHS>JNvSZ=KKuCRMK?#bLUAJyX%iWDNx1>&VuBvNVd z*rJ@iitmCU${q`1%c|sCueJ5gwqHwm7>#i$FZmSAW?{Z<@0x{oZEu@}>)75m59f4;x_@sxin7!8 z&Uw_V?X5GhakGNGcOH&G(JIHTpJSAq(j*^^KTfR7M z4|xi`iof$>oaKD7B;l0#mi6&0=PBQaM;eziYTX0w!TwuZA?4xQvN9jmNfN73mP|iu z)Zc47MJ|gby=}^=kU^o^ru5}#)gs@eQ%~FTVo`~v+q3co1~+8rjYeHjPfmx7Gq?5N z()|N0?{HFgZ1Sr6cB-krofDS$xgjeF*CdVoOtWhyHEOrujG$8S7(iT zPM-x8+sduU9&aaDwQl&I@mZX17VG1d7p;up3OD2FtH{|k_Wb7H;d(k?Xg1!JmP$o`#b6+Haf`bMlO)%p14tAmw_k#B$V5^G8}6DcRU?cW87FCL{)eb%=hn!|G% 
zU2|Mo`s4h9qplmgHc9EXBXPTbQF+R)SpQa|cF9#iUDL+m`7NHjO3jzzh2R zuAigFpfe-vDxO_&RW?SCl$)42GZ$8k!m4o%`?_wQm!hTS)Ve>4 zwl&)@3ipx&Q`<3$j$&IzVO-jtQJ9yuX%yxb+ojk;kuv$Eq9rX5Wi>SVJ}qK5EQsEV ziyiEZQA-E=C_nC*hG>}V2_omezE-Z;`Ap=~(Hg zNZ(HCz_>G4$Nb<`4W>+6m*Rlk}cRp!kNp1Kno=d zd6F&+5z?3!5sOO2q#BoUpXGgzQQ-D|H0E`^8BIm4SEF$+^=>o;OT8RTL66?%>8tcp zWAPqY{1GONbM1($5S1m+D2y8GsB+Y7Im6DiLYke%Q=-yQvmarDG;fW?_9&_yHQRAE zM|0KKVr$}UjHuDL_XIzBokWL5k4ZQisZaQ`vDOs;*|=IN1hQ$`t{@nJnX@`}t@>4 zZgfw~uFN&QmL5yJy-%ifX`I(uWP>|&Kr2cj(Sw$uu^?UT{d33r$3W;TgScL ze~T^}5fzR#Wby~pvAwdYTPoI7f82j=Qe#;=E2a~jzsl;pu=_Dm{lh+pS|{F!;l=?N z`XSfn?BqVEn0sS9>+gf6)%J*!zIE*5m`BV>p|r~@o#`z{om7Mr5hcRsh~b;uU>bRQJO zQ%kGLTh3r_?3^APeoKpZin`VaE5FQs^`80e@sOIiJf&_>;cu>Nel4&BQzJcDs4gzn zI)ZP49?w5P?wNahmQYkFE2nXtyU}sq7WO6gM`3PR9Z5Mk2Xxpwa<)JcKQu1+(5uCQ z+(Vt_%U*~(wpg$pc%n(;gG21&-MspGmJg2)S^L>Lyh&`AkHK6!h&aBV{<>Ey40kRX&i%tH@L|Iaehb^mF%JJ2fMw9 zdSOJf)JU(KH#SfDdN?M}-R8}GsTsemm}IIIqwLX^){Me`OsmdC!+xzBMd|5UIf|mC z){es5;)&vS7>ejM?mMrI=9Q?>c>8&7HU%XEub$I7?;VNRcRFk5!6T_S?YuafqV4nK zY|OWxH)m7e`~Ky5^a#9UtCxGcY|Lxt+4(rBLEAd-&c?oe9-fa?C(rCQr|0G4a3f!J zf7Fqf%N{ZQo{;>R#_jX?>>0P5*XPr8mn-7pVlJ(|#(At2CwIc2F{iRq&^4A=DQ_kP zN0zP7Y&7oawH&1>!mW|BCAZ7dUd&nel%vU8$F*3>JBkwPQtgs8x4PGJ7LHSTL1*Dc zPUwE-6_KS^bRIt4cv2-)-rA{pjX0S2Y}#u&YliB}=Uu($y{PlBp3DAbEAx?QW8Hlg zd3n-Q1%WibF}PGxyljSYb)bgM(QQ=J-&0L~3U77ppiwa9eOLP^A_rBveD}h7X+E}} zOT1n$IQ`th8VFh+-)l&5bg_T(*B6LUdRKt6F)S?l#H|HtwDX|$b775M_HN{9{pIo} zHjiG7iR7@~V!1XNz23cT#r!o#@*?G}@9T??Y|cPcPvth)kBna*U;KOXx<9gaOQ_EM zu^EEjk|5?MrlNQDN7iYUu7ItwxP;gW>2F5Sjd6(kTkx4X9e($@joW-r)X2S%dd98v zIXrT9bf<2ki5FLqD_((Ug|McXtr70hYYsKX%Mk&R?PCx2!YnX9(_cEq zLL$RJer@o`BVGmje=>>q4NeUF3@wUsYENI7)r*3c<24mQL{aH`D`Fj@9)CLk4#&cu z_MC`<4aJY`SESv{iAP?V6^oBXh$P8j^<+@yHfP>*CB`byP0?S2jI%V#D@I9^+eF@} z#O_8x{l#RI8pmjOSXc5?(QwItAy?6Gf!UWonoFuUo1q1QT~176^gwXQWAmy*q@``y zSEw4wb0;kjlu^*=km52If~(Qt$He1;snKE2qtI4O5LAs0Jtn3TOpOjZCgKoGjSgFJ zM^ENL5H&jF7>_HM8u!SucIcm8d~bT$*CFIn(`!1?_**eVsSgdRUU=`bBA=QMr8?*5 
zi*F5Tdp=*5RP$|>aKX8Eh;2N2#2TxXuixwa*E^pPleHGKx+0}-MgGg8W^A1x$I$uN zVb)J0dC{?_>0Yq`jGa@U_ohxNjimRhRIL+-DqR-jP*&Bb3XK0RQ2W^qrP z2mI^;$iFwue0z|#PY+%uG6NZ((vM!*`OKhyZflyM z(_4_Ec=Ne&gglX|%I#|L(h`eyHpR{0xizop(9!+b{Ya!(qnxvftav zmou@L^MJW^Ca(YPom_bbz$kyPhN)i1zaHd-cq8JRXO69ZZz71_A69|NiSJFmzw4Ij zr3dbn=r2tJy~^M_)4yMgpAit*J$pCdde#1H>lL?C_EXB*H+t`a2PPZr;Ctuc@MB?q zWj5nyqmFaGUi!YWwR&vu9vDPyk-A@zmGHCM`-lJAUY#pDy^X0}-6-9!vHg!(ak4yL z8_&okMenF!=gEGlq+j>#Uhd2N8 zyBNfCcD<_zWKIrcW#eqhHsq+?f@7cp>&m#|{!6>5e!2UTapk#jXPP7K<@Y+&xL5`F ztRZn~{F>I#x7W9yN9$RtD69A<^BJrxwH*%va~{axy^Fsabk3yM-+QDSMR&Z61JSJ& zQB}+-+Be37tBXI`?_N0o68RC%C)m4xbnpcIx%fHUXgblm8~Yt`>tar8t%1>WyL`Pc ziTEa9?Y=jOD)%+NF)Q73UsLhZHzuXr-eNCab_8=v7qdfP{T>)5vO2tIHO&b2a-TD1 zvJ`dhV?K|5F9sIwJds8AT%(_^k-OsNuxnVH*ZdTe{UJc*l6uLqZO|<^~Zla z$kh)vzuxuu&iH|?fg_Qy(5hJKqAc}ymM8uue3c%)^MIV?J~TgoujS@l1XtO?;0+@4 zncp3}Hg9b`G|9a-Ix~FtaV&B1UyUbc9-jO3996%b)X?I%P|v)V#u}E+5x55vQCnEU zCLhH)AN#S0tZfaO%oKb6w}%MoakqV&!oD%c8QsEbtL*{o7dlL zCY-TFto0f`P6ycA_h;@rONV!U9hMDLOhQ~qe-mEf?a#}`tsxNuKEAKBiFP5%cTUd` zh)ut-d3(e|*0SHy>U73Wh2!|K1^4TBEBCb9N3@*Xt5Vo^U2WaAv_=|<7&$+oA2Vyx z(<@6bHGXO`h3D%(V=WNfV%Cz?pYfPO$Ae&f*%kKsyE$xj%0HPVM8S2N$IG~^9`^6G zjp!P0<$9hSP3yP2%bC0~i{5_+NN}|u_MLb{jf)`Dzk;3$Q9RfE58?ngAD#OU-KxZP zTghIY75&s^6MI{-;G^oB$~mGvov763`%xV9+=_RBr*eYRi8$DGRbass1s@;oopXtF z&^U4~K8J5i`(7SqLlw+Z`+sqIw-@g;w-slD@wG&9ar|_6QRiNgW{(Nj39-kBq&Bu< ztfp5M8w*o*8GkhTsfEI}N6q8PB-VKY_e=F`6nxHii1dI=HY5XXtlg;XoLlxds<-8m z7DISP$~ak9&Ut!H8pZAUme!X>{f$-P{%lbYw-q^Ih^lpwBx$dKcj3gPyp0+)qi<}7 z&t2V@b~=)`Ds{GaUQ_yi4{tgmo(c6_u;0`rPX-lHoTU=Vvv2ew z_KZ#RZsorZIFLiHxPE;&&yKiDAPM}|=75&Q@rAXxGP=uM=O~VojULyv)jIB5j!=prTWcdG4f~8 z_P*e}GR*z`8>JQhW6-e75&c#Kv&X;b?D75~5=2@acl8<ecY;E9%OC$e;r;q^U=)W$W>1bxx`W=3HVvGrRH1vLjE;kFe{Fx)CzRGml>* z&(I`=a~#;fpAQ|qRLkJ;V6-iuu8tkW>vxs^b6Xs*cXJG6=$K<%q${4zG4ZpqGV$}; z%K!Y)BvW0)zqQJXFYQH9E(LugPuxWdDt+_Y!=K$5$qrdo3$1YwxcJv_O!})%bhkCMTU_52ypc&-vYy z-uVhcd=}zIqcASN+`~obG`G1PbnUSugW}r!L?7wzU6E68#GRm>itUlj?ByY<=y?rM 
ze4As%M&b1sshNIY|BdFOuYL~Hy!CLZ^-e86Q3sbb3%bxhGjEXvADeX@kQtxLkGRy| zd20^2$L`(n<->V%|MbFO5>n(gGirvhXf_Sa`uc!N)-el7iF^s`I zw}NA^dZfbJ1Kfb;O@_QEVx8x63P$rC{H1s87_N`!@@THg%Z%Y?PL#MVuUY%Tb!=IY zsjx-*suH1In=OfiENZ8*lzWD?`0+>clyAd#LC2|>?)lC~!MZIcVmKF13Pr^Vorj*< z?w*H&b92BgrSG!NArX%MceZ;y5C0wizhC_8nefY=pN|8g;6KjBfj956Eh8zw_Dyq+ z@mc%1e~9#eIv4uIveN#FeI#`GRgKeKQNbA<%?Y2zh&hQ4fEv508YSi_Iv zi(qT4aV+DUF<9sOv)YHAhl1im$Kj9PA!>~^PS<)omq&B8;QNoTNL{?tIOS>VE>So% zPH{!=5`{(M6q|f{mOixDW$gxer;Vzi78vrV8p~^Je>j}LUrqYsfvN9}oPy`t&hP3v zlStTp%R+CF)x87yD>BLqs5HSe{43W|0H>i=W~;GZUjZ&02P<0)r)#va8COdhx!MqrlQxjww+#O9ery}{K4N) z`5M>pZVCB1$>Ru9-TyjzC2H7Nnq1>RUg=0g9Uk?__=Nr){akYE`;c_S^Vb!wk}Pd) zUt_#Qt<^R3xOgDv{X?w-Zx+%EvQ(VMt?}|*w-t<_1u5aZ>txVm|FdSnq2Qcbqt*@0Jd!t*FC zgLk#XUD|bW2S25(SsfoDm)<`;UN-2R@ycdFr=Gmo^jIIAx^R=18@*nK-UCtcq>;TI zwV~|iRU_JK(F9wgaQx9U{ncR(+yRMrtw9(x3e&d-Zwn-0YD{qM?Y`RkUc&M>R5X=@CPTVDYiUWO1FGgn1b) z=iw?nKKfHrteKE z#>{Hk`TQ@3{lLdVX8FIvq@>4kmfQGbY3Q}s0&TwM|J3%W#cufxE4P<%9*9nsHwTKz zKF#~}M)~6@mgzn#rdylrSHbkuyvyT5J`JA+_g>o1_#yTi{(s}=M}H{%-L+{)>6#UV z=X2ZZdtYHX6|npN`QSgJ;Lm*-`+e^=ye@}PcypnDWjuOpyn0~V(tCE_A0(mfR2rvK zUmtKGZ*ef^ukB?>SFM-ogBH7!5chM}yT$fuZ$^joS6$XyxfAy1+?8mdX6^}O-8>Fl zX26lS>vykFW8tW+!TNn_f2&wHN>`_}Oe=`)5@mldOH6h3mHAL#iSu!(&fJ`XiWV7^ zMv!H~gT_d5Z2gX8v2T9t+pu#n`Iy{nNyZtvxL9XRqO1jxmbjahpZc}uWw2h?A$}yM zzC-eZS0{(`_M$%hii?4KZ@u&}QL?%PzeFoh9mmBn$B4yQBu=kqvP84=ua&(tfeaC0>rhg3XS#f|^0ea8SR6782!R6(8H5?;mtBY999u zwr7XvOhaz#c^HrW>p(rU|9rs-UB90w9OA{HAB-EUGWQ)Bsi}`UZxJr`Kft9zf?L4M z>gJHOb5BULPn~JhiuifC?%+pe0m_Uxk$+|UqCXM&#%~XEqx2I_fpFA!_+<{uLCwPg^0XEq~p{lJnTiNx7)-Mc?KWZvVezd7}Z&NG)a^sHN) zEukO0S38j(_ucDUM&j3U-i02I&&{bQDRlVk>R%kRIT|i?{L^=@voVmn^ehkd7W)*1 zkv-FWgLq}XJW9X=$HfZ2KS)^IT|s}Vn5ckrsCe1rwI^W9r;-`M%e_sCrRHZ6R3v_S z!03IOreIzccQ- z-bU<`w0`T*<8=YKpu-+7HblqAR4rbcmX%Y>DBg22@rh|Lkxm9oZm&GCbtR`t7f{|0 zihPxle2hoeZja;FwP2|Ca8bM>p73+qc&Xv6^R;n4iq`x+<~D}-b^Xd@o}J6rrnA%m zKMmtk^gd7Ud_Spp+3#gwak!TZ=dciQvCb6hby}RB887fY_X~Tm39rIioAFZQ82mMG 
z-er4c>+ryK{&BZ2z@T?lrWuSVy{6wniD=`X($j%R;X9MhHx{YU7bfy9-b>>(Rii&0 zWdC7s=1AGVIDYURQ4}=K!;s~}9_VFS-W!jz00l?9-taP)dWQQcPHf(0C2Tez=p#&M>Zb13V{B_=}B^bP8rhrte$3FZZV~YW*(ljq#Mo>kkL+67jw^f1t=W zPCvgh$dAppasGB=nc1RIQQXIJla&W@4L??+=={htns4NCI8*eT zCv{=dePone?7Zi?7!b*_rZ!_}C`S#$oxTeHss&pB!wd2v;Z z<;!=z0zZre$&kA_j%npMrS6Fi#;7$O_g3J^;Nr4MoHdJ=@|Pyl z|Mwqts&O;Hn&UP6G#4-Ov;8@eSDf^Kg&nNaf zYK3dKk*Cx(Tu!^l-u`spOeF2R8D6sgQ9Pjv>GdJDByPG4z7wqz20sIi&Z4(eVTmHu zYu__DBLiPz#~!GHu3HU%H$6w+v4ZURuDa?{1zpvJ!V>p>u^`XKM(#e z&x~tPQaHz_pIH^JO}a9?zDDRkKIXsMs+GPYBj?;Bf0F8WiTKpkoSg@`TXqKAevEn} zY%bULY~R6)1=IonODpZ(rGg*@1ff zDA^1EyLc|{P68YRhu-~J*q+9`bvz?zK~r>E#c9UfQRKwka&k$ zDyj}I?Z2@*QLWZN*M=uAvZHI~+F55NFYA1K^g7kBudJ4j6MzRcM`iU&bcBt6 zZhnz$8Cj%n4y*bkRf=lr@@#OuANAV5tAnk~TvJYTJc~j^g?6w%$g(Qib}U++=mcQ7nYALC4eQMn${?~kw?~XY~AvYFpzw>O`UriSE=7DNZ>KR7M$S)=z zz9Wi;9JA+)d+SF2=~L5gBBG4DZNgFCtn#SR<2)oXn$PsQq(k)&=4{InQD1!LS@iK70tw$M4IGq;#Y}GgzkfXXI)*WG04HQo_mP*Sh{a?jx5L8 z?Vb8OW9r}HS95wFhx-}SANa{HoX*Dhwb_*y?{4Ufz)o(HlNa`T)R>ap{^{^%vfR2! 
zQ@^1R&w(u(s34$vAcN~`IN`u~XFZ-_m+OFU2$ym`XOfj`B@?c=`J4D}h5&7v|p}f6gy&^TQ;-!GTm+nO~?zc)W zb7N$3u-+FUoj2|9sN9F=aOw^s z89e;P){81%kH)>Sdzz=q!RmKY;<=}~Ua$V+ZgU2X++0JQwe*U%XwO`-CFFzHOL8hQ zJ@w7v9I_U9V?#FcTbsvE|2RMQn-Sdq;$5@&*rtX*jdP;c$}zo|oN&bNl7*$VrSY1B zW$}zcteqBIcAIhCF(g@2=-F}X6dU%8-XFYHP2G#%l*rIc;q`MMe|0HdmVNzmrL6YV zUz@}cwbv112KR2q87uKMzVB_0)Nn_y7BM280txiYBh?|^xhL}GxAcsvfT!;|Nq=Th zHa&;pZP5G_Ed$5ALN|UE5ob*O{l--{qS4P{N_A_LL{8zWb~YFKT0|71!_(QQ8YRxB z3x1D`2!^Vn%bH#ux*Mu?tFJe(u*d>v+`{Hoh?i;I4|?nxB?rQ;ahr-|tQ;d5!mP1p z1y#NL!+~4WFH#ZkG_+>^hXcg9L`1KLu&91L3;vslWRtz7nu7iDBbz6gjA+ae>-*fO z=59~#n|{(oX*~6eZ$hO-7>J;$ZI0%q;B(TX%CS1qjMY(L>sdp=kiLueM6ASF(Jwg* zkH$|83w6KIDl3_pO0Tz-Pd0d&;954kmiS0M-kcCVU9yT*SK7d zuuyMp6|>hcPm?q^jXfGAug6(yOl%?@h1-!mZUvW&IlAt5TG(shap~2ZMXfaRhXa577FVo>%6=?&_%WVeGZCH$uE^tKTc&ao!yD16v98aY z=EP!nBs>~D79$J)sr{xRk+fZLy&F8cJ#oTTkStpaBCjamN7Oq z5LdP^mwJp?S=_?6>@0)l!BW4lEQjKjxP0Vo^IxYYK5n{=jy#M#@+)hQ)BQr)@u}sw zcryyE`)`wky3a;je&950bbXs({V*dr3%6E~NYsn;u-PsKOa@liAu zZm8w$aJd)CX%yarKeHo?>~x|P@6l|*kcP+g84+YzyiY9~_Ekci+#FAy51nIr@Ggf; zw;{Y2c?%?JS}zylPv^Lap{B30jFL93XLbjn`Hb-k+z`98)A_oSPqaHI`hNAzjhk^=ORcmnI2B`Njb1oKq`E&c$;=YVG3+8kB-@$7Z;iHwOqmn1?tW_3Bjrdp`rLbF>Aywsqv~qnNy=T>##rE$Rb;n5v@#pUDJ-9L@OP3Fh%*7NS%NSsMO-iFIqvz-OIp}HU7Lj@<3N{ZZcyJCb^JHlPR`|9KQF18R_jyO z;cpdOzFWI%qYsN1*{5*MtNG6vvLd;ZrOUuEp9e7!zL9Db?_hav*h;VAcDtI9BbR)g z!T~s;w+uaUU`I9-#jjVfZaQ50m@3fZGMS7Z0XTY5>9S<9NoI_zub^jzA%I~U)V z-pX^3+)2QX`YAs*ZiqDSsdMT_NeH?_1nV8;s3NDU9G>2_2s<~5mfLvb8cr-iXRtZP z-iI07MVd=?OE1^(jt4pHHLV&Y3)>W&Q?E1aX}ymeV@vUtrPtnn4b$@}-ro!IvlExfNl_29Ifbz|H|Egm+NVDn&pEy5)$45>e=)s_dCO{!qlM$znQ^zG zFM<#?_iPW{Ul%)t#v2$&R)6MX3$EXqr|!FSuU0ow_Fve4vK4rN$7c8O0=$<&PcW)P zVordzspH*4R9ZNz&f(^;%>)-$Z+sn5e%KVyEAzJlQK9`)I>a%sLi zOG4u1f?Bdj8{zQctTtH#DCC5SKTYkzZPLq)P2Llo-WBltNS?*0DdwCdQ{rk|`SM+` z5gTEL<1FA3_87k9Xlt2Gv?X`Q@0&J^u=9m8qcnljk{mrKkn^=5%VTLhqBTIfeviS} zoBepGThSYBA6qmSXO~XF)}oD>>)J~5IpgfpXwI?Lr{Nwg#}V(JmmR=ua|GL(9N%S` z5Y5?9)~US9_dBOiavm-5)flZMzZ+`p!4_X9c)FIZ;~x=S-6nb4l$~EIwrD$#o@`Nb 
z4$XPrEWY#3@1CABf)X8fMYhi=$=PyCmCIYRx?|$|HEefXOV()~BNOYmN6SQRtURBi zM>^5B1-7Gr)9|lJ>D|nb&SQzw=QldiKb^Aip5Rp4;&W83>nBP^dhS!XbRKKDZb{KwlyNJ)zNvU|)b7eKO+ysr34#6DbyK8JC{BglIM#2leEIBS1GNko^Md?(s#7dZ; zzEA$^%3@jP#VF0F@r}`qf>GL$weEtS>tE_M%hSKYwHyuf&JfWn;ZWZ<4J=gW?< zf^U=t3Zpb6YuyDuUjsFxObrx{3=NEruz>h}X`Uz^p>@JFM&lftv>{g8rm(X$&9UWa znZwV~u;>W$xy7vSn}#{XV>HZhjnXi%WocLqJ72@VmZ@RD&(N^`4g_`m$Cce|ZPBjm z^>M03XpmzXqd(xvlR2PgNL-72oaaX5)uC#7e|15&G{eCWvJ9RvlAPrQ3f%lP$3}&^3=g(Q7-h(_~rCQ!!@V zENQ3Vu1R3eOmgJ4hoW;y_u8u7RxO-q`%mZLxz{jy#pu35ucNrkh#=mzHJ;KH@%CV` zu}j!3vp-LK97B7icCJyFqlaslvNSGh*H`9{LHT2gQ|NjSZ7~YmHO)C^%n3hh>hdJ7 z#*(*o^iShOD*Bn{tl|{I-CfT)o!Kip$CtUrFM~b0%O-vA?(%v&(-1@W0o%0IC3=lebbV0`T?d}#PJ(wYua?q9kbJvW1Sp!v{iax=LEdT7`ZMEafIa%^BYkWB}Ut@@ud|qUaeI4qx zL3EgrFGST)*EH&!UT8{NmJ}4OW>`Q$zq`E6+Bufh%i$|8wnt_~q1RzX>zAP3MHwix z<>^Y`rYMirhaMe8GPl1x@i4!n4bjhKhau2y^AtXXv@>Df+J53qHUtny#6xf zxnqt(TlShU|GYK3yZVr66MgkE`1&~f%=ESGj&*AMzSK8NY0ls0)*KqK&n@`0&yCB6 z*Sr(-++B2;O_nVy7~(A%u%n}V!CbSBx8;G)Q={Ng10SGyjK&GuD2;P`(urt|n?ldm zILDW%agHHFwh=BrFbyHSJt@lJ!Qcb?@+h zaR@BG4qmm)B3Dbd*ELUfkKv5>X=~WB^9<{fJi>P@XRCN>v>atcKg$`8keO@q{rAi> z&?h3YqwAWaOmVf?jPvC3a^^BxZs*(|C&zQl(Q@slM{)6_!;3ue7R2{Q=4*RKpqh*aU zHH_KhXxRJaZ=iRKE`Vv2CgkZsft;@eS^H+!u?!91UL;*3Z%n^F4$XPp9PV7!u-7#0 zoM)tK6lU45IiIuF@{`211oqutOQ&$eS~{k9&s56`ZxTnw-*eb^doBG%<`HY@n4;J6 zN2?OKJahzmX%;co#x7y6t>ta8eWqVLjly$!fi((qq-_mTuC&E^fhqlN`R-fv@ZjPH ztEjnWxzR{Fbqcl?X+KYX^wim8NA{=EkstS;MoIkccpu(zwsBpbvUU`waUtJ2z26#~ zzY||urxCmIHN0bVQCLQ6-Uy%XI-aXlvAcz-9C(C0%IB|RdtWWa_H4%b{B1f$`}{3R zWC7y#yr)R#6%EjF;Kb6}x?)Oul=QFq~8a<`!oKiidWQ2?_do7*sWPM!|ik-{#p9K%v zbspKQX&5JmHP#U__xKP?dY=xzpG9mL_k!`d9BpBJA8hF{+Rr1Y+Z2tH+ie=oDaF38 zzd7vru&!@x=XXx&Cd;s-=KYrX?K#~wZP=ppE@;UXZ6ma2-26w(hHCf`KCIw*AM|%h z?Rhk_reU1S)mTT!-y6%iT^%Iwp=BBi_S}eh3+wwJo5yHBM+Wm4J?GJ!ZHmU}(KZdy zT1B4*GW2gOTl>r+rZ~-+LvGQIb5tPTw;b1$CoPm`o*u2ym#w>NIP>*$iB%`MWBHq- z`ODcwYxmQGPQNgT=FUN!ZcpK^>Bl*;mwR&S0ae(j+9^}DWKugykZ+pkMoOZL_L z9NAaw{^3mic6`gzfk>%3WyVVikiTsEN5w)5cc3lbPPS8*r5ml>I*%6JB?-Dq@^W73 
zIF~Gx{LnMAvX2jW#K z8Na4k_GOKmxmJc-3y(~YIk>jdogReM`$A18~VaIAF^*3q`b@n&0=DVEV|Il`*=40CM?uX{bVJ&HTZPPK@T5VC1ZNF~Adw!W1VdJ*AkZbkc+FmFxmU8Itt5s{Kew@S> z8pm3#f_Jo)66S2HG{-Z3O-EQA%`)1`JhSsEx<5ZZoLgn}z&VDc#p-0P@o`#n*7`Wk z_%(ldSaav%J&UH}_UKe5Ax{PN-CN%!g;}e*#1y@jvEAc<*0Nq?lU~>v6nASM*v{Es48#u9X=ph8zny=xK-xnv1-& zI&Qb7)OL|)%8p^ZDT=@C@RI4R)C@SQ3_E_63gX%EG${HohtZ!+=b5tXJCEpZYrm5z zuqx_KoT7ge&EQ5Q&ZOSTuOISv+<(6wesU!{nHwRyzDx%SDK@jWL;i}0U>XI>OFnk>zy z9(gMtxd*Ft`T{=k4eA&_FQ~WQn@23M^nBIeddT(D@z(JG(vgvAQw^+**^!ZFDO(^1G{xT!i*&_UUtm zl=!l%s)WR^s0_=hDL)S!x6F+HhQ;wIYu(?Y9%SzCIpS#Vh(~d4qpVoA(bm0f^F`KT zP&ecIlUT`ll#%s}e)aZiwriQK=2&uUIfuru?Sq_b@bf${M~gk$PkHeyrtLFq^d$a= z?{8f(?8?4g& z<+@(^WmpNacA;#(TJoL%F+v|DCi3?;8BLt{@r@cwI4wrE>}e09&03KdD|y^=u17}o zirovGgL@c!!?Ok|y&_>ik9&7PxtVmHDJzT2BQk7_-DbR7^|!=%oG!SM`3v6j1s2Xs z;e|AuoAP_p3boX;{Mw3J(?r{u&OY`$o$MJyf9>G3_g}6vBWPz?bT|Qma|5j;sz_0b+mPV+u&xe=ZYw9h@-`teb#0W z+(=-&SQfJOy={d1tGW|_lQMo0xoo#)ua^EaX2-GG7{%Xq4;Oz9t8AWjvo>Kfc142K zO6>vA__aum$Fh0q5yRf^GE+mHtKWsi?NndePuuU$cX1MDJPNGb2*uRU7`Ek|phqvp zt=H_@GKbD{^gTVljB{jE+RPyR`@yV`*4Xl*ab5R%Pa?vLnUQJQH~$_zUfB20>}?*; zyFFN~nYLf^!nXBdoh3`Pbu@-;aYGnU-?NIqu93D{vt!#FQ*z_iw7$0l(@DQ$&CC5Z!v6QzvC{NUXW#p0 zb;Yi=KY$g_1JF7{^Jw?$@Xi2U`aq=gyf32GpCZ(JtZgQTO1JUsL1$x&VLbWO$NU&Y zR3dZeT%VhcG{XyZ{q0FA31|@S=^&PPo^ymJ&Y|?^jq_Z2_MXRN*c|iUJ6R2UmVbQb zz|c|NcFn%6qu7`s&#Gf2hAqEc{Ne7c+GdZoGiE%S83M;ve-dnkHv6=dGGp3G5jeK` z2fcK%^&Dn> zl)yTsTHnHjsq}UGMR>tfr^{5hLHAK}Z_ zLVA%S0*k3P#@ZNkjQffDBAmX{hVPD*Kl6?WW$T#pj1ha)(%MMHs%@POSC8xX{*m!T z=EtasZ(ZN&#uuPdyEXN-opF`ySXM1j{B7rZ4)4{u>O*@q%lBGC8()5PYDf`PsbX2J z=$U!Smbuk{VzK;GTW9zuUM#|kEw{r>PrL<-dDfH6TQB7d1zsWb_TepvHLq+Nk1>~D zA1U5W!&ZEzM!*5@q2awKoV9s#eVra!voWqA^{k0HUtTm=9w&wdYNOw8o&?*7ED4;L z9GT`Bz8NyL%g&F?*vObE-9pQ+lacwuxlka2m*b#m=ht7)Z zb9nv{{hO!qr=km zma$@~p5G@1^Y5Zr)Z&;kdw*9Q)|j?mQ|nqC&a4iWSi-0M>ZLWLBO}vhH=NOvmNsVk z?wki`zKHSZ=>B>43g;ylxwUM!=6@~M?8fXkwcr@V+jf}c;LDSjvgU>_pW4dL-LL|O zd$)HoGsVgzP7-2Y^~Evf9zma;zxK-kK749QZJ9&;dWDv*L=(KTQeIp+-sr`y2QU6T 
zR2s29dd%uQDUhc(-0DY;!cI4;-51%xVZ`GaPE01)hTN|=ZRZ~u-M6x0d(l=Nu^x0b zHN{5Zy3N`sg{KRwcTex#Gh%BU2{Qtm*0_|Iek&`d@07T{6}w-L*wcf^2)msZ+jF)v ziS{O_st232vOxTJw}>T%dA=BuGw(sCiBBuVCI<1^o!+R7%*Ipn^Xa+XhaQw&W7bOJ zH7-20riFMmxuN(R1i{UR5>U#V@{x8|{a zhHr=Zzr?$a)t|=JMu&AeW6N0a^i{J%+dXQtLycM6D3qA7VogHeSRAK1Jm;$5i#BVv zYOkU>X6;p^%$O=t1cr*f%09reR1v;9vR1h>K+I~D{S>FB;nqr7V#`y{5-L+sH4Cgo zkv1o5k+ah%VP6A(*PEsIs$@U9;onajk43~k3VI8g!5j07Rp<#*=eo9=fzokxg+(Mv%WXR@V6OWjz@&WT@1hK3%tvg`?KAc zAIICKHvo_?@dAja_l?``&yF>Q-1-#iuA#E))H*Ipx6>Z_EBDP+tEde; z-`vJ{TbupqzGUWki+$Q}y~jLVsangvE!Nlk*mG-#sr`60L=B7K&FX&<{a~=K`FOiI zp6Zx6mfo)ep@zJYn%J-Y(Dpw6S`A>e+ne3XUb7T1M)CG2Z6CLfuEc)cwzGDpJ8^MH zHQlUbL}pGYM|?UmTqVt0gcWBJ;{M7c(hgI0Ewjp4C1B@UJaG+IOPuri-N*Y8>3w6iF+Mm(-}1#*D`Z`lsn&RN`_2NDUn#-!R>)RYrN9t=`m=p<|S=ogtB7UMqBr{ z&7IT;dY0SMLaEi7oqCOCC2RJ+4!(qHV~-e-?c8Rk8*_POyrM`Zb4j^Xct zuTr6V`X1Ul;&og*DJzzRv~_>m_KymI1aQN@YKo~JUQ-BoRQt2NcN%+4h@j;%@zZ=2z}%MVgddDhOU&ED*lGvin*NAS0m zo(cNPvGPU5uWv&1XRE}FWvhhXYb)Fb#aU&~+i`iDec7sH#;{e1;B70<7jE^v-03+E zuR3?G{hA`y<}EFDJC^u!Yquq2b~QJT?L7}#N8(lC^Vpo20Kc7jn)mFnT8NlP)cz-5k#^&irU~kE8nzCL@tohp#ZY@r& z)gS$g8rZX|`+ar}BMdr}kB4uJMmj%o-1W4SkiJ55KR- zBW7!hHJi7s)aH4$)neks&kqSJA`=)Ic2@s#JgJte%-Zf(Z;2r*ij2pZF?%(PnEY*M z>=9$d(@TkhxF`2GoC3iLqUUO!jm{XOUmutsf6$}_HxnPNxQHDmPH+&D&O`_~xH+qeF9%#j)k{N8S{SMwf* z9TT>`jXivxujhouQBZ2%Uol6bVw>|me+MZiN;Bz>fnQ~dF^Y(xz+q|KfA00-c;6Xw zlFt%V{A{M}oyQLoUv?BvNcugUmJf6210>k6Z>n@waE{Vw@rd%;`HeAV&v)RXh80s=DH^A`D%)nq4MpSmn6}UG zFuW9vv*!3>nCI6SG-HihKjL|8+toNm_*xA&#_y)c=7aHL&GVUQFB$%GMA7YX3D`zx z8#pmqPU~B~t2wD%wofqUV*cx9KV8oqqqQR*#I%WVV*82V@cl%QB4WHEabmi*?Hj{C zOU;9m^p};dcXXHqoufIt&*QKCdFQj_QM@|)D(fQn zM%S66d!IUpw>j*H7g+GFLc2O{o<%Jyc+G0JM3{Zd+SYI%QO2&yy$?{ zHN3TRDQjzAWsm;f^%bc7)%Chk{kTpUD9e7pGH8pOdm@_D>*j{maC zBjVsOX21K$$H&2=c4+PW{ft!HNa%Y9dP41AQW3`2h06@i!kBA> z{l}QzHvcS90d)#~_w)U{Rlhs!%F@_kEpqz3mX@okWv^NDtT75x5ub;A;r9w)X+dkf zX2+I0pZN$UjvQ~VV=-;&xmusIf9l#F!H#c1pfGLsDmB|ZF;=JcYd+aaY|UBnZJMJo zZQJvRNB9oQbH0$O8*A6UzDX_jsfnB))BkcDj^`PIe@;v-_iaOVUd+dVYs0?BnHxcE 
zlvyzpH3w=v?-`=C>mNf?%YEu1=f{kpIS$AF48cE!rk4A*Av=cVCCXc&yxn%eH&mi*Y7j|JP7 zy^l6Kw%S;;Vym_9UA0=f{&6+6+^05je$1Gf<8Zvt5Ns!$r4ehtX3zGg0mh@i_4p|o z*S7w-(t3bA)Z;0goL_{0VjQ?O?0cMJ{KbEa@8zVd-#1I~<{#Rt*{WrXORTsrR_6WZ zzpG%qx{*=8k~sqFbab?VI+p=RHfr)TCyg5{O-c-}G4koYf?CTnx@h4sXLgnl^g z+umQ`UQ=HgT{*F=g8q8j-fC|)D!JVZJ%1hUqU5ib_JuikRN{_uyXcRe5?e+Ix^w_>4CDQ@-_U5 zLm|(@SX|1OJP)sk`NrUB>oqmDoHIU1zoCYF1b9}gVflOgv-HW^j`|YtbVjrmNm_it z2UW3-K}?iMSP^l?(6AcwP5wd6@0p8h?bp0^&GNi2&*b*aMOb?NY>JTg2N2c;&@&?M~Y}60zFks>GLF4<{sktTHUqzMo%>TW01N z!{YdqweIgxc(u=S#L?cFJj(d;J<5tPPelA!AKCHhW9;}b?j!vC(KTiDp2qR9+UXQWAV{GB? zp>4M`|2y_LJ8$1*u~Uxqe9BnQ+mGExOlTbeTKM}v!fPrZwyvq_91!<^-*uk z@oVQlbBtPf)bEu+b##03jb1XSj!c_6yToptL6uGG2*C08vw3q|9s^QTPU2Onsb#(w zu}(~B&Fz2g4c~|OAytvzioUgY#H+aW(VoV5Pr{1uA46lsXKUX-{yxjMRdG|}UgFHF z%o}j>;6=W7l#1)40k*xtf(`_@mA!!DADt-x3<@t zMvN5{`j_ho*>TIg_nZ|)r{lm;Dx1GQYuxxm1bmcuh!fgR5*6_^vTwuh$UVsUpXDb| z=H*5DPMTl-M_qB0MNQEyD};~rg%hVg#>rQ6ccP>>xy^Lzc5x5mu473vs`J^wE8yw{5Bt@Gv-B_7|Cz5VuD>J6{j z;ZA0B=Q*32*s}f0MuqQLisbK8WxGsoQY4NaY1`f&3?PKtW~8N4$({uPHx%#+qVA_}3UU#vjJ0EYBF@`1{D;;vJt| zD@$z=KEc;q!ivzEp|PxO?fYBV@9iE$zn5xV(Q;i(IkD`5{`%YGNY-ex9*P~M2yX^T zgdeAKY@Rdf)G9Z(b9OJ`*ZwN^UA52|%_rHLx|V&;@ACVL>ndfv{Rr*PBlG=X4*Q?7 zyX<$K@orebnyxrqUMS9yXvp-l+ zzjS?D3!X5KGMy{O^lpk<^UIcX=Dtyjn5*w+#IIHNa$?1_Jqq_`xD3XREImlt>!f*;@Y^$Fez`kaF5xM#^L^XyxBN+qLM@)}~X2 zEPGCo{B7xZ&*J6R8El;U_FX~Kv)$Tme~KvCvD8qaFqF{e$TG*%dlWtQJMc}#X+B$9 ziE49ZPPww@2a3Y4`GI45dw#RjyjAA93UD|+gO+_g=F702{rI16e0(4NbR#I=-PQ+| zFphW_&vwg988%%;^tbhsDDzTmi#J{TtvqeLKl|1=vMgJ}^0z5YrM}ARDVTLW$w^hO zhzV_u+n(;$#G)P1Q;UqeZS2q+ZqDA$gPqm4GV#v)dvUqhW!;}RwZqm?;76x(dvF_n z<*!-R^%3KXh7#?0tgJuvah6xz%X@ryqYtZ`{&_fWJtFTf_pdXF%l>Oz zEE+`XIjuNE#G$oO;Krsk^oV$Mc7Kkm1WIc~)&eDRlxG@FydOq-9m~hCwu99G|M+Pw zN7MrISV!VUgp!sSM`ikD7I9gAi3iuqmXOW`e%Ix-`q(T$AK2{lSP9HrnZ6HkQL zZ0+(R^E^_V_Ln(C#keIFOqDL-a#h>H!f~-JjcZ(FOPDc=@zw|KkKy|exD9?IH=_J% z&T-2}s=hY+J~ig-nxKZskAqWG^BkrQ41-zIPkYAk&nb?#hw8Um@Yd3;tQ6xbWjFH6 zzUcBGYsath%gIU*oYhO01o^tz+h|jaRu7?)OqD#!*|0s1$N6eymGq 
zoJf$`=UCakYv=ccMzeNm8Ii>^l0Zvk>;1)z8b_u=#^b?OOPnXCp7G*rX)XV=l=xaC zzSrk>3i*pfRBN+0_2sOqDrdu zHDYzNh8t&1Z`=KptpDs zG^W=l^2{-7wvUeS_}*F~`ulCYeWnMOh#X%oF?e2mo*b4&gg>LnqY2O=y(FFW_o@%e z-k1l*cnD{`vFwqzcRb3=ws>2^@0A+tA6uId>Mhau>do;u@i0g5xA&D)4}RW;YU!?8 zgS0zG0AyvwvX8dzZ5zA>sX7IG9I40!Z<0RT&?BvEac`>@|tnOAhg&U9K&EmaY#L5AY)huX#OhOyeoTCJ(K z^}N31=D!X+ZU4w9w5!~F__J$}8cUux*3g*V$T~5CIF|b2-vg{&JPr3NQAPdyRqKDg z_$(C_DmZ-a{$BQ+KMgh3rJNAJe}_2%>l>8d-mLx7wS{ zN^TsRl^Fgu%PT2APkSvYz*-!ezy?Mlqyc$T;J zIW|8so^4itbfm$Ty-$~r`1^8(WxHeE9uspC#d^WYTkrwz_qOn69{BpkJ#F!bS25Ql zt*0^Glg1#ze+&)3WBm?wsizlN`~LA4D-w6V@`o?u)b0@_wuL={KYdO*AciLSz@w0 zvPALsNUXVD$y$)7HLT5=C$?O-dmUDo(OceZGWFfxR&O#7_B=C;)qS+spPh^s%d4z? ze;esoLSwg*>9Awv_v$ugRJHbN_G`OlQ)kJtW*v=X+ecX+fptA^{iC6cv)gJAbxbdu)Fc>z|l}}%bX`4OT%b4-3 zWe6NwJxi|qm-xerb?`JFKJPi+J8vO)bNo{2e0bv&4+pBO0SJ zHIes@qXxXr%ZS$4JGGW;-rahIspQABVLlEF8-A13r3d-q8qP1_HPdhM4_?Bu+iSjF zzJ}JLt=Bp8?7xoftqH8(dfqwG_Ik6cvEo?HTKBaLd=#s~nCDzxd><<18_C1p-7@h! zLd|~dD&{4g42v$|dD|F!_E>%X=4jQ_X3dW6a_L`^XRNoJUA7Xz+g4a_@H_SU*RXzp zER2^tu|wL!j#^^H@hlxf?=#td$t>l|P|M)nz26LU+-}WQE#qR@>l(Y3-aci=o#oZ6 z#h&9$F*$xS9!uZY_Hgls#3Ao5`x#8p)#3(6V$01A35T5%49WXM__$r(IdDVb_>-#B+k^BA zJzw4CQ4caNQ-A3#@+Dpz`=s{0ZRA&(>v@2`hS#`oHXU`p*V<*RZQfg6+t6wn>@%9< zoJwhz>DZw+>}bE02R|EZW8$AJ_Te(K$u=gOXtoX8KhjOTV7kt89uIEgul!eV>(MvH z7Y!xa^Wyx>->Y6EZ+PZ;w0EZ5YJBy1z%g${wTV zh@-ulN3~mtMvPZhw-sr8n;r+5M>Xdcl$Qa7YXiNR%_7LaX{cdEBZKkc);$A;l!8k@}*s!=Z#mRf$wf@o* zt^Jx^&yde@{;@eMM~=>03EQ#;D_2dC;__3%j_~J%#`I{M((`*Utc=mxcFmJ}iO|N7 zZ%;$w+7jOny^{#7IiKT$xM!H7^ljfU*9c3GF_|{UyP0>SnWnbwyGp9c#43Wd-m`uZ zN4{FPYT0Mp1iz4Q&B$JXj&{AeBe zpBBjHkJ`I7Hf?3gh?OH7S7rpkp2EB2q}yM+3#gd!W(mG|{4DE8@8T=0naVcoXkBe* z6&pjg^5VyqtxTA@Mf~x5ccq*`YunIWKW}@~8DedYQ%%|!*v`;+B>GJ@G8n1t)IRka~{fb-ui~ zK95%$rT(M*a(^OY{xXx;46ww7n-8`iM`ed4F8o}vgdQDhBl1YSzY#rsm3;CbvjNW+ zDv%ZJOq}&B;hZ~Gmi=fEBTEKtVq?xBCMMPpqhn#tAua~ysN9HvFR{JKO;J{LY3(10 z0CBd?S;qQzjvVdR2|GgB35~00&$p4vtoDA=^UHWI3jP 
zklH^V6`X5et%S~@)kf$H^W~|f!McqGDst$j?@l8G9v(9TzQjsSMY5wjumr+n&01L+wh{K9qB3o78Hi(XYrSVXCyspUCM>2+zltaGPVQUlHJh%@c+meR*=u{3 zf8&hm{qT330g9RneT01v9DAvt(>U_ICSftX=5N}0X|#M!Z!zYPD+PubyE34tw(`o! zh9yQ+G*MK(mnsg^LtiCNJjns{bqZ8rxb7RHS0Ds;OibQ5BSkF%>|L zY1y6~BXdliN8~8pUV;5|zgaU{`{Ux@2rVA*t2U?2Yz)Ad?Q1y}-^&u3`IoxoXN>F6rs7!OmdC=UY~%fs@v`5p!|riY<@hKK#>>|cML z%)69tgl{QQjAsFh<5zjhY_9?q&!^Bj!=tWb^&WdPKiqRy-m-C)8b;fFE7%yu>eo=C)vLG>+Epa3a(@gbG+9-Iw%)gT6USKn zCfsNR8*+q(4U3~<|H|P_VlD6Vphor@#t|PuZi|11XSokCJ~TIiQJz%Cx3w?tXu6EX zpKsOinbD(;$ciEGy*S-sRp&A7#$>>14Co$hKa2FUODR8~HdeyrVX{YwP0e zbPKi-5gs@(ah&5HYf;_i@_K$2`&#O5CU_ zXRHHWG#?{z;yI_)SJ{iY6F+>G-#u@kswbAMv)8RI<%~++n<-wLuWexxm5J85Fnwbk zJwn;FJkYw@oa*)SkQM#!gx9^XL~G|8Qy#56W6UJdFUHt0UJ=N@5=S% zTJxE_^;#5Jn^p0vNxZ$4Mxe&Ahe%g)iSNpx;>Qb(J&Z3Hy;4t1SpVmcYRYRH&s7oge7Q=T$K^yTtj^%8%=hv>w zW_NGao>c`-DNNsXT2o(}eE{Xkt}hgYUsnLf>IuJmnYYTER{)3OHE7w}W8S6B^!~8l zXqWBQJgH@d8?)osYmCCM9lp}1@1=Qu85Yd*mTUIwGZNOAv*Th7lOH9gsQxi?9)lS{ zr>H#tELHC7p;%RW8Ez~+i9gwQg7F;$?6l%LX|M9jE9P!Pt9f#}NW7IZ!~R>b{Wb9_ zZ`a<+j^U-OgWbE%-O;CM>ouFNMQ@&e&vWF)>3Ljste!)*cpX31EPtwLkLf?JGDE}s z6e}ivuT7WsDlw+};-2w4jo%t-&py4*ov*)j4sp6Xldafi3736~f5LDXKRP0hRT}RX zTra@Rgnq??6(tiwTx4FttoeG|`Z~`k%giIxx6B|)jj&Q=wfQPuV4eW%*ow16kG8VE zO|b{_F5~GLQ>;yH6q!e<#f`Rev?Z=A4J~2vbW|JrwlO=61An9$C{B!@`k#%{2$$Mr`z7am zob`g^hsfN|vgY)M<4s>IX3>G^xq zVl)3<{3vqHb9~g|V(T7Twv5BSYuxEA%EX*hnO3FqI&2+53_rT&oga}Nq60S;{DU~> zLff@y(ocP@3?rq4mGa6-peDt?mWUUy6)W1G0uCoYmB|Rxi(7C zwZ=N1-gMN1YB{g<9+s^ZzMSpiIz=&fP#vKZr67$RLXbt}1d(J*@wdRq1*3e_t z?EEpt<7bc((Z5oxx6iCaN<@wqml)o$6|JAkM`~{T zva+h$+usk-;_cv#_(uDiSQ@9Y?F(q0R#--2?g!ovcRJ^LImNDd`&$0yDr@iPMfvQ0 zUaWkzp9%gSo^OG!>uh?zx^Gp*#QinPwNeaIA5g-Ryzq}ZG4B$0p&j2jP8!uZ)}_Sz zo8ctRi(~+>;6(ea=i$JyI%}SyZBJKYIGJM(t!IB(;vDN8^Gx`@Fptew z{u$H}J@+wt#97|o&Nxfp8E4QQHA+2hYpK+~4>Ps4*k@8?ee@z~-ElmbV-l^}^I0!G zo~W&umcdELXIXQ>8Jd^uoR+-+p5`K)D6<{EXT9v{1z$yb*tF1=syd$s#cFGce@1Q9 zdeKf>H8xyr;jG=I%w4Z8eh#sVrJ*rS)SSvr4y7JWk^9!|`pAJl`=m=gN8UE35ni-q 
zY-0WAY+7osoHjb~R)y#HR3Cp;ko#5{+njt-uI%WeDEt#oz_Bw)ewi?DmAMuS9F8}k zWp9sp9%f5^qpa=q7JuhiwLCMMSg}1fA@RM}usA+ot^51IH>te!Yt)`QmCtfUFtuCr zi?#|Z*|BU^qHt`7*Bw3czgkXP?bB|?jcGN*@U|IVWYJNb-lUw)4gK=dKHQq!TkbRO zWzSK@UKX+1sofCf?er{<#TG8-(%7~hwb0!*X5L!V8{rR9m%qB6a4OqnM#MEF=#yQa zZsEYMT-RA}biIztRqc@o)MlMyJW}z>jJR_FYmIe={I{rYj{Hu`-q=p zUFAU-<4n+tobuI|bNwr^?U>9e?ISEAD!7mG<5zbdW61Hik8xu7>NfAHRg5ij)t+(u zJmg#dGoQyf^yZ)C2!UN+-Z|#i-Z#gLT6$*t(O~r+bIwP}^H-FbB5ys?BgeRLzE&c0 zbyH%o6oUI-KMD1ww%cpgGj?1H8VbvHah3cWQbhf}9Xm1x?PC=eq4x0Ns(%lYXb>fMN4mzFFISMxK4T_$(Fslhp0@y$MuI%(Y$2eU7k=;Kbg$vcAVeMJWGP z?0V%~<)n7`HsSfG|N7-O$8r89UUDavdmPp*{Z>WGc6;-~DLb}DP7(ROImYB@e2n6q zb-r~M#leU*mfq0D4jt&d7(Umf?ppn+-YwGmkJ)fOv~^X&P4TpKl( zN7b-+KE;`dm$vd}Z&kGJ?yYxu5mpgddq1yS zC7#)FKa*%5??Wm4{n6aQb1^9YZz5H|wE4BXFOKyFd~*T&koeNjyX=8}mcRL~J|wL# zW7eW;+e&c{^T?>WHjaF~t}$RL_hy)_;QLXYU!Taw3^^M!BEy*GQBD+CL#=tp+Dsy< z{@B7j(nq%NiLE)-8RZ&mrLZ{9J(HSatLCv%qN|`We!MQ3D!dzF`@2MU%*}D1(szwM zRSh+ys9drQyJsKV-h;@zvYl0IcG=2{@5@`6MEm&pv>uyv>UN9HJ#`kXRLj(8{;o4O z57m&O^3XQyo_%wB4
    c2==@Xe%$iM{i{k?c?XudTbu5+buc|)mgMsEmNc1++(fx ztop=}Z{LLFZBx%x`&+ql;kN>X))*%<%XVuDYq@_gX2;Ri7{y;zcd|~h|I4A!dGwt+ z>}4rib|q#w{Mt&9yzj5sNLjngT>|X^PyPNkQf_586ko^1_iyz?=U0heH^Zy{`mJTHq$4(Nm2v%h!p%l&e52Gmwm3)4c-7!RetB!x+C8FFzI_ur zrZp1^$9AZoQHyxn&f4wMc8ML+b_s=JyMKx0aE8d=da30;?U(aoS}@1)x8V=T{8w@s z67zr;R}U;(K4QaGnSRQgKhK_GM$Ea#s5VdbjKp&o-0V0;WqIfr$KONGEV_Z6T- zct?8o$kQKEt6UFY-N?F?`M?!E$D-x>Xl-1|_piDBS7NpDhbfCT-Z177=L=)x2v3OX9k(}e zS!9X?kHI_YxDoU!>%o{6uC2MZnWdGHrrr~FCAx(}wCC4G!hcq`-gCob*0}2|B4R>q z#Mu#I>ZSc;{YzO*!fg}puc(Zx`7zFUV^=v|@l{K0B;(`OcoZyxuqwuBuMIIWW{rCt z6S2mq?06N^t0&_89;+#s;rAVRm1Ep!vsZ%r+sA&G?Wp)gmb}%7_&vtsN9z)iqp&ds zUKjLzw`>lfH@}|e89VY^|6{j~lP7u}``U5cx3o=;5%2k9^yoNH;zww+MD(s(yl=^C zd!FB$E?$HW&eF`hLs7=dlc4Q%}#4o zJTKoRRu6JQ;V!}hFH%3Mq zV|d%l?^X-uZg*0zGFt4-?$%S5ho<)Z?fvL(BW)2j;;wk;>Hri5EcTj5j>hq9%z?Ex z{#NeKV|c#ez4D=N%6s1UOD*x^6O(PnwinYJr& zd~K-j>3CI&+FD<>GER*AVQn*Pf;Ufe?GbMrVZGnmO5^tXwA+*=(~?s(f7{+mr2H-` z1HiIX2%H1)xAHPlfA(JE%d-3$uD1s8O>mqO@$+bFu{WD0UL4z{_PuR{itKTyZ9F4y z-fGQO?N%+n4mGItSh;zgZJcZW$SAZpf#J#DqCV;e{idg1B$z4;R^-&D^JBvF#+bf% z_uyXg`?tZzkoW!UUA9~Ergp2dUnS4wy=GZ%jKZ*;p22t=PB;3!Dybt>w%wx*#|$~y z9CjQd`P=e`_#f{}o3Lmdp=Q^$eBUP1dG>&r>y~(O*TI*p%<_#nRq;IJUHp;rJ=%Qy zrYL9h6rWzicuSpYoX6Dg*XL|V?6c_B;Fap zT8-M2Rlklg%6B9D<_gO=HEp$=JFKzgM2t05X3X&3MTL6t{UpEhAgRTgTGpyrZY0v4 zf)_suC9H@DWN0kKTl@anevq{%d~@hgeqFid#IofhYHTyNf=A|Ka)deCkH-8W{Ar9C z<5wlB-J0~wxMOP@Myw5ak)s?z{|KV5yxk9Lr=E7n+pYOv+sdb8$Fg3D;%~d}t`Q&K z3)z34yj7G}w*1JcI1$kjNEW?3Juz>W8x?`X@+GwG?LjZYO7**=7g>unPih(Q5-*N@ zQv3ckdUkx(>#NMw&|b|xYqP61k?uHOrmxSlYO8XyEjury{_`_~Vfp8Vamx{kVZ|S{ zJR|yGPZ}r4@r}%%vi9M5!&2i>4WSmHTKiU=C(ojFJZ~H0EWBU6rKk1x*U?PhGfwQN zXgK_ss7MwO`D4e?xc0(V&z`pv?H=0rMfi4cNEtD#!MLUNx9n?O%WYwNE!Xc%TG=XR z#x=(9*66#eRzFGHy&ql=@ru6sl1ys1uN*pM$I)>)w%)Vmycc*!*S>nB=WL-yy;aJv z=@MSe)@w65Uf$R`N-Xb98dx4Po?jd8oy@r3hxs;61^T`6R2$(jR!tKvbJx@=Q^xF= zFlU#DIPoC)@J(_d?7~;W6s>>+=-#JesL-kJe0$16MPsSoKvQtngtq z3vX}o=C^ZrHJ$WRpDeNF?naF3yCo*<{fg~eBK&54B%(Y9yQ#SOhLu9rI$`Q%j&>Gr 
z8FOysD`Ot*Jf&oEBRlc%l}Fh@@cXvYyq_+ipHoxi2%-;tXl%->PtC=w+|Na^@hG*3 z+-*m|yT@sy%X;o|Sf9ilMZZk7j>Q_=Ey!^ri`{X2$ISCY)!)Nfa;-AQi+>5Px2QdD znUXc9LfncTrw!~=;d~;P;m#;_^<#Xnj(i@y%<+#_&KwVa4J8lpjM1r8UNp`z{Kutc z0nZVtwa*)?I-2jOy+}T~xxgOAt=y~i8mqF^z9Pw(8zV}LG5l@j`Q>-8MSGfaJT;J7 z?N0+aH}CK2lp}g66`y{$=NzNJiD>gfx{g4D7^g!ZT9Ie z#*DkJY#KK-_G)p7$G?G8;<2i8RDU{x9Oc_f*52y3cKh^zoL!6;JC{F$^_^7BzKTPOJu1z3MPv=b zipdv>7F9PX8c*rkzQ5M-B@4f^gos|+VomF9^NI0d*~r@Ww$bgdbL|(6w7uT!W2`vV zvDW=w^)X@C7N_&B#Ut-!eGser`b}QXTiR;ld7R6OPok^qi@)Tnu6S$z#x*K@ z9?l=udpJd|>8{sYzrSbvN?<>yh}gWBmF0KO>~xd^x5n7ZiWl$p#0T%UOx7dR{!;|4 zW&q0;W#0nXGLr2*KHk3#v86Y+@!hzZ1*bYAA~tZQl|wsZ)GX~Xdwn#bW?gg9ZjM#7 z56tn1^@<)85;F^5)byBt(Q$8%M{FegeWF6k`c7@xHhU|Fc>gxUmfqaPcl|x}L3Uw0BQx6jcS4J` zsL^)kL3_$N6OVH!7)i#O8d|Q6=h})o*WZj4zpgT*h-yi(Vj`Aeu_BYU?jM2h$}Mim z;qI^ByHuDbl`S7pe5=*sk2B}`>XaYfYp0kI{yRoBPxj22a~QEUWW|kf@BAYOn8Plf zHnBEqQKN0$k(jY8mJk@W!uc5A6Gry!lbWs8YMNFNKIhDjojF=W)J%AccsXsI8z~bW z&*Ri97}Y**a9znsBG2e(Z65JBuB{R?eR%;jCC)JDNqo*2)R4|LGBv(!^W3r}!IvL5 z*EvK)(=`^XsI-RbUmYYwexDfZ@lFM9KCsViewCOukHkF%u@}1*S$eI?j`S@e3hlEt zMm#1i?d8R&2KU>{SgQtM28V8)jSIZsp05 zev317F_x{L)@z9!HT#&;dVNIN=lwZe@%}K!B-%rMkIbf{**3Eohqjfival=@8G{V7 zRc6zkxANpjzs0$x7|YgA!=)I&W&wThAf3ok(k;(GScleGJTbx zZ4}IH7NHA}XN)<7uMA;pxKS^6T(U(u+iFrdIG) z8s1}4oc`@wvW=;>U!?+t$JQ*vyEC(l%GBR9=J(>`5p&x#?lC)$2`^gB{8-6Q>9d>{ z!i&5btDY|Y>6}TY?bo7W>nforyO}Uonrah^iAqToD*lQ$kp2K&#rZrEX&r>c(&!NwOTJy z>;2j_=g78dj^%CB$H7nS-j84OE!*wQj$?Kl3yx8|ZTB?1myF#H+*tRsUD+;d2L#Wr zh0G!JrjvP|9L3DzakbO?xW|k(XN-~6o@8Ie+dr*co_|ge`qW8{CsQ#sJg#1<$f_f)@Kzo$+E> z$lCX}(fja5lV84mmg*d@J{_?UW@b$kBkz8+ zvo7mQz`X9EDgXC&>UgYJRI@)PF5&Bd`ZX|bKTIvxJg?=RvDz}{$FyUP<84F#)lh4( zH=8A199yOKSvIA=eorYUy%bv}xGvLqh);L7W&ek}z@fYt%j`Q~SvO3}U=3;8Ix2RY$V|!AG#`l^W zFTz7|JdQWy2>#0dTTaFM>RwwsqWU(o^4E*RnC}ej}+jHE}-VyKO+Q(S&J;;zE{711^n$y;O756GD(b!kPZ6};ZzLhyCPKEn@ zsbe0YFI6t_WLR_wk7MK8VK?qZtc$bfU&Y4H@{{*rhYk0*-hSENUbpp%Le|s0_%(gB zTQ5A`3#NMZk10c2svPf_VXZ=&H}cC@kK@*BYFk^4YEglz4!5vx?JG5o+yz$ct`M{V-$KS^a;y7Tb^Cv+pL4HOar7JVrFXWBIqWALrK*{>=4b 
ztU_yD>$k5qwtJ3o*2ppLLu_syqeo}x65src<;QijIIk}8BYeC>^!9js@##fou3vtw zR$%4AbB0iFAD+(kl6hplhsx<{@{KX1p&42U69j3e2B85fqB z^3}aGP7w;9$7iX09?@Uv_!b^c%zG-^+T@)b!U?E(lp`M0W@W~^7u-0oG~z5@Si$KgpdS%-nE#P3-#OFL_}HrvrWGuyuqpdGYohv|OiEZA{k*V&c#I7)3;+Idsdb ze2Gxe0oEmu=-m*MT#xuSV9)(vv zV`j73E7QHNN1+$iGTWo{T~xeAazA|2*1snVk2+UIiJ433`H7fplJx1Tm((s>_q;V6 z*uh`(`1-r&SnJp1r&*-!T7=&!jDL-JvQ;_8^H(YE6WvJc^gM&~ zFx*noR%>c(Ig=>4vFw*d)qlo^9^iXa$gIeF|8U)3ueIKz4aXyJDc^y>Ysvm64(cOP zqG5#CtC-t(@D#l^;xS5RtYy-Hs4F$r2e^*n($R> z5xgUCd%?~pb;Kjy(rN}e&NlNsiua{?E-n0Nj*aa{d#-G3wTfYD%!ck!N#Qlp7Hp!T z%bX=zL>R=j*s->p(t|%5mp=Sa*|bxk;(M#`QBJ)0DGaUE@>9z-Ddohmo3`z5lV_8R!0c`tW%1l`MrA4T^uVtM2qg~fU9 z5jD4|+Lx@zUx!^rZ>wRehhw6!p)srPrO}S}Pj7F09==}vD6^Iyj+rxW^@vw9?YYLS zWgS*>@H5sr3(F*tIx=0~Se=kp#!CJ+ zd2zk5fs^z25(MrP?E4B$tF^VGe7P}Wj#b-LZDYlssTfkkTtu;~_VAo-lr2Xnh82~x zC*IKqr|k5uEh-4yQM;7rfp0FqO8j_nT)$XDszt1pzP8SnW9fBVwgz6pG%!Z$T?13T zF&dcSGBse81HS5xb6~f_tqFcqSjVmDqva{B+*h}-=c{E43$}KiL*fT|I!5YUA6xkL zlJn~08>8UqIB_)_Rrg$1Dy*7j`_aI3WLW#$Wh|}XvNbTyvVQZBao@&By=!2~HzE$a z%KW38jriY7&nssZbM@FGgjkY<*OAOAGX$ewrAqV_tk{O zw5iTV?k{{_$;`bpN4OLPJ^i{b{vdN8tdnrmw1(QV%GSBtsvdLgZI2BjBHa3@MES{i zw9{56{1{nkf@%L(iB;J7LDa%~7gzG{8~F*VwwcF^@UwOjV&5ad$!QR1I!{_g7Ei_1;pW}^Fyf<|tGhVzz`dNO8FD2o7G5EXIJ6N~j zyvtK&%CVKSCv78ya%dePz#=+A`ieBhXCgd)b6T$|%f5_~O-bu!M{ui5zwr>1iLv6&}Fj?O9x zo1HPHNO9R9VMq9NLSuXMi<~s%`7>I-rnm3;wU4rLzLjW)9{iAedd_v5vIo&oDIdM~ zxRlWOk!gw*8JiM%Oms?k5%CExqD{T8pUTgCEarS{LW>ytd#U+&E9cr5#W;hFjcJ^h z8DfO3&tQQ_A7js9A9W^*ouGK0fU-W&#+gseTUthj8-3+;z$hX;T@2zBYmWQ{plCI=rclsS+DVt ze^P9_3N23esI8tNBYW0#PkciKZSyK$ZT(0*>mgnz_OWp+;YLR(LpIN6&Ctg0kws9@n>6zS_viAvto7=w1-@;dXQT&ccKvNB;dOP zbGrx+v^D(g49=Qgxjl?i{=R<1))M=;MMuOv?ASHQ5iWgdETXpAGg|COeJha~H*Rhg zBlTBeq~6NlypfvO!$>dM(~sy#y@y+Tq~6YsAF20q=~H7dk$U@Ru_HCUlJJMDrm$9d zALQ1&BGe*nKkJh@&lnA)afz@o^W40Sot_Wx`BquZGtR~l=jOF%uQEGEq{r<5u0Q8U zXYlKiZ=CH*+z3C%de05@O$pUn!l~K+3@e?reni)o$N}X@##fPJ4*Xhi4l%MK9CL_^ z3Aw#vBEqZGV4o!>;9Vi!%MRvGC0Z>y^j7s<$`=Om5rT!TWW4U>o;GhMc5b}5ns}6+ 
z;*F!v@{fK=xBN00_UT!*!xrqCUe1tBw)QPH!)#?0oprYHVrQa#EaI}$HeL}iY8#W* z5sP;gXTAUD-Ra-jx85R_j(C3&E8pX9mqBwDJw+_=>LX%-N!y6^SHl_$*uQIyWqs=_ zVi~L6B9`)MU1OCj`iWS|tA~iCOj<{*b~V;tGjFy04zoh*TW=8yIWqn<=2}Inqt!lQ z^;u)-==2e>B5JI6`4TAB=*pKqc$Ie@uU1R-v%dK#JA$=QimJuu(7D-+y#r?L-_uq~ z6GBa2XUKeI4`TCMi5*>c=jiNQRl|y_p>yI^%LD~JYu4GLgS9Ted9aj zbyiNFBK2+W8s7-NtKqUW@G6xZ&+cfwcUvpR82c&~*QPJB>iRyMPGZ?LA@ptAoM(i6 zb3Cq%UuD(cN@g5-vS58a&pT_dpGi$KXQ;G~^)$93KgKb-azDb7UEzO(Q`}782+N4b zcZ6485t6s=zISYNP54<(5uzSM#qcI^{&$R!+S9%xq;l*%LV{&)5i;l0Z-fNP9wQ`p zu_NRci7hyUUxof$sHHaQT=L#wE_s(6H@#nbjtcy!MvnFQ4F0J- zJ4-H6w&%$u;M;F5$=RJnt9|DZjA*a9g!i@A1XLgkBn67806oh z9&8UwMAV1teiIWt*Q^*DnfQ^m-e2!t{w-h4^sFGYz4Jtg#FstGOGtdLH7x&G+_+_C zBs46}EX`W?_o#PS{lQnm{Z@efmPoH{);z1_3@B&DvQz3i!&Z1#0M(|iwXD^ez1DWG zxju!j+G~$%+*)g?x;h}wP`KZRJj)8nK3f) z;#exR?{A|USNq(^df)VJV9SW{CJ_>x*m1&e_|ZX;Fc-X%y>`Dkw`{j&$F@EOJ6f-6 z?Aog=XBVTl95GsXth#BeLq^Qwa`$J9H?2H99qsk!L&}PkL$!5(+u*eQjYMNl`&gU( z*(fn%*(o7-+v;6zL-<({^A>W~B(DafB^% z&SRX|b&lcis~bf^MTPm6Z=6UsclkEqd8GeY+jrNsE3|gRPg;5PF}7R<)j74CTbWh) zkw*I-5ov(Lh%|Y-e4Vf&4SM09@zLs=w3|+(dB*r@>$OPK-jB)=V9kToXUACHHvJ~^ zE zzsD%D>Wrgf+QR$Vjsd5~PjUte^Oy(6I!tYvTCRCO%Q||>kCQQ`I2;?={T#naTA5qB zecCUvV_Gkvuxy7p7WN>J-!W_cEWhyDljn|v*>fJNR~s)e=GuJ;nWqKYJM;HtRFt~U z*{`h=L$+-blCLfOycT;He>v9t*50MGi#M7uqtI&I@AT;sMqjF1X39|CGNQK%RXZ4S z?y7t0760~Q!s6P;-xhwS{4fSj@=Bvuez5lN&ZM69#TwO%09mr z-@jn*seLed?47Qqo_p_pZNcKLE-j(6qRpIL8Sw`Snx5 z*C!Yra4!9WZA4~?%Od{gB{XKIcGi!=w&YhUrZ#V+WzV_pxaFF+ zwp%mC_o!_&wdX7?OmTR+z&V<)vKHX^W?eo~dHem^vt-H1Ysso5nzwDgh)S_zj7slG zzLx%{>pZU3dT(~EbL3dHj^%Gtyj_B?JwA)C`kj}0lNQUCr*lld%(0GDQ&OMvlWl!r zS8v-`@K*@7@UWRltey;&Sr=~UJuCXGd9U{w+*-sq%bLQ}PW<^*4YP$mnjbK|&oE+r zEy}aJbNanxa=+7jZ&Jady;=rsIcG9f94lGt-nMym+*<)RzxPvcA_48y?A&Lil=!l% zp@hVbu!dzZ&o8cyTV_U6!{T_SwT|EYZV|u(YU*zp<-iE92Vi~3Gp3jAGUsM%NPUm^ zbYxg-bL->8PhxtAVjr`Satk=f&BZVO?JIW3aWiM{gzS;Z}9pq414K^EkD5v^I-jd%zM~ zo_&{48MfDVLthB>YTh#9`K#@>Z+9AnCdG zXDv?W#C{$ykI`G4TVl%5%n~9?J+)D9IUh=6z+VZ-vH1Hp35~5DhE}l8@Kvfl-`xg` 
zN#1ggYFy*jaz0QWKhN(a>5D+qG0?C1Ol$PD*wN-D-TXM6Lvg%sCoS8T9Z6X8)~UT2 zJ^3o@m;aKOi1!VAE2O;TnnGL0oRS|epOrWq8~)4PcK_;b-3b2hRkX_VH*MeFM)yOF za3{BpABH>PxXXYu>EB$>J>X>y)FO49nh*Ax>DKzeJ`=S*k`cA`@+kP9p<3&;Au{)p z);O@@^zv9UB7)z$ar!>dLp5Ue>*2~b!jly##$$nnw@2~X>9s_6&l7y`jcNNw;yT-d z>nx*eU5-D-rNklgp5N!h zOmo_LEt0gY3u+u$R;*z$Z2IJSA{2H+i#6Nzn)?%Denv=G5&7THEUJ0t{&Cyf9Bybl zZ&THJd)QBzNg!%oxz4KcR%@QtGD?=*IQA+rylqw+(Y4scRKN1^J-LwDJkr}XClx-% zImSY)sxI%lzuq}s>b-V_9PsJaY}PB2qpq zb;y_*Bs847_pV5%tshZ&tBTZzJ+~sQapWu0w8B;reLZ=Na7 zcjxTO_Eg?(^LP}!dxG!2T*=CU-Y>+i9PX08|N7nY%cSpuv4%C>74$pxas>T6k>P0X z*ed}?dGY6o$C%lgersJC4`vKMHbR`}evC_ewEVpy#`L|yma70;G{9fm+{U7h_+?Qd zHd+~K>)6yT5=F!vtQI}VED>*Y@awcOT6Cqhj(_gRyRCmMsmA_UsO_ea^+*`d_(NS$1tN1!|3ona2yc%;4i^%x5g;z{8+`=R} zHvUdAqVL*lYW?_?z#4}h;#G_2WSDJZQLD(qjx)CIhtorE7u+griWOOhZ9zY8l-=69 z=vr(mulVS;jhRIsUOl#tM`RS-#w;c>ZsQUkEq||w(RXE5(?EP}R%g*i{Av@e47GJ^ zY8Q!O;|_O^w$wMU+ErV{7?i%DAW9*gE4LVsML{*xG2ciLZv*I-0hNow1SfAJjeAQdhugS69~2 zf0ryV-@CRkZJt-_x^j-0MWCp zL@PsW9h=%kB6i%dFJ-4&8qggtC)M#HtF^5R&KrOC^f0<=+ruqB@@;2lk*U3>M11*i zdg^uiNX5j??R?rq)ZZ_f^j*=-=^?(Vn`hEbd}|lY3^}sGo3lvUNaY)szR8W-hvB_q zzpu~aZ61kdJR47$dA`b2Z~e#!w0MunkA1ZLIPVqBjR(*FBKpB%j~4)MC6eH+bG&r^ zba5wZ>o4;68;LIZGrWe*o53Y}9p_T>)pomtK0RV&D<5Y5T6_|<-HPhGW?Sf|wb`FG z5;K;K6N103e692%k)@}Z29%?mxGN_KhdoO$Bvj(?3Qr%yNLjlzJGMJpGVXPaUE2&X z9=jO5<<^Q;9}#T)lZYc)T8isc4S`bH$Pr4GvKd(e-qC| z?47eg3%$jtEXA>wm3%q+5u?o=U9A0KZ^3V#SdNgjpE6S%Qi@PXJ8Kne4>@o;W=DDw zm;GvdBRr#q8{;kUxi0T(be2ol@%b%b$0~}Yqv`7BT11NQW3b}PQmo}85v*-2{s{9( zo3YlaERUyg;Q2(ksb^{rLSEo#S)G3M2bB`>bOlvr$k$x#&FFS@qz~`U7Rb%zn|o9s`DEeQ_NScfb^ zTan|xud>g_h&p*6ILvFRS*zLjUJX{<`o~5VNbKq-VOi}HF@j7zUP_d}_kO&h0(Qx7 z(s%)|ZVwNnXV+!FT!nB{-wj8Jy3H=ljt< zpUrdYA)n2$YvucMeA+~VvwA`g`AjX@M?O<7Z8gnM@QtkWJ`OW8y@8I~e3-A{Cc5v; zYzgUzm$cYr(ww~3lZcG5#vvv%EVHnckw_gGCT}%niw1acVtK?l5kl81bj?`D_0c^x z)G%sMVQuxBZ}&P=rp4=sEL-Cg3*I~O_F8@G1TEL>+HQT?{JG>86T@@NxX6{Ga&>E3 zVyrDcN|o{?wab;`jZ$bKZ?`|Cj@j{bI!27BEJ{q4MoSd`c={-NAD0)OrIyy46xegc 
zo}#|;^m)*)-VTP&aDou4IM5(omARBiiM`uv`8#fTdd6d4W<7d|=g6af5q{>)ImT0c zS@V3|mnSP);C*21n&RXJUTNcb{~S`U-n_&&$~%`h#Ch91i7;yOZ1#+k>C(DEEQOFeUVT-8_v$Icu}H#tUs(K{dal4*pc zOGK`%%hQ$?i#3~H(X79W+P5CoxZ4>3Au73*76s5NnN_)`9PmVbYi z8R&!jx|Es}Gg!P_h4-wy_lEYP*HqnhW%x)RSXw&5B}!A!k-m40-`gl_ox@r5wWTMq zD%--yTWh@YPve$*)5Vk@$J$dIh7E0p5i6uG!>I&|>6gP=DPAu3dp)w;Ra?f?|CV#c z4r9Bm=TjCv>hXAAc-lSAsh!TxY}@vhjIkS{2KH&vnyud)zaAH+h&7$G+)dxkJ5nXJ zaf{K&cgbCE!_Hr%PU@)V(8@e__BwxEyYqbb>!kY_MS9fSsCau-T9x=eWc?j$BYrV< z+IG!DtO~s!RzUEEkj}8}?QZ7kRE;6W$~7dOE&or6h`wv$iNCeouPqZpwk;D9&zAp} zR6*|-pPWyrBzxxV_iNLVCEKng8qc=>w^;Rgn4vPFZfd<>yXG9(R?V?^HvO++&j(rE zXIRwQ?$@4)A={b>iD%3ICMOd7zCQ%3*&dL#+^-#tAKQwC!?WT4BM}x~17-N5wcW2B z6GOHo6B5ss|F6`wSiR=>WNN)%d*&S37R|BzZL05OVLu$RaL(%gwd{goSG|T(vvJFn zy)~}v)tNPX{%XuJ1D4vC6v9-!-dym1RU#k75~F9uPnkyQeTp8d_Ax$R>tjTJr6W`0 zX3eYIy7?(l9`7VR{CcSjw{ae1U&r%X*--aZ%>F!K4|Bd}>|qn(Dckw5JZC!tjwj)@ zVBA^xTf7D>U*nPCX>5$-uOX~hYi7TesN)%J%9cG}FU7Z>a@IHYWBjo9N`|#kmr-*v0!-|GwW#|c+9`bD$`Gyd*i$s z^yypwD*G`;zp9)vWvg?F$gwrPsi1EYpsv9#0cJZ`bNMVkM=n*X;EcN7=)9Ky)$<-Y zYMiHG#qt>;3F0BGwu96%lJ3E&lYQUt`ouM68Wk`-oU$#g2&Ir8c~mQ-)up z7PRPxy7BGef0gP)&o}XOv;`f}FW%?2G1JosR>`#U_4@3(F( z*Uce8*>Sb>JPYnzYaW+jS>1iJwRPVpRkoeJ0f6_^u#O7 zJ-=R-Y&b=UwqlI|+m1ENNK4|@j-Kel-5SpfX-cj2Fvsk{$|au`HZK|YD*!Jz;shs7 zLi@(xvSsd?#u`@2Hoctq-olu_ledLM#B67s2g~=?k=Z`izcwRI*^KQFGs_;M#_Y0= zQKPi&uVloAC;5eW^_%>{O+?#&urKs6?z!*5jjAK|abnqg9}A`;aF&}@Tclo&QTmIf z%UtolUWxzcTNF4ifKl_UxR)64bvU&UQ*}6bfp>v1qByIIdl=Xg#e4=YjC#Lwn#Ohu za?k4A%0E`I+gR~cyp0D_*)Kz;^_+<(?P}ZHIodi}&)zXNLh;qbJQKbb%wzMFe+G5L z%vz?N^jTJLQU!h~d!y-0A~#|zA)mowYy4xgo%z=?t2jSj<`LoZaeZa<=Cj-w)4eXd znxXgOzYe~NIWuk%qSp7_fqoavZKyT>Yn1w)Xafud%(1 z7+S+w6U~3D_e9@^^DI*gXobHoyM!JyBdL!t@BH68 zkJC>LpBJB?*czg@esBW)lk6$n4eG&HJ@hNO*jd2*U0*JG{`GnX)@8mro}W5`-dlb< z)(4Jqj&bqV6z1Lk%~dDbA}eBJzY>i7Z!u}h|J{8o{=0BxX$WJ1_Bp!1=?c92jBnX^ zt`+G>m#y~~D=@ZWj!Zj_u^2Xe5%0muA65l1SF$f&`mNQ&wsR;o8@IfDvc{ENZ?EC= zE2d=zJhjg #G4hkE;OshkkWpnA;V-sIo!;s=-VS_a;X^?aB7MqcYfjlYf=QI+pu 
z!?O7H@rba$y^fM>&)T`Hjr|&WD^0BPic`lr7jK>14!c+SMQA-uiW{0Z$)%@D@hut8 zmln6-^_DTWvu5Yc?M(PtbSsyLQ{G#-aC~_yI^TOyO;1mT^S=8x?fEta@bDH2-O4UX zCtLZnQcn5xGQauKR^62NeW_;5t*vIpZ2T4TC^LpzIp>Yp6V^ub{Ho{oes5)VQpaGP zmED6HQKjzX6A?$Z+l962w{KM9)ZbsNI&UlfUh0ySVlw`G@3HsrB>PABh7PkLI>z8k zI$9dZ))8`zTia;4%#Kw-993jw6rI*DPqI$=({W#FDK^capUEqi`oLe8nAg}6ueRPe z&n(^(=eb0BVSK+MduQM9eU()bd=(A*f#n)FPM5T)4UTa;Q+zt!!) z5XJrsk)^l%z_HC)y*pwrl{4aJ^4wnhG(U@5&Z*kx|9NgbX8%2Gd(QoP`_XUa-`A7A zqQ^e&{YR7ECFd{MrQeLd%=(-jd>(&|^iG@EFI(JRtHZs$S6Mm0*RJ&h8}8&_rvUeb za8vnF{`Z(lfWiDsQHRz4mTL^GKDCWloLq)3QGeo@||Et9MEzOb1Vna(2mfg=Q;@nUGXh!nnSz~gaKg#NXac(zcNp-1V<@c0VDyC0r)%~)S(|9W^^=HFb-`7%Y;dM;D&g~UW_%H6{^i$!L`Ae;k*tXN3afsEQp~mR%9Py}S z9OA7$)$$QzeSD0AuLM0&BaR~UudT$hMU*l0sQ6-d{K$fPzg&eWLjMX&JXI^+f zYles4nfm~`-ptItSPA<=ttA!V3<}nhC@off)y_Bv6Q^M9ZH$7?VSTV{6TR>bVx;By zD{XkSp1HH6?f0&$#4=u2h8?3TLu1DiywUQRn6I>b-?P-^8nhwwuHBqxym}LQjD8bd zjDpW${WcjEoTs=xEzzOI(&12sW-I$$Hl*eVDztO8;v&k85Ox&BWkvhBgSa?4|0~)5yT9t z?{gkD^2)SD>`WsS6*&`PjMo07N3}msqBG^$N@YNg)EMw$6n2g%(=waITApurROUa3 zaYft5_s8DlA715G;`FZLDcks1Hs%l$(Z;CkxTbjL&_kQJW+N17|A@GTleQ6YC&%eu zXJfW4;!epSN_QnHKkjhvkK29ZzGsDrWec6b0#Wsv^^~nEd{2?Nf~M{F9>)^PcwHHG zOx!XwzOJx$&aBX%hTVC*0@`=X%6Ya>m)apJZl&I`6^6G>z7uDF+J5iqN-X2GW!N#1 z%h1@m!U>&gsr0`}{Cbu@T~TkK*U);`+#Kh41=g9w=&_E?SLd^=d_7U$OoQ$fqkkpl zOk3zL;YX=1A@a5MD&GQnk*@>s)>&WVR;FU~ueF?M3#}#mD6J(#w$|`!(Vff}p5=Gu zdypkg@0y#kjo00nLyY#us4@CuzGkI8dg^|@n)7U>HjGlF&VU!8Ft)y)s4pBq89+Eg z)HI%L6s+iR3Rb)r1)szEVA;y{k+CZ#a)-y%3d=Bfkskzh^Q8kyL@$LRHtidyH1;5GK zF4p<4yU&az(-!e1jZ{>GNr-H%{cE@#{7kIB459Zpmh+5PSwfG|Siu*1Y=;K+R?ZJf_`QIAc$B1S5eJon&;q}*93-9kZ>c&Y|F~!w_y%` zH`Jcrr1FHiKS`~Kz4^c8-(RFMeGon3T}t1Xqxscd7Cr0rNM9mr+9Uj8J#IfkzEAGw z#PnL+*43}`_}|=3Ibw_wkzuEp%yE2`8MnS#_eEmfz0@w=YhxDSYDm#F+8l>i@2<0W zl#$1N{iRssUbOvH{`@BP@I9}Sv|VSxk0W(luEy`2Z|UTth_-vq#;;P(D8tAcn^2>D zOM4rkV@2{;E>=o#pBwKtdEe(Ax80v|>kPfW=Q@vaWJK_e#FNYwYODOrXztq!Zsu9y z%JSAFOupaps>Nx)ynBOp(cgx7?~6nutwwPB`Bwh+N&dI%i@#(=7pEUQcWCyY*8CI^ 
z^Q+9%QODj7)fCo`@KQ1M*^vc#-nfrDNAdeO_-h6^3#0rhqpg;}ipk~0pTkdW=jyZk zuMhIebmX_90y`gJz}2)Dr~VZ0*E5zs_%ZCoTWTuMZw$7Idq>B(H=8poj2#TD8zU;> zOEA8%ZrnPw!Jls-%1)7a)|_H_Tk4BsB&;UdeNNOq_mZFRMIvM~{O`zEc*9NqE6hhO z<-htlSl7?0A7zsI@GQJh1KrBM+f_v$?+-^;ALqr=pw>>RJJhbITyZKCvkAN+kDVxg zy=_6QX{%-CF&l2<&ehR24*r_KN^zRuJW8HHruig3kGuBBJJ8Ont75id_NSLU%-O4@ zU&Kb+Y{yzh*ux-7x%jH(vqZV1;cL-%gns9c&!XZb{;jk;&xsX{>pnHlqn)l%nc)ry zGJG3-mkiFU^Jzc&(fgQNYu%6eczM4WqAX29{uWlWCW>G>VcY@)recGb6h_hRP|j`(q#eOS*PFn6ftc``j2wos{b`)tLtIU8(-n`i)XdgYmuzw&SIS-$M$tBf16?+fC>}08*!@y z-wpdq{)`iP*mLn+g~nN=b-e!UzKu0Y3ER-Q+CUcbymMVDL0dm!_cm)gbL`o(=s8?| zeyk(gDy?T8$klGTFdXJQv);o9pSE6dJs2mx?p*H;mLq2EpVf zoHb8OdlBmoQ~YRu7&BmJjyb%z{4r(_5dp@S>qc<`5GP#;ELw@J;Fg)@~2-Mm$bIR+rx@a zVju1EHfr&nuE>1*_*py(>-o5!sDHw13>Zb*h4l33b+nlI;x--; zKC^`hOJQ5E8H&3TrQr@DzEFwy@IzJzd{tIMh*H)Zc8r?p=sX40@i?kMjCz(c5pSZJ zE9W@}UUwO{KYh;waS^w=uiW*z2m4G_(B5&4^U}Q|9_7FLcrog${Wdce-0EvTFJ|1^ zXQ!Bmc>c%`v0G5rQN)`kc>AN4XUu=RtDvby4XK5v#(JZ_$Kjqm?v&#l6yN=`vgIRQ z%(ds3AIp+s9Df_Wi~YXK$p*Yih&!ctZ`1QDun#h)sUg&C+VYh3JWrNw=f=g`#+V!Y zHS9}Z4Fj)|)y8!lrq?f5P>^?a`=?}@fu^Ut-FWoFEsW9WNp zb_wOkh_qR2_wGe2yRu&BSsow0-$wR}*BkwA54-{|hCRtIyrFg}U-m%+!R=W6Cam7; zNw2c_)%3}ZN4XsF=@@VLNRQZiYChx6UqABT$*vrZ+nMj#`OV9=e!Y)L|6aYnKk+`j zpJNM;-pew|pZD@&c=PKp%R~OPy=>1Lhls8*zcs*74X1{C5 zS?ui(`xXFRYtegEw^Da_elPe@)+yHU;0X~CX&V=oowuR;Tl_Dv?ai=;bSbvQ%j#HD z!ra(1qtB!C7w49^vaG*^$+bD&X2Y5xzKDjC>^!?qk@~iMjc>%6g-4k?CZDXah)`h- z*I$V^y`U<1l3NwHC4v{0@8wUvJETiE{i$%7EzA1LsIj)!HzRp*X}Pa8?eD+s!`-de z--9-~sg0$#mhztI%7lJr9J_sZ2MQ;p@a3yJi7cKk0qw)>Ezay`&t9q6&xk)S*vl#Z z@BJ$1yw!DutL`m^;$#{AxpS=OefXT+BM@Dtqs%&E;5FuLqp$y4>E*aQN(K2a)R31l zlf{Y!c42T+M)!sMtIK2feQST~D54j!Sz!;e=p$DDQ(V4`u2f2(^_K6^5S{>vOSeoyNvt9=n3v@ z=-F$tMuK;{7V6z6-;isIvT2Ze3K8cD*vx>N8)O@5r?&&9y#?`tt9<@WVD%i z!;4`H&p4;WBj$9u=guiFV&A`vA#+S3;{P0W8{OuoXrp6kj@>r0luWpmz?XRNMYi(W zDK}E9;`@0&X&G$|H^u_%EMi6`0HXW5yS!=$`h)*%2N4&-$X2fIG5iEw3XtAR_#*P_ZoLIY)U%J+F zFLOc9SD))Kq=p$g)^!FkBd)Lc;~TF0oS0+j{K{rJ8jsy3Vy&&L;U39saXDZMd;YnTe2sDoqv*Y@bxw$@c%>M7Cb+|O 
zH|&{sejB0m@#Bo3$KEVDH+tZD9$Ux6*N<4fZ3OOReAyD6!O<<8TXKvYLzWa zT4aiCykc#JS8R1P5@+kmHQ*@kUgB^rpI`1ti%77{C|Yajgl3$!66V>olrwLmotyzz zD|Wx&Qs%fX5{Gm(>wP%Idn-R->17R9HN)Js*v{aHetX!#;PxNXNI%z)Rz5q&r>*zK%&TjW zv>Xq{D14V0GFH7%pW=>L`r^;4?8)FgDO9hxr~W2;3V4AHUvZVR@1~$@Dq2 z?mY@Ae&A0x@=MPo>+J@dmwgid^4#; z$FQ{x85T=X2+;^H`(jRkJ)QC0LVR!Zg>cdqG4?svM<5 zE%%~foX5kQ#&gd{Z7@ZsMZ4BH46k8~M}k#L&5sMq#)x7#z3n$wwLVzOz1lSKWBT;v zkSFwfJj;d{aop>OsE^?d%Xq!odyO?$1#9U3+PIPEjq|45*~&ahO$Ti&*JZB!%4ius zqVic{z|#K`CR6pi3Ox0w#OPV^Q>Kx6pQ6XAeT>i7`WVq)>G}o(_Ol*`xu)kUHeV%o z;9ZNn_5Rdd7OtoRt|iMYQz1>4wqLHv7v$i5bs7t?vw5p-sH`gMDkP zX8#yg`*6YpKk+;%11qkf)$H4L1#k;zp5@oa%UctO6R0S^4QjxfA@;>5%zHdn3fGW& z)51DmjtWa_m0OFw3^=5R;=3G=V0}Qd9H99XPyVG zY0dux%W{v|Go590{>)|x-%jDS(p$XB*by2h4Y?815ROF2NINHJl?&%0qB|30iud&ZtQjGETk#+!MjEL+bbdfOT|r*LNqZ<^!vkvEy~ z<5hV-OV1(H?A$V@%=6?}c^;2p<44IBc!N))GS)D!<*N+%iDw2upO%n%wD}re_PlM& znDJ+C>nwO)uxtT_H|YIPTLJ$rUV!&Jzlbl`>F&?sFMnM8yK@}ej$8ATmRS(`*u$Qu zmOU)|wR0`wfT)RckvEbtp%~rgz}JQJ-EEAN&e-e=**IB&pMG)G(?_}Vr?HRsNYCM5 zyuZ4xUcZWMKW2H{O~)M(c;pv2#@r5$@dlOq~{*R7)+xh9o+4mVY#`-y{T6)eQ<1y}22|bTs zs}Xx2&bD9|XS!C3^HsPF@gl$QMQpq%>;1y#wpGf{vgU|0ID2{aklnnPBhTS`6kp}v z__7jq*4myE*_IRi`rp=n{rYh(U;iIXcjr_YS5tSA0dUs#>#rF6C~=52uc=|RnElmx zM5v>V%(XDnqG&sQmS(8YyOoL?p%X>&*Tt=z0mJEW-5rt;?J7^mhzZBJG% zjS6pVY#G(x_774?;d>sK%{|Hd`p;oj%Z#hr5Nmp9JHOn{n;%QJbBTzeTiLMU=T`hE z@6q)~Mom)hX^98VX{Ep^lXm(WbBR^o7~Nlc&rhG?# z@ETSvTDQ&K>x@}?SVxZ20?yZY&WYyw2AuO~K-osyUs2<1k9dG}dHo8O-BHF{)Yx0` z{9+I8sMcfA8i(^}X1yoTdRk``r#0-GA?rL6cI4)?HO4%TmZr?3wKHYF&l1%IvDK;=;fOiKc0Ts9iEo{id}mp2hORFN z*tZh#ia;AayhyfnM7WXcydEC0Do}b&3r{tF;HlpbB0Vm~tNc9&ZyLW0XQi;WfN!f| zm(bHTb%Y~c%C+YlPnKop@VGYqB-VZ&EREAx`rRXZmyu=hIXtz$?@NrDTTRXD$VW8T zsus-E!s%I@cWvPx;dNVhwDCsX?&UTev3rf#ZMlnmDRTyVCstoo<3*!2BGG_;J|zlPTMM`>t{2UkOOK3?B%wfh3`irUMgZ+-3WO@!L^ zvudwByd;aa&$KdolAK@T&nJ~+m#YC zNB=m>KK%W6(QnT>ZI@wcUc=Vi+6n|O!>l3KwBA##vd%w7N$WiL8o~;My+h4ZOpa62 zM^Cx@RrZL|s6^$tF=~wMUuE5yx8Hd!b9RiSqMXC-#x*JiPZf zSLpb_vNPBoR(-^%q_ws&>OnFeUP{5u 
zBvu8sW}TcvkN#Hj>p?#yqqYh{_1H=|TDkUD6DhAAl%h=9Dg>)c_#Sa9l^A}Hu_bZq zK`n_5TQLzkI@@1$;3+fj=XA{jC$#l_4rp_{7S_)W&i0XvszSGqYowCTWyPuDY(?;1 z)QI~KSF+lG8_Rh095>eEZWN3a7ZV*zRk4+m&rY$@|0b2xyJSjUG@3)~UAObRV{|c( z&(;X@#FU*rV!m&jkK7zy@9KjMYPmZ`dvoX!$~%W;`p#o&zeuexlyM{Vz?IYi-nVvA zyFF^HWXFH^?W@#SHqOjSI!9rs1=ZWb@O?VpS|+v29Gx7+e{@h)<0~zmBV%p$=R-L& zzW*dNR=iCJ9A)61q<+2UQlc&1kj2cvTPt;xKGieNHAYYK_z^qt(AyjXo0UhY!|=|f zlbm&1#pFzS(GD+e0>1x zb`P?iz@LmaB_<*>jY#mjzQhpSM)c4YM1 zyp!8U-1&`Upt9Q(R zr(8X+hzbOi2kH<#nR_cWcK+>PMrFMfyI&=5?*Usw+nMmRgnN&t=qGSSKEG&s#$0 zY2&-h7G6XdI$OreCFM>TLmz9HHI4MK612{o9Z}X9_-o~h=mQGD3Ek5n)r<&H<<}tsXv9##Nu$5LHh802F(CJC7TUeb{ zuiN_8Rj&TGgf_fS zo9FHJmoG|oEUT6%EZbqf-LpPW*1C&7jT%-1w0L(5=SQI><@W!x_qIK99Y?-rzg}R! z!+n3DfhBw9dS1$uWD6sCG^T9d1r}(KY&J!y`QUE0NDBMmH-0DhpUjizRCU$Kh^(fr z(V&uDVnswo##2T{M&_=Xuvwv8>jGsfJ!j=*Zl|okAlIp=^{c9zsio7Sf}E+dK108uk_qI#p*ZZfNz=p4-`K zNnxI(w!FxZLw3O2Y$<&#&!=ul{%#R>FS()RH(mIbkKsSRIc2Yh{xaQdSw#H9l%j?e<=R@E%(RFVzk^JPTL5%Kb)Fqxu@^hbBpS4&7Q|u z@fzx9X>jkG^v`Dvb=eXA5@Ws|wK#M1qs5Xb_xD+jMoSU+#>!CE33@F-@n_~hr!R{4 z54|v+ks*n-piB5`l6uN|$8|1buIL(1GHk<*ttU77z5fH%dhdc)cMZeZ`ku1thxJ{j zGtcU;;~TZc=abKl?25UYheQYLEACgV&E52lUVHH=R|e=!c4OJ?(jzrj>v!S1-}11V z>Jd`08~bTwL)L=az>_I*@{Adog6REJv5UL8AWd1L=3|ws{c}34)a|D&S0?q&Ky1ue zq)hPdx7-veM@T}!7QeQ1L*^ASy%@zAEPF8@KbyLrJhFcgx!#B3b}_FU38w_GVh}7)VBK9Y?D6TnAD}^ ze#NAs=|0-a`(PbK!~L>__r>zX+c|o>V!e<@f7zCE`X~0;mG$wuv7bm^c{euE^2jL2 z>(1|QLJQaWyz@E{9KwYWD}DW8#V>8GOMj|!>!kbC?vFjQu5Gb**_ZP^gn~8SdfJ-? 
zTj!BwMYhrtEn`2M6j7aaF-eo#74OaOonWDp-R1nv8oE}Z=}+wcMp~N)YLLC`{Z|y7 z`4L47r8!yG&9BZ{KWACwu#-8ZCRuDa_-7Q%mA8RDEo6dEDGiBc;dDcnX>29mD$| zrMv0)80B)`Y)&VM`=}Z(dvy7|53;w9j*n0F_Rr>2vbUeAQ^?-#*dZ=Ecvn=fcjVUX z@5d;6ySbW`d+gztptK8zB7Z4!#e`N#Q-E!agxriI-`SB~UwqvaI(k!7_H`w1F^1>spv~r^N%VfK$7Z|NIhf{XibvuU65o`SWp;o`eb}H+(q+^^l zTjI`LXZ9Anot&kSg3ns|DL?v;ouGQedl7T0&nJ(}pYl%IGo#4g0nDrIncLAqB^WQK zcR4t9@b?EFS#M&0n>OzjW(+;QPA-qJ$y;S0{^g%tvOokDY1!tcYq~+mF|`)*Tvo<%f7vIkw1X{BW#mHsZ7~`Z2U;NyjlXWXlY% z=#ou&W4jcxDP&dl$r1k|PnNb&bKhlb+nmP8-Zt7|W%B<%`@hcqzq9`_{b=;x{>S2T z{r~@Y_Q~|Kq<;%EK^4~jI{V%`vb{e{hgce{FaC;UxWDPX> z4XtOUG2FC8Z??*xAZm=WEP|-<^Co-zaFQMJWl^;MVX~FH-^zJb6x6u|u5C#kuds&X z-qEb5=0Az@pPqeX9DHe3jOY?;`JMf@WUJ!l3v9fMN3svC>gvVWpA8rK-fM>>4H{+l zxv6?H2ejT6v zr%9dUD~?~a)m+DN|{%nd3BB4H<>PslqzIeTak0H+#Q_UQFwU+@RR1g<#l0QD<__{!*x z)K1{y?{Z+42sGAPq z_sNF6HsAN$IW22Q8N4--l-a^vn_N=ZWiRRo_CV^zsmbjxa{c*~vd_U^zGMwky)St<*~I>6WepFWlPqt`Ni-x!4<6c@Q4w)tCyDHQU1sPq7m=53 z=g2>@(R%{Fl1vjKClzqFKlH$w^=`% zc<(oj;&x3nhWW|2pCAyWJ?pmK?R=D*d2TZMEbt6mcrO<^Y`lz4G9&l$S_O7V++@sO?>fSXTHg!f{PU@PgbpI)Xr713PLEB`-05lM zicD)K)63O10uUzl>^G*H(eSr$Se}<%z4UGi9O;_o{A9`#uuJ3ae}Nq}XDl|^6+BTC z%t+fKK5XG-SrgBW#$-0Ic2sb%&y3<2XC!oXAa0L>>G!W*m*uI;hBK8!XyUB?Cg>egyWVOgd;N7;s5TE7mB}F~3G$)O2 zv&a`?58OK;`Q8H&%5~o$)~@ZsFMj1>&G2oS(U#Sbv_!7(=ceNx5!AH|{U_g=Cs7PH z`Cix`f7hb_%vi9=QjjfpX{Q2le(!U$^5h3!O`eZ93~75{xX4@n6lU$qNn@1%C=g@L zF2!fn<8t23dd98bAF31LPtX3%Ze0*-^q-U!%F79~!}s-!^V#hTqSg}6`zA1U`NIcz zM8~l-VuNEU`?P8g&i<$Ue0ui3!YC!`;B4|gO}ZlGckUOX_&SGLylobI`IERv`GMI= zuRZsj$+g)}^7xT6cx@*-bhyF(N5Lg;OGg~pdVa_HmwiPfCO?Hjd%)3Ol`Q@7+JnD+ zpd0TVNA*c<(Xcp^9-h$};b)W|&lAt;<&PwW~w{zP`;X&C? z5udyclhG3W{LWxMJ^ReylOuX*zrC~HqBN`E`Td0FcZTP;6Q1aZZa1dJg= zJd5sKhB@($m5Dza?Qe{BcHlpK$RocE(`wH_JPLzX-<-jFMoDQ4H|q=lF0-qq#>w+m zUen0UeKh^tY@na-Ffc`cAKY8 z0CDl}!-;k*#~+OI>X#qK8#n{>jI)CFy=~9*MxH-XSMRq`qBw%)5NrFs*<)?5aaV_? 
zahS;0vdeyFd%}MU(+9T=(f4&8P8P=_fxk}ti;HkXu}2y4*_}Cj;+dxwRYtBer$kQu z8?(w-xhUw|D(6I-`_tf1x7sN9oJ(>~*emGb=>C49*w0$OG0Hgujgy#l=$z#~HY@sU zx(CSXVsTum@2`8uqex!G%8dGgP!oad$8o{w#I{7i6s=#IY!j8g3x^z?6BEViQuEMs zk_rXw=%YFF{j|4;Jw>Hs0v<4?N0rW?9V&6}Q;Rq7rx$j28y_3R6EI*0iD;r={@t_$ z?vjbQHTYP>IEvSy&-q1Nz-%q2!Z#@8Ah;i>GJ)(rD}^(MAs}(I=P@JQd7#bly_0 z)Dt7MP}|>+;f5rMSlKfHpKao1f#dm*V`z|MXw-a&6{7F7k(=nX5iOnU?7gyZtJP^C zZsDF*z|W8`r@lPb=8f6!-)vgbmMY#)^sceIdB#l!zbV5=r~BAS+~}2O&&kQuh5dL= zf!9~Oi~OzaMmhhx2;2+wtf8NR_>Dl{eQ$x*t{-wwXGWZw{Z{ck^{wdtWY!hgbok zeeX!JJ-%xtcZ>APpO@U;7AW#Xq6le!XS0N_*by>&H!({h`n_JI(!Y}$ncjZ;!qAR?x7kqj;WjLbkH^TeV!BIo* zCO1u^VfIvCtZ@N4BW*%05ntQ{z`zHtmJ} zpJt!l7`6P@v$34$d+h+Hvb|O8;=g$AFJZG=eaBhcQ#&Mc@7@~qqh*2x2Q_TLTBW|_@Qq(MuiO))W_rmPRG2d(>JFI5U zEmBE8Ilc(%TdGI4S44;=yd}1L`w3|csl1%mtIg1bNAHD0)rejxin8i8%wGC>I)IAv z{xYra>-(b;awK@fH+E*xJG*ihsNXJ;U+LemaI2nMX8rPhj72H7==HSMIOe;pHS#_) zoGHPPe!gCsmy6i{aC6^1aIe%H!yR$9zTI=o_r+ah$V!Hg%CpNZzaiM%1loRkOx;QEYvCg%|Lnn*xZ>>bZrV@^R$yio)#1Z~hYqt(NawXR= zkfG@54;1caS&c(@5Y2b*RzST*Uo~$l zJ@P(O^a7c)O3t%?nlzz4V}Y{ zU)ORPQ7Vr-zKNPc@5>3Tgk(kxYp~C4pU~3}Z;gI5s6<0WZ=-llGXeA} zur%1R$ZSfDi?cs}`NUTejFA%}*ai{xy7Tqa^gcdU7 zUeoz8=m+m={b)3Mrr7UeW$4Npo{{Uej-T7+S7xof#_wZTFM3Dico&B}a!n%SOuo17 zI~M`BwKFyRkBvvF0H8bWW6L#0@JIGnqhM$9Mz}Qg%tLa+Sz`~JR!+2&D`yR{_y0Bb zXVy2E6L+0xfTva;sEiMeYX0BhPrTi|((kYAFXQcYf68DsI0i>VKKM z68uM|us^wLeHXs?+L1$NS}@1wokT;A4UjEC-mtJSk*+AiVv{X#Y0lAe?|F7*wEJC+ zs7Oe2h^y&q_0Y3&eJ_2A$`!^@xr)~lW${1yR8g})hnNK~enoR0*zCr$ zWX7$o-=m_El$~;1g+$FEBIwxaDM3f(z3S7?NAtMV<3+Vg5f^%3Y33R^TcJLIzW;E; zEA#5Ah&Y}Ir^fo^&%KqOQbe8a2#>}Z9~PDB!@Sv%f5ZCaVQA}2=%1nP)5PECV}JB5 z?rGGibKbm^GxQ2ZxX24DIdf5c5$D7;ro7o5qrHlsf3lV3B$B<=G2d?^E8Y=zyG^X3 zo4n}7_E%se7oSfMy@LxY=(l)*O$F%n)Xx_Sb8RvT_I_W{V)Wa*K>pgem5r;f->5%s)g6>D)h!NUt7lj<=pW}OG)a-@Qv|>odC1>&q*iv zTa1nQI*~WX%473d#TxYTLYx5NGvoYsCWXB)soTo4zt{e3ps+`bGq+W5w}#M@d#8vXa%w#T~F!jpY5`%V_{rk%nb}y}4)}DBp zJRkebW4^CANIR(ugTw8IEifcA8U?GDA8YV>tBG2eo-vQ^FeVNxq36|BdfuWqF0;`FpgXFP8NJ(oLme@o)`$ex|W!|lQn&$Ts7ocGq* 
zlMJkR9`$yr3s4XBc)PxYAJ3mTj$Db3?Xb7><#YJuMcAm7ImSk5uSzuscdL0t=TrMR z28N=1jhrKJH+buO1joC=@U|xJ>Ovc*j=fNI^4||f@5)8c)Zc$tT+@jT*@ylY!JbZW zIC;`ue)U%&Ylcs!`uXhgQ#heCXyw`EliL*(3dvai`Q zXFrqZtKbnl@AZKl>6H|J>$coC+rG;bZ9vp2_d{!KW$%1}*B{qwuB>Ul1UeM&w-NEO zn&wBY!fTk^sx$Yx5}pp}5x&Fmd3L>beYW^Q`Bm}_)S6%ww!qPuiljvy?9E_FN>8R!o&j6{mCny+($%VL^30J0z5x? zbw3s-AJW{&mrDh+wFbRsR;{FP&yIVn+$GlSVqQ9@6BhO-YBjfLwk$-6u(m*HXB<&| z+QQ`22W_BFdKAZyRdlQ8K6y*$7XImE!u{PA2(lp>Re3@71*j2~x`KTSeVUoy+2>UC zm~RWCc>Kh)h}qyP(AuqD=uo)yJS?8`SgC8VcsCH2xNUa4h3FLlc!(`Muv%z!zslaZ zZdqm7BJNew`DF_XNs2}+5z$?#=sMmfwR{rn2PTd5#Qb`Cr{jCW!i~u|_+LyG@6j1m z8uWYG;wyI1{)Iu~wXH3cSckx*iU-(U{k65Gb2ldW?p`_7TF&2dj$g|!r}`qIqbQ2M zTttg;k_YUqRqwmjtW=OiOM_RYMDf+3mDl3@7IYN!Z%mWfYf>W>1@l|eGrh9-mBCi` z`#AXA6ug~uISS@?#)Yr{d$+)SN3XIuJ{sO7Z=TN}&ZY{R>gX5tH}BCAHH$fpzBk%; z&Evn$9-1WuHWlAdlzZ2<@+K-e855JpqVhTmcPFDLemKQ_#5z=N=0HW0IC(?fZE*`KEO$H?>|ACYjs3xwuz7uICU$;>W}#>W7Jc*z_gmAf>krk=+$U|TI!3K|rZs6TS8Yu)@k*{FZ(D0M^nJn8 zsLwU(bB+3(C>&klMKLntqtQi%1`T~0jal8>N9?9s={mLjz;bP^e3AvwoHXiljrv@pKF`$RVmXNF zj<0wXjiOnjKG&$vHR^LkxKz!f$J{{~8Rv}ERYrq1WMp30(Cv}@Go8h3p@%I=5`jZ|^c z@yB;u*}5gH8o|rkCHVHyxIIpMV%dbzm`6s4;ACPx#Ts2{s<~+tE{!$LaXDh0d?Qoiu+llrv+SNmM{nUObl#9576 zb&dM`!nlp)ioL%e=o*DpqdqUaJKzHyYe(~+vqWy~j>gMuJ&yEsKb72b-_Uz-=0}h1 z>JxvR;+#voCZocQhSS+SYOOBp=QOg92Y+Y#hAUeIGTYROo+7iY?rx{y!tGS&shWn< z(7?S-I#%O@Mq8rHSVJ++JR;A;C>x{kl+jJ0fjpD<0HKa<)Cvp#>7px9(9=Cav4UV} z)aM%Yc{`4d?1cYRkzK)mipWmbPZ!yVg3*zk@Zv#E8QF=BQ$%(J_t?lzcuybMiIUSq zb_M%sBfCO}_w5}URS7$tX~)G=!n&+e9g&$Ac8%*);0t5Bj>!9J6#jNxr-yU6_d`xD zT6#_y^^2DA`AlIxpK4smJSgKgE@LekM(hG~%I+~>eNr|8yL3V$IllZGpW?5uYIN+p zt6ENE=n=iWYn>nqqegwMQJ-tn=Nd&>>8dwsha?I#uI1uSMN7R->?KCrZQQj&H7&%I z#J8v7i#opWvgv6x%5O@NIifNZFMsNFT2gfC)uMt-wWFu1Vv}}f+I(fY=BL1!7+YOE zWu{9UCYybVOqcMVF4H9nMrWIZ_f*-Yg8vlRCSgBawn-F>&Nd0}sk2R@;}qGZf_rSX zNqA46Z4xD?$u<@2r_DAMI!=*o685p#CShIAtBj7#*QpqvZQ9S3@!2NPbINR!X!y0C zF>CL`tpaKqM#kPmy=0eoWrXfI-e(RfMr&f`td&hGdCN>}l3R_`Yd<&s>OIu_nk`}0 zsMwyJ{+nv8%lKXmk9v!ctVU)!`-QCO^Ti1N+t98w=HG(aV#yc7G 
zm?0PPeRXfi3_Oy1&4ujIh}&SomocZJ!`S@N|30=)wPw(cMEhth_h<4dJ6o)*8yyng zoP9FwH^x)o_eD-i!-)ILE$4eASXz!9tvh%3z?4R7lyw!A!ot7qt4!+GoZ-Z^`hvBX&gR>iiuZ&xAzPDx)V!Vi4e_S z{CwoA8=3_@sCZ?VJ4Mknd*8>mlf`4jqjznZLpJZfGX0;|UVUqGq8mLD9M82mwmb>e zdgNIh$MD*8katvhuQslWhF1S;TM2&~9$6+5{?_!Js>$e;DPElSkp2Euk;O_!-gDI)efO<5GUIjyyWpRjia2MLQ#xeli%*bBo9xDl@tCND0HTqPooe>tt??=DY}HQS{h@C=y8 zfzM5z^6b#xOuL?*y*-mm3v!|KAM^Le zE0F!{^n8j{$%|r9GH-W+Ro$+s=is&kCuz|+2F+LF+`_QmWT)>HvUuXmet&*SM0EpL z^49k4w0h*Y>l5)jd_6iLPewlSHpF{rrTv72XfE!QM6hFK>~B~1z-m27bd;z0J~;d9 z*%xP@pM7WCyfxV+>+kuv&+QX>CO)xG+sTdRos|e;ifB{6JI6FB~zn_FR7-F=KtK@XX(crOV(T-+L{uZ z;yI)F^!?eNtatx+;c!dj6isoD-IUkS4dwZZK0T0Sk%b|%u1dIC98sJWe2sG{AB8nu zf;1nE5$i2jT6Zjs*B87qt&PMWO!AMy#4B{33&>jk79&~nIFG4$X%x0d-f4B}bLdF; zE$y-|yw96ggVth=oRK)T%*8}vX=mW4CO@0Edu>qxl}PMHf3my*JGS@?V~d|ClQ-5* zg+G$F%3)|OZH6HdmvmNR>mxkBL5-Jgr$*ZTJq%XN(LKdpd62C|gAC4zG4&pP{4wxL4GS;EuHDt$ADOf0h!Jy*^6{>dHLNEsa{8 z!)Y0_Hpt|qtx4-I#_?l61tYi+VbA6DpPYSq_8YU+5isRrHU7b@3qIXFEn34}hD=Pa zOg1hjivEPo~zE9VL5NfS|?%& z@|FI+LgWh?;-SW`jF0Hhcjk}D!usjaAE)1lcrx`$Sxxd%~zcB1Xvt)O3IXypt*PWcKIseh-!;LcLr`$V|#)+V3n=@xyq?dK( z<`!pzkP?=6sAa!(Yx(vZYseVox?@eKh{rc=yus$Mh@EJ;yBB{IlVCaQ2Oz zNY#}(S=hd_nQ<2W!f2^c#()}j7R<6 zY}t2%ymyOhTz=*I$!;Qv^T;T}lIWI1Zic{pkmgll=Pd1Vd~@#Q;i3YOm=at3Q?q7y zSVu(bf47;v96YnX8y~NYkC}cuQjn{lWXZH+`8c29j^c=wBDzLqc~yX(j-0-g z^V<;=?AM(U6hv4ESs$;TiGU~>k+%E&L00SByhFYB%D{##E4LO|g$yih_L5W3I6K*L zaE@(R|2k%tWEi>O<8^r4vKlvMa7R`w?dPy+$$psaA2PX2RTy*hnxG7c1-id9c{6FE z)=%|mS+lzfZ|PWjItuU~+`KJ))bePdiaZxOb)epjAud1O>Wef)=Qh62_G(AY1oCF+ zT2gL@$KL!{&Qti*S2=%Xugwx*W#f9`w6o$R1)_aw!r)}CC7)x1pX0p95_hH-sk_3w zp7={lny-!Pc=KZ|ufU?a{+!R_B9!=!D6Otg?Z#SiK0zLl-f6X`$k(5{DHy9$`EnH* z|0s`H=Qv`{5?V>o?|(7*@H~ogDmQkcxz0pq`4CZ%!9Vv(rGs=N4($|0|6HD1%;d-5 zrrXuHUe?0iQbRm*J#}TED~?m6>Sl7D)HM=Cb8^+v`Z2AvG68rK_6UB~9J$Wou*On( z&s!YpUM$ahiVBVMc}K6DuxRYzt8ChZ?O4zQ;;9~Zt;7=%;?;>>{Fa?6o@23i|9kI8 zf?sWKL`JWHI*n6XO+23gzw9Bs%m1b6QZ^nmmD{D9)S;U_`1#P4z;o1CRKLR-jivAkPvpBfZpGpynU&k|Xspks6Hjiv 
z#LEE`XS}=C#_gl+PhRBQW0&Pk$fVHyj@Ns8;I-a_EW>A$b+6xoQwGQNg42O`Ibyw| zJHbZ!@3K#Z>v-HqzGrDs4~>$wOOv%8IIWE71Tfml_0XufvJPGY?)j1)n3}gnZUo_9 zh@Z1EmBOXyi9HS{6;y6U@mn}rUS;*7mN)i(*L2&a3kdHB5rYy<5&g#PEj7Q0wU=By zGF#0qjTl{f;RyJmRhEuQd-@MQop!kN^Shej%y{msWAw}otAli8^oSSU2<|Oc2dYbS z9)Y7>p`{gw)=P0n=RFzs$P12)DzXCIbv=tiyyzAe$+Rm?gce_6C*DNJ=2HIYXiWCp zZBzt~mMi2x*rR#|uEvH%e`T2Yzj^j5BhGB+7{RyY+FVcG&U^JmU}&kXR|eZboL{;F z60_iK9eY5;bLLbcr>z->CEU zSvu@`TXF^EEsQLTcntL$$*5awEhSMRoV>OO7Y`Pf`D`)0nBr*m4P`C0d(7fy(e8W< z$}H8MCJQTd4`i(2Br-`I*$92$t^j)qc~EKmx%DL_A3)w0S;~t<={Sf5Ahz--%Q=z7 z>G<4fs}}`BzUQrR<@&M+AFzxYOXW`le657W7gMdx#S|Il>XR^Xn#GxkpWnZm@@eG5 zx!J&4k)Ms0Y~ely&e}%FUiv*n_d!{6EpnfQAyOVC_fJA2DXqJ9-O;Dfg0Hc~!+K|A z5q9It**WaSmt#>|e36o-+M6-2=t&~zXD;c>k_zF{*y7CH;jUxNkw@XuIG<^bObVCA z2$_^D>bz$jQAS`8wbH1(eMpNbBY(Zb5oukQ&|@{Gg&(UME%aC|XrV{SrnF$52PnIA z>h7*W;sVX09b>(83thiQ=NJXI9dqQW#gZdmEtVcRQ%s2@kXqW)Xk#ZZTk@%md2o#M!kmY0gDJkrb^qGu=Kdx#4^xQn4kB(f0z| znR@jDRTuGZ4QW=MosG2&?L4?6@#BJ?_`!^to_)CMKKT(cBU~EW`Hp*op~Vb$Gwk86nU?rC>lAAjX0J;0 zT&iwojnXLj=c^C5#Io>4%Bp-co)r3D1np+DJ{k+2tbU%tBro7+#l+8O)A|nbkE-CrSq>H$d zh+Hq;P%V&CXC`?NJsOvE`5uo-xMc_V3<(t_8kZD(Z~S4Wic zU3_P=p(B7-8Af6d)mrU2CcS+4+RokSvvND>V~)JFlw{1Z_=UN3$-pI!YF^G+&$VP) zHsNdWXUtLjY)`E-aJBOhKU>_|4*;*O9UNhL8E zw#v^pBOpr#3kE+IiG(d2;>@_+jW9;eMw}vt<#H7XUA)rBnclPMd^y+W3*TJAqAicG zXarlP=MWx^*_Ks!G_rfAyQA_6+5D7DQcGChH_OI5PNT)ydUix0#F5-J%%15SFI~_w zBXP}^IGOw8#}809n>}MP0NQzY&T%|egDZDWd(#^ zdAxGY<|nkQ;xm)kY>5>eNG{%mm!~}IG759XDl3v!r9lP`aZclCNfKU-r;;S$*BHrb zts^I`^g$#K5(!^4Ys60|&Z0h>Zd5muKIGlct8rf%Ms9V0^Op&`duqKZn>qJqhMCiI zay^pe_{^(jtFgvPOma0v2_x|GdJ4USydIUA$45k(rN;c&Pna~;F<7)k7Y>coMZC9N z3KjRd*FZG(%>F1A%ckKQlOXWbIpD7+&lcA!zl|X@f2wlOUuK(edqa3M>iiKo#!`9T zA%64sFfy%01nd&+#phAPBSipNau>-5x~lnob*})4fZfWfviRsDwSQQ8dWA?o<0H

    S1J|rZ~8iZ-G0(7PJcrr zYtC?%Ju&;z=N4g5kpd6$ydJg?i6)|f)9Un=H^X-yX6PLd*@?3gzP8oPlj~N4h$lR1 zqZ=ZA5R2^D&#alF%jgqPve%xXJ$~9mC$N$W$)kW(W)BH2bSf`~qnem|l6e@)eRV7` zr|fQ-sf?sfI*W_mS=a3m+DWJ9%z2KG%^HU!Qr0K317qvy3SKnQ`=Y~9ubOrV#S3Mv zrS{`l6|6G2`-<$Y|VkO+2waT+Z{M^BR0)D(=0e zR%2XC*t_J_ecxET$^ML)u=n=v%eRRf^jLs6C(mDi<7x67fDgZ(+xRvyp7E%Z43x5N z*iYWWJ@K9vK(tW#c#U-rH|P)LEVci|+o5-5Z`O50~#u zM)>DC^QT_A?wi4$#mgFNUS|k-Nu)4qCcrK+PA;Aic1N-A7_U$yj{7Lyksv!>^88?y zJm6U$n{O>y{1OZ1`o8xjhxn#3oQ$yQ@)Bfy$eOatihwI`p%H7!nNswc3qG)QuZnm- z^W0zO!sHA+r-EO~n%SkT3GkWSrTrkmp4rIitSX+Cw=MGKHqW_c;qiN-tX1AR)i}s3 zvSyFcq+*VZERb@Fs;ow%Unb($EqOnC$;op`5;O1N791t-oh66ZSkHg@-XaT6iH0}} z<5eU-&Cj&JHO}_@FFs1wsv&*og5E0d{Q9$L7veG*e*#$Jeq|O}ceBX)UZmte<`~gz z=0<$j&Ob|{c)glk9;ai!Ot#H;#aZ~sBGju`)|W`)h5caNGJC^{=VP*J!ggyb>X9qD zil?t5vn*~$Mtq7WeVwoh$c)F9oQS+ni7&O!RL!)!VHU@B;q#Zj*ujwtjo*t(PJ|!n zKvL*NyPM2iUDrRDy~VFwoXu~$es4169jfzX>w(cg z1`2!oVA_RnEd-;Qz*4m z4{ZIp9Ws&&-`fhPn*XnpJ@E56y?NwBoZTs;$z8B2w z_#l5*$AuZyeXR^e%7SXR-%K`~EL9|QaY!?m=LE9y5iM3f2fmnmMzrL$xY$XvFY!tg zza^ZFGh@Xh7wnonhRVPq?rFTS2sQ6?TUazk%t*Gh%`UJDobQ@qJBDcvODtK^w#1b2 z7Pu@$nIF=Z;n{^@exj~dlC!uNDRb;&iGDe8j({uPYb2LXK8M<0s$jVRLVknD7p&1d zDtL)Z$S1K+XYcmb{>F=wc{w}1G*9hw#tFCPMlJw();)}JBACTZFnY}yvK{xvy=J5l z&aG9DXU%IDzIk7qa#u>(x;%Q&5AK4Mibg8;bzdttH_6*Sbf6+TNB&hg;G8ukq3e@&;4bdL;3Cl`DFDyg36ORW>PAG zSbc71c)fXig=ty&C0?WspOkr{tl(#j-Wsy9<%N*Z zaUpM2_?_n3Z?aQhx2&2WEc_#r=RA9Yt&k4qd6OCoK7@M=#N*?4CUZ)>E?E2~UqFoR z{i?_CT(!w$NXY0R{aM<^++W)qy)FT{Q+6g3BhfMSyzKbY!6uit*blGjsc-A)^=`Pp zt#3b#%9=Skbvey?lvII{ygjqa%rL3b1*bVLZDonZ-Gbz0R>1kz=EcrRRd+;R@i$B5 z8I~C7GnFSTWq$O_M@w17EPP(4?Rl8F-_65VVlvmn;_LHpmYSe8R-f~K-|(oF|9TGFGxhB9*%8xq zD^vcc!s8Y`^Hy{~CVkX7jbrwiqVuDqg4^=mFLm5p)jhtvio%SQ`_L|im=L>{wd(De z@2J?j?!Q6|K^$|6H7H$RmQ-+_lKZZ}elE1PaXjCeYai=KMs*=N!$A_Ww z@lhkbfQm8Sd+V0wyo$_E7wPly%KkOG@r?dEqDZ4x&ehdyI`z)7fV(l1$%^lBEPNWN zU_y$Cp0R$aXoKIL1u6No%!pa^tW_7Wr|=VCuLj4jX68l;E$*M#$IP=v+0szg8%ZN| zcFDOvI~v!Gw`oy?xX@M?N<ZT9jNWmbD$-If?q}s>E^@Q{ 
zg#%6zmPK#X>$H3v70^)PtMv@pc&e!J#0R2tB~v>tx1lwv_w>paldGq%oA!1Y{rZJp zBX+oC&gXGBu>`dhPmQi;mV?OX;4L~dW<;vGeabx5ufEiV&Ts5x;8%pNvCU+87GdrW zJ=ZGj&GN^hNTb)=qqTW4fN*I%WK~AZzomr?3-*ERls?JGzl&9OZ*_cPfwBjlrtJJBr-T(4e zb&kJCHgTBeKmGJ7o96}dynCFleDON@9NA5Y;*4xw5c$nJ+bP2Eh=Sj;t8*ovkv2_n zgzrwdwH^sw6l*Lsrk+_-RSWV=bw%cxuxQZ887x+tel*_sHGUr_TpG#SlMUdWZGA%| z^Ath2H9~`D^(D_49eIRJhiB_EP+N6a(55!9maNC#y+n-VOEndvy zlzKK3E>#m2J`I zwG?%=1g9YM8^F^N=a45Q{>YLs@LZn#-te$bA@72H;8c_>;Ik>hV->RSNBCp;`FS-? z1Y~)7jqWw*65us2oEMSje-h50g3%>`RiOt+^g0NSXQPoj_lIa5_Yjo%W9G!%&Vjt$ znQ``8YhSRdTV=Mz5=MHQ(P=1uZum{2>UmvXiDt6&Q8SH{8l9zjbw2x` z*tbJOv&Om_Bbrabp)pJH1YaY%oq7HxJoD3EznRM!bVl4RoEkkw&0aAo60)juG!u>A z&GeUwh(DWuAq(}j@$~f+jmP^qVb?gfB3_q4RRMHR^NCG+Z9gN{xV8bF?ea6b_`Q&3 zuMz(~dbSZ(RNmbyugV@Lr#?T^sei%4uh7WRx6qPzlBX5HBI6bOgj9AK zeaU#SQCL+^&56Bd0LYE8C!+>C(E%wZ=PkFK)8LigYG>@gOL+e77i9$c9NPBPo znR}SgdMP~RjR==GVqv$}c^L6X-ucY)YGZB-KAt?G@Ot05YvXWcPXn)s%I4^kX_q@{ z{e;c4vhFvK(I<&gN@WlIgT~EK_}o+Fyp$>(*K&fIw~84x{bg31aC*o6oXq|lQ|Vtx+@2*JaID^;<;n%P z@aA5%NZs)BWJv_0_b(WS%{!aq)&=vf2wsW~Ia#v7+Y3M2T8)Z}3cj2isq~9VMNZA& z9{b`v{>||yGvV>x{LW@34gR_XR{5FDHw%-Wfx55eRE)ilcZZ-lzueW$v@k?Q^i9aG z8wQhZyfPzJGjnFh>5D{Jtuqf<5>n$(B=39lj1v)QyK+!@l3!=E>P#0M4cSMK@yPSw zUX$g~I>Os!Xhi&LKEk3g z!b-_H;l1#}9!Et$7L^*Q9(glGN~MC>y%t%542*v3gLASHpGC=>W~GrX7q9FzhWixi z_c-Ti7Umw#sg8(D{x6op=z%4FCzy{7roYHZtQ-MZ7{v2A2JcHlWl|KU@5Wc&iywv4 zEjJO@<;34S>&-s0-dp)T+3yo(o~lMjie%95R6PxqK1l`kEcXozsHYz&{Q#onOS51X ztNK0N1Ixqpe%di`FMxMVGV___p~(wp%_X<`Nvo5|A`BzHk>wD|w5 z)$zHJv%w$u>nZVmp|_+2#^e#0pemvCyIbT#d}{p3F{Pb_SaGf{RTo=2iqBe#vo zEdO9Og{YxKs9AgyW{t8h@hc<@8Y5On^1Q51kAU7v@Hb`^$xnQ3s}yZb#1-NAv*BX> zIamM6;;OnYf8N>@?Ckk!8Zze`u~w3Vx@%zs+*Ss#oEax+TK}5H55Nk2`mc!a`q^d8RlEZ-mVh$7DO>B1=JT zM>1p=fM@6}T9M$3h8_wcI72lZFT z$h@};TKp_TYDDdQwhLHwZn5`pRMMyUDtdJfN6wk}Jz5sBxGIiB)7!&g$!vyRYxTIB z`aEov1QqYjK7unhbPeuCakj?sV(JP(4L_#=rN6OcF#{(t%OjhU-~Wk=E;KL2zW(|P zdxq#)WW+|I{-+O|QLIZxLyyD!l+EM4tg4(jc24*>{fg$FpNU?ceKO58{$xe)qv=-e 
zJQ{9~jg#ES))7&7y%2Sl-p|GDM+6pOlh0~jVvOdD@XU7%#ZKkr>3_D})sUgJkn@yq1BI$S=zb5yX^PSBGyD}jb?Enn)YAJwfC z-6N~aT^aHZemgWvPdJaE!|y13+-P}Y(fv4#M5zA48UEehD*kjJ{s`%*rSo=@j0mW$ zOcI%9-X@8lS@{I=bKHK3s!W$X)W}I(^zJ~gHM-rWevLe)->!(O=Mff-b)7JLj(;-S~Sm|h?tP-6W&I-u-~%y%N_+! zHSb#yeaBb))p%)h%9Bra&Qd0)B~@L zInc?+Hg~*5{JCRYvk}eR9xjNhHGMtsTCUbRR;&O%n47KfGS)-2xS-rm>}lX*lP6}* zPJ>suGBjG<0DafUUCFYCi?@ZP4wALLb;HoPU) zvUr$iEiP3SUeV<*j3Fy)J^7*Sy`4&M(>j`xEbao=J1o*yP4Cc_mw4l6qP|6uy{;uI z9=T!Ph<)iJ@;H1Y$c6l9YmC$p9ir3Gt4)Oo(DUReBlD(J0QQrGZnEp4-} z+qL1ojq+q(^&NShSI^g9G{F}6xn%BHGIJG1=`6BHoaFP3mdH6q&rRfU>pqu0&ljl@HkW(* z;u>B}^0%9^X`rVo8G(4|M-uS>^v=<})%YAE{W;@0^GNoHCG?^<_Vpj(T3heJu;}#6 zp~HRjde8A(Bt{r>OnPI;Kh2#L){a%fpp}~$g_j&d)`}I#ppfRvS&xE z9#_R9ZJKki#O%2|{D-ZREKr7ho6jmQawmcyBY10G_6M^PzEdDuP4vnAR($@QYF&&tp5d4B`H5)Hj4|E9B4-rc+Hs&bIzX#DcOZ#eYM zR}eKu*1HR)#)nf3z@4-HQy{_AxMiKRKEzb?lprg`?v|)w3p~*xxY!Y1NJlbz;0dnA zFD*XneKkO^+VK!Nl~?vI-G_Wb`E!j_0I?_F{7h88#0)-j~zwf?v#;mS) z_d*JpeGf$*8*xg}14T99yfgu)xSy;(K7I6I;gom6Cc!(ejmv^tx&-uLK);{b4?Wg< zc~1=iyn~>E~gaX965zS^$hrhWs9#i?QrLCw$t<>3+wye;leUDwpi|M>`J74B-dg$>NM7Kkzd#v_~{5+HIe^cP%E9R=;3< zW3zXDAP3u_a}H46h$t^aY==l6A*vO&%cdao&_F%uQG#`yM+^uzXf_2EeYU$(t>6Jx~hkQ>w z)VMs$*m~}N40f#s*!#{L#WRh|`P_${8`XU`QxtRD+pT4lkWqgtZtMDR_nI!x+s_=A zyxmI6F|r~_uX$KknH>X5{M6{((A4i>>udy#dMPql+c{hy0VndDm^*!(ZO`l3`zeJ^ z`h1sL;XM+j`EzCzwJMVP^J#4^yEH)b#c6^dZjs_RD=I2vMK#X#Bg&HQ!dCPrA5*Je z*%)`}UpC%#{mjN%>u)x$HT}-VnUx>jr5nfChd8?+JD`z$t>Ta<9t*Zct!L(0Vb((+d8K2pXG+`sT&H8-O40h1e(KxfF3)_bZykI4tx@Z* zOl9S0@Y8A-Y~{6t_T*do@SWU*D?kk=mHM6s=|1szrxzPaMdzlylx@ zNy@0Yq;_qx=5xr}70Hi!4@lBbPMo&N9+jhX(eBAOtnUxDQsljZ-dY^fqv7N|yzfHa z?YLHsw$ADhInk=->3ETsEPbfuDGSHCyk%jlV z+O|k#oD~-p$_Qv&mz5}uJ`C$zma=h6*0S*wSY+y5Bc}f4UOLA!0YViJz%$bBBbDThb?UTEOWZQ-Lme6w(Z`!zs=CoH$``zx6cV* zqxU`Bd&k4t#vMOpoS%qpr){ZeTV?VX&5%YB*Se2T?~h}kNWV04mdF|Oc5Vm~-3@u4 z#3S!Hh5gXyS&psz7OinBD2Q9uKr0H@{8l<$V(4MVjg7vjS9JOPd1~0bvw15!{VP*2 zwL78SRTAVayw!K({H-8rlsD9PH>acHklmEDD=`Z5mX(TMA+3nUV&kIuAPgG!+rVt> 
za~qkDEnCXORgK3;mu#!vlE$iDn4dcahV)A#Ra(Te{WkM{_@8{uD;)c$G=B1-8?E}o z(>-fjUmeNg{g5JQ8hbS&Z10@jqi33*dcF1RY6MGTJkJEVXU6fGD5_*THP$?h*2!Hs zmpmVZdCQtep8sJn_?s?&A$~09e;*=a%`;waHE(h_ysYFM@95wasma2^9S`!h_&I>|K8}pqXgQlNVvwq8IwE#yp!P46<1o*X1e8mh7Tpx4fN}k|K|z=~&9^XbO7d zIZuuyw;IRsNfE*^7ko{EBOWC`qlhLE24&B`REH?tWziY)yN8~AcZU>4B zvhe-X`746doA8=b#0p*545=#hw@3RaY+p_7jWM{xaut$^WaYw z+`fpYcJTW?ycZQ=U%b;Wn&-FCsF;hesl57qV{1)6Q`ee^)sDCqE2|&-a#VrZ*75Y( zoMR#xQIQ|ftw%V~y8%UW8kZ4JHpaCGDI4eh2r3`@GQt{zlMb8Nn3fS*KE`jaM4;L8nn$SF*!D%R*|^&XHy_`w2sj_}t7-SA z2ss;{BIs;9^9VZ|+pY*a8}E9Ao{yF66?vh2>$V=@XV38C$aU&=A+My6r`xj2&~AhL znZN1LyVubRX@2{M+mE8JKdYh0kN1&zVS+n#5fzjzC%Li5-Po&;j1%XYzJun?T4?U) z1aCtD3ush%`fIH}OuzL*U)Q=`$yjPtFT^bppTDmzx|D6F_UT$a?&szK{O(4ri`jVh z>uNUkHC@iexnI|_v9H^Le5`E_E*s-9wnV3m{Wl`+hfUOc;;TI4=ckC}nDx?kG-|%} zj<)Y6q#$dIJ!KM9jd@%Zh92$;M!csLWR1!xYn*dE+F}T+Mq$;sT$Rxj^sHLmYKcD@ z$MH!RHE!Wgo~?+hS&&5XF5_d{1C-O*(xtpN))tPrjzvLT%0v|8Eo-j1j?;-E3t2Wp zI-qf%tc;>zE=!}ZE@f>L<}I=qFN2Z*jrsf#CXL7NLH2ik>*|r&IkH~IK$2E#T+X}K z=4_!x^NWu~*U$HR=P2$elOF{yel42id5(4JOV!28twKMnjinjfNX9=Ph5h$bAbV-g34WBu^TPoMd?vQ6P*O=W;X}yX0&%-lZIl zrl3bo^W;-fp>Z6agi+%b{>XoQY`c=RTHh$kTP#Zy#ORwrvP}DOJ5jW+MQ`Vpf2@1n z^;aTjMswR5HHUpQhEX_Vm!sfC-k%V6m$oU2+AX$6RE@LEr`>z6*{m!dDxaXS*tAg? 
zTbnlu?>?J2iiX2%<|u0BHgy!%eKvO#4YJ9j@YObZ6wakhABA~~%~!nXon|hlIz~EF za|3$|JoylfOK#i;52snrqo#=YYTm`ZN6lA|=hh<{x->Kz?)+XrH1t}}qhaSsuvRoe z?($+bQK`s9W0CMI`?d@7Tmwd9mxheSJJ+Dm*lP_NjeDtqqbcao&^*nQCTbkVCt=jM zg+K9p5j7guqW%##r#^-X>Y~qE)=rlBiPeHUw>vi6gW<+e#GN4d74Fx(G03`iKz9wl zbraF`1=--;3-0UpXUn+7`P``Mhg84R&J6(XEX9qZe%NzPa+j;_PZPn_H4G8Au3nog z+)j79`?(qqd27&~o1Xq)6i3o}Zd%Vh zJ8tOs-8k2(`5REZ+=gO0;!yiQwp&>x?%tDg*q4e@c@py62C53g}_g$Qf#J+#c zh(o-G)JU8ed`9o+;zy(*D?3t(+U_Z*p@L2-??2e3Q*fc)caP+LyseaV*kUvF-T3{B zAZ!1k@ff?AwKEni8VgOezK_Db&F+q)?r^(&e`q?zI&QV=qqx>*_eW8=)h~>r4?gN$ zw7cV@QFJZ+$|%fR{7$^TkuBG_md%Ogh;V6?AK9Pn$);kS4a&w=vPIdL_GOc@aj)fq zN;l*YJ*?I!ARCoG^OCK~#-(glHlBUiu6*2dg`a1`vT@egkFwj#mz~41Y1y+sF58w* z&0X2Jdp&YVxEgcM1cgOo$rndS zs32>Mk0k_IO5FpQb^M<#J8)Z_{Z0T^i(nO@sVzX^{U{8sy)nLH_q7$e*R@ zU>29Nc-+I`I@i1P-1+CpM?5!Mj$JQ1`|Y+D-+=eiOIB~XyqHd?@MN5WQq|5r@~T5J zsmLMQ`mB?(X9ne!c}1w{@zit~^JwAR;BY?5%S+Tv`5C@j>z)-+%pX(Dv5dI3O5ZWj zQHiYZu`_DDndZ9oNS2G=yj-6r1f?Gr&5mkHN8#hyX#&}`q3;B zHGUCZUUDfvx_Znw`O*G9*#kZyrRGk*dbxEeuHF7SVO12Fe5W=%nitPa zKQp0I&5r%|-CU%!$eYXS`Z-0!3+9FQd46|}Lq0KHr+EK^tTOw&Sc}VwlIOtV#Y4tl z#B-zUd+|>>IO20kk`;|{83;kt*b}oT4&saznLzG1FL~URtq^4zfnMZJ=eg06Qe&IX z#ZUK2EFKXd3!BEg^%aJmbruY*x98^~APTZZqL|p|NRT!DU>G>5@7*^DUOUFT_})U* z?`@_dZyt`RHxnzmca8aSFU*nB8lS%tmF=i}p2ngd(OISzvwjz5jnbb};dlKy751h6 zot6^m=c%Yz>hGy2iO}zSy_Obf+#lOy^HNl3T*h&0vCnv_+MM5L3VLKKPo}hL8rL$1 zS)3A1jpr8gx0%IpxDU%bj--eBQ)MQ{;%(~?#$xx_eH_kt=5r+8^~~r<3if4A$I&s* ztd7H5SG0`8y3EXu!@ice9f`fo?2f~|JM%k^hTWOru{5-q<8j#A%GWA4Wz0ie)u!iTafxP8TF~6D%4*! 
zjLQO+_ot|5@$tZUKw3_84Jr?e6&lHvA<`kNGWGctz{rBQlrJU47R)C53<-|LV;$`+Lir+o9LpG?c zALxN64r;7zQXDS@z17J~i4)G!PyNv(<#m6__!*+DUOV~NbP0Vz^5gLoeexPbW-0Fx zkL_oh2NnFcsWw6@7F%m6s2cP1S{NeqF)y+b%*>9OfQ!}?*?aTE>38`b;DQP}H!=Qy0Bti0@##x2%8-l~fljqCT- zqE|!MG_Kue%f=}S*xq*=g;$mN%lmNI6pXWVT3?NK->1u-`|kU8`IxgUjkv7w7z-0; zS%h69+0fFlD2mI1u950KFmacLjCW@`{4B4-*-y5 z>He;JJ}>O;xE`L|m^SvjBzk4lT$cu;=Vtwt?Gn*Y?*KqYVmkZu)N0X-hCWX{t5oEc z+Mzyr{Jqvbdai9Y^fh4z-*Xq(>(lyR^h%BQXCXI0&z=6Yx-l(yWRU8}W2=6Po=bz+ zUo}Wy6kby;J+`S0+ODxn8^3j)$5><8a$W;PJF(~UTY$$vlWo@+H>dp&pLbQaZkb_R zE>AO)uh+QbU6jw?h4C0Z#LZvx)#x^(jtipZEeskRQ;8dL@!+wtF?igOjfJ=*4~INZ z7KS>m$-}WL?#aSf$34u@T^5dPt=SdV<;`aq_hsQ)j|=m# zt;dad__EGKruFF0P<-{x|)T|xz4H%jp;@pfHUmOJ~c z@*s#?RyA%1FDeuVXqwQ?ZoCY>daqYn+@)E;W`I4ogOj zDxUqY2r`O8qDA9eXFR^$ZHcm|SdT!nu+_1T$5C0B=Kf+Wf*pl<+?ZV9*21?g3rS~kMQ&Gv^#(-Oow>Zl7k_RmKVI(lIq2E|<7%GD}!6bwHq1yX8C!Cp-E5dXR@1&$LfZ@-WvmCwW*} zJm&Lp$ zDjacHEZ(Yhs=dZ}yRZ9)`3d|So~S3gJ6{p>Ox@h#_&CSWv+6A*f0?R>i?x&^p?*g; z0wNKJ_edl9}x=Y8$rIgP)tE-A*2Wo@@Rt8r`*tUA9xU4{GnqE^Q6A!F9%U zkKTo7P|rEtGHi3CBlY^JLDXlYI0PZ$n{HI zoE;LC$Jir%CvNX1h{}m;Y%Nw)q`V8CBEYz6$NiXgTfFRHCv6#Tt?R1+FVB-o;n*)$F0j;$QT-k4M$ z`S&MNHlE!7G2fOXiiXe6zBdRjH^C4^f+<)JX4&&TI$In)bA4dkzOgNY%e?9x+ z3`u%rKUX#rGU2({9-ci+#qjX#JDbs849k~i-`LEuX7lju|1ti7A(MAoG*I#+tMjc< zsD2gHoNLwbM$eCYO-a^AINGt5tZ42F-yF}M&;A^~&cf8Q0)inga%<6=_#mTIQ?tdH5Rb!uOqVpxmVSll_LZ!*V|~ ze)w<10vBc}uIf66K&Z+$HccXKwbfcca1pj!@$_iVHomC3%H< zUiRbczXzV=LL)LkFQ+J82(HFw=BuwJI}vBA1Y4uul%NJDCfXESjo+B>;eP3#&c3!7 zEep#Fi(=_rlrsm1eqovzr~TrOMz6o5FEuZ}U#87vo`aEfjns;FleNfNi!GGO4iL4w z_1XeOE2XhOtK+NQc?-J6bu51~Ta0J&6W3^*hj2&x3}|>}dO`27L%yfzSmQEpG_nTi ztx->u_{zGp-E5*?^O!^WXHA6o(vFgAjV<4zvWCJdJQ|nsmW}Zkt&pXHO7!4~>6PE8 z;FDt@X}%is<}C~w523PHj}l`a@;$ApMqIB=_d z^gBi52KVE%IelRA0tzFy0?F2@!rf&cfUS4`|TM$&|UwJnSnG=<8~=N z9YvDLJPNp3x732M_-ZZ5!<X@2pWK4yClC3iY`w-`n{`;uCrcs@;kMR^ zY!6v$;^SvWMIL3Ho|va0cSO#JP9^xA9{=^k;E+StEuS7(^*(s3cUi97R!E1ejQFY* za_wfFxPQBK|0Sr8rVJnV^f~7w^Ct=5H8fU_)6nf4Z}F&WE$315F4S~k=;>A^OddCL zB5*#P*1{z+oBq0++F3j|#6Zb1S^A35v$ztBuZ* 
zi$V^1V(DFTaV#an%)7vkraW!Lor3v_T-YpnZ?mn>ygvbS+O-Q=jTn;sKH^Lkg6e=eR3 z+je7IdTcty*$pSX(KCpMUFO#8wb=kNx!C$*k@5SSo8xaH=D__*vJZ4VL|)@OeGQp8 za4(ize?Qe1#siOsBDp?)f4)Ro)c@0$*L%1{NKUy?fPc}A`+1#xKbH={!h4v#Ht!HT zzYV}c7oIrz{9%ecUYYEByop53Z*2AQ#ZNK3{~vN63tca7d-Onm3i>X21W$D8{vZcu zq}K7a+cWzSy|I>}g&s-yF5k#5Kxdk5H(BX8d!7hOf)(^Akq!#%NmO6huh*@|jMaW>;($_x@0Q)5J)-m&<8QW^J>3D-U_l7h7rE4lrA zOp#HM*0AgXUt9DLH;4HPl0Vpb>1`Au$5>pvbxIta!Jnmdxep#iR)p+lgw2pN+IK|(jruVwv z_w&eB-akI#)12qHg|DVTxOF2~FUQ2iQ=;X5VAp)(=Lck+IN{xSO_7)NJienx}Rnox9PmZMR46Hv+ck)aZ3+U?E53yD=(rudM>Yf6J2)(SX-ycYA4Z z8aI)M*pMA|yWV4hDkYsyNP_a|%9j>#AaZ5%{ko`@SJiloES)NH>Xo%q(Y=(#Q&AEj ztFmx@h8h7;GO6)2F_o~7vr#Q3;+7I$#d~$la4C<`o>Z73rQ>wA@i2Kjl}@)(l$Z0@ zs%n()(AZMa%JZo>w3PQ#Q4*mK`Ff!B)ObJSU({$URPEmbXRo#B(OBE?yY|SZrA_*J z*nVy_tse(RmW5LihEOCDkkYEt&W@aJdT6 z@m~*@6rn=J9`v4+`C)JNfM@6|@jgJu}HUiop5 zsFv-}c(;UP+touTTa%#E(bY=WsVIq%zAUl>nD$r=R<0H zO=VOaqs9A;`<88i zD9f#p?$x?E>D@hk4zz&ep|Pr&IF>8=f0mV9=k6$}-nYV|Xjn?XD9l?lFy8-aN1;(( z@2+0>(OeRKjiRG;2-?*vKbw+LZE$xl{d`*L4o;=l{#d-bdhw5@Lc5cBum00ep~_$H zCS7{@kEW*d`X7tAyag~8uigq6i}ldA1V&TEOR(jwR$`g{m$ek_$ueu4*FM#;CHw2q zGxxU&#$wf*xFh+zyumh}+wEHjW4XSoAM|LMo73SZfnzz*c8(s+fy|t+tc*stHix{` zkiQ1?iwo!@dtY8{8jE?JHOl@@9|gd4vd zpb_)8DzdqM_gfa(6i`pZ%LDZ4UA}dZjeW~o7^7$!X_ea7F2-8o7W>GV&tsl)D#+Q| zp>J_yON4G=(9wF{3pU?Fw-Fhc_k65{_v>$cWOID?TOj$E*WL=r#+siqmY>ylOs+UC zM=b0bOO9*)s%;d%g-0X!s6!}rCQg@xL*u*~dESh&DylROjfY5#DpraIIpkZ)2Wfn4 z>%&X4^cbP*LVV>2FR=ofE|LC@(M#KPK{eKO>xo^->z8*IdZ4{B`k;{5?0n)z&uJ(U z$j(yS#m2(tp4$n6NykyeEYGLi%=US6767bktBEE$^_)Duw+H5x&E2JHI~48=upZ7s zd}PlH+?NK+_xkMrkVl{OcKX=6yfVG%k>QxvWy#$VIXJhL8OH5%^sKPIJpIcw-(Fbs zlx9t(57?dS4q2mT%qx3wj5jWjS~BQ8I!3;o7XojsdhQ2ujU6p~Y(Kp`#hZIx zyhKMm6v}U+q1eMdST5oC@*iw9N~GBb)z=ft)CUJY?zuFgw?5gXHWIz&?Q8UfThsN< zxQ9|S5Z&&JVc2`nXI@q~W;NvFqgEH{+}5}}?$cRCR!_|~^~T_F8c_S;C`tRp^x<`v1!;HlTceI+_TAdGYjt!oiaZ|! 
zNBeh;%dFPc*$CnmYT~k2qCy!FjmvpPWqWpEJcbWtb=w`oF;GN_#^sz(v9_99#)~IK z3y2N2+jMbEBUPurbnk%#O`~TLB6o#?tdTfcZ^~eW`HlBidOWFMYrHY*>G>{lQ&;9& znHOD9#WgVd?Sr@#iI-)~r$@u$?UtyL-O|`fdbIW0g;#6pJF`((cWcTh3ifHzI2w-8 zv{6(oHE|T?Et)Ft;}TU%bIdJWKYJy+_dd}7U}J&FE{IGTnW$VG>>x5Q)Se>F8)Q(s zl@o7k-|0!Q5CSO5^10t^HkRt=C=I zIdMQE_j9Q?DcJtSvcEmE5M+(gbmUEuC%JP@i&@h+`4h$>hn%9c^UKwNH5!;Jx^`{( zjNaNC!EeV_W};UyPh&Kb2${=^ZAGQBCmL&6TU+O>IAj++%XJz}LC-qot)JFQ<2+i* z;*qdvEVj_I%cJos+8T#<9*vE{JCCGBVci$ajiRC0BloP4H@A1&9L}8;qm7#BI7=t( z)wspN#alK}qcNW!!ldySJ}9<*Z2QwU_Csw8)mc;q9Ro))sBt;#-bfyZR9E87`FsCv zZ_E6Cjq;;q4<3d!p$*>?k*+Dbezr>GG* zeo;)No$4$5rB+aT>!!pL-TY=(%S{Y+L=nA5y|dG6+qL%3xxn)3Wh_mNM(=Ow6@$+Mt*<8oR#YG`_(3c5yVNr4`j zE!~Z!%}mCvW{Xo=nlUeSFW2UgsNlY}zxi$JUTd!zXpFbkf~+w@qT=IDQKwO{nZ|Y5 zjgAb3U8AsTTvD-=?NiZI%l>E@)^%ev6-(V2O+k-t$p^@%_P~<-YSj5g=@c1lw>)R5 zS`Lo}xZ#*#iECUQcgK5_axcm6828-}(>k=HuuvuDnU2UvP1xA4a7Ppi!FgIdw;aCj z)woT=3i+*?cwFo>$qV6!v*yD+UEh=EVIZ2|=GKc3V-~+k{Yd;;EG=%dU5T)QYY8Ze6)7ui{3HqGE3ENAqIo4@Oh4&tHtDWzCWBom;P)t1wH;wI|$CDUWaCmmi{c-&+qeBnYiSSqH!#3Y&3>i zPoiNj^&lEUkDSZ9{xhzd@3TkSs#<5IN26(2>e*-tYCRl{d#R_RDd^E-`K!Xg9%!-( z8eKZ0Z#oEiewKrKaolg_v@P-;wPvuCoqyVR_Gxpu8LdyPT#d$mV;RD^+>NGSDVL)u z=&?e1F^O^r8g&+>bDwdQoNe7g*7|6s8rSO@#$n%A_b`r%d0oUfylvgYD9l^5QISR6 zyJ-s?`AUuW2JS(TQH|~ULf0|6*2U%M{j`TFRk|xuofA<-C{EC);@RXY=)^+y%6lej z9}pEw&~cp(rm^R5OCJ+OZ;zCX^Bt`mkJ9B!TSv+HD4gqhFbex!dNPWRxgL$ey415# zn78O*y#6V2(Rf_NcQmg=g+@h?^NR3MSXD8ekMA@ux00D+CxuVXo@2qaR*{~Ky{%Z! 
z#+PqZq#+uQu_lT=f3OG%-*ahk*D>E$gryN*{c<|9CdTUPVqRe0T7AwVi`>W{6Td;L z-)Q14rblrRwOch8=Dl(IBi)~;s1Oeu)6HZlSG`f;mUYkfLTeJ0t+RSP{BDWMrevQ) zX5(%pHXGkkqO&m`Bk{77-i7CwZ_18oB!b~p34dz_ze{yszlB-y@7HM&cJVJ;T9*9H z#(0dBNj}wco=gYtVRAd~7&wwrjo4rE{QqM8r~2-{{a{l5!r;8A|GdZj^6Znd-=6*Z2aEY+(;6vO*0|*CebjAsq1-jr z>qBIAJ)Z2rWf2x^R^Li%NtYbD;EJ3-}Z8P`>Et$BH-eW zsrz7e9u1jpKgg56k`xQQ)bd1dp*?f#_4ZrzCVxNPYGk$@nrmz}Ht%Zk^23X3MWgfa zu4#Ne)^&@Jk9U{F7=d}WMajorTbz7stwqYlc#OqT%bkzc{awalKtNHQoZufN^ykE_8}rt@pbW%*Xx=6K);M@> z+~m}mirhT&3okhVp>C&Te|1u&GW+6k-pD#I?uzRT`@20cvm1!=Ck;CX>;=TFOgLA z(Ycz3Q8%=RRkLRG!dB?YtQzM&nOMk{_)8rA(lHqhvpnXR0ewGmTvoG&Cu^uzqR>M>59f-L-RQL#=L@eJR?7Mci(DCY`^L)Rhp&wWD8L%!`hT5uc?|=VMd*^tcb*G{avjOWV|!jI%VyNrnQa}3oPRjmR3&=0 zY(KGjgLjklLc3Vy+;xV#oGE!uEsw(&HkLvK7q4M*b7K>L;RPL_J!e?#ZiL z^RxmBhi%QqLv&fZ{qy98%=!rL zKu(9fWUdAhL!2wy%!&6LF$enqcrUolk~`Q2x*kYyv(XFXgnmNs>&Op ziQ7$;H!)p0{Iu@Ed#^0u39zczhc&L>Om zj$5Y(o4gk5OHNI%*KK+`ri9K3(pw`O$I`^B^@1`2C@V^OE}aDFDy2b)aA%gK+i zGJUZ6tqSM-Z!CA&M~7~z^g&s#x8ABze=|w&`?!xS>-F^PE3@P;O%qh#t^LSW_BH$}O|`uq5NG7%mH%UeW4)yeC*_2l{4ysF0-d04J32IzUy zbA4^M(0j~x@@&ig70=SmeOQ+gaat-QFHlKJWDA>WoA(TLsSEYeTo z`E3i7C5N^`ItF{5&ds@!wc1OrWnnsmtF>J9?ZZ?hiJ!xcu>WFTcV*Ir$LjG^OW0mz zOWf}jLUccqPu%XS>evvPz5ZvbRO`J#WpH}va-RB|CH-);Ya)wC9bg~CLw4PV$fB&1 zMnz1lYuRUSh1B2440`GgFjR@v8Gvn+QE7(kqA}Fe2qvFg+h^Q8=vkjzg8~lK)G`Yy zoXPv>jA9Gr=r|EM9ZT!8!L8b&*n|0dXQv*E0W?*6dWOsSd1=u5ZdI>m=C`lV?u^ow zRY9M1_OSK)&MCLuF>9+60nhUxd&Saj=UnOeMV8$4=qH0sZ)BvsM}Bs}21~gENUopm zr`Uxp+^(@8u~gZ=p6VpwPrpB_;fr!;EwwDVrLt9PYYY)qV3`Nq!q@r+)z+DLH(}Pg zZ)b|*GA{i^=L1DWM)jIppm+#SOPNCWHJpre~k-13ohPlq=t|rmyY$+~V50 zo}BQMER61?xlhO9^~FK!=SN%S36XnM z2aAeKMUg+*y7ShC@57#au#)76)=xV7FzW_0H%~k&Mknb1+9$oD9{IgC$~mj;kzL*V z>46d9Ma#M{GdgAJUL)tC#7ErW)c!)Z33{b>nc3KCtB;WuWiYhM_Bx4eb$auiXRmY> zX#P%3iIsZfLr}50zW?fvQ3_1&5{%Y3VU%cPVWVA3+LnV!lFWFeH!OJ zCn{^Z8{3-I$i}*6Rq}DJTb*pYORJQPac;G;u`R7yHpZpZ%g3l1Q(47)jBYiv@hq)s zHpXMDuA(mP+H%6lZS z39sd61;>0_k%z`d7H`2_>I3}lV!y|!Vb*stA|ThdD|_a>T;pG-SFG1U>zggjHO~3^ 
z=VO+9WMf*&Pd3J5q+K?G8%Fi*vEDt^Ba3f)PF`ho6swUV((S=x<|jBB1?QP8}ktmxu587t5Und33kNHsUhk;Clwc?>yZSv#L4|P4Fq%l$y3BGOfhi(+RtzE z_Q*MX&ydQ9`Ee3@W3tmfKh96u-k!bQl=$4MHA{!zUYN`lYY1dcC?8wJmG=to=fRDg zN>QKE4{09j_RhJS^+Zsn4C(T8z8^Yw6g>{bE06_zX=B8h;nG+4a&?OikH~9qK& zkqDRQUH3rqW~4j39#dl-{eDCy#=Gv3X5;qE1)0)JOywSGE*9N%!0PKxY39s0*}D54 zYu=piy62jO8GU4D-QI)E#D`Xvx}3-pYFQeM%x?cZ+Prz@R)uPY8rSdPX3b9bbTi@m z3Gv!J-Yl%zv#;Ot&B9vlRqVb8JOVemvg-!#2>hz|rOLS60MZTL5fs#Sm-6uGmTRWn zt8Wlz&7{3yoQ3DE8_0Qhk%wJ3l(VqzzQLS@vAyA(hsSR~=fTH<5L=W;T=!=4cL2*9 z)LHZN8m#pj)|oi>-@wkoy!(cB9>)3xcOI7V`uh3}?<~x9u5i~4@J!sC#&FI`zr<`j z^$qfTTw8CL=Tmmq4fH$;*yqs;VSYnB51+qXnHgJ+h!RWuyX6La7I*V<(%QvpJR}#b zdPDCBqE{0S`KIc*G--|FYWv6fpl(7tzAqfD`LGH`|)=f`sykp zZhqCew6#9Q*X{XKz0!;wOHJ7d=@@yIMqF4%@0jmN4>WTA&?g&$8IpW~vogMKX7} zyj`EUdn&XMo58yx%j*CBD$OPT;dps5{FPz#D$W*r2JUi;4_Sz)ieApoyS;gxoS#ZX z&%|R3UIir5KyS)fbG*Ht&u|U5ze{sFS*N-Ei<>za?)lkMBy7JE;kOOA0Ta=OM|>+c znmku7eT;=e<33#M z9_T_B>ACjdyD?jeR4FGD<&XE_zst#vy*0SJ3~+95l=J54wPl$iEwt{! zD1st>f^RuEkOF1r<2XfMI4mLF8s9>V6E|t*ryX(lM#U&Er|6YgxqVX3KmPukv{?15 z#6}+dAPe4oHjaPjqxyvq;}L7JYIF*W!$O@bb3kXfr-U>_&xAKv@xad~OkUUKFWtEm zmJ0pIdFc#~E~@_8!nw_pt~HSs-S<*tw!wp!ikrhcW6#dq=Y&*5V)5Lm?*)-Fzo(}6 zN^|y9*f{o`ukDAdnf5Xz%JvH2xOtYE-lDCU_AfrSAp6s^LyxQA6*U@_!_|0P<{>I~ zEowB18jXi!CPvY+JAW{ag7y9V{iW=#sN&R|J1jFYp31{BC*x@=HeZ>Q@ieSuUdB?f zmL1V8fMsUp^tA2E+>D}bEweKY_gdy>9Cp2Xc2{QTK2WqPb95hQTV|H-169gA-3MCg z%+!6Lr_Eg52bxM|YZNv6Ghd^q*q0d_Ps2QOHVXftnYB^$+?N>n@XXvO&g{?JjiO?I zW^WV~`!avyXgEAGIF6?MnZwi2;hDuzxc6rs$5C-iW^xo&hi5LwQKW8G`!k!P=vihy zM`7NQ8I8|~D%Ys7)YV1vK^Qdds=Ld^+t%e}fVSbCil87H!F<<&^O!Ra%xWh{T0lUsJ`zANiTekofkLR*fUYRcbHW7uWPwI=eJUH4&=6(IhPs|>PLAc zQu-T@{U5fLF2UtAT=G-s5dSZx(|Ig_{!Jcbe#)#jbirBZ$>)o*C}pMvd-;FZ9rV2X zt2}-b{O#<^&5VBeI5$mK?;1aE$L3S-@yKQg=2uqP6oJQIm8V(?AKDLbi(ku+xUxKE zc3gA3M;$@Yevg|?L8M2{ruA5lolVc79zC0mEgpXaP5xr}mWW_9b#W2HXv&X^C`MCP zq6)=U#p9R%w=d!tO>v=bUqq5k)u9nfJ{@gDla2A1h^Ng2IIZ5T5@9Jz>hkP5kUx%`7$6R7ZVw+2IK4xx*&ZRvYXRQs{xR%2tk3;%RBA=h_=h@uI<-DQ@&E#r1SzIzIvAl!X)I?K5~hK_EIMI={B~w44a_ 
za0csMI+^zMqng8tqp!RMur)qq7v^_vIDz%spWD~s|8MVYyY;Azyx;4i++P4z0-4Or z%M6eS>juIBCOPNTUf9MPgP+FRk9_)5zb^k#cdM^!x1_4JNwU0*Y_q#cD%G<}C3^V# zE2pYL;LO5r{XQDv)2-77yg2SmKOEqm`)^-p<-~x9j=l&Zw|e10dS=&h#7z4si-qKN zE+>XAm&|j7zu}ldRq3^n8FEjaTqqrFputE77G_s&-P3J_$a7@9Xk4lZ_%F1B+gUX= z71tA8e>A;($Nolrhx2hiHRc{buWY`YG}_25`P!cL&ZWiP6}!og?vVhw{u#Qe|&BRdpR+C9&5gei+|6pe2sCNle5=d zb9y$$7D<@v-zTPb>xiQTns!2C9WmgAd3JbQhu|sCYgy|mEb8^}rQVo+9E*LlBC1yD zje{m+9nj1BOs@U*B6s5}ExuQ&aE-NB*eLHIgx6C#3is&&w zN21+Z*1X43a9!ygKl^zOe8jsIHR$DY$GEt3MD@sl(;)g5k%#_e;`Gtz{yfGA-&fF? zG1_Ij!)kPtDXftzsBOh4$MN=0dB|*meq*MSh<08eP)01x3aLQSXF$Am|fSK2%Ia>N1n5p z{=M+d_t;}N$U9)Ec3=c5@`GpNbie|S>@%{dzA8Eu=${?1UK;0k<&fv_BhH#eSLvrl z@XB-BQ|e~=*XTJVyg4LLj@J&ctgY zb)r5qAgVU&nf=M%pc&`hxfjrTw(-i=`C!sV5wC-cugPzS7MU#fzWaBy_A`2^)nfUx zGex)g5?Nl`?Y>4HX1A%AeUU8ewXc$esrWKk7}mZ{9-dZTC~GF$zEUPO{J?ErDi7NlfYoyI-BcFEac9qw7qMX&Pi`C7R-jiti% z7mLySh5@~6$pdohlN&4iAMPFa>zX%~Ip~i|N=~qx{ER@mv0TApqkzZyr5bburhDAa zjk3h8x^+Qa;8+jcJ4Qobj=dyKluoBzpHKff@wmy$MO0J+>yiB$$sr0xWW{iSNrls^ ziI0(*k*>iGle@5FG|85eGeKe`hhuwEc$k>)^?5p8j6h$nDzg`Fe)Z zZ$8}}q((VC@6CoV$IHDj`yrFhhMQhYyfu2m3v2ko;GxC+om%dAjqLdg(-+)TbQ@2G zAJ35U$T7LE;m$pyetOP(XU}KN$@G{eXK&c25k2C%*YABlvH6tSw*7kqOfu)#82-!I z*x2rrY`p9s)brE3(W-v31tSv8_udPIOmpeMR(E_FxhYBKK6v~Wqve?8SVLM?4fXOl zbu7QU_0jq{$M2>`$X$`N{@JMe^@v_^@7Oa^Zav~SckOw*Z16_I*o;T`^ruNK>4-oF z_qD$>GB05HeRe!p`gry4?0R1Pkho*S$=9=?`IMVVcZc@wkwrzkN%7cz5iO4FWA`n@q$g2LOwV9DMNMBu_m#4eUQN@zZf? zP_8FEfrmxsp8nm@+syPc80&~Kf9He!^-RMu0veeoH%y8puI)iTY&eG)m0O?AMdYAg zO#8;F$V`mrC}4WU2AK)=a%`>+tM0@y0+ra`HByWZx2%~UvWU>95#AiVM(72^+cm5i zy*yGKqG}26i$k>QaZ2f^)xR&)Jg9cBWbZi3Vz2nWW3YkZH_t~PwP-ZudbO%r(psP? 
z=d1BN)P6eM4m(C-gaTbm7FzR&lDlq>M}9n z7ihh4bv=UE!$E~Fy-%1^mY3gpplK!=uTEc@oKZz_&;In~=})KMpMGb*#`o z`116Paol6`x7}9ctYeKOV=Z6XI`Z~neY(~gk7#!_j__h!C7WiWQD=P`&vSd!d8ROH z6lRU<859mWat3QYF4FQYXYnp*u_bNC(O@l&M`7-f)_5tEOlZ91BSlk2I5nQfz4W27 z=dtv5wtOAz+uFxEc2+GuSoX|nA1xbWi|ojEQx44Ek8Xh=JEF1FbWk11$@qQC$u4x~3cI|E^ zUU)BiZDhg7{*rOvWaJ2(WU$F(QZprdal4hr2W!aQTUqya;#LrmnOaN9IEMySpAIeShhqo@$YFAnE8etBe? z;o)>v5v}3yNFLeljdPeFEoX?WCgVwd8*LLIJ#lw{#dq`bjB!fLp%n?owsMC#&N?me z(4`kiHHbUv_-K@>J<*Hepc1_-I}!H4)Cv{B zpNos14&})Oy=R76HN6_B=Inu?RcaJ0WYcR{b8K#nkF&#<0lKz^9IIJs_-jvlTwdom zo==vZ8rPT_55d;xoj&G$pWPPG^*O>KoyRkO_XXc_q~2UuG_F?_shx%6D34h96n$ny z@xrmLx5Yadtqm4zjqx@^kViD&T$|2i4!?odeN>^$jRB`f%n7OH?h>$#Cr+aXTj8a7 zHI1+9s5#{EfmS)f4}uYYE+NPo)mBhPxH`N<|FU#c+5-h@Ob>S_ju$S=9X@Ld-Pq+!QYg`3vuuB z-OZ!uSnQ#Dr|)VubMN)2IqdarkD5=hL+?&vE_ugC(cw5-CoQe9$HvL4=KSogWm9`# z$WPL!TQ8jOzBJFg=7-T7)DlxZe08up4ii=Byst^lmAk}r_Piuvu@rrG*ynFEEB{PY zIW_umQI@!Ut0tbe&3-s>H^}csWMD-XasNCwcUM-q^r517rNGaVi}N`A_Wu3pj3%Pm zTRNp+oLluMn$^}J3i62NJ~G1b8+lTi(@7TDAYN^L&42ciDCw zMWv#@>ib@`Z99HeU_5X8j-SucHXe`v-D#l0DeF0`ZR_z=ncLo@u-CTvD9k;!Tanvq zlcpCTaogUELX^i+(>n@s4?X4o9p?+}rT0->SntPCn0xjsexH;_(O9Zg&ez&UQ%JZq zUarN@#=fl8&&TGq{Kw$ARO^4#3|j9395t^?nWp3BfPb>@F2PZA+P-UW6!x{37T5V# zwoh6|BP+vKUt?ZZgC0eJOs`OD)~L0wYcu(G?m8T`=574jcOl-@oOD;>UEx=4{1NzD z?pox}r}ZvI_Kfgpx9)0W<6Yk6$i{ZtU5|YHes|~?Jp1p8(JSSyK@P1Cqp2?gOvvJj&dIZKLCud`9 z;dE(A@8eE)V)P8!rUi;*Oyix?N7FqP<=)2Yj{IgmJrn2&DIaoQWT0fq1Eb&dDBak+0R8KK$EAZ{e~AEQaS8sm6L^Q2ib$K9KL}_?rAR zOgc00LImBL+}rf($UF8gUcpS2-#E7V<%4Da?1(#qzUSZd>icmck?6mcG(d>mgbL5s*q;- zkudwY6&v+_pakAujAp=R^lsM98xhr~es@BzqennocCe4U3rYq|P~C~5jAxM%!%UG&jm`t>7=$dA$FA6b`xV=x_s=O~SL zl)gJk+a0Crj?#3odT!k>ebgJ5FFK-yRqUh>ML^P}hHaL#K#3jelOe-!ph zmKG2B{g?+)!TT|j_lG7}^7n%#Sm5_#CfMlrV zt0(G>k9HN^YOL#EqUj+_8ZXz$WMkj1qshnGQfG6-Y;Lc^$(}`9olf>__SEs@&%vvp zw(ESdvG1z`%AUo!I-zX5d+Laen8Ud`qkP;yTiw(>t3jbYZdr$vjr&raQa0{o9aBEG zy3Q#dCzU|+I;d=n=j)`hF>lpT9f9+FomDpWtval1oOPX6HpUj;S@m1nuj2 zpTUfu7gJ{)^QOzCJ{IHiEYMXQ9#_R4Q!VPJQp;{Bqi2&k>3TR-qDR%WI#g^r)#hG7 
zSo|#hKE7YIG>+1uM_P`ERGoXWbxo1eY)nPcR8u;y>pu>AE$7*|kJ(9=zNMZxVrA5* zF7?PUSTF0D>^anWC>vvoo|0F9zt7#(@2CFWEfC}nXr%83-DLd^>>TR2PG8P)9A3RQ z|EMWt5-m?!DZ{xeoBp z$}H*Z!aJj}H?iEll@0>WO}E{chPw%wJ6!{wP4j+iaOq6u{b{f@x?QOqc@}_i-dX7C z^rPW;Xx7xE?X=%P#hF-F*b^ zHr<{*qszKKAA7M4*|;v*j%?ifZAB>_1u%M8@qg%tOdi<1>9|T*%|m19d;Wvrq8|-@UDRF( zi$+%SX7V7_gHMq|CQdp}%pm5|M2}XJxJl z&*V}1dOSy4WRl)S*)(*V4{gt0P12@FKaW4=ULNs3HE?L0yOUgG%Db$p-o2uWRyNm2 zqjhb-)E|v?*)cB{`(vxsc`)UvGP##tN5m%HO>5oTEDy8y!!MckT-(Sx z=r@iRK))np^!rq4@qXf^9~Qm;@kX-kNR4;i8!B%8?& zVLW+zs*|-Z_`zrq=;jgHagy!0U;OG3`wSi=M7ES{OXo9dA)JAdYK;u6<-$1*wbk@2Cw{d`2$0-h&-$4LUZ zjGD_#2?rlO9=At4n^<&cc2*@RxWd$%XfWhPaqFBJj-is)Qvd*Zr4d)^G<$+LF1kiUsZ~t7r!@)@TTiskGu9YIXBMHN4lpge+R##Ka{`gt@{FA z$zxNlR{cb*;T#o|@7bKk2M5;5dF zy6c4lo27M8hQ$;5G9?_^z*~9w=0q;WjAfkhQh~!&*ozOInj`Mi5e|GcuvezT2 zWwaMkiyUQEszQ|~Z$0SDNO)UvvozdZ$ z`EcgZKeM$ZEjgo+n7`(S-?k-NazPevZ*xf&wk)rzJ%iviq;YU!)?4V;zqj5$^RZdJs9m6j?77(~c9fI(F{+V4hGXEtkqFu=WS`lb zR2RF{OZ{Ajwxjw1PV*C(C`!&HWMqetvOKAi&p9%~weIfSjXk1AYL zS$ZK^SoV4;dH6XwF8XNc#pGdKdO647nR`Kb80TKn5m=5{)s~l)x4N|#mWhcfg>zn7 z9^Tu$xID~DFE10%?OtFec04SyS9v(rUSt-gJyu=b<@|2Yp6}$lX!M#aeAL(GqxSpQ zrFvpMqkeeIhe~YxHzc+n5_wIcbN1gl*!ut_m$7^ zq3+wtd-zH*YOlUll`#DwesF%)_dS-E*j%kDaa$Ui|1j@ z>Uds0hq;G7@pL(!9&3y(bdWUgx)l+TcV20Mpxw|I&;LEM`DfqPxb?5OOTJE&g<8Wu z?+fK&d_Qw9US;!Ss_4R_y0&}b&%10Cb=?mvOR=_jp0{{n|L#@PBbgB0+i^(rS0f)n z1@y8Wa|B16ZfVged9u{9FIn8*vTs>)sP{Ds)1G~oP2@fR9RS2>S&y`R}DZQ1X*eO~Wx ztvjODy~ea>z2oa#W8Lzr$9@&1UM z&c#-9uJL3;=Y|7!`{=wqza26vFY9%zdwG{T3-SApuEp!{cYWgFJ9J`1f3={T(q`Z) zH+G`;B-#qD_ncm9R1x_5s~J*Aq^>R%F|Rk&Ygp#Q;&+%eY<2L6-fe$DmW~@wru#?n z@bUM(n;?pyV{mi3pkyy=4$0w1b@hqgG2%OV4kNLCLxa;_ zth47pBl>vP)WmP{&fVk(VmiKWVWC_1GlQRH8~a-|Wx!FL*8)bW~l2zk)}$B?k+64v$Zf zrz|XZdG7CJV7QbaQntBB^9QGojF$glzkg@HiJY)8AJ|A|x+l9b>5?<|zr9DrQ~TDv zSvd0fbO-0s%+?vj&re^SzCC?+`qSwP`}NuBo2Hq3ar*r9iG2l(&zipc(taMSpW1K6 zmN|cQ`m258m|MR4#&CXO-+p6QTfSo#(E-REu{C$Z+A00_(_ak#XGY}~+?Rf~<}&VF zw$<+v1#=4Sr}y*M4cbpLfaP&j-%T)|eLG(Nj;sXlickZ^dy_Zj{c_UW4{zBe+C2?w 
zMUb@C^ApW+E5nP=T=HfmU)PUCz28S`GBoSuDQvOs{WC00`-x%gcMwey&p_RV%sY@$_OIl18N!md6dH{<&ta@7n1yK%g$tMA zP4D?a>(Ut6;h8LYKX1Ahxz6ge%BI$JyJhO1sq9fd1my?&AINdnBl3ZvFdI!}6C=JOkNFX+e$KJ3;K(g*Z#9S;&%V+}__P8+Q)teE*Nb zHRq3PtTq2+V{GBCIKC1eX{`N=sC5g2M&ROqWWnF|PqMJ>@n5oV7yl*;L-BvIFf9F} zEG$d^DGSTCf0c!8%m2#4HTTc*aIF2eEKGSiUOGuk`)pZ9~(;Gx1U6SYg-%Ej->v|!za7doVP10yS|K1;~S1;_vp!0UD4H=w~{)zPt_RfCK4$0o-X|=sH%!^<4&R}a7 zv!tiqikcd=(|=cVy=e+_i5H(zM^xu;#Ria{iuCe!;O{c zE$A^a{f$Y+y_NM}^KR@~_F{}%pI#@&d-^ZaFx0qfl9vm3~ zj|Tt8aC>b7w9oVE*0WgqBcIxgsM7Ze_tHz+|E0(0m-npkwi&pe8R_$^F|K(TyS}WI z&BXq#aTeXe{GIqX&u+fTd1WX&>bIWf{~D(~J-WUzZbp*eY4Y$fk~)Vyx7V@S=YNdj zJ*xTS^!4d~LjCPE?73t|a@icu|4g?Zfc_#R3N5e#o=8`L))}_<9`$7uET^ z>h9+T?X`G&A#Z0Nqt<~0a}NXmin9=%wRjF|{_!;NjwB5aWBMlYi-fD9x#HosYoYlxXjR)5oX(d>a$p4Z&BU@!KQ+ zXa1?#!y!dWuvz?hZiVpkq>${sx$!r`qhpo&yBiS>k za~;_j_gER_KJa~Y&Z}N4C4asLl5zqXpG6J*Pfpfr1{*&Ak}+-p6Xd$%Qtzx zKuzN;o~rjC3sb#ES(x_hVUZ6%>7X~@m2orizTV~A14q29(Y-hC0N5`X?vs(}G?e&l^?T<&8Qr*7?YE-vrG=Cy{G_#kT*@0S)B*Wlt8jqnX;G~PRI ze(K;S0V7d^aA=H<6a`nKb7%a#rIFODY~68il=n1$jl!qCW?lie>`0h}bDg`ctFY$h z!&$SK-#umZUdNQTv*$Y=X07m2N5p8H^Nxwpu;(2WqhUXuya`1CYtFoSTcatj!>c0W zd1T&^F?xlngjsfMjK&gg!=$M+Uh+KUxt2WtOY`ZtImJDKOJB-I)%e5I@7VQ$cDprh zqDQP=6yn>qx`CgjSn_)6?0nDS)V4}4P6_Xv1cScNeGr}+-L6l+KmE~S*hRc)o_Sx{O+Sy8+W2RAN71{N((j#f#!`_* z-OLDOu8hYyDdkmVAgjLJqcf)$XCAE-ADxx2zVv8s#lJ^aNZZ_-Y=^(ELH~+9-%8JD z^e)4kt#e}1POk#Tb6WUwBTyb#Wg{{>27^p1{tho4^wq1Cb&;uiIXEM_IX@X%6Ys;2 z1))d&N2V8lKLCxFfkwJ>{$O)?X6xbokdgil>@;^@#K--6&MPL*j4r=3x_mr9nfs2-%UCMVB_wC;^*xDmU{_b&$4#JLh)b8j>unfFr*f8~dlEy)$)vyJEx zU_P8IN&lX?7N8dA--oF5(s=Um00iDnzdvjhd1-a*rH7IDN_&k)!%ui(yBq~id6E8| zerfZ$JFr8q-mdDI?UUCm_Rq!7>HG{2+e`(H`)VUFkq5jpr#$%_f9KNe%LwGr2vwZX z5^XACC1UXN7x!aZU{;jdIhe3(R3=m7c?ynZ zXN6ItFlwwRoQ<(%2jVEN^=e#aTcTDi_!_wzNZ!J;F?m=@wk8utoz2O^RLIFt>_ z#8798val(elm&mCZOXz^XQT2kVf{+BDi6>3Y*rr5dA2JPOPvkN!#2;BWn!6U)3Wf) zvu#;8mf5&0EbAQG___Ewe`Sryvw3;z+RpZ6;j6QOnV6I<%)+5;Vjg^D8?!LXbBB32 z&SfjJFxJ`3JWQ9fomqHGHZ%`Ioh{A7RcBMPa9zr_X5myeHWNPC+C2EnY;GPNsw}p% 
zy_qnAh3(EKGaypUTp6KDg)SC1n~*l+>StFA^F7!)pWe{!Kqb zf33{BeO=p`!wWlg;8aXEOGZwuc>$=Lu>p17snE}AudF_T6IQALM&BQvW8+Ta$oYSr z$L~EUY2mlL{QR7Qvm~+5Khq)5Mt5|Fj3Jra_KyU^6-a*qTZGaE@*9=7+h-v=nX1H!pB=rapBje#`IQ?!BQPRr*4~Ymt7r_%-z?Yn=Ji2CP?$X5uAaeJ*bV1 zn`?wAPLy*QSB@maGU9Y&63wqh?OGv zfqSPB97^5!NY%L3=y2ehvAZaOSKG638H&{zae7;8?$Nw)*$T}-4Co^Bm^O zIzJoxIh&A=S2iOX)9p4Tdj{J!CmZV}o0N@v%VuTc+_Gu;IBT1ikJD{pHXhl`WAGH4 znvHME=4Rtu+T?s}S^3O!Hmt>_D<<=-E;+pAjh_}+nvcfuo92SF+_B#LK!LX2kx_^} zRLb|F3jOnWe4dra$F|X}YD~TfKh9660_lx~0BG(a_xt26UX0Qdb&d-iVQZ>x6 ztmyf$u5@?oEcxSra{x{h zBJ#cu?c{*$`BVEf?hpJcCXmKtlr~+D^Pl0*Tl|qN8{(gAJWKw{#A@6pZk1I*p>@~xZ*=WF`nuV#ZC(XiizTPwo%s=ZsX8n-|w+eKhmFb!?#n7KfQ6OhvxN>-l+0k=-jAvrEBILv%Pep`*~ny zYfq0AlC^wfGp6?0>vLZjo@ax2n5oxVFzd1X;jNIUy=JAnG||7iR9@3x%sZlvz_~Gs ztCAU7(K&o16OB9;h?-~r3?t_zSmU$eRsr0_TS5Gpsvg}UeQy8Szg}bv%~W&%dpvSm zjom13-2jb#3EYGLi)+x4`OZ(BdbL%Zqv(aS$9gD-mzR5(TbC@~V$1qu;W}rX^6=H# zHV@a9b<4umYW?!&v1J{zux(k-EL_F9W?_*1koC>OGqwFAGturrnkwrdYZNinm zs@uJTto=LhJ>+5B<6Y!opL-v9Sl-9jx325&i-fy83DPKV54j^?2S zOCvpjqIJp~cCHpEm?E`7ff}t91bIZ7<8q*yjmGu4Uac>p;x%E^SYw{+ljE>%>zm{7 z*ZSx<+(lmy>UrvH5E}#)8zV&9@vi&OD2;g)7Ta)VT63#1>8>`gz~bxHFvdUe;|24Cqmz`0d~Ox3o*@#C^_^JyDL3bMRJroDiO#*p z>UpW)qelQWedPanK?OU?ZThiu3wDFMIQUil+3G8+hh6sF9Xqd6*TRt*;r-LC_(!AG zT2p&(+=p+eH#XOrcSdGC*EGG^{(rQZ#oMGRPgmV0b$OWQQkjQkOKS756{*g{(I)ko z^H^JyOkZuTHS*T`#k z{Pc-^1&q&{zWmaD9;~0*Z^o86e`DW%a{8;y_0y*ButnTW<2Dr1NgdyFyW8@-3CiD3 ze=+=@8MRvGeCcPG`;9E$+qsL{P0zIS+$K_vhT2>zxxn4-FMH1WuGOwRvtQ`!o_k3C zJyn)J+V{_GukPBM^p08ktcdZr*GEj&{xuO;&;2&d{r863Z%^@(r*vWaJOpoy7J@>& z`fTFe95zt9+8LTanbW%`zP%ARxmV@ z^L=BUf#0far<&lf1K6>fxBta1dd%1l^ZE21B+@We<-WZyJ=ALXW;}-=+ODIm^`4E! z;^**O+S4qY>pjoHQ}aX?rae3(iI(=?^Nn^*Rq#fUVEZSdzWReY3QqKRDR>W$?w(a&vp`ujRjB3 z45beRt>s&-j#!7YIrbcup8hlfS@hHB zHBUOnoy!96sU3rfUJa3$T40Gc=61W~d+nS?JRtfcde@nLyDxY*rVDhg^P!#pKeFt? z$o#h8eP}cOWw0B}Hr#82J2KmzZzVg_nmp^mF4@qCF6PeKYtvk0IePa%@HDopPJD{? 
za(?hLs4<<4y}y6@kDzt6tVg?qjl))w)1uy*)61)-T@y7mp3jtK<$$*`skyk$XIiuH z-j<2Y!%JPtvRY?urSkCZ&E#gy;9RCR3tycH&cd`mQ=C16bD899yyr5_*?70&yDXgN zGSzwbO10Jz8iJU}bB12ubp+0HneZ%{mKw67R=3QgXRYgeracSe`K);s#`D?eER5%} z)mixVX0!9~&ujLMkdS9qiMPy#XRWO*Tb?zG%h~T-tVi4Eb!2x0KPih{XSMTa%gdqI zO~3D>8IThs4&t&d7hh&3@h;iU;NM_T*s3b1%xZXjBGdT*d@Zi8xDhwh_A)BF5=wRlX?v5PFc#!HbzIgREdQq?0Loc?I`>EG=4AMAHu zuLspV^!0ks*F7B@^lfX$27Ncyu|ZJUIyUI5OM9iCQPfA50&Ymttq$$8=UefF#{OO! zf^Z3I;jaHRE_$?UhFazF^uN7&PVr{{myf6FA8y-H``h!Tv_~TCGa!mpt&ncfe`q`W zV-p4GTXxem?`z9W_uTU3UNMz;J=NXg)P6R}Lr*^vEO$=_lILC2eSUIj`0(`mru~!; z&_msIU#QlqR&H#2wwDf}ecdKYJ#4!~@GaS7GJII79tef*?W@<~Bv&^AAHHo9HtxxWjJ+0a)v-VzkAS^Sgtq?9*AXyVxBF_&}FZ{st`kpTZLE|0U zH@cqnNTFbCXDM6PEod&8zCdPcC}7poN#^L4qB8aW$KB=@^ZaL?d~c8n>k< z3)@<{vhZw4Uly*lbY|h1OK%pATDr3^otOSBjJ0;i!n8+wjB5=^u*P+aH@Z5()mUP{ zEXcJd7mZ0VWHj6o9Y#Zzha3%e9$iMm&fmupokq`H(Q7nZk8Y!(7A+c|4_-&P(R1I5 zg5z;08j7b+9Yx2_UQuqw>;=0-;d%4liqbQ&Eu;8&Oqp^Q@2ALuYXpPjf8r{z6Gqs34*?qyGOYx7B27PUFqJwn*%4)0DDvO%c z9)8r%jz`&oBweNP9H)-@8t3ut;p8mr=Q%wK<5Ci`u&gB|3(uA$W#OtNEeq2gNt9(E zvq)^+X165``kIrvwI9&0ljdH0uQsZw-3w_CHARm+y>^+>S)4TAi7XuTPGw=*vy+mG zvhqD&Y5z6enQlJPA*5dl@HWuz$d%UvjGR>hjb6ND=*C{H&>Fozm3wmjZo-HkGQWS1 zK4P9@r>azn14%D+J)Not_CVD-Uiz~zUeXR(IBV^Zg{f$lEDW{w$--1?rz}i+Ed98L zr$|+!a$uvY5jSEO{Xb-sfCreK+bSThquY3gq#*FC{hx9??a=h7(ma#&Mt^_RwBF2(v*( z+x>N6*C>xj<2G-Odb+}B&t#fm&;#)&BXBkq3fz&hLlgxucQqnaN1p z$ay_?9{T4{>y2JMYLW4OEY0p(B}?C~wN56UrB=$sB(0T&8Hmt(k>KS~U;8v~CuLqLs5S)LJ_W(;lrpuFb{a8n=DF(KQOX#w{#5dx*cKyL~nj zy{6^aO$PMk*-kVD(SLpR6O9L}8f)#uRT^(U|A}7H`SYQ8OiQn%K0k`bnBO7eeW*!X zo=1N$)hE!=_+$J1uWCeOYwaEB?;2YyR(zcTl~&vU*)t1i3XPY(QAXvJooDcR)jh+} z)>HSn#fpAonzP!Qw{?rH(OwM&?i_*QtLcg7=X3eR^f~o%?Q`KZJGxOnF`9B#ztl55 zC$h^uYM)W<-L=y95+_Mc;=Q){eiROiwgfd6I4~1S;m}Mhg@dE9+|J?A7}pXI4ZoI< zXbhJmXj>P1O(FG!et+Pf&03H3w2@ZoyMgeFY4E33kAGt}j&t%$yC+N<_x!AMWxT(z z2fO?OjhDVDyoo*h`Gi(^(bykmJ5p`6LHEs~KC?VK-NByO&h&Lh(Mf8%73BsNwI4nC zb=KNjGtWx>l|3@^Jts3xtCz;peRrj~C)ihcsV}|XN;}Oxdg<|*UcBXyE88)D)vxDN 
zL+6b>Gk9e)pzC-0sy%LmcK!}c565{$=CThYmjjZ(7?UleyQEt{Z_uKMl0y$cnZ&xt~7c(kKeGNRvDjup|9* zZoX=gOL-d_m-0F4Z)z6D%OZ}6D zWv-vHaBSOb(X{nqT;OIdOWvPM7(BG?^3Hz zDBA4V1^ujcgLmJE{^-`!^1a~5D%AUetsj9%uGjelpMjm~z3ZVlet!Do^w-l@W_L?3 z;%}`N@rM=zkF6eFr1k@D8eKPUk+B~Uu+TSe-bxA{VG0_vwh}`e!w_8oXQKd4_&?HHRKy1M_s$Hcw_r-8Ro=VJkeHg`wv8 zTujJ#k%=r^=VT@eUoBHvm|CIr_#Hme@?(z)&TeVg(K&VqH)bRy|ZcA;XE(3@JyDhZShnVt~JkP z;#u-!7N$ARX5pxLIt$Yto>y+*mBrp(;XpLsm$h2+L$9pH>7bg{y>r?_OKGh<{jsKS z7U$GEkcFw$c5r;>`5& zYSW_Os{$>fqT8$DRNEFk^HTd357{dMm$h;6So~fr`HpDVrM4~_@>cC#JPvHcvNkUs z$NO;m?3Jx^UN!u|bnlFJV*g;j|Jf?q`)bi!Y;Sx$dH($~H2=qTFLBR0b+^* z9#cE}q5XbszgzaERGR|bexan~qI#_5jj8g(f5 zy7s=zX<#xok4B-C>J;!EGp2jV!f8WXdhnS z9b4V2VXg7nBxJ4yTYLlhVap!hn4O_h;QGz zFQi7J=O&`K{xWRpjru7fK7r5YcV)ku=psAZvwP6<`L9u;*YWhv^S>u(-`YL_NwfaS z{$8|J&n(x`ELDzZ!k+#m+OMysx&F=OM$F^!Rm(SHO_7w@Bv9zdxi)3uhD@4@@rSIg}n7Q^Q5lADds$FWogv?*Q<#NekK}0rw0p-6)i^YnhdN9F0BSj@MQQyGB(HXq>Nb^ej?1Gzy2t77A!jxqH=i z4k7Fs9fI3^_+lg~Si+RV;`8)tEWvD{j=ss~k1gtD^NHhJQa2lC3-!gDy#C;kcV9k8 za&h1I=ZQr*k0<+oXY%S=<|A$pyc>hy-Vm|bIJ}m*h&IZo_fbYGqQ^#ryi&3M*t9rw z*12uVsx%*sJ>TZbs;HGMD{E?I<7=U|_z&$d&j_`^&@OBAjy2o}!+OwR>w(4XRQ4$= zKLQ<(<>}j83Xvi{40ymo%sy^eJp1&W@&3;fY(?kFB97)yMJLahkuxm0A+JUn%LNEN z`6jXs(K|qdgzNyHK8M7ndL1Z&{ts=|-n{|}kpYbHLL6?%>Wp)W--aO%SN1KMONjoj zCyGD`PA0w&;Bi4+O8q!Rn4QytQxjw|!XB-)lgs=Iv)d8;2K48XtkXx(D-Q2iUlNb< z*0>{R1U^X&?>;Ygfs?-&4d5%whOZuyR7hk~&2My*xuPofh3TF+4ntEaFLq@*&d-1& zVin(qJWbzYPmAT}-qVAw6IxjsjpeP}E8ClAW+BFeyrc5;A+&XsmLZ; z7VR~9jwRR{UmDNOPl@1&eZ%w+vq*Z@qfA^bPS$Hqq?8?IVm96l(-Kt!k>0Q40 zl{J_BH^Z`KaOvh)7S8p}vMfCK2>WlI<<5e+l&rXJre)#We{(Hs2HQ8=vapp{=+e!% zcY*!f&A21xa_;8bF}S&#Qs1mQ0ylo@^5$JWR$1y#4Y%J+&c|79z`Yxs+c*1;T=)Fu z-*I@$&A{Vuu5S+JWA@BVxuKVj@zTx1eC*5o-;wKXySbQ43EdvSTOkLZg>6qpNtsU0 z60z&ljkevAQ#AFp)q4NkPGuh3nGD?*Slx$qw_(pLlpz(YJ>O{NRG-{w!Xj+nTTSb0 zJ2mo|_MRAV^OYEjd#_`4oj~!PM&vsm4P7v-7yzZ~FGbgwDm6 z#CLmQzrX|i=&=|FTX|!>ED!;i7beBsy*%U5+4!vB-D}$&ZWK^2*}p!AG(ICV8Yf=; zGZBPQK64)X$f)Af?w?WN)|ffbtG4v0iGxYDRGluzce1QnKCnyZ3!D4M&6NTdTS1;E 
zjsnr}ydz|(ev9BChZ(s)Yo9J+mP=19a<I-brqk%)Jn3 zId0WkW|?<_yYyV;gQdumkSXF`I=cPaa%Z#I&v|2Ea82jkHDnPn#a(Q)on*L`D@*d zdh+qEqoRC_%cv(;VpTFZtQJ=HGIXWR4|V8YFWpGz3cb$ z8SMLL7uf=XuEYI%+_H<3|G!Nfs&_WNwY7L=wFQa*0pGC{s$ z()!S3HX;Kom@f_5O+c1f@Omg0mwVJKq*FDX-;*dSbPn^@o@L`)?`bx^mOYQ#YwfMZ z%N8w)2EwNCvc=27S}bA~hVvFP3**}2ai@ICq8^2RZE>^kt}SvN9`8*oU*FtNgPMjxxQ+MhfLP zC^`0u?i@a=&k@G8ZjremQ+v6i^%XuP~b>TlE?t2Ndh`?1*T zxa~-~m-{wHubQ~;$d#73FbnIR*ke5AQ0%4A{Tr&5qU?bnYrM3sI{qc5eAE=fePL4X zZ(iezerORd@AY27s`yc(cTVwpRPEUWJS4g!diO5g@OxyZ11%M&ZRxedY{(@Ss1spMJBGtCcn}qS$NjkCJRrkjdC%$p2@=)j3zsI99kcX#lY>uD)dPjg;Mg35D zPaNxt|I-(>XwQHI3}P`VN=IhJyXt5Vp#2;|J@OoHrg>ctP^;Diy&S$r(nZ-kziO=o zvh>?~YVp?mtEL`WcU>3%umdwIZ zYtbxBd$g?lslS+2{n9c*d%lro*7(WkulDVgS!UuZGJ(8!PgP8NE>SS=8JxS5&w!`* zdJr0K77+cBqvYiDp7{^lVC1dmkq^%aj?If=KI3_ z`_QmGG5L+0`B%mzoV{esoI9O(WRbA(zR|ajjOy$qxmR=%KK?k(fiBs1ED=y&*&JWl ze7*XTh;V$Sdt^X%4*C$QkM)NZU5ZZJGY8o=NldL>v7pd{6E%-)`}$7Tkmni@{n|6% zJ>Q7V-p`;dJ0rS$Z&En#RPdqgS8H!u?WJSRazH+L^zqCLSmuZ|+JFmWDj-~8rRunPEHsA9s zd^JyGVcN6P(r55F?=th>J>N-+HDW>MCs4h7P9Bx&Vd5A+i5|%`Dyv#4i{BNH3%oPR`SsYuU(>_ifq9 z!nP$_S-94+mxbq?Y-ZtG%5D~xTDG$=71__iQ0s;)OnY?4mi26l31mN!U95}u6~OZx zu`xt}9$}4i!&gr}-f$Z^+x*(rqKJ~L24{#d_d>A?9=q$lNWTTFIqSD=b+$PwOIns3 zmxX1`ky&`oacmYoaj)Vva$fqkyf~}o_^kD{=u)kF&o|PZ8t<83&8<#9kM7q6L_z)n z2zezUMD!^ddPHyeDb05Vk^I-U28|rM_p0oltKTHTllah}-80?VL%9OwDj_b3q$1u- zspoIMO(0$opZ&@9f%^&c`|IC@0_{^fE&S})4k-~Zb^p899+kPl)!&=VjF{D*PXF(M zhsSB>CuOl+n?9Mm4OM~1b!`IfCBga)uoNDbi0B zXMQ^A*0uNh(oTK)yy*fr=5~bOeP)?^vQpRsYLv(63B;$Sv&kx9b;cq7_glVsHA%YH zCITD%7SpHgzwK7dcL6i~e6p&nnkZ#_w?Cic#cLp-XM_|gJB;of-ONgodNaFXDVH4~ci)##;N!|8_%C6IZ^zdk8PbF$i}#pKgh?4bkzBUBk*3%KV;8fo}b9arTj%crmg%& zKGtQPA{$?w$2bDxGS88X@%B7O{tP@%l8t98?~#pjnHR~%w$7X6<6CAy^3LGo<*YL~ z%e+hWde?cGe0(KulaH&;>ty3pRwWzHI_r{;uViKNaV@hpN8p=hb+Yk!*5?Q;m$E|H z_?0!v##1slS(r+GCksQJ|H;C%CqFcv7t*Z(jYLY`p@^*R=-LETBPR+a%j-3_@mRLH z_(iWIj?+XNjqw~N$QmzkmZ~L7&u*{XBSXFKbVLxS|KJqq5;mQntVez88S#5VFvMlO zdD?nEdLgw?LR1{Bgv4 z3%vM+J_A^K~qWi)Et!k}@_ 
zdW&|W{*lU8Dqx7`_I#}!*GT_^C6wQuHv4DXdEy1SNnUsEI{lizn*-nAFTU` zDjIsd7&1rRQ|!LYk6!vCm9!;$(>tG9_xD0-(aw1qjfeor&$3{b60uBUiVt-n=E{_KFY$ht*^4M6@8Y4q1JauIP-MM2=JOszjsl048tPy<*sXLA7NcYReIB@8)gqhMcJYi`>85erG9uER`DQS zO{Y=xp6sQs=I49u`f|)8oI{-RcFY{-5dHSS>&eR=hdzf)-e=w+Y8-l;M2NmUoE_(X zX%>p`HO^%wO2eMRHJ7PuTw5}ijkA`?BQVb8AkO1Fw}-58j&Ui|+3THSu4O(Od#w|) zF}COoS<_EVztWwj?+&0e&Zte6e$lMl>uFifngjKBjr7ZVYODOvMs$B9>`$hggx|IQ z>+CE$1K0D(7yNz7IwEKyjws!Hp4p7)P!a)Me4??OjK=M%U~4QmMtZIbn)p$F*C;Hp zJ1giK=U6gVGqTsx0r>bMaMqOej+y1{fJoS%SWovqP46y!efs}QHvO%LNDSn^u1$)( z^N;%f$6|0xkO`Ytz^MPF!Y3Dx>mN>-a`AX({6!KLvT3i(x^PZSM%iUHa@YTCxA*^Q zkmEV_zR~{4>7$fc1N)izEg2NyUdpr6|C-hM!G6g<%E07zQiSO{!$u|;3lP80_;@eO z^P%ShClpx{h{sPhJ?6!QwGLnZeMA!Esqh+aTetu9EM#3Z&R-4j2xw%Uf~pZNp;L>p zfqkC4psh!;-Rk08I*7*NC{`?3YmQq|Ejvc8shW+kh3eu+Nl44LqOZo1tHZKz;^TSFK3Tp5wZHF{pmM8& z_u7{;9a+67$XiQg?U4}wV7>u9`FQrYXIA-gS?@`?PoCCYW75XX*|XL->C?*AJbQ_XQ4)C$_ZF42aklJ29QD^!e?N5+=TyXCbbGup zS>m>6pLW@X^v|uYxbx{}kmM+Mg?HS0bC{B&adfZX ztwqnv=E>jV{a&i*kk!67>Xr~jbmUBm*`#Fp)bckF3dfi((A!_AoT%vK2(QHgbhO;o$GMAmMmajC{7gb~fmEJppIn#xCNQe}~P=qP#$#ui;i;P!kQr%klFwWOb0 zZIo=B!`niQI4Z1HKj-+U6|ZsB5|M>z4^5?aI5Y9refE5%UDQ~<9?>@i%5Pii#Q1M_ zCQCpy-4B*y_XaQ%5M}!t}KT_|#M#A*l)Zc{C;WlAZvDG)S!oHM9;&|Pk)}Wr9W7Gu)msII+1-nt>k*z4PFKOHf$$8 zH&~MFFWy4+cOt$&@%NZo9=hsN1^d|G-8Z^DGfLi@I(5Olk*oP1!$Ho=>y^Km_J~tG zdP)9ZE26?1y7K?WBc7s>;6CqbhOF?nap-h1qg#r14*>2sOnR?8G_K{%VD0--YmDR$ zBq=>MqF99w1TXWl>qB% zssnivqKgHac#(T!K!5ow#nAhu1~qOaNT}=-_?*+@;L`(;Of8f`@9@fYj7=21=JFFi z{~ig_%;NlCLDabRRd185D9=^+HEzvgTk?*a!CEqp!o5#&kDARn$vz7IdC5O&2HTq9 zDC|9&BVJ>yC3NnKpNm$aDYTx$xo9RE?^d*vjq~E%qosW8J<(MDtiJu#>pmZC z<Jt(Nol_U@_v3X5=*Jc- zcaAzO`>i!gGT=H^ShAP=OAkcNPNTGc%?+bER~WPw{axcZ%+4+E4m(|Ej>CSA+p=fS z!aH$XsJ+)%YtyK83xmc>>y>tWV;b?1S)5BhD{j_^rRJ=GP62&6dAIdMN_4y9Ot$Yl z@2=?wpZ|VMu2m?j)3*ZjBe!gk=$?~EzTzCK?_18V$(Q$>^&x#&!M=Gvwy*iN2lG?I z`>dO1ypo}J-h9o;Av1T{^{w9_aQ4L>^z0s<9w%D8@1MBLJun#5bNtdd3nY$LBRSD; zmz}Gb+@ZZbKK6okF;MaU?d7|G1 z!1#CoM$Naa9+y_sLd|gxOjs4~YFzKns1%;VGvB{#9PDovj(Y#IFzw-walQ~AXe`=o 
zbX|g|kxH0a`^95fYsY8|m-ayh;j zzx0#h7LA;8y_viYUNvp^7D^;dCZ;csMkWe4+>7x10lf0I_*z>cJmG5oG zp4&A8b_r@aPAn#qf^?sMi9_T&ukq%5+fK$K2giq?Ja1j96|Pew}PS3y(msVd55K+s)C0{ zMHMYFJ7O6GZ^X9}42|3c;$=Amyd{jP;ZuqJ>d2s z`+9%c(LTRLGy|4%Ew^8}5ll{Ggl^bPya>GwGX`tam()5n_RJ>EC(tb96KGsZeUyDT zhi|PFvhmHeMmDasR>{V<)Dzj*=6WO>*IbhGajhji8)J(mC>H+B6e)6xy5)Pxp~iKt zTH`cbOHpg?MeEeq!{zz*vg|^(mZ)QjY>X`wkE66a5sk~(E^5t! zt}&0g!k}>vRi)*Z-qD^fB`q42gPB9>kLBoVP6r>)h?AA}_d0nUGzV(TyuGW;Ym337 z;ZfyGMdzLg$&=!3KFpHl%Y6DSur9eGYX;}|JRjd}a*#EH zT28Vs?U5ttZ9hBa>``YJd%l*=)=2e1{~4a3Ec4gBTEsngztcg_!It}Dnu*4%Nk@9+ zB&Ql^(2)+;H4pTzvSpX@bmem2v-o+*7g<=g_$CY2Iljunw~leLFzw+>=^pPxNff{5 zE6JoruXz~p=9ZSOR}#lFx9>(PXcuBEfOcgmtGzT8v@Mw&g&9X}&9aAk<8-oSp{Pyc zawnqX{T#Nfoyx{p?_@T{mYt50RG+c zmG)dCnFxATaN^*9>d)V<>nM8dzyvshLZ^h5F66=)dVYW#8F%z%AT z)nHGb2JN}u^}8`UJN9y9Ef2j^+J@9u%hw_WdA5H`uClbnTJEy&%;ho*N0Hks3`@Dr z!&1wA7Ou5k$ilPM8(Da2ZIFd&kFJoNC1cN7z`lED(zP1vHTCMsJ#!Y;JpER$=Y80t zQlHmCB~i=0C{At1Dx&%Nxm8v1o(mPe-dhZd@L^|^LPYHKUt@QJ=%a9Vim_i$=k|}g zXY`ZF0p5eZ5Zv!GE>EpT;IQ}9$@S70d0c9^n7#A!L)~c{$+UPks*jsGWs0{o{&M<{ z)4vV)pq`zLp-inqe%m^6tv|9f;#wPI;aSUg7M`^Z$ih=gViu;kq-NoeBxk{|r85iD z9$P)`$tb_BG5SWR;A-@!44x}D@<%Bs*cv%q0nhTrel(WI`y-mU#`UfH=((@&NG^>GPtYVBdhgBt5x4K-*|mculygC zSJVr051qvqztvr>=N-MPm#Em!rILP)wWjayo+X$Xi3;CLchR|D#I16A0nwL(E_l4* zK(@J;Zo;AwJ@v+7P;kaU{mEtvM30i=;6e{R#dXl_&^gB@14m9@9YJsZ>c?qC*Khgm z{zRD{tgGBzH8bo6JnYkIJ{p# zC9q3Q4+P$BASc>R1I}Q0)!pwyQH=tp?30=$J@#OyO6r->{hr}?Wi$3}NqfGeul2Jj z13d4wfBSmTo=}TCtS$B zzf7?AyB3*szs0TftDQ7^^Y(25vccXd051g#=NSj@0J4uZC0lhK5_7zy`T75IIB4#% znXDn)HiH|x(M}hYr;~QwCwczOtDX9W89`qp=#KqYHDk9^+3$|xOLK;<^ID?omT&iK zr{{J@^`3F=KKS(d-1C!L`#&DxtT_iCaEq5m{j)WmHGhr5pHoLv!4AMBOFvGLki5gC z{-49qPs-yHT>6G7R$Ka$>o4?oeF+t$n?7Dhw0=^!^!N2R$Nk|+X9SmKy@sH_d-d`Y z^N7%5uc!awkJ3GJ3?47Fy#DpQmT`DD=8x0S)5o5+GP_{|^mCBT4`O literal 0 HcmV?d00001