|
| 1 | + |
| 2 | +"""# Measure invariance in a pretrained EfficientNet model |
| 3 | +#Using PyTorch, we will download a EfficientNet_b0 model pretrained on ImageNet and evaluate its invariance.""" |
| 4 | + |
| 5 | + |
| 6 | +# Commented out IPython magic to ensure Python compatibility. |
| 7 | +# %pip install tmeasures |
| 8 | +# %pip install tinyimagenet |
| 9 | +# %pip install scikit-learn |
| 10 | + |
| 11 | +# %load_ext autoreload |
| 12 | +# %autoreload 2 |
import torch

from pathlib import Path

# Directory where results and figures from this example can be saved.
results_path = Path("~/tm_example_pytorch/").expanduser()
results_path.mkdir(parents=True, exist_ok=True)

from torchvision import models

# Prefer the GPU when one is available; otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Download an EfficientNet-B0 pretrained on ImageNet and move it to the device.
model = models.efficientnet_b0(weights=models.EfficientNet_B0_Weights.IMAGENET1K_V1)
model = model.to(device)
| 26 | + |
| 27 | +"""# Measure model's invariance |
| 28 | +To measure the model's invariance, we require three things: |
| 29 | +
|
| 30 | +A version of the dataset, without labels and reduced in size to reduce computation. |
| 31 | +A discrete and finite set of transformations |
| 32 | +The model itself, with access to intermediate values or activations |
| 33 | +Afterwards, we can create an Invariance Measure and compute it with these elements. |
| 34 | +
|
| 35 | +# 1. Dataset |
| 36 | +Since the invariance measure does not use the labels of the dataset, we will create a custom TinyImageNet dataset which does not return labels, only samples.
| 37 | +
|
| 38 | +Also, since the invariance measure does not require large sample sizes, we will subsample the test set of TinyImageNet to obtain a reduced sample and reduce computation time.
| 39 | +""" |
| 40 | + |
import tinyimagenet
import torchvision


class TinyImageNet(tinyimagenet.TinyImageNet):
    """TinyImageNet variant that yields samples only, without labels.

    The invariance measures ignore labels, so ``__getitem__`` discards the
    label returned by the parent class and returns just the image.
    """

    def __getitem__(self, index):
        sample, _label = super().__getitem__(index)
        return sample
| 48 | + |
# Convert PIL images to tensors and normalize with the dataset statistics.
normalize_transform = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize(TinyImageNet.mean, TinyImageNet.std),
])


# Label-free test split of TinyImageNet (downloaded on first use).
dataset_nolabels = TinyImageNet(
    root="~/.datasets/tinyimagenet/",
    split="test",
    transform=normalize_transform,
)
| 57 | + |
| 58 | + |
# Subsample: the invariance measure only needs a modest sample size.
N = 1000

import numpy as np
from sklearn.model_selection import train_test_split
from torch.utils.data import Subset

# Stratified selection of N indices preserves the class balance of the test
# set (assumes the underlying dataset exposes a `targets` attribute).
indices, _ = train_test_split(
    np.arange(len(dataset_nolabels)),
    train_size=N,
    stratify=dataset_nolabels.targets,
    random_state=0,
)
dataset_nolabels = Subset(dataset_nolabels, indices)
| 66 | + |
| 67 | +"""# 2. Model |
| 68 | +PyTorch works with nn.Module objects that provide a forward method. The invariance measures, however, do not require just the result(s) of the forward method. Instead, we need the result of all the intermediate values or activations used to compute the final output(s). |
| 69 | +
|
| 70 | +While it would be possible to modify a nn.Module defined model to return all of its activations, this would be cumbersome and difficult to manage since when training/testing we would require the usual forward, and when computing the measure we would require the new forward. |
| 71 | +
|
| 72 | +Therefore, measures in tmeasures require a model that implements the ActivationsModule interface with just two methods: forward_activations and activation_names. While implementing these methods allows you to best decide which activations are selected and how they are used, they can be cumbersome to define and maintain, especially when actively modifying a model.
| 73 | +
|
| 74 | +Luckily, the AutoActivationsModule can take an unmodified nn.Module and automatically implement these methods using [forward hooks](https://pytorch.org/docs/stable/generated/torch.nn.modules.module.register_module_forward_hook.html). |
| 75 | +""" |
| 76 | + |
import tmeasures as tm


# Switch the model to evaluation mode before measuring its invariance.
model.eval()
# Print the architecture to see which layers will produce activations.
print(model)
| 83 | + |
| 84 | + |
def filter_stochastic(activation):
    """Predicate for AutoActivationsModule: keep everything except modules
    whose string representation starts with "StochasticDepth"."""
    # AutoActivationsModule passes module objects; match on their repr prefix.
    name = str(activation)
    return not name.startswith("StochasticDepth")
| 88 | + |
# Wrap the vanilla nn.Module so that intermediate activations are captured via
# forward hooks, excluding StochasticDepth layers.
# (Removed a stray commented-out line `# a.training=False` — dead code.)
activations_module = tm.pytorch.AutoActivationsModule(model, filter=filter_stochastic)
| 92 | +"""# Computing the measure |
| 93 | +Last step before computing the measure: we need to define a PyTorchMeasureOptions object to configure where and how the measure will be computed. The batch_size and num_workers keywords are analogous to the ones used in PyTorch's DataLoader.
| 94 | +
|
| 95 | +The data_device, model_device and measure_device indicate, respectively, where the transformations and data preprocessing is performed, where the activations of the model are computed, and finally where the actual measure is computed. In simple cases, these devices could all be the same. |
| 96 | +
|
| 97 | +Finally, we can eval the measure with the dataset, transformation, model and options, obtaining a PyTorchMeasureResult, which can be handily converted to a numpy version for easy visualization. |
| 98 | +""" |
| 99 | + |
| 100 | +import torchvision |
| 101 | +import math |
| 102 | + |
def brightness_transform(brightness_factor: float):
    """Return a transformation that scales tensor intensities.

    Multiplying the (already normalized) image tensor by ``brightness_factor``
    approximates a brightness change; a factor of 1.0 is the identity.

    NOTE(review): a commented-out alternative using
    ``torchvision.transforms.functional.adjust_brightness`` was removed as
    dead code; the plain multiplicative version is what the example uses.
    """
    return lambda x: x * brightness_factor
| 106 | + |
# Discrete, finite set of brightness transformations to measure against.
transformations = [
    brightness_transform(factor)
    for factor in [0.25, 0.5, 0.75, 1, 1.25, 1.50, 1.75, 2.0]
]

# Preview: apply every transformation to a handful of evenly spaced samples.
n = 500
step = 50
transformed_images = [
    t(dataset_nolabels[i])
    for i in range(0, n, step)
    for t in transformations
]
grid = torchvision.utils.make_grid(transformed_images)

import matplotlib.pyplot as plt

# Undo the normalization (x = normalized * std + mean) so colors display
# correctly; after permute the array is HWC, broadcasting over the channel axis.
grid_np = grid.permute(1, 2, 0).numpy()
grid_np = grid_np * np.array(TinyImageNet.std) + np.array(TinyImageNet.mean)
plt.figure(dpi=200)
plt.imshow(grid_np)
| 119 | + |
# Options for computing the measure.
# data_device: where transformations/preprocessing run; model_device: where
# activations are computed; measure_device: where the measure itself runs.
options = tm.pytorch.PyTorchMeasureOptions(
    batch_size=128,
    num_workers=0,
    model_device=device,
    measure_device=device,
    data_device="cpu",
)

# Define the measure and evaluate it; the result holds one variance entry per
# measured layer/activation.
measure = tm.pytorch.NormalizedVarianceInvariance()
measure_result: tm.pytorch.PyTorchMeasureResult = measure.eval(
    dataset_nolabels, transformations, activations_module, options
)
# Convert to a numpy-backed result for easy visualization.
measure_result = measure_result.numpy()

tm.visualization.plot_average_activations(measure_result)
plt.show()
tm.visualization.plot_heatmap(measure_result)
plt.show()  # fix: the final heatmap figure was never displayed when run as a script
0 commit comments