Skip to content
Open
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view

Large diffs are not rendered by default.

112 changes: 112 additions & 0 deletions experiments/filtervisualizer.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,112 @@
# pyplot (not the bare ``matplotlib`` module) provides ``imsave`` used below.
import matplotlib.pyplot as plt

from fastai.conv_learner import *


class SaveFeatures():
    """Forward-hook helper that records a module's most recent output.

    After construction, every forward pass through ``module`` stores its
    output tensor on ``self.features``; call :meth:`close` to detach.
    """

    def __init__(self, module):
        # Register ourselves; ``hook_fn`` fires on every forward pass.
        self.hook = module.register_forward_hook(self.hook_fn)

    def hook_fn(self, module, _input, output):
        # Keep only the latest activations; earlier ones are overwritten.
        self.features = output

    def close(self):
        """Detach the hook so the module stops recording activations."""
        self.hook.remove()


class FilterVisualizer():
    """Reconstructs inputs that maximally activate a chosen conv filter."""

    def __init__(self,
                 model,
                 size=56,
                 upscaling_steps=12,
                 upscaling_factor=1.2,
                 cpu=False):
        """Store the optimisation hyper-parameters and freeze the model.

        Args:
            model: the (pre-trained) network whose filters are visualised.
            size: edge length of the initial random image.
            upscaling_steps: how many times the image is scaled up.
            upscaling_factor: multiplicative size increase per step.
            cpu: whether transforms run on CPU (affects get_transformed_img).
        """
        self.size = size
        self.upscaling_steps = upscaling_steps
        self.upscaling_factor = upscaling_factor
        self.model = model
        # Freeze weights: only the input image is optimised, never the net.
        set_trainable(self.model, False)
        self.cpu = cpu

def visualize(self,
layer,
conv_filter,
lr=0.1,
opt_steps=20,
print_losses=False,
layer_name_plot=None,
blur=None, ):
sz = self.size
img = (np.random.random((sz, sz, 3)) * 20 + 128.) / 255.
activations = SaveFeatures(layer) # register hook

for i in range(self.upscaling_steps
): # scale the image up upscaling_steps times

if i > self.upscaling_steps / 2:
opt_steps_ = int(opt_steps * 1.3)
else:
opt_steps_ = opt_steps
train_tfms, val_tfms = tfms_from_model(self.model, sz)
Copy link
Copy Markdown
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is done in the get_transformed and most_activated methods as well. How about making these instance attributes?

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Oh you are right. I overlooked that I'm afraid. I will change that!

Copy link
Copy Markdown
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks!

img_var = V(
val_tfms(img)[None],
requires_grad=True) # convert image to variable that requires grad
optimizer = torch.optim.Adam([img_var], lr=lr, weight_decay=1e-6)

for n in range(
opt_steps_): # optimize pixel values for opt_steps times
optimizer.zero_grad()
self.model(img_var)
loss = -activations.features[0, conv_filter].mean()

if print_losses and i % 3 == 0 and n % 5 == 0:
print(f'{i} - {n} - {float(loss)}')
loss.backward()
optimizer.step()

img = val_tfms.denorm(img_var.data.cpu().numpy()[0].transpose(
1, 2, 0))

self.output = img
sz = int(self.upscaling_factor * sz) # calculate new image sz
img = cv2.resize(img, (sz, sz),
interpolation=cv2.INTER_CUBIC) # scale image up

if blur:
img = cv2.blur(
img,
(blur,
blur)) # blur image to reduce high frequency patterns

if layer_name_plot:
self.save(layer_name_plot, conv_filter)
activations.close()

return np.clip(self.output, 0, 1)

def save(self, layer_name_plot, conv_filter):
plt.imsave(f'layer_{layer_name_plot}_conv_filter_{conv_filter}.png',
np.clip(self.output, 0, 1))

def get_transformed_img(self, img, sz):
train_tfms, val_tfms = tfms_from_model(self.model, sz)

if self.cpu:
return val_tfms.denorm(np.rollaxis(val_tfms(img)[None], 1, 4))[0]

return val_tfms.denorm(np.rollaxis(to_np(val_tfms(img)[None]), 1,
4))[0]

def most_activated(self, image, layer):
train_tfms, val_tfms = tfms_from_model(self.model, 224)
transformed = val_tfms(image)

activations = SaveFeatures(layer) # register hook
self.model(V(transformed)[None])

print(activations.features[0, 5].mean().data.cpu().numpy())
Copy link
Copy Markdown
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why print especially activations.features[0, 5]? This was from the experiments folder, correct? :)

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yep. Sorry for that. I will also change that!

Copy link
Copy Markdown
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

No need to apologize, I'm happy you put effort into improving the code, thanks for that!

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The experiments folder contains additional notebooks provided by a reader of the blog post, so I would like to keep the FilterVisualizer separated from it. Could you please move the filtervisualizer.py out of the experiments folder? The plot_reconstructions.py can stay in the experiments folder as it is only used for the additional experiments...

I will move it to the root then!

Would you be willing to simplify the Calculate_mean_activation_per_filter_in_specific_layer_given_an_image.ipynb using the most_activated method? from FiterVisualizer`?

Yes! I was thinking thinking about it already but decided not to, because

  1. the notebook would be basically empty then (perhaps I can combine the two then?) and
  2. I was not sure whether to include the threshold in the method or not.
    IMHO it makes more sense to do the threshold outside of the method.

What do you think?

Copy link
Copy Markdown
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

  • Yes, I agree to not include the threshold in the method.
  • I would prefer to keep two separate notebooks even though there is not that much in the second one because I would have to go through the blog post and remove any hyperlinks to the 2nd notebook ahah

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Okay, that does make sense! I will apply the discussed changes and keep both notebooks separated then! 😃

mean_act = [
activations.features[0, i].mean().data.cpu().numpy()
for i in range(activations.features.shape[1])
]
activations.close()

return mean_act
51 changes: 51 additions & 0 deletions experiments/plot_reconstructions.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
from math import ceil

import matplotlib.pyplot as plt


def plot_reconstructions_single_layer(imgs,
                                      layer_name,
                                      filters,
                                      n_cols=3,
                                      cell_size=4,
                                      save_fig=False):
    """Plot filter reconstructions for one layer in a grid.

    Args:
        imgs: list of reconstructed images (one per filter).
        layer_name: title for the whole figure; also used for the filename.
        filters: filter indices matching ``imgs`` (used as subplot titles).
        n_cols: number of grid columns.
        cell_size: size (inches) of each grid cell.
        save_fig: if True, save the figure as a PNG.

    Returns:
        The ``matplotlib.pyplot`` module (for further customisation).
    """
    n_rows = ceil(len(imgs) / n_cols)
    # squeeze=False keeps ``axes`` 2-D even for a 1x1 grid, so ``.flat``
    # always works.
    fig, axes = plt.subplots(n_rows,
                             n_cols,
                             figsize=(cell_size * n_cols, cell_size * n_rows),
                             squeeze=False)

    for i, ax in enumerate(axes.flat):
        # The grid may have more cells than images; hide the surplus axes
        # instead of raising IndexError on imgs[i].
        if i >= len(imgs):
            ax.axis('off')
            continue
        ax.grid(False)
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        ax.set_title(f'fmap {filters[i]}')
        ax.imshow(imgs[i])

    fig.suptitle(f'{layer_name}', fontsize="x-large", y=1.0)
    plt.tight_layout()
    plt.subplots_adjust(top=0.88)
    save_name = layer_name.lower().replace(' ', '_')

    if save_fig:
        plt.savefig(
            f'{save_name}_fmaps_{"_".join([str(f) for f in filters])}.png')

    plt.show()

    return plt


def reconstructions_single_layer(FV,
                                 layer,
                                 filters,
                                 opt_steps=20,
                                 blur=5,
                                 lr=1e-1,
                                 print_losses=False):
    """Run ``FV.visualize`` once per filter and collect the reconstructions.

    Args:
        FV: a FilterVisualizer instance.
        layer: the layer to visualise.
        filters: iterable of filter indices to reconstruct.
        opt_steps, blur, lr, print_losses: forwarded to ``FV.visualize``.

    Returns:
        List of reconstructed images, in the order of ``filters``.
    """
    reconstructions = []
    for conv_filter in filters:
        reconstructions.append(
            FV.visualize(layer=layer,
                         conv_filter=conv_filter,
                         opt_steps=opt_steps,
                         blur=blur,
                         lr=lr,
                         print_losses=print_losses))
    return reconstructions
Loading