-
Notifications
You must be signed in to change notification settings - Fork 69
Unified, centralized and simplified code; added CPU option #6
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: master
Are you sure you want to change the base?
Changes from 3 commits
c889689
24c797d
1246860
12ebf75
84ea7cb
9b4b794
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Large diffs are not rendered by default.
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,112 @@ | ||
import matplotlib.pyplot as plt

from fastai.conv_learner import *
|
|
||
|
|
||
class SaveFeatures:
    """Forward-hook helper that records a module's most recent output.

    Register it on any layer; after each forward pass through the model,
    `self.features` holds that layer's raw output tensor. Call `close()`
    to detach the hook when done.
    """

    def __init__(self, module):
        # Keep the hook handle so the hook can be removed later.
        self.hook = module.register_forward_hook(self.hook_fn)

    def hook_fn(self, module, _input, output):
        # Called by the framework on every forward pass of `module`.
        self.features = output

    def close(self):
        # Detach the hook; `features` keeps its last captured value.
        self.hook.remove()
|
|
||
|
|
||
class FilterVisualizer:
    """Reconstructs the input pattern a conv filter responds to most strongly.

    Works by gradient ascent on a random image, repeatedly upscaling it to
    avoid high-frequency artifacts (see `visualize`).
    """

    def __init__(self, model, size=56, upscaling_steps=12, upscaling_factor=1.2,
                 cpu=False):
        # size: edge length of the initial random image.
        # upscaling_steps / upscaling_factor: how many times, and by how much,
        #   the image is scaled up during optimization.
        # cpu: take the CPU path when de-normalizing (see get_transformed_img).
        self.size = size
        self.upscaling_steps = upscaling_steps
        self.upscaling_factor = upscaling_factor
        self.model = model
        # Freeze the model's weights: only the input image is optimized.
        set_trainable(self.model, False)
        self.cpu = cpu
|
|
||
| def visualize(self, | ||
| layer, | ||
| conv_filter, | ||
| lr=0.1, | ||
| opt_steps=20, | ||
| print_losses=False, | ||
| layer_name_plot=None, | ||
| blur=None, ): | ||
| sz = self.size | ||
| img = (np.random.random((sz, sz, 3)) * 20 + 128.) / 255. | ||
| activations = SaveFeatures(layer) # register hook | ||
|
|
||
| for i in range(self.upscaling_steps | ||
| ): # scale the image up upscaling_steps times | ||
|
|
||
| if i > self.upscaling_steps / 2: | ||
| opt_steps_ = int(opt_steps * 1.3) | ||
| else: | ||
| opt_steps_ = opt_steps | ||
| train_tfms, val_tfms = tfms_from_model(self.model, sz) | ||
| img_var = V( | ||
| val_tfms(img)[None], | ||
| requires_grad=True) # convert image to variable that requires grad | ||
| optimizer = torch.optim.Adam([img_var], lr=lr, weight_decay=1e-6) | ||
|
|
||
| for n in range( | ||
| opt_steps_): # optimize pixel values for opt_steps times | ||
| optimizer.zero_grad() | ||
| self.model(img_var) | ||
| loss = -activations.features[0, conv_filter].mean() | ||
|
|
||
| if print_losses and i % 3 == 0 and n % 5 == 0: | ||
| print(f'{i} - {n} - {float(loss)}') | ||
| loss.backward() | ||
| optimizer.step() | ||
|
|
||
| img = val_tfms.denorm(img_var.data.cpu().numpy()[0].transpose( | ||
| 1, 2, 0)) | ||
|
|
||
| self.output = img | ||
| sz = int(self.upscaling_factor * sz) # calculate new image sz | ||
| img = cv2.resize(img, (sz, sz), | ||
| interpolation=cv2.INTER_CUBIC) # scale image up | ||
|
|
||
| if blur: | ||
| img = cv2.blur( | ||
| img, | ||
| (blur, | ||
| blur)) # blur image to reduce high frequency patterns | ||
|
|
||
| if layer_name_plot: | ||
| self.save(layer_name_plot, conv_filter) | ||
| activations.close() | ||
|
|
||
| return np.clip(self.output, 0, 1) | ||
|
|
||
| def save(self, layer_name_plot, conv_filter): | ||
| plt.imsave(f'layer_{layer_name_plot}_conv_filter_{conv_filter}.png', | ||
| np.clip(self.output, 0, 1)) | ||
|
|
||
| def get_transformed_img(self, img, sz): | ||
| train_tfms, val_tfms = tfms_from_model(self.model, sz) | ||
|
|
||
| if self.cpu: | ||
| return val_tfms.denorm(np.rollaxis(val_tfms(img)[None], 1, 4))[0] | ||
|
|
||
| return val_tfms.denorm(np.rollaxis(to_np(val_tfms(img)[None]), 1, | ||
| 4))[0] | ||
|
|
||
| def most_activated(self, image, layer): | ||
| train_tfms, val_tfms = tfms_from_model(self.model, 224) | ||
| transformed = val_tfms(image) | ||
|
|
||
| activations = SaveFeatures(layer) # register hook | ||
| self.model(V(transformed)[None]) | ||
|
|
||
| print(activations.features[0, 5].mean().data.cpu().numpy()) | ||
|
Owner
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Why print especially
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Yep. Sorry for that. I will also change that!
Owner
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. No need to apologize, I'm happy you put effort into improving the code, thanks for that!
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
I will move it to the root then!
Yes! I was thinking thinking about it already but decided not to, because
What do you think?
Owner
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Okay, that does make sense! I will apply the discussed changes and keep both notebooks separated then! 😃 |
||
| mean_act = [ | ||
| activations.features[0, i].mean().data.cpu().numpy() | ||
| for i in range(activations.features.shape[1]) | ||
| ] | ||
| activations.close() | ||
|
|
||
| return mean_act | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,51 @@ | ||
| from math import ceil | ||
|
|
||
| import matplotlib.pyplot as plt | ||
|
|
||
|
|
||
def plot_reconstructions_single_layer(imgs,
                                      layer_name,
                                      filters,
                                      n_cols=3,
                                      cell_size=4,
                                      save_fig=False):
    """Plot one filter reconstruction per grid cell and optionally save it.

    Args:
        imgs: list of images, one per entry in `filters`.
        layer_name: figure title; also lowercased/underscored for the filename.
        filters: filter indices used to label each cell.
        n_cols: grid width; rows are derived from len(imgs).
        cell_size: size in inches of each grid cell.
        save_fig: if True, write the figure to a png derived from layer_name.

    Returns:
        The matplotlib.pyplot module, so callers can keep customizing.
    """
    n_rows = ceil(len(imgs) / n_cols)
    fig, axes = plt.subplots(n_rows,
                             n_cols,
                             figsize=(cell_size * n_cols, cell_size * n_rows))

    for i, ax in enumerate(axes.flat):
        # Fix: the grid may hold more cells than images; hide the surplus axes
        # instead of indexing past the end of imgs/filters (IndexError before).
        if i >= len(imgs):
            ax.set_visible(False)
            continue
        ax.grid(False)
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        ax.set_title(f'fmap {filters[i]}')
        ax.imshow(imgs[i])

    fig.suptitle(f'{layer_name}', fontsize="x-large", y=1.0)
    plt.tight_layout()
    plt.subplots_adjust(top=0.88)
    save_name = layer_name.lower().replace(' ', '_')

    if save_fig:
        plt.savefig(
            f'{save_name}_fmaps_{"_".join(str(f) for f in filters)}.png')

    plt.show()

    return plt
|
|
||
|
|
||
def reconstructions_single_layer(FV,
                                 layer,
                                 filters,
                                 opt_steps=20,
                                 blur=5,
                                 lr=1e-1,
                                 print_losses=False):
    """Run FV.visualize once per filter and collect the reconstructions.

    Args:
        FV: a FilterVisualizer (anything exposing a compatible `visualize`).
        layer: layer forwarded to FV.visualize.
        filters: iterable of filter indices to reconstruct.
        opt_steps, blur, lr, print_losses: forwarded to FV.visualize.

    Returns:
        List of reconstructed images, in the order of `filters`.
    """
    # Iterate filters directly rather than indexing via range(len(...)).
    return [
        FV.visualize(layer=layer,
                     conv_filter=f,
                     opt_steps=opt_steps,
                     blur=blur,
                     lr=lr,
                     print_losses=print_losses) for f in filters
    ]
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This is done in the `get_transformed` and `most_activated` methods as well. How about making these instance attributes?

There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Oh you are right. I overlooked that I'm afraid. I will change that!
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Thanks!