This repository was archived by the owner on Aug 28, 2025. It is now read-only.

Commit 974c620

CI: pre-commit docformatter (#63)
* add pre-commit docformatter
* fix args
* fix deprecated

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
1 parent 96de8be commit 974c620

9 files changed: +207 -220 lines


.actions/helpers.py

Lines changed: 3 additions & 1 deletion
@@ -148,6 +148,7 @@ def _meta_file(folder: str) -> str:
     @staticmethod
     def augment_script(fpath: str):
         """Add template header and footer to the python base script.
+
         Args:
             fpath: path to python script
         """
@@ -313,6 +314,7 @@ def parse_requirements(dir_path: str):
     @staticmethod
     def copy_notebooks(path_root: str, path_docs_ipynb: str = "docs/source/notebooks"):
         """Copy all notebooks from a folder to doc folder.
+
         Args:
             path_root: source path to the project root in this tutorials
             path_docs_ipynb: destination path to the notebooks location
@@ -362,7 +364,7 @@ def update_env_details(dir_path: str):
         req = [r.strip() for r in req]

         def _parse(pkg: str, keys: str = " <=>") -> str:
-            """Parsing just the package name"""
+            """Parsing just the package name."""
             if any(c in pkg for c in keys):
                 ix = min(pkg.index(c) for c in keys if c in pkg)
                 pkg = pkg[:ix]
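For reference, `_parse` strips version specifiers from a requirement string. A minimal runnable sketch of it follows; the final `return pkg` is an assumption, since the hunk's context window ends before it:

    def _parse(pkg: str, keys: str = " <=>") -> str:
        """Parsing just the package name."""
        if any(c in pkg for c in keys):
            ix = min(pkg.index(c) for c in keys if c in pkg)
            pkg = pkg[:ix]
        return pkg  # assumed: the hunk ends before the return line

    print(_parse("torch>=1.8"))   # torch
    print(_parse("matplotlib"))   # matplotlib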

.pre-commit-config.yaml

Lines changed: 6 additions & 0 deletions
@@ -29,6 +29,12 @@ repos:
         args: [--py36-plus]
         name: Upgrade code

+  - repo: https://github.com/myint/docformatter
+    rev: v1.4
+    hooks:
+      - id: docformatter
+        args: [--in-place, --wrap-summaries=115, --wrap-descriptions=120]
+
   - repo: https://github.com/PyCQA/isort
     rev: 5.9.2
     hooks:
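In effect, this hook rewrites docstrings in place on every commit (or on demand via `pre-commit run docformatter --all-files`), wrapping summary lines at 115 characters and description bodies at 120. A hypothetical before/after pair (the `scale` function is not from this repo) illustrating the style the hook converges on, which is exactly the transformation applied in the hunks below:

    # Hypothetical snippet, before the hook runs:
    def scale(x, factor=2):
        """
        Scale the input by a constant factor
        Args:
            x: input value
        """
        return x * factor

    # ...and after docformatter rewrites it, mirroring the hunks below:
    def scale(x, factor=2):
        """Scale the input by a constant factor.

        Args:
            x: input value
        """
        return x * factor

The summary moves onto the opening quotes and gains a trailing period, and a blank line separates it from the description, as seen throughout this commit.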

course_UvA-DL/autoregressive-image-modeling/Autoregressive_Image_Modeling.py

Lines changed: 23 additions & 25 deletions
@@ -184,14 +184,14 @@ def show_imgs(imgs):
 class MaskedConvolution(nn.Module):

     def __init__(self, c_in, c_out, mask, **kwargs):
-        """
-        Implements a convolution with mask applied on its weights.
-        Inputs:
-            c_in - Number of input channels
-            c_out - Number of output channels
-            mask - Tensor of shape [kernel_size_H, kernel_size_W] with 0s where
+        """Implements a convolution with mask applied on its weights.
+
+        Args:
+            c_in: Number of input channels
+            c_out: Number of output channels
+            mask: Tensor of shape [kernel_size_H, kernel_size_W] with 0s where
                 the convolution should be masked, and 1s otherwise.
-            kwargs - Additional arguments for the convolution
+            kwargs: Additional arguments for the convolution
         """
         super().__init__()
         # For simplicity: calculate padding automatically
@@ -290,12 +290,12 @@ def __init__(self, c_in, c_out, kernel_size=3, mask_center=False, **kwargs):


 def show_center_recep_field(img, out):
-    """
-    Calculates the gradients of the input with respect to the output center pixel,
-    and visualizes the overall receptive field.
-    Inputs:
-        img - Input image for which we want to calculate the receptive field on.
-        out - Output features/loss which is used for backpropagation, and should be
+    """Calculates the gradients of the input with respect to the output center pixel, and visualizes the overall
+    receptive field.
+
+    Args:
+        img: Input image for which we want to calculate the receptive field on.
+        out: Output features/loss which is used for backpropagation, and should be
             the output of the network/computation graph.
     """
     # Determine gradients
@@ -476,9 +476,7 @@ def show_center_recep_field(img, out):
 class GatedMaskedConv(nn.Module):

     def __init__(self, c_in, **kwargs):
-        """
-        Gated Convolution block implemented the computation graph shown above.
-        """
+        """Gated Convolution block implemented the computation graph shown above."""
         super().__init__()
         self.conv_vert = VerticalStackConvolution(c_in, c_out=2 * c_in, **kwargs)
         self.conv_horiz = HorizontalStackConvolution(c_in, c_out=2 * c_in, **kwargs)
@@ -558,10 +556,10 @@ def __init__(self, c_in, c_hidden):
         self.example_input_array = train_set[0][0][None]

     def forward(self, x):
-        """
-        Forward image through model and return logits for each pixel.
-        Inputs:
-            x - Image tensor with integer values between 0 and 255.
+        """Forward image through model and return logits for each pixel.
+
+        Args:
+            x: Image tensor with integer values between 0 and 255.
         """
         # Scale input from 0 to 255 back to -1 to 1
         x = (x.float() / 255.0) * 2 - 1
@@ -589,11 +587,11 @@ def calc_likelihood(self, x):

     @torch.no_grad()
     def sample(self, img_shape, img=None):
-        """
-        Sampling function for the autoregressive model.
-        Inputs:
-            img_shape - Shape of the image to generate (B,C,H,W)
-            img (optional) - If given, this tensor will be used as
+        """Sampling function for the autoregressive model.
+
+        Args:
+            img_shape: Shape of the image to generate (B,C,H,W)
+            img (optional): If given, this tensor will be used as
                 a starting image. The pixels to fill
                 should be -1 in the input tensor.
         """

course_UvA-DL/deep-autoencoders/Deep_Autoencoders.py

Lines changed: 12 additions & 16 deletions
@@ -133,11 +133,11 @@ def __init__(
         self, num_input_channels: int, base_channel_size: int, latent_dim: int, act_fn: object = nn.GELU
     ):
         """
-        Inputs:
-            - num_input_channels : Number of input channels of the image. For CIFAR, this parameter is 3
-            - base_channel_size : Number of channels we use in the first convolutional layers. Deeper layers might use a duplicate of it.
-            - latent_dim : Dimensionality of latent representation z
-            - act_fn : Activation function used throughout the encoder network
+        Args:
+            num_input_channels : Number of input channels of the image. For CIFAR, this parameter is 3
+            base_channel_size : Number of channels we use in the first convolutional layers. Deeper layers might use a duplicate of it.
+            latent_dim : Dimensionality of latent representation z
+            act_fn : Activation function used throughout the encoder network
         """
         super().__init__()
         c_hid = base_channel_size
@@ -195,11 +195,11 @@ def __init__(
         self, num_input_channels: int, base_channel_size: int, latent_dim: int, act_fn: object = nn.GELU
     ):
         """
-        Inputs:
-            - num_input_channels : Number of channels of the image to reconstruct. For CIFAR, this parameter is 3
-            - base_channel_size : Number of channels we use in the last convolutional layers. Early layers might use a duplicate of it.
-            - latent_dim : Dimensionality of latent representation z
-            - act_fn : Activation function used throughout the decoder network
+        Args:
+            num_input_channels : Number of channels of the image to reconstruct. For CIFAR, this parameter is 3
+            base_channel_size : Number of channels we use in the last convolutional layers. Early layers might use a duplicate of it.
+            latent_dim : Dimensionality of latent representation z
+            act_fn : Activation function used throughout the decoder network
         """
         super().__init__()
         c_hid = base_channel_size
@@ -263,17 +263,13 @@ def __init__(
         self.example_input_array = torch.zeros(2, num_input_channels, width, height)

     def forward(self, x):
-        """
-        The forward function takes in an image and returns the reconstructed image
-        """
+        """The forward function takes in an image and returns the reconstructed image."""
         z = self.encoder(x)
         x_hat = self.decoder(z)
         return x_hat

     def _get_reconstruction_loss(self, batch):
-        """
-        Given a batch of images, this function returns the reconstruction loss (MSE in our case)
-        """
+        """Given a batch of images, this function returns the reconstruction loss (MSE in our case)"""
         x, _ = batch  # We do not need the labels
         x_hat = self.forward(x)
         loss = F.mse_loss(x, x_hat, reduction="none")
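Note that the `_get_reconstruction_loss` hunk cuts off right after the `reduction="none"` call; a sketch of the likely continuation (the sum/mean reduction is an assumption based on the docstring's "MSE in our case"):

    import torch
    import torch.nn.functional as F

    def reconstruction_loss(x, x_hat):
        # Per-image MSE summed over channels/height/width, then averaged
        # over the batch; the reduction below is assumed, not from the diff.
        loss = F.mse_loss(x, x_hat, reduction="none")
        return loss.sum(dim=[1, 2, 3]).mean(dim=[0])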

course_UvA-DL/deep-energy-based-generative-models/Deep_Energy_Models.py

Lines changed: 18 additions & 18 deletions
@@ -333,11 +333,11 @@ class Sampler:

     def __init__(self, model, img_shape, sample_size, max_len=8192):
         """
-        Inputs:
-            model - Neural network to use for modeling E_theta
-            img_shape - Shape of the images to model
-            sample_size - Batch size of the samples
-            max_len - Maximum number of data points to keep in the buffer
+        Args:
+            model: Neural network to use for modeling E_theta
+            img_shape: Shape of the images to model
+            sample_size: Batch size of the samples
+            max_len: Maximum number of data points to keep in the buffer
         """
         super().__init__()
         self.model = model
@@ -347,11 +347,11 @@ def __init__(self, model, img_shape, sample_size, max_len=8192):
         self.examples = [(torch.rand((1, ) + img_shape) * 2 - 1) for _ in range(self.sample_size)]

     def sample_new_exmps(self, steps=60, step_size=10):
-        """
-        Function for getting a new batch of "fake" images.
-        Inputs:
-            steps - Number of iterations in the MCMC algorithm
-            step_size - Learning rate nu in the algorithm above
+        """Function for getting a new batch of "fake" images.
+
+        Args:
+            steps: Number of iterations in the MCMC algorithm
+            step_size: Learning rate nu in the algorithm above
         """
         # Choose 95% of the batch from the buffer, 5% generate from scratch
         n_new = np.random.binomial(self.sample_size, 0.05)
@@ -369,14 +369,14 @@ def sample_new_exmps(self, steps=60, step_size=10):

     @staticmethod
     def generate_samples(model, inp_imgs, steps=60, step_size=10, return_img_per_step=False):
-        """
-        Function for sampling images for a given model.
-        Inputs:
-            model - Neural network to use for modeling E_theta
-            inp_imgs - Images to start from for sampling. If you want to generate new images, enter noise between -1 and 1.
-            steps - Number of iterations in the MCMC algorithm.
-            step_size - Learning rate nu in the algorithm above
-            return_img_per_step - If True, we return the sample at every iteration of the MCMC
+        """Function for sampling images for a given model.
+
+        Args:
+            model: Neural network to use for modeling E_theta
+            inp_imgs: Images to start from for sampling. If you want to generate new images, enter noise between -1 and 1.
+            steps: Number of iterations in the MCMC algorithm.
+            step_size: Learning rate nu in the algorithm above
+            return_img_per_step: If True, we return the sample at every iteration of the MCMC
         """
         # Before MCMC: set model parameters to "required_grad=False"
         # because we are only interested in the gradients of the input.
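For readers skimming the diff, the "MCMC algorithm" and "learning rate nu" in these docstrings refer to Langevin-style sampling on the energy landscape. A rough single-step sketch under that reading (the noise scale and gradient clamp are assumed values, not the repo's exact code):

    import torch

    def langevin_step(model, imgs, step_size=10, noise_std=0.005):
        """One sampling update, sketched: x <- x - nu * dE/dx + noise."""
        imgs = imgs.clone().detach()
        imgs.add_(noise_std * torch.randn_like(imgs)).clamp_(-1.0, 1.0)
        imgs.requires_grad_(True)
        energy = model(imgs).sum()      # E_theta summed over the batch
        grad, = torch.autograd.grad(energy, imgs)
        grad = grad.clamp(-0.03, 0.03)  # assumed clamp for stability
        return (imgs.detach() - step_size * grad).clamp_(-1.0, 1.0)

Running this for `steps` iterations from uniform noise in [-1, 1] is the sampling loop the docstrings describe.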

course_UvA-DL/graph-neural-networks/GNN_overview.py

Lines changed: 42 additions & 42 deletions
@@ -172,9 +172,9 @@ def __init__(self, c_in, c_out):

     def forward(self, node_feats, adj_matrix):
         """
-        Inputs:
-            node_feats - Tensor with node features of shape [batch_size, num_nodes, c_in]
-            adj_matrix - Batch of adjacency matrices of the graph. If there is an edge from i to j,
+        Args:
+            node_feats: Tensor with node features of shape [batch_size, num_nodes, c_in]
+            adj_matrix: Batch of adjacency matrices of the graph. If there is an edge from i to j,
                 adj_matrix[b,i,j]=1 else 0. Supports directed edges by non-symmetric matrices.
                 Assumes to already have added the identity connections.
                 Shape: [batch_size, num_nodes, num_nodes]
@@ -302,13 +302,13 @@ class GATLayer(nn.Module):

     def __init__(self, c_in, c_out, num_heads=1, concat_heads=True, alpha=0.2):
         """
-        Inputs:
-            c_in - Dimensionality of input features
-            c_out - Dimensionality of output features
-            num_heads - Number of heads, i.e. attention mechanisms to apply in parallel. The
+        Args:
+            c_in: Dimensionality of input features
+            c_out: Dimensionality of output features
+            num_heads: Number of heads, i.e. attention mechanisms to apply in parallel. The
                 output features are equally split up over the heads if concat_heads=True.
-            concat_heads - If True, the output of the different heads is concatenated instead of averaged.
-            alpha - Negative slope of the LeakyReLU activation.
+            concat_heads: If True, the output of the different heads is concatenated instead of averaged.
+            alpha: Negative slope of the LeakyReLU activation.
         """
         super().__init__()
         self.num_heads = num_heads
@@ -328,10 +328,10 @@ def __init__(self, c_in, c_out, num_heads=1, concat_heads=True, alpha=0.2):

     def forward(self, node_feats, adj_matrix, print_attn_probs=False):
         """
-        Inputs:
-            node_feats - Input features of the node. Shape: [batch_size, c_in]
-            adj_matrix - Adjacency matrix including self-connections. Shape: [batch_size, num_nodes, num_nodes]
-            print_attn_probs - If True, the attention weights are printed during the forward pass
+        Args:
+            node_feats: Input features of the node. Shape: [batch_size, c_in]
+            adj_matrix: Adjacency matrix including self-connections. Shape: [batch_size, num_nodes, num_nodes]
+            print_attn_probs: If True, the attention weights are printed during the forward pass
                 (for debugging purposes)
         """
         batch_size, num_nodes = node_feats.size(0), node_feats.size(1)
@@ -507,14 +507,14 @@ def __init__(
         **kwargs,
     ):
         """
-        Inputs:
-            c_in - Dimension of input features
-            c_hidden - Dimension of hidden features
-            c_out - Dimension of the output features. Usually number of classes in classification
-            num_layers - Number of "hidden" graph layers
-            layer_name - String of the graph layer to use
-            dp_rate - Dropout rate to apply throughout the network
-            kwargs - Additional arguments for the graph layer (e.g. number of heads for GAT)
+        Args:
+            c_in: Dimension of input features
+            c_hidden: Dimension of hidden features
+            c_out: Dimension of the output features. Usually number of classes in classification
+            num_layers: Number of "hidden" graph layers
+            layer_name: String of the graph layer to use
+            dp_rate: Dropout rate to apply throughout the network
+            kwargs: Additional arguments for the graph layer (e.g. number of heads for GAT)
         """
         super().__init__()
         gnn_layer = gnn_layer_by_name[layer_name]
@@ -533,9 +533,9 @@ def __init__(

     def forward(self, x, edge_index):
         """
-        Inputs:
-            x - Input features per node
-            edge_index - List of vertex index pairs representing the edges in the graph (PyTorch geometric notation)
+        Args:
+            x: Input features per node
+            edge_index: List of vertex index pairs representing the edges in the graph (PyTorch geometric notation)
         """
         for layer in self.layers:
             # For graph layers, we need to add the "edge_index" tensor as additional input
@@ -560,12 +560,12 @@ class MLPModel(nn.Module):

     def __init__(self, c_in, c_hidden, c_out, num_layers=2, dp_rate=0.1):
         """
-        Inputs:
-            c_in - Dimension of input features
-            c_hidden - Dimension of hidden features
-            c_out - Dimension of the output features. Usually number of classes in classification
-            num_layers - Number of hidden layers
-            dp_rate - Dropout rate to apply throughout the network
+        Args:
+            c_in: Dimension of input features
+            c_hidden: Dimension of hidden features
+            c_out: Dimension of the output features. Usually number of classes in classification
+            num_layers: Number of hidden layers
+            dp_rate: Dropout rate to apply throughout the network
         """
         super().__init__()
         layers = []
@@ -578,8 +578,8 @@ def __init__(self, c_in, c_hidden, c_out, num_layers=2, dp_rate=0.1):

     def forward(self, x, *args, **kwargs):
         """
-        Inputs:
-            x - Input features per node
+        Args:
+            x: Input features per node
         """
         return self.layers(x)

@@ -858,12 +858,12 @@ class GraphGNNModel(nn.Module):

     def __init__(self, c_in, c_hidden, c_out, dp_rate_linear=0.5, **kwargs):
         """
-        Inputs:
-            c_in - Dimension of input features
-            c_hidden - Dimension of hidden features
-            c_out - Dimension of output features (usually number of classes)
-            dp_rate_linear - Dropout rate before the linear layer (usually much higher than inside the GNN)
-            kwargs - Additional arguments for the GNNModel object
+        Args:
+            c_in: Dimension of input features
+            c_hidden: Dimension of hidden features
+            c_out: Dimension of output features (usually number of classes)
+            dp_rate_linear: Dropout rate before the linear layer (usually much higher than inside the GNN)
+            kwargs: Additional arguments for the GNNModel object
         """
         super().__init__()
         self.GNN = GNNModel(
@@ -876,10 +876,10 @@ def __init__(self, c_in, c_hidden, c_out, dp_rate_linear=0.5, **kwargs):

     def forward(self, x, edge_index, batch_idx):
         """
-        Inputs:
-            x - Input features per node
-            edge_index - List of vertex index pairs representing the edges in the graph (PyTorch geometric notation)
-            batch_idx - Index of batch element for each node
+        Args:
+            x: Input features per node
+            edge_index: List of vertex index pairs representing the edges in the graph (PyTorch geometric notation)
+            batch_idx: Index of batch element for each node
         """
         x = self.GNN(x, edge_index)
         x = geom_nn.global_mean_pool(x, batch_idx)  # Average pooling
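The GCN forward contract documented in the first hunk (batched node features, adjacency matrix with identity connections already added) takes only a few lines to satisfy. A sketch consistent with that docstring, with the class name suffixed `Sketch` to flag that it is illustrative rather than the tutorial's exact layer:

    import torch
    import torch.nn as nn

    class GCNLayerSketch(nn.Module):
        def __init__(self, c_in, c_out):
            super().__init__()
            self.projection = nn.Linear(c_in, c_out)

        def forward(self, node_feats, adj_matrix):
            # Project, then average each node's features over its neighbours;
            # adj_matrix is assumed to already contain the identity connections.
            num_neighbours = adj_matrix.sum(dim=-1, keepdim=True)
            node_feats = self.projection(node_feats)
            node_feats = torch.bmm(adj_matrix, node_feats)
            return node_feats / num_neighbours

    node_feats = torch.arange(8, dtype=torch.float32).view(1, 4, 2)
    adj_matrix = torch.tensor([[[1.0, 1.0, 0.0, 0.0],
                                [1.0, 1.0, 1.0, 1.0],
                                [0.0, 1.0, 1.0, 1.0],
                                [0.0, 1.0, 1.0, 1.0]]])
    print(GCNLayerSketch(2, 2)(node_feats, adj_matrix).shape)  # torch.Size([1, 4, 2])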

0 commit comments
