[TYPING](ALL): Fix typing errors

PauAndrio committed Oct 17, 2024
1 parent 3a17ef0 commit bd4b023

Showing 5 changed files with 37 additions and 34 deletions.
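The pattern throughout the commit is the PEP 585 migration: on Python 3.9+ the builtin containers are themselves generic, so the `typing.List`, `typing.Dict`, and `typing.Tuple` aliases (deprecated since 3.9) and their imports can be dropped. A minimal before/after sketch — the function and names here are illustrative, not from the repository:

# Before: Python 3.8-style annotations needed the typing aliases.
# from typing import Dict, List, Tuple
# def summarize(values: List[float]) -> Tuple[float, Dict[str, float]]: ...

# After (PEP 585, Python >= 3.9): the builtins are subscriptable directly.
def summarize(values: list[float]) -> tuple[float, dict[str, float]]:
    """Return the mean and a small stats dict for a list of floats."""
    mean = sum(values) / len(values)
    return mean, {"min": min(values), "max": max(values), "mean": mean}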
15 changes: 7 additions & 8 deletions biobb_pytorch/docs/source/conf.py
@@ -13,7 +13,6 @@
 # serve to show the default.
 
 import sys
-from typing import List, Dict
 from pathlib import Path
 
 # If extensions (or modules to document with autodoc) are in another directory,
@@ -95,9 +94,9 @@
 # Else, today_fmt is used as the format for a strftime call.
 # today_fmt = '%B %d, %Y'
 
-# List of patterns, relative to source directory, that match files and
+# list of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns: List[str] = []
+exclude_patterns: list[str] = []
 
 # The reST default role (used for this markup: `text`) to use for all
 # documents.
@@ -123,7 +122,7 @@
 # If true, keep warnings as "system message" paragraphs in the built documents.
 # keep_warnings = False
 
-# If true, `todo` and `todoList` produce output, else they produce nothing.
+# If true, `todo` and `todolist` produce output, else they produce nothing.
 todo_include_todos = False
 
 
@@ -232,7 +231,7 @@ def setup(app):
 
 # -- Options for LaTeX output ---------------------------------------------
 
-latex_elements: Dict[str, str] = {
+latex_elements: dict[str, str] = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
 
@@ -246,7 +245,7 @@ def setup(app):
    # 'figure_align': 'htbp',
 }
 
-# Grouping the document tree into LaTeX files. List of tuples
+# Grouping the document tree into LaTeX files. list of tuples
 # (source start file, target name, title,
 # author, documentclass [howto, manual, or own class]).
 latex_documents = [
@@ -277,7 +276,7 @@ def setup(app):
 
 # -- Options for manual page output ---------------------------------------
 
-# One entry per manual page. List of tuples
+# One entry per manual page. list of tuples
 # (source start file, name, description, authors, manual section).
 man_pages = [
     (master_doc, 'biobb_pytorch', u'biobb_pytorch Documentation',
@@ -290,7 +289,7 @@
 
 # -- Options for Texinfo output -------------------------------------------
 
-# Grouping the document tree into Texinfo files. List of tuples
+# Grouping the document tree into Texinfo files. list of tuples
 # (source start file, target name, title, author,
 # dir menu entry, description, category)
 texinfo_documents = [
10 changes: 6 additions & 4 deletions biobb_pytorch/mdae/apply_mdae.py
@@ -2,10 +2,12 @@
 
 """Module containing the ApplyMDAE class and the command line interface."""
 import torch
+import torch.utils.data
 import numpy as np
 import argparse
+from typing import Optional
 import time
-from typing import Optional, Tuple, Dict
+from typing import Optional
 from biobb_common.generic.biobb_object import BiobbObject
 from biobb_common.configuration import settings
 from biobb_common.tools import file_utils as fu
@@ -60,7 +62,7 @@ def __init__(self,
                  input_model_pth_path: str,
                  output_reconstructed_data_npy_path: str,
                  output_latent_space_npy_path: Optional[str] = None,
-                 properties: Optional[Dict] = None, **kwargs) -> None:
+                 properties: Optional[dict] = None, **kwargs) -> None:
         properties = properties or {}
 
         # Call parent class constructor
@@ -131,7 +133,7 @@ def launch(self) -> int:
         self.check_arguments(output_files_created=True, raise_exception=False)
         return 0
 
-    def apply_model(self, dataloader: torch.utils.data.DataLoader) -> Tuple[np.ndarray, np.ndarray]:
+    def apply_model(self, dataloader: torch.utils.data.DataLoader) -> tuple[np.ndarray, np.ndarray]:
         self.model.to(self.model.device)
         start_time: float = time.time()
         fu.log("Applying model:", self.out_log)
@@ -153,7 +155,7 @@ def apply_model(self, dataloader: torch.utils.data.DataLoader) -> np.ndarr
 
 def applyMDAE(input_data_npy_path: str, input_model_pth_path: str,
               output_reconstructed_data_npy_path: str, output_latent_space_npy_path: Optional[str] = None,
-              properties: Optional[Dict] = None, **kwargs) -> int:
+              properties: Optional[dict] = None, **kwargs) -> int:
     """Execute the :class:`ApplyMDAE <mdae.apply_mdae.ApplyMDAE>` class and
     execute the :meth:`launch() <mdae.apply_mdae.ApplyMDAE.launch>` method."""
 
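The new `tuple[np.ndarray, np.ndarray]` return type follows the same PEP 585 rule inside method signatures. For context, a condensed, hypothetical sketch of what an `apply_model`-style eval loop over a `DataLoader` looks like — the project's actual method also logs timing and reshapes outputs, and the `(latent, output)` model contract is assumed here:

import numpy as np
import torch
import torch.utils.data


def apply_model(model: torch.nn.Module,
                dataloader: torch.utils.data.DataLoader) -> tuple[np.ndarray, np.ndarray]:
    """Run a trained autoencoder over a dataloader; return (latent, reconstructed)."""
    model.eval()
    z_batches: list[np.ndarray] = []
    x_hat_batches: list[np.ndarray] = []
    with torch.no_grad():
        for data in dataloader:
            batch = data[0]          # a TensorDataset yields 1-tuples
            z, x_hat = model(batch)  # assumes the model returns (latent, output)
            z_batches.append(z.numpy())
            x_hat_batches.append(x_hat.numpy())
    return np.concatenate(z_batches), np.concatenate(x_hat_batches)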
10 changes: 5 additions & 5 deletions biobb_pytorch/mdae/common.py
@@ -2,7 +2,7 @@
 import numpy as np
 import torch
 from pathlib import Path
-from typing import Callable, List, Optional, Tuple, Union
+from typing import Callable, Optional, Union
 
 
 def ndarray_normalization(ndarray: np.ndarray, max_values: np.ndarray, min_values: np.ndarray) -> np.ndarray:
@@ -69,11 +69,11 @@ def get_optimizer_function(optimizer_function: str) -> Callable:
     raise ValueError(f'Invalid optimizer function: {optimizer_function}')
 
 
-def execute_model(model: torch.nn.Module, dataloader: torch.utils.data.DataLoader, input_dimensions: int, latent_dimensions: int, loss_function: Optional[torch.nn.modules.loss._Loss] = None) -> Tuple[float, np.ndarray, np.ndarray]:
+def execute_model(model: torch.nn.Module, dataloader: torch.utils.data.DataLoader, input_dimensions: int, latent_dimensions: int, loss_function: Optional[torch.nn.modules.loss._Loss] = None) -> tuple[float, np.ndarray, np.ndarray]:
     model.eval()
-    losses: List[float] = []
-    z_list: List[float] = []
-    x_hat_list: List[float] = []
+    losses: list[float] = []
+    z_list: list[float] = []
+    x_hat_list: list[float] = []
     with torch.no_grad():
         for data in dataloader:
             data = data[0].to(model.device)
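One caveat worth keeping in mind with this style: builtin generics such as `list[float]` are only valid at runtime on Python 3.9+; on 3.7/3.8 the same spelling needs postponed evaluation of annotations. A tiny sketch of that compatibility escape hatch — illustrative, not part of the commit:

from __future__ import annotations  # makes list[float] legal in annotations on 3.7/3.8

import numpy as np


def summarize_losses(losses: list[float]) -> tuple[float, float]:
    """Return (mean, min) of a loss history."""
    arr = np.asarray(losses, dtype=float)
    return float(arr.mean()), float(arr.min())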
5 changes: 2 additions & 3 deletions biobb_pytorch/mdae/mdae.py
@@ -1,6 +1,5 @@
 """Module containing the MDAutoEncoder class and the command line interface."""
 import torch
-from typing import List
 
 
 class MDAE(torch.nn.Module):
@@ -16,7 +15,7 @@ def __init__(self, input_dimensions: int, num_layers: int, latent_dimensions: in
         self.leaky_relu: float = leaky_relu
 
         # Encoder
-        encoder: List = []
+        encoder: list = []
         nunits: int = self.input_dimensions
         for _ in range(self.num_layers-1):
             encoder.append(torch.nn.Linear(nunits, nunits - self.delta))
@@ -32,7 +31,7 @@ def __init__(self, input_dimensions: int, num_layers: int, latent_dimensions: in
                torch.nn.Sigmoid())
 
         # Decoder
-        decoder: List = []
+        decoder: list = []
         nunits = self.latent_dimensions
         for _ in range(self.num_layers-1):
             decoder.append(torch.nn.Linear(nunits, nunits + self.delta))
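The bare `list` annotation works, though it could be narrowed to `list[torch.nn.Module]`. A self-contained sketch of the layer-stacking pattern these hunks show, with that narrower type — the helper below is illustrative (`MDAE` builds the stack inside `__init__`), and the LeakyReLU placement and final Sigmoid are assumptions read off the visible context lines:

import torch


def build_encoder(input_dimensions: int, num_layers: int, latent_dimensions: int,
                  delta: int, leaky_relu: float = 0.01) -> torch.nn.Sequential:
    """Stack Linear layers that shrink by `delta` per step toward the latent size."""
    encoder: list[torch.nn.Module] = []
    nunits = input_dimensions
    for _ in range(num_layers - 1):
        encoder.append(torch.nn.Linear(nunits, nunits - delta))
        encoder.append(torch.nn.LeakyReLU(leaky_relu))
        nunits -= delta
    encoder.append(torch.nn.Linear(nunits, latent_dimensions))
    encoder.append(torch.nn.Sigmoid())
    return torch.nn.Sequential(*encoder)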
31 changes: 17 additions & 14 deletions biobb_pytorch/mdae/train_mdae.py
@@ -2,10 +2,14 @@
 
 """Module containing the TrainMDAE class and the command line interface."""
 import torch
+import torch.utils.data
 import numpy as np
 import time
 import argparse
-from typing import Optional, List, Tuple, Dict
+from typing import Optional
+from typing import Optional
+from torch.optim.optimizer import Optimizer
+from torch.optim.adam import Adam
 from biobb_common.generic.biobb_object import BiobbObject
 from biobb_common.configuration import settings
 from biobb_common.tools import file_utils as fu
@@ -87,7 +91,7 @@ def __init__(self, input_train_npy_path: str,
                 input_model_pth_path: Optional[str] = None,
                 output_train_data_npz_path: Optional[str] = None, # npz of train_losses, valid_losses
                 output_performance_npz_path: Optional[str] = None, # npz of evaluate_losses, latent_space, reconstructed_data
-                properties: Optional[Dict] = None, **kwargs) -> None:
+                properties: Optional[dict] = None, **kwargs) -> None:
        properties = properties or {}

        # Call parent class constructor
@@ -177,12 +181,11 @@ def __init__(self, input_train_npy_path: str,
 
        optimizer_str: str = properties.get('optimizer', '')
        try:
-           self.optimizer: torch.optim.Optimizer = get_optimizer_function(optimizer_str)(self.model.parameters(), lr=self.lr)
+           self.optimizer = get_optimizer_function(optimizer_str)(self.model.parameters(), lr=self.lr)
            fu.log(f'Using optimizer: {self.optimizer}', self.out_log)
        except ValueError:
            fu.log(f'Invalid optimizer: {optimizer_str}', self.out_log)
            fu.log('Using default optimizer: Adam', self.out_log)
-           self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)
+           self.optimizer = Adam(self.model.parameters(), lr=self.lr)
 
    @launchlogger
    def launch(self) -> int:
@@ -225,10 +228,10 @@ def launch(self) -> int:
        self.check_arguments(output_files_created=True, raise_exception=False)
        return 0
 
-   def train_model(self) -> Tuple[List[float], List[float], Dict, int]:
+   def train_model(self) -> tuple[list[float], list[float], dict, int]:
        self.model.to(self.model.device)
-       train_losses: List[float] = []
-       validation_losses: List[float] = []
+       train_losses: list[float] = []
+       validation_losses: list[float] = []
        best_valid_loss: float = float('inf') # Initialize best valid loss to infinity
 
        start_time: float = time.time()
@@ -280,16 +283,16 @@ def train_model(self) -> Tuple[List[float], List[float], Dict, int]:
            # Save best model
            if avg_validation_loss < best_valid_loss:
                best_valid_loss = avg_validation_loss
-               best_model: Dict = self.model.state_dict()
+               best_model: dict = self.model.state_dict()
                best_model_epoch: int = epoch_index
 
        fu.log(f"End Training, total time: {format_time((time.time() - start_time))}", self.out_log)
 
        return train_losses, validation_losses, best_model, best_model_epoch
 
-   def training_step(self, dataloader: torch.utils.data.DataLoader, optimizer: torch.optim.Optimizer, loss_function: torch.nn.modules.loss._Loss) -> Tuple[float, float]:
+   def training_step(self, dataloader: torch.utils.data.DataLoader, optimizer: Optimizer, loss_function: torch.nn.modules.loss._Loss) -> tuple[float, float]:
        self.model.train()
-       train_losses: List[float] = []
+       train_losses: list[float] = []
        for data in dataloader:
            data = data[0].to(self.model.device)
            _, output = self.model(data)
@@ -300,7 +303,7 @@ def training_step(self, dataloader: torch.utils.data.DataLoader, optimizer: torc
            train_losses.append(loss.item())
 
        self.model.eval()
-       valid_losses: List[float] = []
+       valid_losses: list[float] = []
        with torch.no_grad():
            for data in dataloader:
                data = data[0].to(self.model.device)
@@ -310,13 +313,13 @@ def training_step(self, dataloader: torch.utils.data.DataLoader, optimizer: torc
 
        return float(np.mean(train_losses)), float(torch.mean(torch.tensor(valid_losses)))
 
-   def evaluate_model(self, dataloader: torch.utils.data.DataLoader, loss_function: torch.nn.modules.loss._Loss) -> Tuple[float, np.ndarray, np.ndarray]:
+   def evaluate_model(self, dataloader: torch.utils.data.DataLoader, loss_function: torch.nn.modules.loss._Loss) -> tuple[float, np.ndarray, np.ndarray]:
        return execute_model(self.model, dataloader, self.input_dimensions, self.latent_dimensions, loss_function)
 
 
 def trainMDAE(input_train_npy_path: str, output_model_pth_path: str, input_model_pth_path: Optional[str] = None,
              output_train_data_npz_path: Optional[str] = None, output_performance_npz_path: Optional[str] = None,
-             properties: Optional[Dict] = None, **kwargs) -> int:
+             properties: Optional[dict] = None, **kwargs) -> int:
    """Execute the :class:`TrainMDAE <mdae.train_mdae.TrainMDAE>` class and
    execute the :meth:`launch() <mdae.train_mdae.TrainMDAE.launch>` method."""
 
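Importing `Optimizer` and `Adam` as classes, rather than reaching through the `torch.optim` attribute in an annotation, is what lets the `try/except` fallback type-check cleanly. A condensed, hypothetical sketch of the same pattern outside the class — `make_optimizer` and its name table are illustrative; the project resolves names via `get_optimizer_function`:

import torch
from torch.optim.adam import Adam
from torch.optim.optimizer import Optimizer


def make_optimizer(name: str, model: torch.nn.Module, lr: float) -> Optimizer:
    """Resolve an optimizer by name; fall back to Adam on unknown names."""
    table: dict[str, type[Optimizer]] = {
        'Adam': torch.optim.Adam,
        'SGD': torch.optim.SGD,
    }
    try:
        return table[name](model.parameters(), lr=lr)
    except KeyError:
        return Adam(model.parameters(), lr=lr)  # mirrors the diff's default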
