Commit ee74e71
simplify torch imports of Tensor (#169)
* Tensor
* venv
* PT version
* setuptools
* torch-sparse
* 95
1 parent aea79f2 commit ee74e71
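
The common thread across the diffs below is a single import-style refactor: bring `Tensor` into the module namespace once and drop the `torch.` prefix at each use site. A minimal sketch of the pattern (illustrative, not lifted from any one file in this commit):

```python
import torch
from torch import Tensor

# Before: fully qualified at each call site
x_old = torch.Tensor(2, 3)

# After: the class is imported once and referenced directly.
# Both names point to the same class object, so behavior is unchanged.
x_new = Tensor(2, 3)
assert type(x_old) is type(x_new)
```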

File tree: 14 files changed (+41 -32 lines)

Diff for: .actions/assistant.py (+2 -2)

@@ -182,7 +182,7 @@ def _find_meta(folder: str) -> str:
 
     @staticmethod
     def _load_meta(folder: str, strict: bool = False) -> Optional[dict]:
-        """Loading meta data for a particular notebook with given folder path.
+        """Loading meta-data for a particular notebook with given folder path.
 
         Args:
             folder: path to the folder with python script, meta and artefacts
@@ -259,7 +259,7 @@ def _parse_requirements(folder: str) -> Tuple[str, str]:
             for k, v in meta.items()
             if k.startswith(AssistantCLI._META_PIP_KEY)
         }
-        pip_args = []
+        pip_args = ["--extra-index-url https://download.pytorch.org/whl/" + _RUNTIME_VERSIONS.get("DEVICE")]
         for pip_key in meta_pip_args:
             if not isinstance(meta_pip_args[pip_key], (list, tuple, set)):
                 meta_pip_args[pip_key] = [meta_pip_args[pip_key]]
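
For context on the `pip_args` change above: seeding the list with an `--extra-index-url` pointing at the PyTorch wheel index lets every generated install command resolve device-specific wheels (e.g. for `torch-sparse`, mentioned in the commit message). A rough sketch of the resulting behavior, where the `DEVICE` value below is hypothetical and is resolved at runtime in the real script:

```python
# Hypothetical runtime mapping, for illustration only.
_RUNTIME_VERSIONS = {"DEVICE": "cu102"}

pip_args = ["--extra-index-url https://download.pytorch.org/whl/" + _RUNTIME_VERSIONS.get("DEVICE")]
# A generated command would then look roughly like:
#   pip install torch-sparse --extra-index-url https://download.pytorch.org/whl/cu102
print(" ".join(pip_args))
```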

Diff for: .azure/ipynb-publish.yml (+1)

@@ -38,6 +38,7 @@ jobs:
       whereis nvidia
       nvidia-smi
       python --version
+      pip list | grep torch
     displayName: 'Image info & NVIDIA'
 
   - script: |

Diff for: .azure/ipynb-tests.yml (+2 -1)

@@ -9,7 +9,7 @@ jobs:
 
 - job: nbval
   # how long to run the job before automatically cancelling
-  timeoutInMinutes: 55
+  timeoutInMinutes: 95
   # how much time to give 'run always even if cancelled tasks' before stopping them
   cancelTimeoutInMinutes: 2
 
@@ -32,6 +32,7 @@ jobs:
       whereis nvidia
       nvidia-smi
       python --version
+      pip list | grep torch
     displayName: 'Image info & NVIDIA'
 
   - script: |

Diff for: Makefile (+2)

@@ -33,5 +33,7 @@ clean:
 	rm -f ./*-folders.txt
 	rm -f ./*/**/*.ipynb
 	rm -rf ./*/**/.ipynb_checkpoints
+	rm -rf ./*/**/venv
+	rm -rf ./*/**/logs
 	rm -rf ./*/**/lightning_logs
 	rm -f ./*/**/requirements.txt

Diff for: course_UvA-DL/01-introduction-to-pytorch/Introduction_to_PyTorch.py (+10 -9)

@@ -33,6 +33,7 @@
 # %matplotlib inline
 from IPython.display import set_matplotlib_formats
 from matplotlib.colors import to_rgba
+from torch import Tensor
 from tqdm.notebook import tqdm  # Progress bar
 
 set_matplotlib_formats("svg", "pdf")
@@ -82,10 +83,10 @@
 #
 # Let's first start by looking at different ways of creating a tensor.
 # There are many possible options, the most simple one is to call
-# `torch.Tensor` passing the desired shape as input argument:
+# `Tensor` passing the desired shape as input argument:
 
 # %%
-x = torch.Tensor(2, 3, 4)
+x = Tensor(2, 3, 4)
 print(x)
 
 # %% [markdown]
@@ -101,7 +102,7 @@
 
 # %%
 # Create a tensor from a (nested) list
-x = torch.Tensor([[1, 2], [3, 4]])
+x = Tensor([[1, 2], [3, 4]])
 print(x)
 
 # %%
@@ -648,9 +649,9 @@ def __getitem__(self, idx):
 
 # %%
 def visualize_samples(data, label):
-    if isinstance(data, torch.Tensor):
+    if isinstance(data, Tensor):
         data = data.cpu().numpy()
-    if isinstance(label, torch.Tensor):
+    if isinstance(label, Tensor):
         label = label.cpu().numpy()
     data_0 = data[label == 0]
     data_1 = data[label == 1]
@@ -935,9 +936,9 @@ def eval_model(model, data_loader):
 # %%
 @torch.no_grad()  # Decorator, same effect as "with torch.no_grad(): ..." over the whole function.
 def visualize_classification(model, data, label):
-    if isinstance(data, torch.Tensor):
+    if isinstance(data, Tensor):
         data = data.cpu().numpy()
-    if isinstance(label, torch.Tensor):
+    if isinstance(label, Tensor):
         label = label.cpu().numpy()
     data_0 = data[label == 0]
     data_1 = data[label == 1]
@@ -952,8 +953,8 @@ def visualize_classification(model, data, label):
 
     # Let's make use of a lot of operations we have learned above
    model.to(device)
-    c0 = torch.Tensor(to_rgba("C0")).to(device)
-    c1 = torch.Tensor(to_rgba("C1")).to(device)
+    c0 = Tensor(to_rgba("C0")).to(device)
+    c1 = Tensor(to_rgba("C1")).to(device)
     x1 = torch.arange(-0.5, 1.5, step=0.01, device=device)
     x2 = torch.arange(-0.5, 1.5, step=0.01, device=device)
     xx1, xx2 = torch.meshgrid(x1, x2)  # Meshgrid function as in numpy
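
Two details worth keeping in mind when reading these hunks. First, `from torch import Tensor` binds the very same class object as `torch.Tensor`, so the `isinstance` checks behave identically. Second, the `Tensor` constructor is overloaded: integer arguments allocate an uninitialized tensor of that shape, while a (nested) list copies the values. A small sketch:

```python
import torch
from torch import Tensor

assert Tensor is torch.Tensor  # same class object, isinstance checks are unaffected

x = Tensor(2, 3, 4)           # integer args -> shape; contents are uninitialized memory
y = Tensor([[1, 2], [3, 4]])  # nested list -> values are copied, dtype float32

print(x.shape)  # torch.Size([2, 3, 4])
print(y)        # tensor([[1., 2.], [3., 4.]])
```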

Diff for: course_UvA-DL/04-inception-resnet-densenet/Inception_ResNet_DenseNet.py (+1 -1)

@@ -197,7 +197,7 @@
 # %% [markdown]
 # Thus, in the future, we don't have to define our own `set_seed` function anymore.
 #
-# In PyTorch Lightning, we define `pl.LightningModule`'s (inheriting from `torch.nn.Module`) that organize our code into 5 main sections:
+# In PyTorch Lightning, we define `pl.LightningModule`'s (inheriting from `Module`) that organize our code into 5 main sections:
 #
 # 1. Initialization (`__init__`), where we create all necessary parameters/models
 # 2. Optimizers (`configure_optimizers`) where we create the optimizers, learning rate scheduler, etc.

Diff for: course_UvA-DL/06-graph-neural-networks/.meta.yml (-1)

@@ -27,5 +27,4 @@ pip__find-link:
   # - https://pytorch-geometric.com/whl/torch-1.8.0+cu101.html
   - https://pytorch-geometric.com/whl/torch-%(TORCH_MAJOR_DOT_MINOR)s.0+%(DEVICE)s.html
 accelerator:
-  - CPU
   - GPU

Diff for: course_UvA-DL/06-graph-neural-networks/GNN_overview.py (+8 -7)

@@ -26,6 +26,7 @@
 
 # PL callbacks
 from pytorch_lightning.callbacks import ModelCheckpoint
+from torch import Tensor
 
 AVAIL_GPUS = min(1, torch.cuda.device_count())
 BATCH_SIZE = 256 if AVAIL_GPUS else 64
@@ -183,7 +184,7 @@ def forward(self, node_feats, adj_matrix):
 
 # %%
 node_feats = torch.arange(8, dtype=torch.float32).view(1, 4, 2)
-adj_matrix = torch.Tensor([[[1, 1, 0, 0], [1, 1, 1, 1], [0, 1, 1, 1], [0, 1, 1, 1]]])
+adj_matrix = Tensor([[[1, 1, 0, 0], [1, 1, 1, 1], [0, 1, 1, 1], [0, 1, 1, 1]]])
 
 print("Node features:\n", node_feats)
 print("\nAdjacency matrix:\n", adj_matrix)
@@ -195,8 +196,8 @@ def forward(self, node_feats, adj_matrix):
 
 # %%
 layer = GCNLayer(c_in=2, c_out=2)
-layer.projection.weight.data = torch.Tensor([[1.0, 0.0], [0.0, 1.0]])
-layer.projection.bias.data = torch.Tensor([0.0, 0.0])
+layer.projection.weight.data = Tensor([[1.0, 0.0], [0.0, 1.0]])
+layer.projection.bias.data = Tensor([0.0, 0.0])
 
 with torch.no_grad():
     out_feats = layer(node_feats, adj_matrix)
@@ -308,7 +309,7 @@ def __init__(self, c_in, c_out, num_heads=1, concat_heads=True, alpha=0.2):
 
         # Sub-modules and parameters needed in the layer
         self.projection = nn.Linear(c_in, c_out * num_heads)
-        self.a = nn.Parameter(torch.Tensor(num_heads, 2 * c_out))  # One per head
+        self.a = nn.Parameter(Tensor(num_heads, 2 * c_out))  # One per head
         self.leakyrelu = nn.LeakyReLU(alpha)
 
         # Initialization from the original implementation
@@ -376,9 +377,9 @@ def forward(self, node_feats, adj_matrix, print_attn_probs=False):
 
 # %%
 layer = GATLayer(2, 2, num_heads=2)
-layer.projection.weight.data = torch.Tensor([[1.0, 0.0], [0.0, 1.0]])
-layer.projection.bias.data = torch.Tensor([0.0, 0.0])
-layer.a.data = torch.Tensor([[-0.2, 0.3], [0.1, -0.1]])
+layer.projection.weight.data = Tensor([[1.0, 0.0], [0.0, 1.0]])
+layer.projection.bias.data = Tensor([0.0, 0.0])
+layer.a.data = Tensor([[-0.2, 0.3], [0.1, -0.1]])
 
 with torch.no_grad():
     out_feats = layer(node_feats, adj_matrix, print_attn_probs=True)
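
A subtlety in the `GATLayer` hunk: `nn.Parameter(Tensor(num_heads, 2 * c_out))` allocates uninitialized memory, which is only safe because the layer overwrites it immediately afterwards (the "Initialization from the original implementation" context line). A minimal sketch of that idiom, with hypothetical sizes and an illustrative Xavier init:

```python
import torch
from torch import Tensor, nn

num_heads, c_out = 2, 2  # hypothetical sizes matching the example above

a = nn.Parameter(Tensor(num_heads, 2 * c_out))  # uninitialized allocation
nn.init.xavier_uniform_(a.data, gain=1.414)     # an explicit init must follow
print(a)
```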

Diff for: course_UvA-DL/09-normalizing-flows/NF_image_modeling.py (+4 -3)

@@ -25,6 +25,7 @@
 from IPython.display import HTML, display, set_matplotlib_formats
 from matplotlib.colors import to_rgb
 from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
+from torch import Tensor
 from torchvision import transforms
 from torchvision.datasets import MNIST
 from tqdm.notebook import tqdm
@@ -117,8 +118,8 @@ def discretize(sample):
 # %%
 def show_imgs(imgs, title=None, row_size=4):
     # Form a grid of pictures (we use max. 8 columns)
-    num_imgs = imgs.shape[0] if isinstance(imgs, torch.Tensor) else len(imgs)
-    is_int = imgs.dtype == torch.int32 if isinstance(imgs, torch.Tensor) else imgs[0].dtype == torch.int32
+    num_imgs = imgs.shape[0] if isinstance(imgs, Tensor) else len(imgs)
+    is_int = imgs.dtype == torch.int32 if isinstance(imgs, Tensor) else imgs[0].dtype == torch.int32
     nrow = min(num_imgs, row_size)
     ncol = int(math.ceil(num_imgs / nrow))
     imgs = torchvision.utils.make_grid(imgs, nrow=nrow, pad_value=128 if is_int else 0.5)
@@ -1317,7 +1318,7 @@ def interpolate(model, img1, img2, num_steps=8):
 
 
 # %%
-def visualize_dequant_distribution(model: ImageFlow, imgs: torch.Tensor, title: str = None):
+def visualize_dequant_distribution(model: ImageFlow, imgs: Tensor, title: str = None):
     """
     Args:
         model: The flow of which we want to visualize the dequantization distribution

Diff for: course_UvA-DL/10-autoregressive-image-modeling/Autoregressive_Image_Modeling.py (+4 -3)

@@ -50,6 +50,7 @@
 from IPython.display import set_matplotlib_formats
 from matplotlib.colors import to_rgb
 from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
+from torch import Tensor
 from torchvision import transforms
 from torchvision.datasets import MNIST
 from tqdm.notebook import tqdm
@@ -133,7 +134,7 @@ def discretize(sample):
 
 # %%
 def show_imgs(imgs):
-    num_imgs = imgs.shape[0] if isinstance(imgs, torch.Tensor) else len(imgs)
+    num_imgs = imgs.shape[0] if isinstance(imgs, Tensor) else len(imgs)
     nrow = min(num_imgs, 4)
     ncol = int(math.ceil(num_imgs / nrow))
     imgs = torchvision.utils.make_grid(imgs, nrow=nrow, pad_value=128)
@@ -910,8 +911,8 @@ def autocomplete_image(img):
 # similar likelihoods. We can visualize a discrete logistic below:
 
 # %%
-mu = torch.Tensor([128])
-sigma = torch.Tensor([2.0])
+mu = Tensor([128])
+sigma = Tensor([2.0])
 
 
 def discrete_logistic(x, mu, sigma):
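
The hunk ends at the `discrete_logistic` definition, whose body the diff does not show. For orientation: a discretized logistic typically assigns each integer bin the CDF mass between its half-integer edges, with the logistic CDF being a sigmoid. A sketch under that assumption (not necessarily the notebook's verbatim body):

```python
import torch
from torch import Tensor


def discrete_logistic(x: Tensor, mu: Tensor, sigma: Tensor) -> Tensor:
    # Mass of bin x: CDF(x + 0.5) - CDF(x - 0.5), where the logistic CDF is a sigmoid
    return torch.sigmoid((x + 0.5 - mu) / sigma) - torch.sigmoid((x - 0.5 - mu) / sigma)


mu, sigma = Tensor([128]), Tensor([2.0])
x = torch.arange(120.0, 137.0)
print(discrete_logistic(x, mu, sigma))
```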

Diff for: course_UvA-DL/11-vision-transformer/Vision_Transformer.py (+1 -1)

@@ -155,7 +155,7 @@
 def img_to_patch(x, patch_size, flatten_channels=True):
     """
     Inputs:
-        x - torch.Tensor representing the image of shape [B, C, H, W]
+        x - Tensor representing the image of shape [B, C, H, W]
         patch_size - Number of pixels per dimension of the patches (integer)
         flatten_channels - If True, the patches will be returned in a flattened format
                            as a feature vector instead of a image grid.

Diff for: course_UvA-DL/13-contrastive-learning/.meta.yml (+1 -1)

@@ -21,5 +21,5 @@ requirements:
   - matplotlib
   - seaborn
 accelerator:
-  - GPU
   - CPU
+  - GPU

Diff for: lightning_examples/barlow-twins/barlow_twins.py (+2 -1)

@@ -17,6 +17,7 @@
 import torchvision.transforms.functional as VisionF
 from pytorch_lightning import Callback, LightningModule, Trainer
 from pytorch_lightning.callbacks import ModelCheckpoint
+from torch import Tensor
 from torch.utils.data import DataLoader
 from torchmetrics.functional import accuracy
 from torchvision.datasets import CIFAR10
@@ -334,7 +335,7 @@ def on_fit_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
 
     def extract_online_finetuning_view(
         self, batch: Sequence, device: Union[str, torch.device]
-    ) -> Tuple[torch.Tensor, torch.Tensor]:
+    ) -> Tuple[Tensor, Tensor]:
         (_, _, finetune_view), y = batch
         finetune_view = finetune_view.to(device)
         y = y.to(device)

Diff for: requirements/default.txt (+3 -2)

@@ -1,4 +1,5 @@
+setuptools==59.5.0
 ipython[notebook]
+torch>=1.8
 pytorch-lightning>=1.4
-torchmetrics>=0.6
-torch>=1.6, <1.9
+torchmetrics>=0.7
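
A quick in-notebook check that the pinned stack resolved as intended, mirroring the `pip list | grep torch` step added to both Azure jobs; a minimal sketch:

```python
import pytorch_lightning as pl
import torch
import torchmetrics

# Expect torch >= 1.8, pytorch-lightning >= 1.4, torchmetrics >= 0.7 per requirements/default.txt
print("torch:", torch.__version__)
print("pytorch-lightning:", pl.__version__)
print("torchmetrics:", torchmetrics.__version__)
```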

0 commit comments