req: shift defaults to parent #338

Merged
merged 5 commits into from
Jul 23, 2024
Changes from all commits
2 changes: 1 addition & 1 deletion _requirements/devel.txt
@@ -3,7 +3,7 @@ virtualenv>=20.10
# pytest>=7.0, <9.0
# testing with own fork with extended cell timeout
# https://github.com/Borda/nbval/archive/refs/heads/timeout-limit.zip
ipython[notebook]>=8.0.0, <8.17.0
ipython[notebook]>=8.0.0, <8.24.0
urllib3 # for ipython
jupytext>=1.10, <1.15 # converting
papermill>=2.3.4, <2.5.0 # rendering
2 changes: 0 additions & 2 deletions course_UvA-DL/01-introduction-to-pytorch/requirements.txt
@@ -1,2 +0,0 @@
matplotlib
lightning>=2.0.0
2 changes: 0 additions & 2 deletions course_UvA-DL/02-activation-functions/requirements.txt
@@ -1,4 +1,2 @@
torchvision
matplotlib
seaborn
lightning>=2.0.0
(file name not shown)
@@ -13,12 +13,12 @@
import urllib.request
from urllib.error import HTTPError

import lightning as L
import matplotlib.pyplot as plt

# %matplotlib inline
import matplotlib_inline.backend_inline
import numpy as np
import pytorch_lightning as pl
import seaborn as sns
import torch
import torch.nn as nn
@@ -33,7 +33,7 @@
sns.set()

# %% [markdown]
# Instead of the `set_seed` function as in Tutorial 3, we can use Lightning's built-in function `L.seed_everything`.
# Instead of the `set_seed` function as in Tutorial 3, we can use Lightning's built-in function `pl.seed_everything`.
# We will reuse the path variables `DATASET_PATH` and `CHECKPOINT_PATH` as in Tutorial 3.
# Adjust the paths if necessary.

@@ -44,7 +44,7 @@
CHECKPOINT_PATH = os.environ.get("PATH_CHECKPOINT", "saved_models/InitOptim/")

# Seed everything
L.seed_everything(42)
pl.seed_everything(42)

# Ensure that all operations are deterministic on GPU (if used) for reproducibility
torch.backends.cudnn.deterministic = True
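(Aside, not part of this diff.) The switch back to `pl.seed_everything` keeps the property the tutorial relies on: re-seeding the global RNGs with the same value reproduces the same random draws. A minimal sketch, assuming only `pytorch_lightning` and `torch` are installed:

```python
import pytorch_lightning as pl
import torch

pl.seed_everything(42)
a = torch.randn(3)

pl.seed_everything(42)  # re-seeding resets the global RNG state
b = torch.randn(3)

assert torch.equal(a, b)  # identical draws, hence reproducible splits and initial weights
```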
(file name not shown)
@@ -1,4 +1,2 @@
torchvision
matplotlib
seaborn
lightning>=2.0.0
18 changes: 9 additions & 9 deletions course_UvA-DL/04-inception-resnet-densenet/notebook.py
@@ -8,11 +8,11 @@
from types import SimpleNamespace
from urllib.error import HTTPError

import lightning as L
import matplotlib
import matplotlib.pyplot as plt
import matplotlib_inline.backend_inline
import numpy as np
import pytorch_lightning as pl
import seaborn as sns
import tabulate
import torch
@@ -46,7 +46,7 @@


# Function for setting the seed
L.seed_everything(42)
pl.seed_everything(42)

# Ensure that all operations are deterministic on GPU (if used) for reproducibility
torch.backends.cudnn.deterministic = True
@@ -136,9 +136,9 @@
# We need to do a little trick because the validation set should not use the augmentation.
train_dataset = CIFAR10(root=DATASET_PATH, train=True, transform=train_transform, download=True)
val_dataset = CIFAR10(root=DATASET_PATH, train=True, transform=test_transform, download=True)
L.seed_everything(42)
pl.seed_everything(42)
train_set, _ = torch.utils.data.random_split(train_dataset, [45000, 5000])
L.seed_everything(42)
pl.seed_everything(42)
_, val_set = torch.utils.data.random_split(val_dataset, [45000, 5000])

# Loading the test set
@@ -192,12 +192,12 @@

# %%
# Setting the seed
L.seed_everything(42)
pl.seed_everything(42)

# %% [markdown]
# Thus, in the future, we don't have to define our own `set_seed` function anymore.
#
# In PyTorch Lightning, we define `L.LightningModule`'s (inheriting from `Module`) that organize our code into 5 main sections:
# In PyTorch Lightning, we define `pl.LightningModule`'s (inheriting from `Module`) that organize our code into 5 main sections:
#
# 1. Initialization (`__init__`), where we create all necessary parameters/models
# 2. Optimizers (`configure_optimizers`) where we create the optimizers, learning rate scheduler, etc.
@@ -214,7 +214,7 @@


# %%
class CIFARModule(L.LightningModule):
class CIFARModule(pl.LightningModule):
def __init__(self, model_name, model_hparams, optimizer_name, optimizer_hparams):
"""CIFARModule.

@@ -350,7 +350,7 @@ def train_model(model_name, save_name=None, **kwargs):
save_name = model_name

# Create a PyTorch Lightning trainer with the generation callback
trainer = L.Trainer(
trainer = pl.Trainer(
default_root_dir=os.path.join(CHECKPOINT_PATH, save_name), # Where to save models
# We run on a single GPU (if possible)
accelerator="auto",
@@ -374,7 +374,7 @@ def train_model(model_name, save_name=None, **kwargs):
# Automatically loads the model with the saved hyperparameters
model = CIFARModule.load_from_checkpoint(pretrained_filename)
else:
L.seed_everything(42) # To be reproducible
pl.seed_everything(42) # To be reproducible
model = CIFARModule(model_name=model_name, **kwargs)
trainer.fit(model, train_loader, val_loader)
model = CIFARModule.load_from_checkpoint(
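(Aside, not part of this diff.) The notebook text above lists the five main sections a `pl.LightningModule` organizes code into; only the first two are visible in the hunk. Below is a minimal, self-contained sketch of such a module. The class name and hyperparameters are made up for illustration, and it assumes the remaining sections are the training, validation, and test steps as in the original tutorial:

```python
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F


class TinyClassifier(pl.LightningModule):
    def __init__(self, num_inputs: int = 3 * 32 * 32, num_classes: int = 10, lr: float = 1e-3):
        # 1. Initialization: create parameters/models and store hyperparameters
        super().__init__()
        self.save_hyperparameters()
        self.model = nn.Linear(num_inputs, num_classes)

    def configure_optimizers(self):
        # 2. Optimizers: create the optimizer (and, optionally, LR schedulers)
        return torch.optim.AdamW(self.parameters(), lr=self.hparams.lr)

    def training_step(self, batch, batch_idx):
        # 3. Training loop: compute and return the loss for one batch
        imgs, labels = batch
        loss = F.cross_entropy(self.model(imgs.flatten(1)), labels)
        self.log("train_loss", loss)
        return loss

    def validation_step(self, batch, batch_idx):
        # 4. Validation loop: log metrics on one validation batch
        imgs, labels = batch
        acc = (self.model(imgs.flatten(1)).argmax(dim=-1) == labels).float().mean()
        self.log("val_acc", acc)

    def test_step(self, batch, batch_idx):
        # 5. Test loop: same as validation, but on the test set
        imgs, labels = batch
        acc = (self.model(imgs.flatten(1)).argmax(dim=-1) == labels).float().mean()
        self.log("test_acc", acc)
```

A `pl.Trainer` can then run `trainer.fit(TinyClassifier(), train_loader, val_loader)` without any hand-written training loop.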
2 changes: 0 additions & 2 deletions course_UvA-DL/04-inception-resnet-densenet/requirements.txt
@@ -1,6 +1,4 @@
torchvision
matplotlib
seaborn
tabulate
lightning>=2.0.0
tensorboard
20 changes: 10 additions & 10 deletions course_UvA-DL/05-transformers-and-MH-attention/MHAttention.py
@@ -22,14 +22,14 @@
from functools import partial
from urllib.error import HTTPError

# PyTorch Lightning
import lightning as L

# Plotting
import matplotlib
import matplotlib.pyplot as plt
import matplotlib_inline.backend_inline
import numpy as np

# PyTorch Lightning
import pytorch_lightning as pl
import seaborn as sns

# PyTorch
@@ -41,7 +41,7 @@

# Torchvision
import torchvision
from lightning.pytorch.callbacks import ModelCheckpoint
from pytorch_lightning.callbacks import ModelCheckpoint
from torchvision import transforms
from torchvision.datasets import CIFAR100
from tqdm.notebook import tqdm
@@ -58,7 +58,7 @@
CHECKPOINT_PATH = os.environ.get("PATH_CHECKPOINT", "saved_models/Transformers/")

# Setting the seed
L.seed_everything(42)
pl.seed_everything(42)

# Ensure that all operations are deterministic on GPU (if used) for reproducibility
torch.backends.cudnn.deterministic = True
@@ -246,7 +246,7 @@ def scaled_dot_product(q, k, v, mask=None):

# %%
seq_len, d_k = 3, 2
L.seed_everything(42)
pl.seed_everything(42)
q = torch.randn(seq_len, d_k)
k = torch.randn(seq_len, d_k)
v = torch.randn(seq_len, d_k)
@@ -748,7 +748,7 @@ def get_lr_factor(self, epoch):


# %%
class TransformerPredictor(L.LightningModule):
class TransformerPredictor(pl.LightningModule):
def __init__(
self,
input_dim,
@@ -965,7 +965,7 @@ def test_step(self, batch, batch_idx):

# %% [markdown]
# Finally, we can create a training function similar to the one we have seen in Tutorial 5 for PyTorch Lightning.
# We create a `L.Trainer` object, running for $N$ epochs, logging in TensorBoard, and saving our best model based on the validation.
# We create a `pl.Trainer` object, running for $N$ epochs, logging in TensorBoard, and saving our best model based on the validation.
# Afterward, we test our models on the test set.
# An additional parameter we pass to the trainer here is `gradient_clip_val`.
# This clips the norm of the gradients for all parameters before taking an optimizer step and prevents the model
@@ -983,7 +983,7 @@ def train_reverse(**kwargs):
# Create a PyTorch Lightning trainer with the generation callback
root_dir = os.path.join(CHECKPOINT_PATH, "ReverseTask")
os.makedirs(root_dir, exist_ok=True)
trainer = L.Trainer(
trainer = pl.Trainer(
default_root_dir=root_dir,
callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")],
accelerator="auto",
@@ -1444,7 +1444,7 @@ def train_anomaly(**kwargs):
# Create a PyTorch Lightning trainer with the generation callback
root_dir = os.path.join(CHECKPOINT_PATH, "SetAnomalyTask")
os.makedirs(root_dir, exist_ok=True)
trainer = L.Trainer(
trainer = pl.Trainer(
default_root_dir=root_dir,
callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")],
accelerator="auto",
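(Aside, not part of this diff.) The markdown above describes creating a `pl.Trainer` that logs to TensorBoard, keeps the best checkpoint by validation accuracy, and clips gradient norms via `gradient_clip_val`. A condensed sketch follows; the path, epoch count, and monitored metric name are assumptions for illustration only:

```python
import os

import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint

root_dir = os.path.join("saved_models/Transformers/", "ReverseTask")  # example path
os.makedirs(root_dir, exist_ok=True)

trainer = pl.Trainer(
    default_root_dir=root_dir,  # TensorBoard logs and checkpoints are written here
    callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")],
    accelerator="auto",
    devices=1,
    max_epochs=10,
    gradient_clip_val=5,  # clip the gradient norm before each optimizer step
)
# trainer.fit(model, train_loader, val_loader)   # train, then evaluate the best checkpoint:
# trainer.test(model, dataloaders=test_loader)
```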
(file name not shown)
@@ -1,4 +1,2 @@
torchvision
matplotlib
seaborn
lightning>=2.0.0
22 changes: 11 additions & 11 deletions course_UvA-DL/06-graph-neural-networks/overview.py
@@ -11,7 +11,7 @@
from urllib.error import HTTPError

# PyTorch Lightning
import lightning as L
import pytorch_lightning as pl

# PyTorch
import torch
@@ -25,7 +25,7 @@
import torch_geometric.nn as geom_nn

# PL callbacks
from lightning.pytorch.callbacks import ModelCheckpoint
from pytorch_lightning.callbacks import ModelCheckpoint
from torch import Tensor

AVAIL_GPUS = min(1, torch.cuda.device_count())
@@ -36,7 +36,7 @@
CHECKPOINT_PATH = os.environ.get("PATH_CHECKPOINT", "saved_models/GNNs/")

# Setting the seed
L.seed_everything(42)
pl.seed_everything(42)

# Ensure that all operations are deterministic on GPU (if used) for reproducibility
torch.backends.cudnn.deterministic = True
@@ -592,7 +592,7 @@ def forward(self, x, *args, **kwargs):


# %%
class NodeLevelGNN(L.LightningModule):
class NodeLevelGNN(pl.LightningModule):
def __init__(self, model_name, **model_kwargs):
super().__init__()
# Saving hyperparameters
@@ -654,13 +654,13 @@ def test_step(self, batch, batch_idx):

# %%
def train_node_classifier(model_name, dataset, **model_kwargs):
L.seed_everything(42)
pl.seed_everything(42)
node_data_loader = geom_data.DataLoader(dataset, batch_size=1)

# Create a PyTorch Lightning trainer
root_dir = os.path.join(CHECKPOINT_PATH, "NodeLevel" + model_name)
os.makedirs(root_dir, exist_ok=True)
trainer = L.Trainer(
trainer = pl.Trainer(
default_root_dir=root_dir,
callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")],
accelerator="auto",
@@ -676,7 +676,7 @@ def train_node_classifier(model_name, dataset, **model_kwargs):
print("Found pretrained model, loading...")
model = NodeLevelGNN.load_from_checkpoint(pretrained_filename)
else:
L.seed_everything()
pl.seed_everything()
model = NodeLevelGNN(
model_name=model_name, c_in=dataset.num_node_features, c_out=dataset.num_classes, **model_kwargs
)
@@ -892,7 +892,7 @@ def forward(self, x, edge_index, batch_idx):


# %%
class GraphLevelGNN(L.LightningModule):
class GraphLevelGNN(pl.LightningModule):
def __init__(self, **model_kwargs):
super().__init__()
# Saving hyperparameters
@@ -941,12 +941,12 @@ def test_step(self, batch, batch_idx):

# %%
def train_graph_classifier(model_name, **model_kwargs):
L.seed_everything(42)
pl.seed_everything(42)

# Create a PyTorch Lightning trainer with the generation callback
root_dir = os.path.join(CHECKPOINT_PATH, "GraphLevel" + model_name)
os.makedirs(root_dir, exist_ok=True)
trainer = L.Trainer(
trainer = pl.Trainer(
default_root_dir=root_dir,
callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")],
accelerator="cuda",
@@ -962,7 +962,7 @@ def train_graph_classifier(model_name, **model_kwargs):
print("Found pretrained model, loading...")
model = GraphLevelGNN.load_from_checkpoint(pretrained_filename)
else:
L.seed_everything(42)
pl.seed_everything(42)
model = GraphLevelGNN(
c_in=tu_dataset.num_node_features,
c_out=1 if tu_dataset.num_classes == 2 else tu_dataset.num_classes,
1 change: 0 additions & 1 deletion course_UvA-DL/06-graph-neural-networks/requirements.txt
@@ -3,4 +3,3 @@ torch-sparse
torch-cluster
torch-spline-conv
torch-geometric>=2.0.0,<2.5.0
lightning>=2.0.0
(file name not shown)
@@ -1,4 +1,3 @@
torchvision
matplotlib
tensorboard
pytorch-lightning>=2.0.0
12 changes: 6 additions & 6 deletions course_UvA-DL/08-deep-autoencoders/notebook.py
@@ -6,18 +6,18 @@
import urllib.request
from urllib.error import HTTPError

import lightning as L
import matplotlib
import matplotlib.pyplot as plt
import matplotlib_inline.backend_inline
import pytorch_lightning as pl
import seaborn as sns
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data
import torchvision
from lightning.pytorch.callbacks import Callback, LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.callbacks import Callback, LearningRateMonitor, ModelCheckpoint
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
from torchvision.datasets import CIFAR10
@@ -38,7 +38,7 @@
CHECKPOINT_PATH = os.environ.get("PATH_CHECKPOINT", "saved_models/tutorial9")

# Setting the seed
L.seed_everything(42)
pl.seed_everything(42)

# Ensure that all operations are deterministic on GPU (if used) for reproducibility
torch.backends.cudnn.deterministic = True
@@ -94,7 +94,7 @@

# Loading the training dataset. We need to split it into a training and validation part
train_dataset = CIFAR10(root=DATASET_PATH, train=True, transform=transform, download=True)
L.seed_everything(42)
pl.seed_everything(42)
train_set, val_set = torch.utils.data.random_split(train_dataset, [45000, 5000])

# Loading the test set
@@ -240,7 +240,7 @@ def forward(self, x):


# %%
class Autoencoder(L.LightningModule):
class Autoencoder(pl.LightningModule):
def __init__(
self,
base_channel_size: int,
@@ -387,7 +387,7 @@ def on_train_epoch_end(self, trainer, pl_module):
# %%
def train_cifar(latent_dim):
# Create a PyTorch Lightning trainer with the generation callback
trainer = L.Trainer(
trainer = pl.Trainer(
default_root_dir=os.path.join(CHECKPOINT_PATH, "cifar10_%i" % latent_dim),
accelerator="auto",
devices=1,
2 changes: 0 additions & 2 deletions course_UvA-DL/08-deep-autoencoders/requirements.txt
@@ -1,5 +1,3 @@
torchvision
matplotlib
seaborn
lightning>=2.0.0
tensorboard