Commit 9f60a1b
Moved to lightning (#370)
vturrisi authored Dec 4, 2023
1 parent d27c713 commit 9f60a1b
Showing 56 changed files with 93 additions and 117 deletions.
6 changes: 3 additions & 3 deletions docs/source/tutorials/offline_linear_eval.rst
@@ -8,9 +8,9 @@ As for pretraining, we start by importing the required packages:

 .. code-block:: python

     import torch
-    from pytorch_lightning import Trainer
-    from pytorch_lightning.loggers import WandbLogger
-    from pytorch_lightning.callbacks import LearningRateMonitor
+    from lightning.pytorch import Trainer
+    from lightning.pytorch.loggers import WandbLogger
+    from lightning.pytorch.callbacks import LearningRateMonitor
     from torchvision.models import resnet18
     from solo.methods.linear import LinearModel  # imports the linear eval class
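The change in this snippet (and the one below) is purely the package rename: from release 2.0 onward, pytorch-lightning ships inside the unified lightning package, and the same classes live under the lightning.pytorch namespace. A minimal sketch of the new spelling, assuming lightning >= 2.0 is installed:

    # lightning 2.x: same objects, new namespace
    from lightning.pytorch import Trainer
    from lightning.pytorch.callbacks import LearningRateMonitor
    from lightning.pytorch.loggers import WandbLogger

    monitor = LearningRateMonitor(logging_interval="step")
    trainer = Trainer(max_epochs=1, accelerator="auto", devices=1, callbacks=[monitor])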
8 changes: 4 additions & 4 deletions docs/source/tutorials/overview.rst
@@ -25,10 +25,10 @@ We start by importing everything that we will need (we will be relying on Pytorc

 .. code-block:: python

     import torch
-    from pytorch_lightning import Trainer
-    from pytorch_lightning.callbacks import LearningRateMonitor
-    from pytorch_lightning.loggers import WandbLogger
-    from pytorch_lightning.plugins import DDPPlugin
+    from lightning.pytorch import Trainer
+    from lightning.pytorch.callbacks import LearningRateMonitor
+    from lightning.pytorch.loggers import WandbLogger
+    from lightning.pytorch.plugins import DDPPlugin
     from solo.methods import BarlowTwins  # imports the method class
     from solo.utils.checkpointer import Checkpointer
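A caveat on the last renamed import: lightning.pytorch.plugins does not export a DDPPlugin in 2.x — DDP configuration moved to DDPStrategy, which the main scripts below import from lightning.pytorch.strategies.ddp — so this tutorial line likely needs a follow-up fix. The 2.x spelling, as a sketch:

    from lightning.pytorch.strategies import DDPStrategy  # replaces the old DDPPlugin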
26 changes: 4 additions & 22 deletions main_linear.py
@@ -24,11 +24,11 @@
 import hydra
 import torch
 import torch.nn as nn
+from lightning.pytorch import Trainer
+from lightning.pytorch.callbacks import LearningRateMonitor
+from lightning.pytorch.loggers import WandbLogger
+from lightning.pytorch.strategies.ddp import DDPStrategy
 from omegaconf import DictConfig, OmegaConf
-from pytorch_lightning import Trainer
-from pytorch_lightning.callbacks import LearningRateMonitor
-from pytorch_lightning.loggers import WandbLogger
-from pytorch_lightning.strategies.ddp import DDPStrategy
 from timm.data.mixup import Mixup
 from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
@@ -169,7 +169,6 @@ def main(cfg: DictConfig):
     callbacks = []

     if cfg.checkpoint.enabled:
-        # save checkpoint on last epoch only
         ckpt = Checkpointer(
             cfg,
             logdir=os.path.join(cfg.checkpoint.dir, "linear"),
@@ -211,23 +210,6 @@
     )
     trainer = Trainer(**trainer_kwargs)

-    # fix for incompatibility with nvidia-dali and pytorch lightning
-    # with dali 1.15 (this will be fixed on 1.16)
-    # https://github.com/Lightning-AI/lightning/issues/12956
-    try:
-        from pytorch_lightning.loops import FitLoop
-
-        class WorkaroundFitLoop(FitLoop):
-            @property
-            def prefetch_batches(self) -> int:
-                return 1
-
-        trainer.fit_loop = WorkaroundFitLoop(
-            trainer.fit_loop.min_epochs, trainer.fit_loop.max_epochs
-        )
-    except:
-        pass
-
     if cfg.data.format == "dali":
         trainer.fit(model, ckpt_path=ckpt_path, datamodule=dali_datamodule)
     else:
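The block deleted above (and its twin in main_pretrain.py below) monkey-patched FitLoop.prefetch_batches to dodge a DALI 1.15 incompatibility (Lightning-AI/lightning#12956). Dropping it fits the migration: the comment itself notes the fix was due in DALI 1.16, and the loop rework in lightning 2.x removed that customization point, so the try/except would have silently become dead code anyway.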
27 changes: 4 additions & 23 deletions main_pretrain.py
@@ -22,12 +22,11 @@

 import hydra
 import torch
+from lightning.pytorch import Trainer, seed_everything
+from lightning.pytorch.callbacks import LearningRateMonitor
+from lightning.pytorch.loggers.wandb import WandbLogger
+from lightning.pytorch.strategies.ddp import DDPStrategy
 from omegaconf import DictConfig, OmegaConf
-from pytorch_lightning import Trainer, seed_everything
-from pytorch_lightning.callbacks import LearningRateMonitor
-from pytorch_lightning.loggers import WandbLogger
-from pytorch_lightning.strategies.ddp import DDPStrategy
-
 from solo.args.pretrain import parse_cfg
 from solo.data.classification_dataloader import prepare_data as prepare_data_classification
 from solo.data.pretrain_dataloader import (
@@ -177,7 +176,6 @@ def main(cfg: DictConfig):
     callbacks = []

     if cfg.checkpoint.enabled:
-        # save checkpoint on last epoch only
         ckpt = Checkpointer(
             cfg,
             logdir=os.path.join(cfg.checkpoint.dir, cfg.method),
@@ -230,23 +228,6 @@
     )
     trainer = Trainer(**trainer_kwargs)

-    # fix for incompatibility with nvidia-dali and pytorch lightning
-    # with dali 1.15 (this will be fixed on 1.16)
-    # https://github.com/Lightning-AI/lightning/issues/12956
-    try:
-        from pytorch_lightning.loops import FitLoop
-
-        class WorkaroundFitLoop(FitLoop):
-            @property
-            def prefetch_batches(self) -> int:
-                return 1
-
-        trainer.fit_loop = WorkaroundFitLoop(
-            trainer.fit_loop.min_epochs, trainer.fit_loop.max_epochs
-        )
-    except:
-        pass
-
     if cfg.data.format == "dali":
         trainer.fit(model, ckpt_path=ckpt_path, datamodule=dali_datamodule)
     else:
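main_pretrain.py additionally pulls seed_everything from the new namespace. A small sketch of the updated pattern — the Trainer arguments here are illustrative, not the script's actual config:

    from lightning.pytorch import Trainer, seed_everything

    seed_everything(5, workers=True)  # workers=True also seeds dataloader workers
    trainer = Trainer(max_epochs=2, accelerator="auto", devices=1)
    # trainer.fit(model, datamodule=datamodule)  # model/datamodule as built by the script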
2 changes: 1 addition & 1 deletion requirements.txt
@@ -1,7 +1,7 @@
 torch>=1.10.0
 torchvision>=0.11.1
 einops
-pytorch-lightning==2.0.2
+lightning==2.1.2
 torchmetrics>=0.6.0, <0.12.0
 tqdm
 wandb
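The pinned dependency mirrors the import change: the standalone pytorch-lightning distribution gives way to the unified lightning package, which bundles the trainer under lightning.pytorch. A quick post-install sanity check, as a sketch:

    import lightning

    print(lightning.__version__)           # expect 2.1.2, matching requirements.txt
    from lightning.pytorch import Trainer  # should now import cleanly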
2 changes: 1 addition & 1 deletion scripts/pretrain/cifar-multicrop/swav.yaml
@@ -55,4 +55,4 @@ devices: [0]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
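Every config change from here down is the same one-liner: lightning 2.x replaces the bare integer precision: 16 with the explicit string "16-mixed" (fp16 autocast with fp32 master weights; "bf16-mixed", "16-true", and "32-true" are the other common settings). The equivalent Trainer call, sketched in Python with this file's values:

    from lightning.pytorch import Trainer

    # "16-mixed" is the 2.x spelling of classic AMP fp16 training
    trainer = Trainer(accelerator="gpu", devices=[0], strategy="ddp",
                      sync_batchnorm=True, precision="16-mixed")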
2 changes: 1 addition & 1 deletion scripts/pretrain/cifar/barlow.yaml
@@ -50,4 +50,4 @@ devices: [0]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/cifar/byol.yaml
@@ -53,4 +53,4 @@ devices: [0]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/cifar/deepclusterv2.yaml
@@ -53,4 +53,4 @@ devices: [0]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/cifar/dino.yaml
@@ -53,4 +53,4 @@ devices: [0]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/cifar/mae.yaml
@@ -53,4 +53,4 @@ devices: [0]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/cifar/mocov2plus.yaml
@@ -50,4 +50,4 @@ devices: [0]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/cifar/mocov3.yaml
@@ -54,4 +54,4 @@ devices: [0]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/cifar/nnbyol.yaml
@@ -54,4 +54,4 @@ devices: [0]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/cifar/nnclr.yaml
@@ -52,4 +52,4 @@ devices: [0]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/cifar/nnsiam.yaml
@@ -50,4 +50,4 @@ devices: [0]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/cifar/ressl.yaml
@@ -53,4 +53,4 @@ devices: [0]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/cifar/simclr.yaml
@@ -50,4 +50,4 @@ devices: [0]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/cifar/simsiam.yaml
@@ -47,4 +47,4 @@ devices: [0]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/cifar/supcon.yaml
@@ -46,4 +46,4 @@ devices: [0]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/cifar/swav.yaml
@@ -54,4 +54,4 @@ devices: [0]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/cifar/vibcreg.yaml
@@ -74,4 +74,4 @@ devices: [0]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/cifar/vicreg.yaml
@@ -80,4 +80,4 @@ devices: [0]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/cifar/wmse.yaml
@@ -70,4 +70,4 @@ devices: [0]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/custom/byol.yaml
@@ -60,4 +60,4 @@ devices: [0, 1]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/imagenet-100-multicrop/byol.yaml
@@ -53,4 +53,4 @@ devices: [0, 1]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/imagenet-100-multicrop/simclr.yaml
@@ -50,4 +50,4 @@ devices: [0, 1]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/imagenet-100-multicrop/supcon.yaml
@@ -46,4 +46,4 @@ devices: [0, 1]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/imagenet-100/barlow.yaml
@@ -50,4 +50,4 @@ devices: [0, 1]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/imagenet-100/byol.yaml
@@ -53,4 +53,4 @@ devices: [0, 1]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/imagenet-100/deepclusterv2.yaml
@@ -55,4 +55,4 @@ devices: [0, 1]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/imagenet-100/dino.yaml
@@ -54,4 +54,4 @@ devices: [0, 1]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/imagenet-100/dino_vit.yaml
@@ -51,4 +51,4 @@ devices: [0, 1]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/imagenet-100/mae.yaml
@@ -51,4 +51,4 @@ devices: [0, 1]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/imagenet-100/mocov2plus.yaml
@@ -50,4 +50,4 @@ devices: [0, 1]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/imagenet-100/mocov3.yaml
@@ -54,4 +54,4 @@ devices: [0, 1]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/imagenet-100/mocov3_vit.yaml
@@ -50,4 +50,4 @@ devices: [0, 1, 2, 3, 4, 5, 6, 7]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/imagenet-100/nnclr.yaml
@@ -52,4 +52,4 @@ devices: [0, 1]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/imagenet-100/ressl.yaml
@@ -53,4 +53,4 @@ devices: [0, 1]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/imagenet-100/simclr.yaml
@@ -50,4 +50,4 @@ devices: [0, 1]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/imagenet-100/simsiam.yaml
@@ -48,4 +48,4 @@ devices: [0, 1]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/imagenet-100/supcon.yaml
@@ -46,4 +46,4 @@ devices: [0, 1]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/imagenet-100/swav.yaml
@@ -54,4 +54,4 @@ devices: [0, 1]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/imagenet-100/vibcreg.yaml
@@ -53,4 +53,4 @@ devices: [0, 1]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/imagenet-100/vicreg.yaml
@@ -81,4 +81,4 @@ devices: [0, 1]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/imagenet-100/wmse.yaml
@@ -47,4 +47,4 @@ devices: [0, 1]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/imagenet/barlow.yaml
@@ -51,4 +51,4 @@ devices: [0, 1, 2, 3]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
2 changes: 1 addition & 1 deletion scripts/pretrain/imagenet/byol.yaml
@@ -53,5 +53,5 @@ devices: [0, 1]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed
 accumulate_grad_batches: 16
2 changes: 1 addition & 1 deletion scripts/pretrain/imagenet/mae.yaml
@@ -54,4 +54,4 @@ devices: [0, 1]
 sync_batchnorm: True
 accelerator: "gpu"
 strategy: "ddp"
-precision: 16
+precision: 16-mixed