clarena.cl_algorithms.random

The submodule in `cl_algorithms` for the Random algorithm.

 1r"""
 2The submodule in `cl_algorithms` for Random algorithm.
 3"""
 4
 5__all__ = ["Random"]
 6
 7import logging
 8from typing import Any
 9
10import torch
11from torch import Tensor
12
13from clarena.backbones import CLBackbone
14from clarena.cl_algorithms import Finetuning
15from clarena.heads import HeadDIL, HeadsCIL, HeadsTIL
16
17# always get logger for built-in logging in each module
18pylogger = logging.getLogger(__name__)
19
20
class Random(Finetuning):
    r"""Random stratified model.

    Pass the training step and simply use the randomly initialized model to predict the test data. This serves as a reference model to compute forgetting rate. See chapter 4 in [HAT (Hard Attention to the Task) paper](http://proceedings.mlr.press/v80/serra18a).


    We implement Random as a subclass of Finetuning algorithm, as Random has the same `forward()`, `validation_step()` and `test_step()` method as `Finetuning` class.
    """

    def __init__(
        self,
        backbone: CLBackbone,
        heads: HeadsTIL | HeadsCIL | HeadDIL,
        non_algorithmic_hparams: dict[str, Any] | None = None,
        **kwargs,
    ) -> None:
        r"""Initialize the Random algorithm with the network. It has no additional hyperparameters.

        **Args:**
        - **backbone** (`CLBackbone`): backbone network.
        - **heads** (`HeadsTIL` | `HeadsCIL` | `HeadDIL`): output heads.
        - **non_algorithmic_hparams** (`dict[str, Any]` | `None`): non-algorithmic hyperparameters that are not related to the algorithm itself are passed to this `LightningModule` object from the config, such as optimizer and learning rate scheduler configurations. They are saved for Lightning APIs from `save_hyperparameters()` method. This is useful for the experiment configuration and reproducibility. Defaults to `None`, which is treated as an empty dict. (The previous `{}` default was a shared mutable object across all calls — the classic Python mutable-default pitfall.)
        - **kwargs**: Reserved for multiple inheritance.

        """
        super().__init__(
            backbone=backbone,
            heads=heads,
            # build a fresh dict per instance instead of sharing one mutable default
            non_algorithmic_hparams=(
                {} if non_algorithmic_hparams is None else non_algorithmic_hparams
            ),
            **kwargs,
        )

        # set manual optimization: Lightning will not call backward()/optimizer.step()
        self.automatic_optimization = False

        # ensure we only freeze/switch to eval once (flipped in training_step)
        self._frozen_applied: bool = False

    def training_step(self, batch: Any) -> dict[str, Tensor]:
        """Pass the training step for current task `self.task_id`.

        **Args:**
        - **batch** (`Any`): a batch of training data.

        **Returns:**
        - **outputs** (`dict[str, Tensor]`): a dictionary contains loss and other metrics from this training step. Keys (`str`) are the metrics names, and values (`Tensor`) are the metrics.
        """
        x, y = batch

        # freeze all parameters and stop BN/Dropout updates once
        if not self._frozen_applied:
            for p in self.parameters():
                p.requires_grad = False
            self.eval()
            self._frozen_applied = True
            pylogger.info(
                "Random: parameters frozen and module set to eval; no training will occur."
            )

        # run forward and metrics without autograd
        with torch.inference_mode():
            logits, activations = self.forward(x, stage="train", task_id=self.task_id)
            loss_cls = self.criterion(logits, y)
            loss = loss_cls
            preds = logits.argmax(dim=1)
            acc = (preds == y).float().mean()

        # note: no optimizer step, by design of Random algorithm.

        return {
            "preds": preds,
            "loss": loss,  # reported for logging only; under manual optimization no backward pass happens here
            "loss_cls": loss_cls,
            "acc": acc,
            "activations": activations,
        }
class Random(Finetuning):
    r"""Random stratified model.

    Skips training entirely: the randomly initialized network is used as-is to
    predict the test data. This provides the reference model used to compute the
    forgetting rate; see chapter 4 of the
    [HAT (Hard Attention to the Task) paper](http://proceedings.mlr.press/v80/serra18a).

    Implemented as a subclass of `Finetuning`, since the `forward()`,
    `validation_step()` and `test_step()` behaviour is identical to that class.
    """

    def __init__(
        self,
        backbone: CLBackbone,
        heads: HeadsTIL | HeadsCIL | HeadDIL,
        non_algorithmic_hparams: dict[str, Any] = {},
        **kwargs,
    ) -> None:
        r"""Construct the Random algorithm around the given network. No extra hyperparameters are introduced.

        **Args:**
        - **backbone** (`CLBackbone`): backbone network.
        - **heads** (`HeadsTIL` | `HeadsCIL` | `HeadDIL`): output heads.
        - **non_algorithmic_hparams** (`dict[str, Any]`): hyperparameters unrelated to the algorithm itself (optimizer and learning-rate-scheduler configuration, etc.), forwarded to this `LightningModule` and recorded via `save_hyperparameters()` for experiment configuration and reproducibility.
        - **kwargs**: reserved for multiple inheritance.
        """
        super().__init__(
            backbone=backbone,
            heads=heads,
            non_algorithmic_hparams=non_algorithmic_hparams,
            **kwargs,
        )

        # Lightning must not run backward()/optimizer.step() for this algorithm.
        self.automatic_optimization = False

        # tracks whether the one-off freeze/eval switch has been applied
        self._frozen_applied: bool = False

    def training_step(self, batch: Any) -> dict[str, Tensor]:
        """Skip actual training for the current task `self.task_id`; only compute metrics.

        **Args:**
        - **batch** (`Any`): a batch of training data.

        **Returns:**
        - **outputs** (`dict[str, Tensor]`): metrics of this step, keyed by metric
          name ('loss', 'loss_cls', 'acc', 'preds', 'activations').
        """
        inputs, targets = batch

        # one-off: freeze every parameter and put BN/Dropout into eval mode
        if not self._frozen_applied:
            for parameter in self.parameters():
                parameter.requires_grad_(False)
            self.eval()
            self._frozen_applied = True
            pylogger.info(
                "Random: parameters frozen and module set to eval; no training will occur."
            )

        # metrics are computed without autograd; nothing is ever optimized here
        with torch.inference_mode():
            logits, activations = self.forward(
                inputs, stage="train", task_id=self.task_id
            )
            loss_cls = self.criterion(logits, targets)
            predictions = logits.argmax(dim=1)
            accuracy = predictions.eq(targets).float().mean()

        # no optimizer step, by design of the Random algorithm
        return {
            "preds": predictions,
            "loss": loss_cls,  # same tensor as 'loss_cls'; logged only, never back-propagated
            "loss_cls": loss_cls,
            "acc": accuracy,
            "activations": activations,
        }

Random stratified model.

Pass the training step and simply use the randomly initialized model to predict the test data. This serves as a reference model to compute forgetting rate. See chapter 4 in HAT (Hard Attention to the Task) paper.

We implement Random as a subclass of the Finetuning algorithm, since Random has the same forward(), validation_step() and test_step() methods as the Finetuning class.

Random( backbone: clarena.backbones.CLBackbone, heads: clarena.heads.HeadsTIL | clarena.heads.HeadsCIL | clarena.heads.HeadDIL, non_algorithmic_hparams: dict[str, typing.Any] = {}, **kwargs)
31    def __init__(
32        self,
33        backbone: CLBackbone,
34        heads: HeadsTIL | HeadsCIL | HeadDIL,
35        non_algorithmic_hparams: dict[str, Any] = {},
36        **kwargs,
37    ) -> None:
38        r"""Initialize the Random algorithm with the network. It has no additional hyperparameters.
39
40        **Args:**
41        - **backbone** (`CLBackbone`): backbone network.
42        - **heads** (`HeadsTIL` | `HeadsCIL` | `HeadDIL`): output heads.
43        - **non_algorithmic_hparams** (`dict[str, Any]`): non-algorithmic hyperparameters that are not related to the algorithm itself are passed to this `LightningModule` object from the config, such as optimizer and learning rate scheduler configurations. They are saved for Lightning APIs from `save_hyperparameters()` method. This is useful for the experiment configuration and reproducibility.
44        - **kwargs**: Reserved for multiple inheritance.
45
46        """
47        super().__init__(
48            backbone=backbone,
49            heads=heads,
50            non_algorithmic_hparams=non_algorithmic_hparams,
51            **kwargs,
52        )
53
54        # set manual optimization
55        self.automatic_optimization = False
56
57        # ensure we only freeze/switch to eval once
58        self._frozen_applied: bool = False

Initialize the Random algorithm with the network. It has no additional hyperparameters.

Args:

  • backbone (CLBackbone): backbone network.
  • heads (HeadsTIL | HeadsCIL | HeadDIL): output heads.
  • non_algorithmic_hparams (dict[str, Any]): non-algorithmic hyperparameters that are not related to the algorithm itself are passed to this LightningModule object from the config, such as optimizer and learning rate scheduler configurations. They are saved for Lightning APIs from save_hyperparameters() method. This is useful for the experiment configuration and reproducibility.
  • kwargs: Reserved for multiple inheritance.
automatic_optimization: bool
290    @property
291    def automatic_optimization(self) -> bool:
292        """Whether Lightning runs the optimization loop for you. If set to ``False`` you are responsible for calling ``.backward()``, ``.step()``, ``.zero_grad()``."""
293        return self._automatic_optimization

If set to False you are responsible for calling .backward(), .step(), .zero_grad().

def training_step(self, batch: Any) -> dict[str, Tensor]:
    """Skip actual training for the current task `self.task_id`; only compute metrics.

    **Args:**
    - **batch** (`Any`): a batch of training data.

    **Returns:**
    - **outputs** (`dict[str, Tensor]`): metrics of this step, keyed by metric name
      ('loss', 'loss_cls', 'acc', 'preds', 'activations').
    """
    inputs, targets = batch

    # one-off: freeze every parameter and put BN/Dropout into eval mode
    if not self._frozen_applied:
        for parameter in self.parameters():
            parameter.requires_grad_(False)
        self.eval()
        self._frozen_applied = True
        pylogger.info(
            "Random: parameters frozen and module set to eval; no training will occur."
        )

    # metrics are computed without autograd; nothing is ever optimized here
    with torch.inference_mode():
        logits, activations = self.forward(inputs, stage="train", task_id=self.task_id)
        loss_cls = self.criterion(logits, targets)
        predictions = logits.argmax(dim=1)
        accuracy = predictions.eq(targets).float().mean()

    # no optimizer step, by design of the Random algorithm
    return {
        "preds": predictions,
        "loss": loss_cls,  # same tensor as 'loss_cls'; logged only, never back-propagated
        "loss_cls": loss_cls,
        "acc": accuracy,
        "activations": activations,
    }

Pass the training step for current task self.task_id.

Args:

  • batch (Any): a batch of training data.

Returns:

  • outputs (dict[str, Tensor]): a dictionary containing the loss and other metrics from this training step. Keys (str) are the metric names, and values (Tensor) are the metrics. It must include the key 'loss', which is the total loss, in the case of automatic optimization, according to the PyTorch Lightning docs.