clarena.cl_datasets.permuted_notmnist
The submodule in `cl_datasets` for the Permuted NotMNIST dataset.
1r""" 2The submodule in `cl_datasets` for Permuted NotMNIST dataset. 3""" 4 5__all__ = ["PermutedNotMNIST"] 6 7import logging 8from typing import Callable 9 10import torch 11from torch.utils.data import Dataset, random_split 12from torchvision.transforms import transforms 13 14from clarena.cl_datasets import CLPermutedDataset 15from clarena.stl_datasets.raw import NotMNIST 16 17# always get logger for built-in logging in each module 18pylogger = logging.getLogger(__name__) 19 20 21class PermutedNotMNIST(CLPermutedDataset): 22 r"""Permuted NotMNIST dataset. The [NotMNIST dataset](https://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html) is a collection of letters (A-J). Permuted MNIST dataset. This version uses the smaller set, which consists of about 19,000 images of 10 classes, each 28x28 grayscale image.""" 23 24 original_dataset_python_class: type[Dataset] = NotMNIST 25 r"""The original dataset class.""" 26 27 def __init__( 28 self, 29 root: str, 30 num_tasks: int, 31 validation_percentage: float, 32 batch_size: int | dict[int, int] = 1, 33 num_workers: int | dict[int, int] = 0, 34 custom_transforms: ( 35 Callable 36 | transforms.Compose 37 | None 38 | dict[int, Callable | transforms.Compose | None] 39 ) = None, 40 repeat_channels: int | None | dict[int, int | None] = None, 41 to_tensor: bool | dict[int, bool] = True, 42 resize: tuple[int, int] | None | dict[int, tuple[int, int] | None] = None, 43 permutation_mode: str = "first_channel_only", 44 permutation_seeds: dict[int, int] | None = None, 45 ) -> None: 46 r""" 47 **Args:** 48 - **root** (`str`): the root directory where the original NotMNIST data 'NotMNIST/' live. 49 - **num_tasks** (`int`): the maximum number of tasks supported by the CL dataset. This decides the valid task IDs from 1 to `num_tasks`. 50 - **validation_percentage** (`float`): the percentage to randomly split some training data into validation data. 51 - **batch_size** (`int` | `dict[int, int]`): the batch size for train, val, and test dataloaders. 52 If it is a dict, the keys are task IDs and the values are the batch sizes for each task. If it is an `int`, it is the same batch size for all tasks. 53 - **num_workers** (`int` | `dict[int, int]`): the number of workers for dataloaders. 54 If it is a dict, the keys are task IDs and the values are the number of workers for each task. If it is an `int`, it is the same number of workers for all tasks. 55 - **custom_transforms** (`transform` or `transforms.Compose` or `None` or dict of them): the custom transforms to apply ONLY to the TRAIN dataset. Can be a single transform, composed transforms, or no transform. `ToTensor()`, normalization, permute, and so on are not included. 56 If it is a dict, the keys are task IDs and the values are the custom transforms for each task. If it is a single transform or composed transforms, it is applied to all tasks. If it is `None`, no custom transforms are applied. 57 - **repeat_channels** (`int` | `None` | dict of them): the number of channels to repeat for each task. Default is `None`, which means no repeat. 58 If it is a dict, the keys are task IDs and the values are the number of channels to repeat for each task. If it is an `int`, it is the same number of channels to repeat for all tasks. If it is `None`, no repeat is applied. 59 - **to_tensor** (`bool` | `dict[int, bool]`): whether to include the `ToTensor()` transform. Default is `True`. 60 If it is a dict, the keys are task IDs and the values are whether to include the `ToTensor()` transform for each task. 
If it is a single boolean value, it is applied to all tasks. 61 - **resize** (`tuple[int, int]` | `None` or dict of them): the size to resize the images to. Default is `None`, which means no resize. 62 If it is a dict, the keys are task IDs and the values are the sizes to resize for each task. If it is a single tuple of two integers, it is applied to all tasks. If it is `None`, no resize is applied. 63 - **permutation_mode** (`str`): the mode of permutation; one of: 64 1. 'all': permute all pixels. 65 2. 'by_channel': permute channel by channel separately. All channels are applied the same permutation order. 66 3. 'first_channel_only': permute only the first channel. 67 - **permutation_seeds** (`dict[int, int]` | `None`): the dict of seeds for permutation operations used to construct each task. Keys are task IDs and the values are permutation seeds for each task. Default is `None`, which creates a dict of seeds from 0 to `num_tasks`-1. 68 """ 69 70 super().__init__( 71 root=root, 72 num_tasks=num_tasks, 73 batch_size=batch_size, 74 num_workers=num_workers, 75 custom_transforms=custom_transforms, 76 repeat_channels=repeat_channels, 77 to_tensor=to_tensor, 78 resize=resize, 79 permutation_mode=permutation_mode, 80 permutation_seeds=permutation_seeds, 81 ) 82 83 self.validation_percentage: float = validation_percentage 84 r"""The percentage to randomly split some training data into validation data.""" 85 86 def prepare_data(self) -> None: 87 r"""Download the original NotMNIST dataset if haven't.""" 88 89 if self.task_id != 1: 90 return # download all original datasets only at the beginning of first task 91 92 NotMNIST(root=self.root_t, train=True, download=True) 93 NotMNIST(root=self.root_t, train=False, download=True) 94 95 pylogger.debug( 96 "The original NotMNIST dataset has been downloaded to %s.", self.root_t 97 ) 98 99 def train_and_val_dataset(self) -> tuple[Dataset, Dataset]: 100 """Get the training and validation dataset of task `self.task_id`. 101 102 **Returns:** 103 - **train_and_val_dataset** (`tuple[Dataset, Dataset]`): the train and validation dataset of task `self.task_id`. 104 """ 105 dataset_train_and_val = NotMNIST( 106 root=self.root_t, 107 train=True, 108 transform=self.train_and_val_transforms(), 109 target_transform=self.target_transform(), 110 download=False, 111 ) 112 113 return random_split( 114 dataset_train_and_val, 115 lengths=[1 - self.validation_percentage, self.validation_percentage], 116 generator=torch.Generator().manual_seed( 117 42 118 ), # this must be set fixed to make sure the datasets across experiments are the same. Don't handle it to global seed as it might vary across experiments 119 ) 120 121 def test_dataset(self) -> Dataset: 122 r"""Get the test dataset of task `self.task_id`. 123 124 **Returns:** 125 - **test_dataset** (`Dataset`): the test dataset of task `self.task_id`. 126 """ 127 dataset_test = NotMNIST( 128 root=self.root_t, 129 train=False, 130 transform=self.test_transforms(), 131 target_transform=self.target_transform(), 132 download=False, 133 ) 134 135 return dataset_test
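For intuition, the per-task pixel permutation that gives the dataset its name can be sketched as follows. This is a minimal illustration assuming `torch`; the actual transform is implemented in the `CLPermutedDataset` base class, not in this submodule.

```python
import torch

# A fixed seed per task (cf. permutation_seeds) yields a reproducible
# permutation of the 28x28 = 784 pixel positions.
seed = 0  # e.g. the seed assigned to task 1
perm = torch.randperm(28 * 28, generator=torch.Generator().manual_seed(seed))

img = torch.rand(1, 28, 28)  # stand-in for a 1x28x28 grayscale image tensor
# 'first_channel_only' and 'all' coincide for single-channel images:
permuted = img.flatten(start_dim=1)[:, perm].reshape(1, 28, 28)
```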
class PermutedNotMNIST(clarena.cl_datasets.base.CLPermutedDataset):
Permuted NotMNIST dataset. The NotMNIST dataset is a collection of letters (A-J). This version uses the smaller set, which consists of about 19,000 images of 10 classes, each a 28x28 grayscale image.
PermutedNotMNIST(
    root: str,
    num_tasks: int,
    validation_percentage: float,
    batch_size: int | dict[int, int] = 1,
    num_workers: int | dict[int, int] = 0,
    custom_transforms: Callable | torchvision.transforms.transforms.Compose | None | dict[int, Callable | torchvision.transforms.transforms.Compose | None] = None,
    repeat_channels: int | None | dict[int, int | None] = None,
    to_tensor: bool | dict[int, bool] = True,
    resize: tuple[int, int] | None | dict[int, tuple[int, int] | None] = None,
    permutation_mode: str = 'first_channel_only',
    permutation_seeds: dict[int, int] | None = None,
)
**Args:**

- **root** (`str`): the root directory where the original NotMNIST data `NotMNIST/` live.
- **num_tasks** (`int`): the maximum number of tasks supported by the CL dataset. This decides the valid task IDs from 1 to `num_tasks`.
- **validation_percentage** (`float`): the percentage of training data to randomly split into validation data.
- **batch_size** (`int` | `dict[int, int]`): the batch size for the train, val, and test dataloaders. If it is a dict, the keys are task IDs and the values are the batch sizes for each task. If it is an `int`, the same batch size is used for all tasks.
- **num_workers** (`int` | `dict[int, int]`): the number of workers for the dataloaders. If it is a dict, the keys are task IDs and the values are the numbers of workers for each task. If it is an `int`, the same number of workers is used for all tasks.
- **custom_transforms** (`transform` | `transforms.Compose` | `None` | dict of them): the custom transforms to apply ONLY to the TRAIN dataset. Can be a single transform, composed transforms, or no transform. `ToTensor()`, normalization, permutation, and so on are not included. If it is a dict, the keys are task IDs and the values are the custom transforms for each task. If it is a single transform or composed transforms, it is applied to all tasks. If it is `None`, no custom transforms are applied.
- **repeat_channels** (`int` | `None` | dict of them): the number of channels to repeat for each task. Default is `None`, which means no repeat. If it is a dict, the keys are task IDs and the values are the numbers of channels to repeat for each task. If it is an `int`, the same number of repeated channels is used for all tasks.
- **to_tensor** (`bool` | `dict[int, bool]`): whether to include the `ToTensor()` transform. Default is `True`. If it is a dict, the keys are task IDs and the values indicate whether to include `ToTensor()` for each task. If it is a single boolean, it is applied to all tasks.
- **resize** (`tuple[int, int]` | `None` | dict of them): the size to resize the images to. Default is `None`, which means no resize. If it is a dict, the keys are task IDs and the values are the resize sizes for each task. If it is a single tuple of two integers, it is applied to all tasks.
- **permutation_mode** (`str`): the mode of permutation; one of:
  1. 'all': permute all pixels.
  2. 'by_channel': permute channel by channel separately; the same permutation order is applied to all channels.
  3. 'first_channel_only': permute only the first channel.
- **permutation_seeds** (`dict[int, int]` | `None`): the dict of seeds for the permutation operations used to construct each task. Keys are task IDs and values are the permutation seeds for each task. Default is `None`, which creates seeds from 0 to `num_tasks` - 1.
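A hypothetical instantiation tying these arguments together; the values below are illustrative, not recommendations:

```python
from clarena.cl_datasets.permuted_notmnist import PermutedNotMNIST

dataset = PermutedNotMNIST(
    root="data",                # NotMNIST/ will live under this directory
    num_tasks=10,               # valid task IDs are 1..10
    validation_percentage=0.1,  # hold out 10% of training data for validation
    batch_size={t: 64 for t in range(1, 11)},  # per-task dict form; a plain int also works
    num_workers=4,
    permutation_mode="first_channel_only",
    permutation_seeds=None,     # defaults to seeds 0 .. num_tasks - 1
)
```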
original_dataset_python_class: type[torch.utils.data.dataset.Dataset] = <class 'clarena.stl_datasets.raw.notmnist.NotMNIST'>
The original dataset class.
validation_percentage: float
The percentage of training data randomly split into validation data.
def prepare_data(self) -> None:
Download the original NotMNIST dataset if it hasn't been downloaded yet.
def train_and_val_dataset(self) -> tuple[torch.utils.data.dataset.Dataset, torch.utils.data.dataset.Dataset]:
Get the training and validation dataset of task `self.task_id`.

**Returns:**

- **train_and_val_dataset** (`tuple[Dataset, Dataset]`): the train and validation dataset of task `self.task_id`.
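The split relies on `random_split` with fractional lengths and a fixed generator, so the train/val partition is reproducible regardless of the experiment's global seed. A standalone sketch of the same pattern (fractional lengths require a reasonably recent PyTorch):

```python
import torch
from torch.utils.data import TensorDataset, random_split

toy = TensorDataset(torch.arange(100))  # 100 dummy samples
train, val = random_split(
    toy,
    lengths=[0.9, 0.1],  # e.g. validation_percentage = 0.1
    generator=torch.Generator().manual_seed(42),  # fixed; not the global seed
)
assert len(train) == 90 and len(val) == 10
```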
def test_dataset(self) -> torch.utils.data.dataset.Dataset:
Get the test dataset of task `self.task_id`.

**Returns:**

- **test_dataset** (`Dataset`): the test dataset of task `self.task_id`.
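Putting the pieces together, a single-task walkthrough might look like the sketch below. It assumes `task_id` is set and advanced by the surrounding continual-learning loop (handled in the `CLPermutedDataset` base class, not shown in this submodule), and it builds a dataloader manually purely for illustration; in practice the base class derives dataloaders from the `batch_size` and `num_workers` arguments.

```python
from torch.utils.data import DataLoader

from clarena.cl_datasets.permuted_notmnist import PermutedNotMNIST

dataset = PermutedNotMNIST(root="data", num_tasks=5, validation_percentage=0.1)

# Downloads NotMNIST only when task_id == 1; later tasks return immediately.
dataset.prepare_data()

train_set, val_set = dataset.train_and_val_dataset()
test_set = dataset.test_dataset()

train_loader = DataLoader(train_set, batch_size=64, shuffle=True)
```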