clarena.stl_datasets.country211

The submodule in stl_datasets for the Country211 dataset.

r"""
The submodule in `stl_datasets` for the Country211 dataset.
"""

__all__ = ["Country211"]

import logging
from typing import Callable

from torch.utils.data import Dataset
from torchvision.datasets import Country211 as Country211Raw
from torchvision.transforms import transforms

from clarena.stl_datasets.base import STLDatasetFromRaw

# always get logger for built-in logging in each module
pylogger = logging.getLogger(__name__)


class Country211(STLDatasetFromRaw):
    r"""Country211 dataset. The [Country211 dataset](https://github.com/openai/CLIP/blob/main/data/country211.md) is a collection of geolocation pictures of different countries. It consists of 31,650 training, 10,550 validation, and 21,100 test images of 211 countries (classes), each a 256x256 color image."""

    original_dataset_python_class: type[Dataset] = Country211Raw
    r"""The original dataset class."""

    def __init__(
        self,
        root: str,
        batch_size: int = 1,
        num_workers: int = 0,
        custom_transforms: Callable | transforms.Compose | None = None,
        repeat_channels: int | None = None,
        to_tensor: bool = True,
        resize: tuple[int, int] | None = None,
    ) -> None:
        r"""
        **Args:**
        - **root** (`str`): the root directory where the original Country211 data 'Country211/' lives.
        - **batch_size** (`int`): the batch size of the train, validation, and test dataloaders.
        - **num_workers** (`int`): the number of workers for the dataloaders.
        - **custom_transforms** (`transform` or `transforms.Compose` or `None`): the custom transforms to apply ONLY to the training dataset. Can be a single transform, composed transforms, or no transform. `ToTensor()`, normalization, and the like are not included here.
        - **repeat_channels** (`int` | `None`): the number of channels to repeat. Default is None, which means no repeat. If not None, it should be an integer.
        - **to_tensor** (`bool`): whether to include the `ToTensor()` transform. Default is True.
        - **resize** (`tuple[int, int]` | `None`): the size to resize the images to. Default is None, which means no resize. If not None, it should be a tuple of two integers.
        """
        super().__init__(
            root=root,
            batch_size=batch_size,
            num_workers=num_workers,
            custom_transforms=custom_transforms,
            repeat_channels=repeat_channels,
            to_tensor=to_tensor,
            resize=resize,
        )

    def prepare_data(self) -> None:
        r"""Download the original Country211 dataset if it hasn't been downloaded yet."""

        Country211Raw(root=self.root, split="train", download=True)
        Country211Raw(root=self.root, split="valid", download=True)
        Country211Raw(root=self.root, split="test", download=True)

        pylogger.debug(
            "The original Country211 dataset has been downloaded to %s.",
            self.root,
        )

    def train_and_val_dataset(self) -> tuple[Dataset, Dataset]:
        r"""Get the training and validation dataset.

        **Returns:**
        - **train_and_val_dataset** (`tuple[Dataset, Dataset]`): the train and validation dataset.
        """
        dataset_train = Country211Raw(
            root=self.root,
            split="train",
            transform=self.train_and_val_transforms(),
            target_transform=self.target_transform(),
            download=False,
        )

        dataset_val = Country211Raw(
            root=self.root,
            split="valid",
            transform=self.train_and_val_transforms(),
            target_transform=self.target_transform(),
            download=False,
        )

        return dataset_train, dataset_val

    def test_dataset(self) -> Dataset:
        r"""Get the test dataset.

        **Returns:**
        - **test_dataset** (`Dataset`): the test dataset.
        """
        dataset_test = Country211Raw(
            root=self.root,
            split="test",
            transform=self.test_transforms(),
            target_transform=self.target_transform(),
            download=False,
        )

        return dataset_test

class Country211(clarena.stl_datasets.base.STLDatasetFromRaw):

Country211 dataset. The Country211 dataset is a collection of geolocation pictures of different countries. It consists of 31,650 training, 10,550 validation, and 21,100 test images of 211 countries (classes), each a 256x256 color image.

Country211( root: str, batch_size: int = 1, num_workers: int = 0, custom_transforms: Union[Callable, torchvision.transforms.transforms.Compose, NoneType] = None, repeat_channels: int | None = None, to_tensor: bool = True, resize: tuple[int, int] | None = None)

Args:

  • root (str): the root directory where the original Country211 data 'Country211/' lives.
  • batch_size (int): the batch size of the train, validation, and test dataloaders.
  • num_workers (int): the number of workers for the dataloaders.
  • custom_transforms (transform or transforms.Compose or None): the custom transforms to apply ONLY to the training dataset. Can be a single transform, composed transforms, or no transform. ToTensor(), normalization, and the like are not included here. See the usage sketch after this list.
  • repeat_channels (int | None): the number of channels to repeat. Default is None, which means no repeat. If not None, it should be an integer.
  • to_tensor (bool): whether to include the ToTensor() transform. Default is True.
  • resize (tuple[int, int] | None): the size to resize the images to. Default is None, which means no resize. If not None, it should be a tuple of two integers.
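
For illustration, a minimal construction sketch. The root path "data/", the batch size, the worker count, and the augmentation pipeline are placeholder choices, not values required by the class:

from torchvision.transforms import transforms

from clarena.stl_datasets.country211 import Country211

# Placeholder augmentation applied only to the training split; ToTensor()
# and resizing are handled by the to_tensor and resize arguments instead.
train_augmentation = transforms.Compose(
    [
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=0.2),
    ]
)

country211 = Country211(
    root="data/",  # placeholder root directory for the raw data
    batch_size=64,
    num_workers=4,
    custom_transforms=train_augmentation,
    to_tensor=True,
    resize=(224, 224),  # resize the 256x256 images to 224x224
)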
original_dataset_python_class: type[torch.utils.data.dataset.Dataset] = <class 'torchvision.datasets.country211.Country211'>

The original dataset class.

def prepare_data(self) -> None:

Download the original Country211 dataset if it hasn't been downloaded yet.
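
If the download needs to be triggered manually rather than by whatever runner drives the dataset object, calling the method directly is sufficient. This sketch reuses the country211 instance from the constructor example above:

# Downloads the train, valid, and test splits into root if they are missing;
# already-downloaded splits are left untouched.
country211.prepare_data()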

def train_and_val_dataset( self) -> tuple[torch.utils.data.dataset.Dataset, torch.utils.data.dataset.Dataset]:

Get the training and validation dataset.

Returns:

  • train_and_val_dataset (tuple[Dataset, Dataset]): the train and validation dataset.
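
As a sketch of how the returned datasets might be consumed, the snippet below wraps them in plain DataLoaders; reusing the country211 instance, batch size, and worker count from the constructor sketch above is an illustrative choice, not a requirement:

from torch.utils.data import DataLoader

dataset_train, dataset_val = country211.train_and_val_dataset()

train_loader = DataLoader(dataset_train, batch_size=64, shuffle=True, num_workers=4)
val_loader = DataLoader(dataset_val, batch_size=64, num_workers=4)

# With to_tensor=True and resize=(224, 224), each batch is expected to be a
# float tensor of shape [64, 3, 224, 224] together with 64 integer class labels.
images, labels = next(iter(train_loader))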
def test_dataset(self) -> torch.utils.data.dataset.Dataset:

Get the test dataset.

Returns:

  • test_dataset (Dataset): the test dataset.
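
A corresponding sketch for the test split, again reusing the country211 instance and the plain DataLoader wrapping from above:

from torch.utils.data import DataLoader

dataset_test = country211.test_dataset()
print(len(dataset_test))  # 21,100 test images, per the class docstring

test_loader = DataLoader(dataset_test, batch_size=64, num_workers=4)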