clarena.cul_algorithms.amnesiac_hat_unlearn

The submodule in `cul_algorithms` for the AmnesiacHAT unlearning algorithm.

  1r"""
  2The submoduule in `cul_algorithms` for AmnesiacHAT unlearning algorithm.
  3"""
  4
  5__all__ = ["AmnesiacHATUnlearn"]
  6
  7import logging
  8
  9import torch
 10
 11from clarena.cl_algorithms import HAT
 12from clarena.cul_algorithms import CULAlgorithm
 13
 14# always get logger for built-in logging in each module
 15pylogger = logging.getLogger(__name__)
 16
 17
 18class AmnesiacHATUnlearn(CULAlgorithm):
 19    r"""The base class of the AmnesiacHAT unlearning algorithm."""
 20
 21    def __init__(self, model: HAT) -> None:
 22        r"""Initialize the unlearning algorithm with the continual learning model.
 23
 24        **Args:**
 25        - **model** (`Independent`): the continual learning model (`CLAlgorithm` object which already contains the backbone and heads). It must be `HAT` algorithm.
 26        """
 27        super().__init__(model=model)
 28
 29    def delete_update(self, unlearning_task_id: str) -> None:
 30        r"""Delete the update of the specified unlearning task.
 31        **Args:**
 32        - **unlearning_task_id** (`str`): the ID of the unlearning task to delete the update.
 33        """
 34        if unlearning_task_id not in self.model.parameters_task_update:
 35            raise ValueError(
 36                f"Unlearning task ID {unlearning_task_id} is not in the model's parameters_task_update."
 37            )
 38        # delete the update of the specified unlearning task
 39        del self.model.parameters_task_update[unlearning_task_id]
 40
 41    def compensate_layer_if_first_task(
 42        self,
 43        layer_name: str,
 44        unlearning_task_id: str,
 45        if_first_task_layer: tuple[bool, str],
 46        next_masked_task_layer: tuple[bool, str],
 47    ) -> None:
 48        r"""Compensate if the first task."""
 49
 50        # compensate the layer if it is the first task layer
 51
 52        for l in range(
 53            len(self.model.backbone.masks[f"{unlearning_task_id}"][layer_name])
 54        ):
 55            # if the layer is the first task layer, then we need to compensate it
 56            if if_first_task_layer[l] and next_masked_task_layer[l] is None:
 57                self.model.parameters_task_update[layer_name] *= (
 58                    self.model.backbone.masks[f"{next_masked_task_layer[l]}"][
 59                        layer_name
 60                    ][l]
 61                    / self.model.adjustment_intensity
 62                )
 63
 64    def unlearn(self) -> None:
 65        r"""Unlearn the requested unlearning tasks in current task `self.task_id`."""
 66
 67        current_state_dict = self.model.backbone.state_dict()
 68
 69        # substract the update from the model
 70        for unlearning_task_id in self.unlearning_task_ids:
 71
 72            # substract the update from the model
 73            for layer_name, _ in current_state_dict.items():
 74
 75                current_state_dict[layer_name] -= self.model.parameters_task_update[
 76                    f"{unlearning_task_id}"
 77                ][layer_name]
 78
 79                self.model.backbone.masks[f"{unlearning_task_id}"][layer_name] = 0
 80
 81                # compensate
 82                compensation = torch.zeros_like(current_state_dict[layer_name])
 83
 84                # decide if
 85                if_first_task_layer = self.model.if_first_task_layer(
 86                    layer_name,
 87                    unlearning_task_id,
 88                    self.model.processed_task_ids,
 89                    self.model.backbone.masks,
 90                )
 91                next_masked_task_layer = self.model.next_masked_task_layer(
 92                    layer_name,
 93                    unlearning_task_id,
 94                    self.model.processed_task_ids,
 95                    self.model.backbone.masks,
 96                )
 97
 98                # delete the update of the unlearning task
 99                self.delete_update(unlearning_task_id)
100
101                # compensate if the first task
102                self.compensate_layer_if_first_task(
103                    layer_name,
104                    unlearning_task_id,
105                    if_first_task_layer,
106                    next_masked_task_layer,
107                )
108
109            # update the model's state dict
110            self.model.construct_parameters_from_updates()
111
112        # for unlearning_task_id in self.unlearning_task_ids:
113        #     self.model.backbones[f"{unlearning_task_id}"].load_state_dict(
114        #         self.model.original_backbone_state_dict
115        #     )
116
117        # substract the update from the model
118        # self.model.state_dict = self.
class AmnesiacHATUnlearn(CULAlgorithm):
    r"""AmnesiacHAT unlearning algorithm.

    Subtracts each requested task's recorded parameter update from a `HAT`
    continual learning model, zeroes that task's masks, and compensates
    layers where the unlearned task was the first one to mask units.
    """

    def __init__(self, model: HAT) -> None:
        r"""Initialize the unlearning algorithm with the continual learning model.

        **Args:**
        - **model** (`HAT`): the continual learning model (`CLAlgorithm` object which already contains the backbone and heads). It must be `HAT` algorithm.
        """
        super().__init__(model=model)

    def delete_update(self, unlearning_task_id: str) -> None:
        r"""Delete the update of the specified unlearning task.

        **Args:**
        - **unlearning_task_id** (`str`): the ID of the unlearning task to delete the update.

        **Raises:**
        - **ValueError**: if no update is recorded for `unlearning_task_id`.
        """
        if unlearning_task_id not in self.model.parameters_task_update:
            raise ValueError(
                f"Unlearning task ID {unlearning_task_id} is not in the model's parameters_task_update."
            )
        # drop this task's recorded parameter update
        del self.model.parameters_task_update[unlearning_task_id]

    def compensate_layer_if_first_task(
        self,
        layer_name: str,
        unlearning_task_id: str,
        if_first_task_layer: tuple[bool, str],
        next_masked_task_layer: tuple[bool, str],
    ) -> None:
        r"""Compensate a layer for units the unlearned task was the first to mask.

        **Args:**
        - **layer_name** (`str`): name of the layer to compensate.
        - **unlearning_task_id** (`str`): ID of the task being unlearned.
        - **if_first_task_layer**: per-unit flags; `True` where the unlearned task was the first task to mask the unit.
        - **next_masked_task_layer**: per-unit ID of the next task masking the unit, or `None` if no later task masks it.
        """
        num_units = len(
            self.model.backbone.masks[f"{unlearning_task_id}"][layer_name]
        )
        for unit in range(num_units):
            # A unit can only be compensated from the mask of a later task;
            # skip units no later task masks. (The original condition was
            # `is None`, which then formatted None into the mask key and
            # crashed with a missing-key lookup.)
            if if_first_task_layer[unit] and next_masked_task_layer[unit] is not None:
                # NOTE(review): parameters_task_update is indexed by layer
                # name here but by task ID in unlearn() — confirm the
                # intended layout of this mapping.
                self.model.parameters_task_update[layer_name] *= (
                    self.model.backbone.masks[f"{next_masked_task_layer[unit]}"][
                        layer_name
                    ][unit]
                    / self.model.adjustment_intensity
                )

    def unlearn(self) -> None:
        r"""Unlearn the requested unlearning tasks in current task `self.task_id`."""
        current_state_dict = self.model.backbone.state_dict()

        for unlearning_task_id in self.unlearning_task_ids:
            for layer_name in current_state_dict:
                # subtract this task's recorded update from the layer parameters
                current_state_dict[layer_name] -= self.model.parameters_task_update[
                    f"{unlearning_task_id}"
                ][layer_name]

                # the unlearned task no longer masks this layer
                # NOTE(review): replaces the mask tensor with scalar 0 —
                # confirm downstream code handles/broadcasts this.
                self.model.backbone.masks[f"{unlearning_task_id}"][layer_name] = 0

                # per-unit: was this task the first to mask, and which task masks next
                if_first_task_layer = self.model.if_first_task_layer(
                    layer_name,
                    unlearning_task_id,
                    self.model.processed_task_ids,
                    self.model.backbone.masks,
                )
                next_masked_task_layer = self.model.next_masked_task_layer(
                    layer_name,
                    unlearning_task_id,
                    self.model.processed_task_ids,
                    self.model.backbone.masks,
                )

                # compensate if the unlearned task was the first to mask units here
                self.compensate_layer_if_first_task(
                    layer_name,
                    unlearning_task_id,
                    if_first_task_layer,
                    next_masked_task_layer,
                )

            # Delete the task's update only AFTER every layer has consumed it;
            # deleting inside the layer loop (as the original did) raised
            # ValueError on the second layer because the entry was gone.
            self.delete_update(unlearning_task_id)

            # rebuild model parameters from the remaining per-task updates
            self.model.construct_parameters_from_updates()

The base class of the AmnesiacHAT unlearning algorithm.

AmnesiacHATUnlearn(model: clarena.cl_algorithms.hat.HAT)
def __init__(self, model: HAT) -> None:
    r"""Initialize the unlearning algorithm with the continual learning model.

    **Args:**
    - **model** (`HAT`): the continual learning model (`CLAlgorithm` object which already contains the backbone and heads). It must be `HAT` algorithm.
    """
    # delegate storage of the model to the CULAlgorithm base class
    super().__init__(model=model)

Initialize the unlearning algorithm with the continual learning model.

Args:

  • model (HAT): the continual learning model (CLAlgorithm object which already contains the backbone and heads). It must be HAT algorithm.
def delete_update(self, unlearning_task_id: str) -> None:
    r"""Remove the stored parameter update of one unlearning task.

    **Args:**
    - **unlearning_task_id** (`str`): the ID of the unlearning task to delete the update.

    **Raises:**
    - **ValueError**: if no update is recorded for `unlearning_task_id`.
    """
    task_updates = self.model.parameters_task_update
    if unlearning_task_id in task_updates:
        # forget this task's recorded parameter update
        del task_updates[unlearning_task_id]
    else:
        raise ValueError(
            f"Unlearning task ID {unlearning_task_id} is not in the model's parameters_task_update."
        )

Delete the update of the specified unlearning task. Args:

  • unlearning_task_id (str): the ID of the unlearning task to delete the update.
def compensate_layer_if_first_task(
    self,
    layer_name: str,
    unlearning_task_id: str,
    if_first_task_layer: tuple[bool, str],
    next_masked_task_layer: tuple[bool, str],
) -> None:
    r"""Compensate a layer for units the unlearned task was the first to mask.

    **Args:**
    - **layer_name** (`str`): name of the layer to compensate.
    - **unlearning_task_id** (`str`): ID of the task being unlearned.
    - **if_first_task_layer**: per-unit flags; `True` where the unlearned task was the first task to mask the unit.
    - **next_masked_task_layer**: per-unit ID of the next task masking the unit, or `None` if no later task masks it.
    """
    num_units = len(self.model.backbone.masks[f"{unlearning_task_id}"][layer_name])
    for unit in range(num_units):
        # Compensate only when some later task also masks the unit: the
        # compensation scales by that task's mask. (The original tested
        # `is None` and then indexed masks["None"], which cannot succeed —
        # the key would never exist.)
        if if_first_task_layer[unit] and next_masked_task_layer[unit] is not None:
            # NOTE(review): parameters_task_update is indexed by layer name
            # here but by task ID in unlearn() — confirm the intended layout.
            self.model.parameters_task_update[layer_name] *= (
                self.model.backbone.masks[f"{next_masked_task_layer[unit]}"][
                    layer_name
                ][unit]
                / self.model.adjustment_intensity
            )

Compensate if the first task.

def unlearn(self) -> None:
    r"""Unlearn the requested unlearning tasks in current task `self.task_id`."""
    current_state_dict = self.model.backbone.state_dict()

    for unlearning_task_id in self.unlearning_task_ids:
        for layer_name in current_state_dict:
            # subtract this task's recorded update from the layer parameters
            current_state_dict[layer_name] -= self.model.parameters_task_update[
                f"{unlearning_task_id}"
            ][layer_name]

            # the unlearned task no longer masks this layer
            # NOTE(review): replaces the mask tensor with scalar 0 — confirm
            # downstream code handles/broadcasts this.
            self.model.backbone.masks[f"{unlearning_task_id}"][layer_name] = 0

            # per-unit: was this task the first to mask, and which task masks next
            if_first_task_layer = self.model.if_first_task_layer(
                layer_name,
                unlearning_task_id,
                self.model.processed_task_ids,
                self.model.backbone.masks,
            )
            next_masked_task_layer = self.model.next_masked_task_layer(
                layer_name,
                unlearning_task_id,
                self.model.processed_task_ids,
                self.model.backbone.masks,
            )

            # compensate if the unlearned task was the first to mask units here
            self.compensate_layer_if_first_task(
                layer_name,
                unlearning_task_id,
                if_first_task_layer,
                next_masked_task_layer,
            )

        # Delete the task's update only AFTER every layer has consumed it.
        # (The original called delete_update inside the layer loop, so the
        # second layer's lookup raised ValueError — the entry was gone.)
        self.delete_update(unlearning_task_id)

        # rebuild model parameters from the remaining per-task updates
        self.model.construct_parameters_from_updates()

Unlearn the requested unlearning tasks in current task self.task_id.