Skip to content

Commit

Permalink
added the required layer
Browse files Browse the repository at this point in the history
  • Loading branch information
sushmanthreddy committed Mar 26, 2024
1 parent f2b93e5 commit 295b629
Showing 1 changed file with 0 additions and 169 deletions.
169 changes: 0 additions & 169 deletions torch_dreams/maco/features_visualizations/objectives.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,176 +6,7 @@
from .losses import dot_cossim


# class Objective:
# """
# Use to combine several sub-objectives into one.

# Each sub-objective acts on a layer, possibly on a neuron or a channel (in
# that case we apply a mask on the layer values), or even multiple neurons (in
# that case we have multiple masks). When two sub-objectives are added, we
# optimize all their combinations.

# e.g Objective 1 target the neurons 1 to 10 of the logits l1,...,l10
# Objective 2 target a direction on the first layer d1
# Objective 3 target each of the 5 channels on another layer c1,...,c5

# The resulting Objective will have 10*5*1 combinations. The first input
# will optimize l1+d1+c1 and the last one l10+d1+c5.

# Parameters
# ----------
# model
# Model used for optimization.
# layers
# A list of the layer outputs for each sub-objective.
# masks
# A list of masks that will be applied on the targeted layer for each
# sub-objective.
# funcs
# A list of loss functions for each sub-objective.
# multipliers
# A list of multiplication factors for each sub-objective.
# names
# A list of names for each sub-objective.


# """

# def __init__(self, model: torch.nn.Module, layers: List[torch.Tensor],
# masks: List[Optional[torch.Tensor]],
# funcs: List[Callable[[torch.Tensor], torch.Tensor]],
# multipliers: List[float], names: List[str]) -> None:
# self.model = model
# self.layers = layers
# self.masks = masks
# self.funcs = funcs
# self.multipliers = multipliers
# self.names = names



# def __add__(self, term):
# if not isinstance(term, Objective):
# raise ValueError(f"{term} is not an objective.")
# return Objective(
# self.model,
# layers=self.layers + term.layers,
# masks=self.masks + term.masks,
# funcs=self.funcs + term.funcs,
# multipliers=self.multipliers + term.multipliers,
# names=self.names + term.names
# )


# def __sub__(self, term):
# if not isinstance(term, Objective):
# raise ValueError(f"{term} is not an objective.")
# term.multipliers = [-1.0 * m for m in term.multipliers]
# return self + term


# def __mul__(self, factor: float):
# if not isinstance(factor, (int, float)):
# raise ValueError(f"{factor} is not a number.")
# self.multipliers = [m * factor for m in self.multipliers]
# return self


# def __rmul__(self, factor: float):
# return self * factor



# def compile(self) -> Tuple[torch.nn.Module, Callable, List[str], Tuple]:
# """
# Compile all the sub-objectives into one and return the objects
# for the optimisation process.

# Returns
# -------
# model_reconfigured
# Model with the outputs needed for the optimization.
# objective_function
# Function to call that compute the loss for the objectives.
# names
# Names of each objectives.
# input_shape
# Shape of the input, one sample for each optimization.
# """

# nb_sub_objectives = len(self.multipliers)


# masks = np.array([np.array(m, dtype=object) for m in itertools.product(*self.masks)])
# masks = [torch.stack(list(masks[:, i])) for i in
# range(nb_sub_objectives)]


# names = np.array([' & '.join(names) for names in
# itertools.product(*self.names)])

# multipliers = torch.tensor(self.multipliers)

# def objective_function(model_outputs):
# loss = 0.0
# for output_index in range(0, nb_sub_objectives):
# outputs = model_outputs[output_index]
# loss += self.funcs[output_index](
# outputs, torch.tensor(masks[output_index], dtype=outputs.dtype))
# loss *= multipliers[output_index]
# return loss


# model_reconfigured = self.model

# nb_combinations = masks[0].shape[0]
# input_shape = (nb_combinations, *model_reconfigured.input.shape[1:])

# return model_reconfigured, objective_function, names, input_shape



# @staticmethod
# def layer(model: torch.nn.Module,
# layer: Union[str, int],
# reducer: str = "magnitude",
# multiplier: float = 1.0,
# name: Optional[str] = None):
# """
# Util to build an objective to maximise a layer.

# Parameters
# ----------
# model
# Model used for optimization.
# layer
# Index or name of the targeted layer.
# reducer
# Type of reduction to apply, 'mean' will optimize the mean value of the
# layer, 'magnitude' will optimize the mean of the absolute values.
# multiplier
# Multiplication factor of the objective.
# name
# A name for the objective.

# Returns
# -------
# objective
# An objective ready to be compiled
# """
# layer = get_module_by_name(model, layer)
# layer_shape = layer.shape
# mask = np.ones((1, *layer_shape[1:]))

# if name is None:
# name = [f"Layer#{layer.name}"]

# power = 2.0 if reducer == "magnitude" else 1.0

# def optim_func(model_output, mask):
# return torch.mean((model_output * mask) ** power)

# return Objective(model, [layer], [mask], [optim_func], [multiplier], [name])

import torch
import torch.nn as nn
Expand Down

0 comments on commit 295b629

Please sign in to comment.