# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Explainer with modified ReLU."""

import mindspore.nn as nn
import mindspore.ops.operations as op
from mindspore.explainer._utils import (
    deprecated_error,
    unify_inputs,
    unify_targets,
)

from .backprop_utils import GradNet, get_bp_weights
from .gradient import Gradient


class ModifiedReLU(Gradient):
    """Base class for modified ReLU explanations."""

    def __init__(self, network, use_relu_backprop=False):
        super(ModifiedReLU, self).__init__(network)
        self.use_relu_backprop = use_relu_backprop
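        # Register the modified backward rule on every nn.ReLU cell, then wrap
        # the hooked network in a gradient network. GradNet is assumed (from
        # its use in __call__) to return gradients of the outputs w.r.t. the
        # inputs, seeded by the `weights` tensor built there.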
        self._hook_relu_backward()
        self._grad_net = GradNet(self._backward_model)

    def __call__(self, inputs, targets):
        """
        Call function for `ModifiedReLU`, inherited by `Deconvolution` and `GuidedBackprop`.

        Args:
            inputs (Tensor): The input data to be explained, a 4D tensor of shape :math:`(N, C, H, W)`.
            targets (Tensor, int): The label of interest. It should be a 1D or 0D tensor, or an integer.
                If it is a 1D tensor, its length should equal the number of samples in `inputs`.

        Returns:
            Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`, saliency maps.

        Raises:
            TypeError: Raised for any argument type problem.
            ValueError: Raised for any argument value problem.

        Supported Platforms:
            ``Ascend`` ``GPU``
        """

        self._verify_data(inputs, targets)
        inputs = unify_inputs(inputs)
        targets = unify_targets(targets)

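        # `weights` is the backprop seed for the chosen targets; get_bp_weights
        # is assumed to build a one-hot-like tensor over the output classes.
        # Backpropagating it through the hooked network gives per-pixel
        # gradients, which are aggregated into a single-channel saliency map.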
        weights = get_bp_weights(self._backward_model, inputs, targets)
        gradients = self._grad_net(*inputs, weights)
        saliency = self._aggregation_fn(gradients)

        return saliency

    def _hook_relu_backward(self):
        """Set backward hooks for the ReLU layers."""
        for _, cell in self._backward_model.cells_and_names():
            if isinstance(cell, nn.ReLU):
                cell.register_backward_hook(self._backward_hook)

    def _backward_hook(self, _, grad_inputs, grad_outputs):
        """Hook function for ReLU layers."""
        inputs = grad_inputs if self.use_relu_backprop else grad_outputs
        relu = op.ReLU()
        if isinstance(inputs, tuple):
            return relu(*inputs)
        return relu(inputs)


@deprecated_error
class Deconvolution(ModifiedReLU):
    """
    Deconvolution explanation.

    The Deconvolution method is a modified version of the Gradient method. For the original ReLU operations in the
    network to be explained, Deconvolution modifies the propagation rule from directly backpropagating gradients to
    backpropagating positive gradients.

    Note:
        The passed `network` will be set to eval mode through `network.set_grad(False)` and `network.set_train(False)`.
        If you want to train the `network` afterwards, please reset it back to training mode through the opposite
        operations. To use `Deconvolution`, the `ReLU` operations in the network must be implemented with
        `mindspore.nn.Cell` objects rather than `mindspore.ops.operations.ReLU`. Otherwise, the results will not be
        correct.

    Args:
        network (Cell): The black-box model to be explained.

    Inputs:
        - **inputs** (Tensor) - The input data to be explained, a 4D tensor of shape :math:`(N, C, H, W)`.
        - **targets** (Tensor, int) - The label of interest. It should be a 1D or 0D tensor, or an integer.
          If it is a 1D tensor, its length should equal the number of samples in `inputs`.

    Outputs:
        Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`, saliency maps.

    Raises:
        TypeError: Raised for any argument or input type problem.
        ValueError: Raised for any input value problem.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> import numpy as np
        >>> import mindspore as ms
        >>> from mindspore.explainer.explanation import Deconvolution
        >>> from mindspore import context
        >>>
        >>> context.set_context(mode=context.PYNATIVE_MODE)
        >>> # The detail of LeNet5 is shown in model_zoo.official.cv.lenet.src.lenet.py
        >>> net = LeNet5(10, num_channel=3)
        >>> deconvolution = Deconvolution(net)
        >>> # feed data and the target label to be explained and get the saliency map
        >>> inputs = ms.Tensor(np.random.rand(1, 3, 32, 32), ms.float32)
        >>> label = 5
        >>> saliency = deconvolution(inputs, label)
        >>> print(saliency.shape)
        (1, 1, 32, 32)
    """

    def __init__(self, network):
        super(Deconvolution, self).__init__(network, use_relu_backprop=True)


@deprecated_error
class GuidedBackprop(ModifiedReLU):
    """
    Guided-Backpropagation explanation.

    The Guided-Backpropagation method is an extension of the Gradient method. On top of the original ReLU operations
    in the network to be explained, Guided-Backpropagation introduces another ReLU operation to filter out the
    negative gradients during backpropagation.

    Note:
        The passed `network` will be set to eval mode through `network.set_grad(False)` and `network.set_train(False)`.
        If you want to train the `network` afterwards, please reset it back to training mode through the opposite
        operations. To use `GuidedBackprop`, the `ReLU` operations in the network must be implemented with
        `mindspore.nn.Cell` objects rather than `mindspore.ops.operations.ReLU`. Otherwise, the results will not be
        correct.

    Args:
        network (Cell): The black-box model to be explained.

    Inputs:
        - **inputs** (Tensor) - The input data to be explained, a 4D tensor of shape :math:`(N, C, H, W)`.
        - **targets** (Tensor, int) - The label of interest. It should be a 1D or 0D tensor, or an integer.
          If it is a 1D tensor, its length should equal the number of samples in `inputs`.

    Outputs:
        Tensor, a 4D tensor of shape :math:`(N, 1, H, W)`, saliency maps.

    Raises:
        TypeError: Raised for any argument or input type problem.
        ValueError: Raised for any input value problem.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> import numpy as np
        >>> import mindspore as ms
        >>> from mindspore.explainer.explanation import GuidedBackprop
        >>> from mindspore import context
        >>>
        >>> context.set_context(mode=context.PYNATIVE_MODE)
        >>> # The detail of LeNet5 is shown in model_zoo.official.cv.lenet.src.lenet.py
        >>> net = LeNet5(10, num_channel=3)
        >>> gbp = GuidedBackprop(net)
        >>> # feed data and the target label to be explained and get the saliency map
        >>> inputs = ms.Tensor(np.random.rand(1, 3, 32, 32), ms.float32)
        >>> label = 5
        >>> saliency = gbp(inputs, label)
        >>> print(saliency.shape)
        (1, 1, 32, 32)
    """

    def __init__(self, network):
        super(GuidedBackprop, self).__init__(network, use_relu_backprop=False)
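
# Summary of the backward rules implemented above (standard definitions of the
# two methods, stated here for reference; g is the gradient arriving at a ReLU
# whose forward input is x):
#   plain gradient:  dx = g * (x > 0)
#   Deconvolution:   dx = relu(g)
#   GuidedBackprop:  dx = relu(g * (x > 0))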