# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
15"""
16This test is used to monitor some features of MindArmour.
17"""
18import numpy as np
19import pytest
20
21import mindspore.nn as nn
22from mindspore import context, Tensor
23from mindspore.nn import Cell, WithLossCell, TrainOneStepCell
24from mindspore.nn.optim.momentum import Momentum
25from mindspore.common.initializer import TruncatedNormal
26from mindspore.ops.composite import GradOperation
27
28
def weight_variable():
    """Return the weight initializer shared by all layers."""
    return TruncatedNormal(0.02)


def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):
    """Build a conv layer with the shared weight initializer."""
    weight = weight_variable()
    return nn.Conv2d(in_channels, out_channels,
                     kernel_size=kernel_size, stride=stride, padding=padding,
                     weight_init=weight, has_bias=False, pad_mode="valid")


def fc_with_initialize(input_channels, out_channels):
    """Build a fully connected layer with the shared weight initializer."""
    weight = weight_variable()
    bias = weight_variable()
    return nn.Dense(input_channels, out_channels, weight, bias)


class LeNet(nn.Cell):
    """
    LeNet network.

    Args:
        num_class (int): Number of classes. Default: 10.

    Returns:
        Tensor, output tensor.

    Examples:
        >>> LeNet(num_class=10)
    """

    def __init__(self, num_class=10):
        super(LeNet, self).__init__()
        self.conv1 = conv(1, 6, 5)
        self.conv2 = conv(6, 16, 5)
        self.fc1 = fc_with_initialize(16 * 5 * 5, 120)
        self.fc2 = fc_with_initialize(120, 84)
        self.fc3 = fc_with_initialize(84, num_class)
        self.relu = nn.ReLU()
        self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
        self.flatten = nn.Flatten()

    def construct(self, x):
        x = self.conv1(x)
        x = self.relu(x)
        x = self.max_pool2d(x)
        x = self.conv2(x)
        x = self.relu(x)
        x = self.max_pool2d(x)
        x = self.flatten(x)
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        x = self.relu(x)
        x = self.fc3(x)
        return x


class GradWithSens(Cell):
    """Compute the gradient of a network w.r.t. its input, given an output sensitivity."""

    def __init__(self, network):
        super(GradWithSens, self).__init__()
        self.grad = GradOperation(get_all=False,
                                  sens_param=True)
        self.network = network

    def construct(self, inputs, sens):
        gout = self.grad(self.network)(inputs, sens)
        return gout


class GradWrapWithLoss(Cell):
    """Compute the gradient of a loss cell w.r.t. its first input."""

    def __init__(self, network):
        super(GradWrapWithLoss, self).__init__()
        self._grad_all = GradOperation(get_all=True,
                                       sens_param=False)
        self._network = network

    def construct(self, inputs, labels):
        gout = self._grad_all(self._network)(inputs, labels)
        return gout[0]


@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_grad_values_and_infer_shape():
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
    inputs_np = np.random.rand(32, 1, 32, 32).astype(np.float32)
    sens = np.ones((inputs_np.shape[0], 10)).astype(np.float32)
    inputs_np_2 = np.random.rand(64, 1, 32, 32).astype(np.float32)

    net = LeNet()
    grad_all = GradWithSens(net)

    # Gradient w.r.t. the input, plus a forward pass on a different batch size.
    grad_out = grad_all(Tensor(inputs_np), Tensor(sens)).asnumpy()
    out_shape = net(Tensor(inputs_np_2)).asnumpy().shape
    assert np.any(grad_out != 0), 'gradient result must not be all zeros'
    assert out_shape == (64, 10), 'output shape should be (64, 10)'


@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_multi_grads():
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
    sparse = False
    inputs_np = np.random.rand(32, 1, 32, 32).astype(np.float32)
    labels_np = np.random.randint(10, size=32).astype(np.int32)
    inputs_np_2 = np.random.rand(64, 1, 32, 32).astype(np.float32)
    labels_np_2 = np.random.randint(10, size=64).astype(np.int32)
    if not sparse:
        # Convert integer labels to one-hot when the loss expects dense labels.
        labels_np = np.eye(10)[labels_np].astype(np.float32)
        labels_np_2 = np.eye(10)[labels_np_2].astype(np.float32)

    net = LeNet()

    # grad operation
    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=sparse)
    with_loss_cell = WithLossCell(net, loss_fn)
    grad_all = GradWrapWithLoss(with_loss_cell)
    grad_out = grad_all(Tensor(inputs_np), Tensor(labels_np)).asnumpy()
    assert np.any(grad_out != 0), 'gradient result must not be all zeros'

    # train-one-step operation
    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=sparse)
    optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()),
                         0.01, 0.9)
    loss_net = WithLossCell(net, loss_fn)
    train_net = TrainOneStepCell(loss_net, optimizer)
    train_net.set_train()
    train_net(Tensor(inputs_np_2), Tensor(labels_np_2))
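

# The cases above are normally collected and run by pytest on an Ascend-enabled
# environment. The block below is only a convenience sketch for invoking them
# directly; it is an assumption, not part of the original test harness.
if __name__ == "__main__":
    test_grad_values_and_infer_shape()
    test_multi_grads()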