# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

import numpy as np
import pytest

import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
import mindspore.ops.operations._grad_ops as G


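# Runs the ReLU forward op and the internal ReluGrad op in one graph:
# construct() returns the ReLU output and the gradient of the input given dy.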
class ReluNet(nn.Cell):
    def __init__(self):
        super(ReluNet, self).__init__()
        self.relu = P.ReLU()
        self.relu_grad = G.ReluGrad()

    def construct(self, x, dy):
        y = self.relu(x)
        dx = self.relu_grad(dy, y)
        return y, dx


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_ReluV2():
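    """
    Feature: ReLU and ReluGrad GPU kernels.
    Description: Run a 1x1x3x3 float32 tensor through ReluNet in graph mode.
    Expectation: Forward output and input gradient match the NumPy references.
    """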
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")

    x = Tensor(np.array([[[[-1, 1, 10],
                           [1, -1, 1],
                           [10, 1, -1]]]]).astype(np.float32))
    dy = Tensor(np.array([[[[1, 0, 3],
                            [0, 1, 0],
                            [2, 1, 1]]]]).astype(np.float32))
    expect_y = np.array([[[[0, 1, 10],
                           [1, 0, 1],
                           [10, 1, 0]]]]).astype(np.float32)
    expect_dx = np.array([[[[0, 0, 3],
                            [0, 0, 0],
                            [2, 1, 0]]]]).astype(np.float32)
    net = ReluNet()
    y, dx = net(x, dy)

    assert np.allclose(y.asnumpy(), expect_y)
    assert np.allclose(dx.asnumpy(), expect_dx)


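# Adds two inputs, applies ReLU to the sum, and feeds the upstream gradient dy
# through ReluGrad using the ReLU output as the mask.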
class AddReluNet(nn.Cell):
    def __init__(self):
        super(AddReluNet, self).__init__()
        self.add = P.Add()
        self.relu = P.ReLU()
        self.relu_grad = G.ReluGrad()

    def construct(self, x1, x2, dy):
        y = self.add(x1, x2)
        y = self.relu(y)
        dx = self.relu_grad(dy, y)
        return y, dx


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_AddRelu():
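    """
    Feature: Add followed by ReLU and ReluGrad on GPU.
    Description: Add two 1x1x3x3 float32 tensors, apply ReLU, and compute the
        gradient of the summed input in graph mode.
    Expectation: Forward output and gradient match the NumPy references.
    """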
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")

    x1 = Tensor(np.array([[[[-1, 1, 10],
                            [1, -1, 1],
                            [10, 1, -1]]]]).astype(np.float32))
    x2 = Tensor(np.array([[[[-1, 1, 10],
                            [1, -1, 1],
                            [10, 1, -1]]]]).astype(np.float32))
    dy = Tensor(np.array([[[[1, 0, 3],
                            [0, 1, 0],
                            [2, 1, 1]]]]).astype(np.float32))
    expect_y = np.array([[[[0, 2, 20],
                           [2, 0, 2],
                           [20, 2, 0]]]]).astype(np.float32)
    expect_dx = np.array([[[[0, 0, 3],
                            [0, 0, 0],
                            [2, 1, 0]]]]).astype(np.float32)
    net = AddReluNet()
    y, dx = net(x1, x2, dy)

    assert np.allclose(y.asnumpy(), expect_y)
    assert np.allclose(dx.asnumpy(), expect_dx)


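# Applies ReLU to x, sums two upstream gradients, and propagates the summed
# gradient through ReluGrad using the ReLU output as the mask.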
class AddReluGradNet(nn.Cell):
    def __init__(self):
        super(AddReluGradNet, self).__init__()
        self.add = P.Add()
        self.relu = P.ReLU()
        self.relu_grad = G.ReluGrad()

    def construct(self, x, dy1, dy2):
        y = self.relu(x)
        dy = self.add(dy1, dy2)
        dx = self.relu_grad(dy, y)
        return y, dx


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_AddReluGrad():
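    """
    Feature: ReLU forward with summed upstream gradients through ReluGrad on GPU.
    Description: Apply ReLU to a 1x1x3x3 float32 tensor, add two upstream
        gradients, and propagate the sum through ReluGrad in graph mode.
    Expectation: Forward output and gradient match the NumPy references.
    """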
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")

    x = Tensor(np.array([[[[-1, 1, 10],
                           [1, -1, 1],
                           [10, 1, -1]]]]).astype(np.float32))
    dy1 = Tensor(np.array([[[[1, 0, 3],
                             [0, 1, 0],
                             [2, 1, 1]]]]).astype(np.float32))
    dy2 = Tensor(np.array([[[[1, 0, 3],
                             [0, 1, 0],
                             [2, 1, 1]]]]).astype(np.float32))
    expect_y = np.array([[[[0, 1, 10],
                           [1, 0, 1],
                           [10, 1, 0]]]]).astype(np.float32)
    expect_dx = np.array([[[[0, 0, 6],
                            [0, 0, 0],
                            [4, 2, 0]]]]).astype(np.float32)
    net = AddReluGradNet()
    y, dx = net(x, dy1, dy2)

    assert np.allclose(y.asnumpy(), expect_y)
    assert np.allclose(dx.asnumpy(), expect_dx)