# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

import numpy as np
import pytest

import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops.operations import _inner_ops as inner


class NetRelu(nn.Cell):
    """Minimal Cell wrapping the ReLU primitive for op-level testing."""

    def __init__(self):
        super(NetRelu, self).__init__()
        self.relu_op = P.ReLU()

    def construct(self, input_x):
        # Element-wise max(input_x, 0).
        return self.relu_op(input_x)


class NetReluDynamic(nn.Cell):
    """Cell that applies ReLU after a dynamic-shape conversion.

    GpuConvertToDynamicShape marks the tensor as dynamically shaped so the
    downstream ReLU kernel is exercised through the dynamic-shape code path.
    """

    def __init__(self):
        super(NetReluDynamic, self).__init__()
        self.to_dynamic = inner.GpuConvertToDynamicShape()
        self.relu_op = P.ReLU()

    def construct(self, input_x):
        dyn_x = self.to_dynamic(input_x)
        return self.relu_op(dyn_x)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_relu_float32():
    """ReLU forward on a float32 tensor, in PyNative and Graph mode."""
    x = Tensor(np.array([[[[-1, 1, 10],
                           [1, -1, 1],
                           [10, 1, -1]]]]).astype(np.float32))
    expect = np.array([[[[0, 1, 10],
                         [1, 0, 1],
                         [10, 1, 0]]]]).astype(np.float32)

    # Run the identical network under both execution modes and compare
    # against the hand-computed expectation.
    for mode in (context.PYNATIVE_MODE, context.GRAPH_MODE):
        context.set_context(mode=mode, device_target="GPU")
        output = NetRelu()(x)
        assert (output.asnumpy() == expect).all()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_relu_int8():
    """ReLU forward on an int8 tensor, in PyNative and Graph mode."""
    x = Tensor(np.array([[[[-1, 1, 10],
                           [1, -1, 1],
                           [10, 1, -1]]]]).astype(np.int8))
    expect = np.array([[[[0, 1, 10],
                         [1, 0, 1],
                         [10, 1, 0]]]]).astype(np.int8)

    # Same check under both execution modes.
    for mode in (context.PYNATIVE_MODE, context.GRAPH_MODE):
        context.set_context(mode=mode, device_target="GPU")
        output = NetRelu()(x)
        assert (output.asnumpy() == expect).all()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_relu_int32():
    """ReLU forward on an int32 tensor, in PyNative and Graph mode."""
    x = Tensor(np.array([[[[-1, 1, 10],
                           [1, -1, 1],
                           [10, 1, -1]]]]).astype(np.int32))
    expect = np.array([[[[0, 1, 10],
                         [1, 0, 1],
                         [10, 1, 0]]]]).astype(np.int32)

    # Same check under both execution modes.
    for mode in (context.PYNATIVE_MODE, context.GRAPH_MODE):
        context.set_context(mode=mode, device_target="GPU")
        output = NetRelu()(x)
        assert (output.asnumpy() == expect).all()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_relu_int64():
    """ReLU forward on an int64 tensor, in PyNative and Graph mode.

    Fix: removed a leftover debug ``print(output.asnumpy(), expect)``
    that the other dtype variants of this test do not have.
    """
    x = Tensor(np.array([[[[-1, 1, 10],
                           [1, -1, 1],
                           [10, 1, -1]]]]).astype(np.int64))
    expect = np.array([[[[0, 1, 10],
                         [1, 0, 1],
                         [10, 1, 0]]]]).astype(np.int64)

    # PyNative mode.
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    relu = NetRelu()
    output = relu(x)
    assert (output.asnumpy() == expect).all()

    # Graph mode.
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    relu = NetRelu()
    output = relu(x)
    assert (output.asnumpy() == expect).all()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_relu_int64_dynamic_shape():
    """ReLU forward on int64 input through the dynamic-shape path (Graph mode)."""
    x = Tensor(np.array([[[[-1, 1, 10],
                           [1, -1, 1],
                           [10, 1, -1]]]]).astype(np.int64))
    expect = np.array([[[[0, 1, 10],
                         [1, 0, 1],
                         [10, 1, 0]]]]).astype(np.int64)

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = NetReluDynamic()
    result = net(x)
    assert (result.asnumpy() == expect).all()