# Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

import numpy as np
import pytest

import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops import operations as P
from mindspore.ops.operations import _inner_ops as inner


# Static-shape Add: the operand pairs cover an empty (2, 0) input, scalar-like
# broadcasting, equal shapes, and (1, 1, 3, 3) -> (3, 3, 3, 3) broadcasting.
class AddNet(nn.Cell):
    def __init__(self, nptype):
        super(AddNet, self).__init__()

        self.add = P.Add()

        np.random.seed(0)
        # x has a zero-sized dimension, so x + y is an empty tensor.
        self.x = Parameter(initializer(
            Tensor(np.random.randn(2, 0).astype(nptype)), [2, 0]), name='x')
        self.y = Parameter(initializer(
            Tensor(np.random.randn(2, 1).astype(nptype)), [2, 1]), name='y')

        # (3,) + (1,): broadcast a single element across a vector.
        self.x1 = Parameter(initializer(
            Tensor(np.arange(3).reshape(3).astype(nptype)), [3]), name='x1')
        self.y1 = Parameter(initializer(
            Tensor(np.array([2]).astype(nptype)), [1]), name='y1')

        # Equal shapes: (3, 3, 3, 3) + (3, 3, 3, 3).
        self.x2 = Parameter(initializer(
            Tensor(np.arange(3 * 3 * 3 * 3).reshape(3, 3, 3, 3).astype(nptype)), [3, 3, 3, 3]), name='x2')
        self.y2 = Parameter(initializer(
            Tensor(np.arange(3 * 3 * 3 * 3).reshape(3, 3, 3, 3).astype(nptype)), [3, 3, 3, 3]), name='y2')

        # Broadcast: (1, 1, 3, 3) + (3, 3, 3, 3).
        self.x3 = Parameter(initializer(
            Tensor(np.arange(1 * 1 * 3 * 3).reshape(1, 1, 3, 3).astype(nptype)), [1, 1, 3, 3]), name='x3')
        self.y3 = Parameter(initializer(
            Tensor(np.arange(3 * 3 * 3 * 3).reshape(3, 3, 3, 3).astype(nptype)), [3, 3, 3, 3]), name='y3')

    @ms_function
    def construct(self):
        return (
            self.add(self.x, self.y), self.add(self.x1, self.y1), self.add(self.x2, self.y2),
            self.add(self.x3, self.y3))


# Run AddNet in PyNative mode on GPU and check all four results element-wise.
def add(nptype):
    context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')

    add_net = AddNet(nptype)
    output = add_net()
    expect0 = np.array([])
    expect1 = np.array([2, 3, 4]).astype(nptype)
    expect2 = np.array(
        [[[[0., 2., 4.],
           [6., 8., 10.],
           [12., 14., 16.]],
          [[18., 20., 22.],
           [24., 26., 28.],
           [30., 32., 34.]],
          [[36., 38., 40.],
           [42., 44., 46.],
           [48., 50., 52.]]],
         [[[54., 56., 58.],
           [60., 62., 64.],
           [66., 68., 70.]],
          [[72., 74., 76.],
           [78., 80., 82.],
           [84., 86., 88.]],
          [[90., 92., 94.],
           [96., 98., 100.],
           [102., 104., 106.]]],
         [[[108., 110., 112.],
           [114., 116., 118.],
           [120., 122., 124.]],
          [[126., 128., 130.],
           [132., 134., 136.],
           [138., 140., 142.]],
          [[144., 146., 148.],
           [150., 152., 154.],
           [156., 158., 160.]]]]).astype(nptype)
    expect3 = np.array(
        [[[[0., 2., 4.],
           [6., 8., 10.],
           [12., 14., 16.]],
          [[9., 11., 13.],
           [15., 17., 19.],
           [21., 23., 25.]],
          [[18., 20., 22.],
           [24., 26., 28.],
           [30., 32., 34.]]],
         [[[27., 29., 31.],
           [33., 35., 37.],
           [39., 41., 43.]],
          [[36., 38., 40.],
           [42., 44., 46.],
           [48., 50., 52.]],
          [[45., 47., 49.],
           [51., 53., 55.],
           [57., 59., 61.]]],
         [[[54., 56., 58.],
           [60., 62., 64.],
           [66., 68., 70.]],
          [[63., 65., 67.],
           [69., 71., 73.],
           [75., 77., 79.]],
          [[72., 74., 76.],
           [78., 80., 82.],
           [84., 86., 88.]]]]).astype(nptype)
    assert (output[0].asnumpy() == expect0).all()
    assert (output[1].asnumpy() == expect1).all()
    assert (output[2].asnumpy() == expect2).all()
    assert (output[3].asnumpy() == expect3).all()


@pytest.mark.skip(reason='0 in shape is not supported')
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_add_float64():
    add(np.float64)


@pytest.mark.skip(reason='0 in shape is not supported')
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_add_float32():
    add(np.float32)


@pytest.mark.skip(reason='0 in shape is not supported')
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_add_float16():
    add(np.float16)


@pytest.mark.skip(reason='0 in shape is not supported')
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_add_int64():
    add(np.int64)


@pytest.mark.skip(reason='0 in shape is not supported')
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_add_int32():
    add(np.int32)


# Add with both inputs routed through GpuConvertToDynamicShape, exercising the
# dynamic-shape path of the GPU Add kernel.
class Tensoradd_d(nn.Cell):
    def __init__(self):
        super(Tensoradd_d, self).__init__()
        self.test_dynamic = inner.GpuConvertToDynamicShape()
        self.add = P.Add()

    def construct(self, x, y):
        x = self.test_dynamic(x)
        y = self.test_dynamic(y)
        return self.add(x, y)


# Run Tensoradd_d in graph mode on GPU with broadcast and equal-shape inputs.
def add_dynamic(nptype):
    context.set_context(device_target='GPU', mode=context.GRAPH_MODE)
    net = Tensoradd_d()

    x1 = Tensor(np.arange(3).reshape(3).astype(nptype))
    y1 = Tensor(np.array([2]).astype(nptype))

    x2 = Tensor(np.arange(3 * 3 * 3 * 3).reshape(3, 3, 3, 3).astype(nptype))
    y2 = Tensor(np.arange(3 * 3 * 3 * 3).reshape(3, 3, 3, 3).astype(nptype))

    expect1 = np.array([2, 3, 4])
    expect2 = np.array(
        [[[[0., 2., 4.],
           [6., 8., 10.],
           [12., 14., 16.]],
          [[18., 20., 22.],
           [24., 26., 28.],
           [30., 32., 34.]],
          [[36., 38., 40.],
           [42., 44., 46.],
           [48., 50., 52.]]],
         [[[54., 56., 58.],
           [60., 62., 64.],
           [66., 68., 70.]],
          [[72., 74., 76.],
           [78., 80., 82.],
           [84., 86., 88.]],
          [[90., 92., 94.],
           [96., 98., 100.],
           [102., 104., 106.]]],
         [[[108., 110., 112.],
           [114., 116., 118.],
           [120., 122., 124.]],
          [[126., 128., 130.],
           [132., 134., 136.],
           [138., 140., 142.]],
          [[144., 146., 148.],
           [150., 152., 154.],
           [156., 158., 160.]]]])

    output1 = net(x1, y1)
    output2 = net(x2, y2)
    assert (output1.asnumpy() == expect1).all()
    assert (output2.asnumpy() == expect2).all()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_add_dynamic_float64():
    add_dynamic(np.float64)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_add_dynamic_float32():
    add_dynamic(np.float32)


@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_add_dynamic_float16():
    add_dynamic(np.float16)


@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_add_dynamic_int64():
    add_dynamic(np.int64)


@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_add_dynamic_int32():
    add_dynamic(np.int32)
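

# Illustrative addition, not part of the original test suite: reproduces the
# broadcast case from AddNet (x3 of shape (1, 1, 3, 3) added to y3 of shape
# (3, 3, 3, 3)) in plain NumPy, so the hand-written expect3 values can be
# sanity-checked without a GPU or a MindSpore context.
if __name__ == "__main__":
    x3 = np.arange(1 * 1 * 3 * 3).reshape(1, 1, 3, 3).astype(np.float32)
    y3 = np.arange(3 * 3 * 3 * 3).reshape(3, 3, 3, 3).astype(np.float32)
    # NumPy broadcasting expands x3 along the first two axes to (3, 3, 3, 3),
    # then adds element-wise; the result equals expect3 in add() above.
    print(x3 + y3)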