# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test array ops """
import functools
import numpy as np
import pytest
from mindspore.ops.signature import sig_rw, sig_dtype, make_sig

import mindspore as ms
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.nn import Cell
from mindspore.ops import operations as P
from mindspore.ops.operations import _inner_ops as inner
from mindspore.ops import prim_attr_register
from mindspore.ops.primitive import PrimitiveWithInfer
import mindspore.context as context
from ..ut_filter import non_graph_engine
from ....mindspore_test_framework.mindspore_test import mindspore_test
from ....mindspore_test_framework.pipeline.forward.compile_forward \
    import pipeline_for_compile_forward_ge_graph_for_case_by_case_config
from ....mindspore_test_framework.pipeline.forward.verify_exception \
    import pipeline_for_verify_exception_for_case_by_case_config

context.set_context(mode=context.PYNATIVE_MODE)


def test_expand_dims():
    input_tensor = Tensor(np.array([[2, 2], [2, 2]]))
    expand_dims = P.ExpandDims()
    output = expand_dims(input_tensor, 0)
    assert output.asnumpy().shape == (1, 2, 2)


def test_cast():
    input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
    input_x = Tensor(input_np)
    td = ms.int32
    cast = P.Cast()
    result = cast(input_x, td)
    expect = input_np.astype(np.int32)
    assert np.all(result.asnumpy() == expect)


def test_ones():
    ones = P.Ones()
    output = ones((2, 3), mstype.int32)
    assert output.asnumpy().shape == (2, 3)
    assert np.sum(output.asnumpy()) == 6


def test_ones_1():
    ones = P.Ones()
    output = ones(2, mstype.int32)
    assert output.asnumpy().shape == (2,)
    assert np.sum(output.asnumpy()) == 2


def test_zeros():
    zeros = P.Zeros()
    output = zeros((2, 3), mstype.int32)
    assert output.asnumpy().shape == (2, 3)
    assert np.sum(output.asnumpy()) == 0


def test_zeros_1():
    zeros = P.Zeros()
    output = zeros(2, mstype.int32)
    assert output.asnumpy().shape == (2,)
    assert np.sum(output.asnumpy()) == 0


@non_graph_engine
def test_reshape():
    input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]))
    shp = (3, 2)
    reshape = P.Reshape()
    output = reshape(input_tensor, shp)
    assert output.asnumpy().shape == (3, 2)


def test_transpose():
    input_tensor = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]))
    perm = (0, 2, 1)
    expect = np.array([[[1, 4], [2, 5], [3, 6]], [[7, 10], [8, 11], [9, 12]]])

    transpose = P.Transpose()
    output = transpose(input_tensor, perm)
    assert np.all(output.asnumpy() == expect)


def test_squeeze():
    input_tensor = Tensor(np.ones(shape=[3, 2, 1]))
    squeeze = P.Squeeze(2)
    output = squeeze(input_tensor)
    assert output.asnumpy().shape == (3, 2)
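

# Illustrative sketch, not part of the original test set: P.Squeeze with its
# default axis is expected to drop every dimension of size 1 (the default of
# axis=() is our assumption here). The helper is deliberately not test_-prefixed
# so pytest does not collect it.
def example_squeeze_all_axes():
    input_tensor = Tensor(np.ones(shape=[3, 1, 2, 1]))
    squeeze_all = P.Squeeze()  # assumed to squeeze all size-1 dimensions
    output = squeeze_all(input_tensor)
    assert output.asnumpy().shape == (3, 2)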


def test_invert_permutation():
    invert_permutation = P.InvertPermutation()
    x = (3, 4, 0, 2, 1)
    output = invert_permutation(x)
    expect = (2, 4, 3, 0, 1)
    assert np.all(output == expect)


def test_select():
    select = P.Select()
    cond = Tensor(np.array([[True, False, False], [False, True, True]]))
    x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]))
    y = Tensor(np.array([[7, 8, 9], [10, 11, 12]]))
    output = select(cond, x, y)
    expect = np.array([[1, 8, 9], [10, 5, 6]])
    assert np.all(output.asnumpy() == expect)


def test_argmin_invalid_output_type():
    P.Argmin(-1, mstype.int64)
    P.Argmin(-1, mstype.int32)
    with pytest.raises(TypeError):
        P.Argmin(-1, mstype.float32)
    with pytest.raises(TypeError):
        P.Argmin(-1, mstype.float64)
    with pytest.raises(TypeError):
        P.Argmin(-1, mstype.uint8)
    with pytest.raises(TypeError):
        P.Argmin(-1, mstype.bool_)


class CustomOP(PrimitiveWithInfer):
    __mindspore_signature__ = (sig_dtype.T, sig_dtype.T, sig_dtype.T1,
                               sig_dtype.T1, sig_dtype.T2, sig_dtype.T2,
                               sig_dtype.T2, sig_dtype.T3, sig_dtype.T4)

    @prim_attr_register
    def __init__(self):
        pass

    def __call__(self, p1, p2, p3, p4, p5, p6, p7, p8, p9):
        raise NotImplementedError


class CustomOP2(PrimitiveWithInfer):
    __mindspore_signature__ = (
        make_sig('p1', sig_rw.RW_WRITE, dtype=sig_dtype.T),
        make_sig('p2', dtype=sig_dtype.T),
        make_sig('p3', dtype=sig_dtype.T),
    )

    @prim_attr_register
    def __init__(self):
        pass

    def __call__(self, p1, p2, p3):
        raise NotImplementedError


class CustNet1(Cell):
    def __init__(self):
        super(CustNet1, self).__init__()
        self.op = CustomOP()
        self.t1 = Tensor(np.ones([2, 2]), dtype=ms.int32)
        self.t2 = Tensor(np.ones([1, 5]), dtype=ms.float16)
        self.int1 = 3
        self.float1 = 5.1

    def construct(self):
        x = self.op(self.t1, self.t1, self.int1,
                    self.float1, self.int1, self.float1,
                    self.t2, self.t1, self.int1)
        return x


class CustNet2(Cell):
    def __init__(self):
        super(CustNet2, self).__init__()
        self.op = CustomOP2()
        self.t1 = Tensor(np.ones([2, 2]), dtype=ms.int32)
        self.t2 = Tensor(np.ones([1, 5]), dtype=ms.float16)
        self.int1 = 3

    def construct(self):
        return self.op(self.t1, self.t2, self.int1)


class CustNet3(Cell):
    def __init__(self):
        super(CustNet3, self).__init__()
        self.op = P.ReduceSum()
        self.t1 = Tensor(np.ones([2, 2]), dtype=ms.int32)
        self.t2 = Tensor(np.ones([1, 5]), dtype=ms.float16)
        self.t2 = 1  # intentionally rebinds t2 to a scalar; used as the ReduceSum axis in construct

    def construct(self):
        return self.op(self.t1, self.t2)


class MathBinaryNet1(Cell):
    def __init__(self):
        super(MathBinaryNet1, self).__init__()
        self.add = P.Add()
        self.mul = P.Mul()
        self.max = P.Maximum()
        self.number = 3

    def construct(self, x):
        return self.add(x, self.number) + self.mul(x, self.number) + self.max(x, self.number)


class MathBinaryNet2(Cell):
    def __init__(self):
        super(MathBinaryNet2, self).__init__()
        self.less_equal = P.LessEqual()
        self.greater = P.Greater()
        self.logic_or = P.LogicalOr()
        self.logic_and = P.LogicalAnd()
        self.number = 3
        self.flag = True

    def construct(self, x):
        ret_less_equal = self.logic_and(self.less_equal(x, self.number), self.flag)
        ret_greater = self.logic_or(self.greater(x, self.number), self.flag)
        return self.logic_or(ret_less_equal, ret_greater)
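

# Illustrative sketch, not part of the original case lists: with an all-ones
# int32 input and number=3, MathBinaryNet1 computes (1 + 3) + (1 * 3) + max(1, 3)
# = 10 per element. The helper name is ours and is not test_-prefixed, so pytest
# does not collect it.
def example_math_binary_net1_usage():
    net = MathBinaryNet1()
    x = Tensor(np.ones([2, 2]), dtype=ms.int32)
    output = net(x)
    assert np.all(output.asnumpy() == 10)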


class BatchToSpaceNet(Cell):
    def __init__(self):
        super(BatchToSpaceNet, self).__init__()
        block_size = 2
        crops = [[0, 0], [0, 0]]
        self.batch_to_space = P.BatchToSpace(block_size, crops)

    def construct(self, x):
        return self.batch_to_space(x)


class SpaceToBatchNet(Cell):
    def __init__(self):
        super(SpaceToBatchNet, self).__init__()
        block_size = 2
        paddings = [[0, 0], [0, 0]]
        self.space_to_batch = P.SpaceToBatch(block_size, paddings)

    def construct(self, x):
        return self.space_to_batch(x)


class PackNet(Cell):
    def __init__(self):
        super(PackNet, self).__init__()
        self.stack = P.Stack()

    def construct(self, x):
        return self.stack((x, x))


class UnpackNet(Cell):
    def __init__(self):
        super(UnpackNet, self).__init__()
        self.unstack = P.Unstack()

    def construct(self, x):
        return self.unstack(x)


class SpaceToDepthNet(Cell):
    def __init__(self):
        super(SpaceToDepthNet, self).__init__()
        block_size = 2
        self.space_to_depth = P.SpaceToDepth(block_size)

    def construct(self, x):
        return self.space_to_depth(x)


class DepthToSpaceNet(Cell):
    def __init__(self):
        super(DepthToSpaceNet, self).__init__()
        block_size = 2
        self.depth_to_space = P.DepthToSpace(block_size)

    def construct(self, x):
        return self.depth_to_space(x)


class BatchToSpaceNDNet(Cell):
    def __init__(self):
        super(BatchToSpaceNDNet, self).__init__()
        block_shape = [2, 2]
        crops = [[0, 0], [0, 0]]
        self.batch_to_space_nd = P.BatchToSpaceND(block_shape, crops)

    def construct(self, x):
        return self.batch_to_space_nd(x)


class SpaceToBatchNDNet(Cell):
    def __init__(self):
        super(SpaceToBatchNDNet, self).__init__()
        block_shape = [2, 2]
        paddings = [[0, 0], [0, 0]]
        self.space_to_batch_nd = P.SpaceToBatchND(block_shape, paddings)

    def construct(self, x):
        return self.space_to_batch_nd(x)


class RangeNet(Cell):
    def __init__(self):
        super(RangeNet, self).__init__()
        self.range_ops = inner.Range(1.0, 8.0, 2.0)

    def construct(self, x):
        return self.range_ops(x)
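

# Illustrative sketch, ours and not pytest-collected: with block_size=2,
# SpaceToDepth folds each 2x2 spatial patch into the channel dimension,
# (1, 3, 2, 2) -> (1, 12, 1, 1), and DepthToSpace reverses that mapping,
# which is why the two nets above use mirrored input shapes.
def example_space_depth_round_trip():
    x = Tensor(np.random.rand(1, 3, 2, 2).astype(np.float16))
    folded = SpaceToDepthNet()(x)
    assert folded.asnumpy().shape == (1, 12, 1, 1)
    restored = DepthToSpaceNet()(folded)
    assert restored.asnumpy().shape == (1, 3, 2, 2)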


test_case_array_ops = [
    ('CustNet1', {
        'block': CustNet1(),
        'desc_inputs': []}),
    ('CustNet2', {
        'block': CustNet2(),
        'desc_inputs': []}),
    ('CustNet3', {
        'block': CustNet3(),
        'desc_inputs': []}),
    ('MathBinaryNet1', {
        'block': MathBinaryNet1(),
        'desc_inputs': [Tensor(np.ones([2, 2]), dtype=ms.int32)]}),
    ('MathBinaryNet2', {
        'block': MathBinaryNet2(),
        'desc_inputs': [Tensor(np.ones([2, 2]), dtype=ms.int32)]}),
    ('BatchToSpaceNet', {
        'block': BatchToSpaceNet(),
        'desc_inputs': [Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]).astype(np.float16))]}),
    ('SpaceToBatchNet', {
        'block': SpaceToBatchNet(),
        'desc_inputs': [Tensor(np.array([[[[1, 2], [3, 4]]]]).astype(np.float16))]}),
    ('PackNet', {
        'block': PackNet(),
        'desc_inputs': [Tensor(np.array([[[1, 2], [3, 4]]]).astype(np.float16))]}),
    ('UnpackNet', {
        'block': UnpackNet(),
        'desc_inputs': [Tensor(np.array([[1, 2], [3, 4]]).astype(np.float16))]}),
    ('SpaceToDepthNet', {
        'block': SpaceToDepthNet(),
        'desc_inputs': [Tensor(np.random.rand(1, 3, 2, 2).astype(np.float16))]}),
    ('DepthToSpaceNet', {
        'block': DepthToSpaceNet(),
        'desc_inputs': [Tensor(np.random.rand(1, 12, 1, 1).astype(np.float16))]}),
    ('SpaceToBatchNDNet', {
        'block': SpaceToBatchNDNet(),
        'desc_inputs': [Tensor(np.random.rand(1, 1, 2, 2).astype(np.float16))]}),
    ('BatchToSpaceNDNet', {
        'block': BatchToSpaceNDNet(),
        'desc_inputs': [Tensor(np.random.rand(4, 1, 1, 1).astype(np.float16))]}),
    ('RangeNet', {
        'block': RangeNet(),
        'desc_inputs': [Tensor(np.array([1, 2, 3, 2]), ms.int32)]}),
]

test_case_lists = [test_case_array_ops]
test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists)
# use -k to select certain test cases, e.g.:
# pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm


@non_graph_engine
@mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config)
def test_exec():
    context.set_context(mode=context.GRAPH_MODE)
    return test_exec_case


raise_set = [
    ('Squeeze_1_Error', {
        'block': (lambda x: P.Squeeze(axis=1.2), {'exception': TypeError}),
        'desc_inputs': [Tensor(np.ones(shape=[3, 1, 5]))]}),
    ('Squeeze_2_Error', {
        'block': (lambda x: P.Squeeze(axis=(1.2, 1.3)), {'exception': TypeError}),
        'desc_inputs': [Tensor(np.ones(shape=[3, 1, 5]))]}),
    ('ReduceSum_Error', {
        'block': (lambda x: P.ReduceSum(keep_dims=1), {'exception': TypeError}),
        'desc_inputs': [Tensor(np.ones(shape=[3, 1, 5]))]}),
]


@mindspore_test(pipeline_for_verify_exception_for_case_by_case_config)
def test_check_exception():
    return raise_set