# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""GPU unit tests for the BatchToSpace operator (graph and pynative modes)."""
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
import mindspore.ops.operations.array_ops as P
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter


class BatchToSpaceNet(nn.Cell):
    """Cell wrapping a single BatchToSpace op applied to a constant input.

    The input tensor is ``arange(prod(input_shape))`` reshaped to
    ``input_shape`` and stored as a Parameter, so ``construct`` takes no
    arguments.

    Args:
        nptype: NumPy dtype of the input data (e.g. np.float32).
        block_size (int): block size forwarded to BatchToSpace.
        input_shape (tuple): 4-D shape of the generated input tensor.
    """

    def __init__(self, nptype, block_size=2, input_shape=(4, 1, 2, 2)):
        super(BatchToSpaceNet, self).__init__()
        self.batch_to_space = P.BatchToSpace(block_size=block_size, crops=[[0, 0], [0, 0]])
        # Total number of elements in the input tensor.
        input_size = 1
        for dim in input_shape:
            input_size *= dim
        data_np = np.arange(input_size).reshape(input_shape).astype(nptype)
        self.x1 = Parameter(initializer(Tensor(data_np), input_shape), name='x1')

    @ms_function
    def construct(self):
        return self.batch_to_space(self.x1)


def BatchToSpace(nptype, block_size=2, input_shape=(4, 1, 2, 2)):
    """Run BatchToSpaceNet in GRAPH_MODE on GPU and compare against the
    precomputed expected output.

    Note: the expected array below is hard-coded for the default
    block_size=2 and input_shape=(4, 1, 2, 2).
    """
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
    expect = np.array([[[[0, 4, 1, 5],
                         [8, 12, 9, 13],
                         [2, 6, 3, 7],
                         [10, 14, 11, 15]]]]).astype(nptype)

    net = BatchToSpaceNet(nptype, block_size, input_shape)
    output = net()

    assert (output.asnumpy() == expect).all()


def BatchToSpace_pynative(nptype, block_size=2, input_shape=(4, 1, 2, 2)):
    """Run the raw BatchToSpace op in PYNATIVE_MODE on GPU and compare
    against the precomputed expected output.

    Note: the expected array below is hard-coded for the default
    block_size=2 and input_shape=(4, 1, 2, 2).
    """
    context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
    # Total number of elements in the generated input tensor.
    input_size = 1
    for dim in input_shape:
        input_size *= dim
    expect = np.array([[[[0, 4, 1, 5],
                         [8, 12, 9, 13],
                         [2, 6, 3, 7],
                         [10, 14, 11, 15]]]]).astype(nptype)

    op = P.BatchToSpace(block_size=block_size, crops=[[0, 0], [0, 0]])
    arr_input = Tensor(np.arange(input_size).reshape(input_shape).astype(nptype))
    output = op(arr_input)

    assert (output.asnumpy() == expect).all()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_batchtospace_graph_float32():
    """BatchToSpace graph-mode test with float32 input."""
    BatchToSpace(np.float32)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_batchtospace_graph_float16():
    """BatchToSpace graph-mode test with float16 input."""
    BatchToSpace(np.float16)