# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""GPU tests for the SpaceToBatch operator in graph and pynative modes."""
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
import mindspore.ops.operations.array_ops as P
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter


class SpaceToBatchNet(nn.Cell):
    """Cell wrapping SpaceToBatch applied to a constant arange(N) parameter.

    Args:
        nptype: numpy dtype used to build the input data (e.g. np.float32).
        block_size (int): spatial block size forwarded to SpaceToBatch.
        input_shape (tuple): 4-D NCHW shape of the generated input tensor.
    """

    def __init__(self, nptype, block_size=2, input_shape=(1, 1, 4, 4)):
        super(SpaceToBatchNet, self).__init__()
        self.SpaceToBatch = P.SpaceToBatch(block_size=block_size, paddings=[[0, 0], [0, 0]])
        # Total element count of the input tensor (product of all dims).
        input_size = 1
        for dim in input_shape:
            input_size *= dim
        data_np = np.arange(input_size).reshape(input_shape).astype(nptype)
        self.x1 = Parameter(initializer(Tensor(data_np), input_shape), name='x1')

    @ms_function
    def construct(self):
        # Apply SpaceToBatch to the stored constant input.
        return self.SpaceToBatch(self.x1)


def SpaceToBatch(nptype, block_size=2, input_shape=(1, 1, 4, 4)):
    """Run SpaceToBatch in GRAPH_MODE on GPU and check the exact output.

    The expected array below is the known SpaceToBatch result for
    arange(16).reshape(1, 1, 4, 4) with block_size=2 and zero paddings.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
    # NOTE(review): the original also computed an unused element-count here;
    # that dead loop has been removed (SpaceToBatchNet derives its own size).
    expect = np.array([[[[0, 2],
                         [8, 10]]],
                       [[[1, 3],
                         [9, 11]]],
                       [[[4, 6],
                         [12, 14]]],
                       [[[5, 7],
                         [13, 15]]]]).astype(nptype)

    dts = SpaceToBatchNet(nptype, block_size, input_shape)
    output = dts()

    assert (output.asnumpy() == expect).all()


def SpaceToBatch_pynative(nptype, block_size=2, input_shape=(1, 1, 4, 4)):
    """Run SpaceToBatch in PYNATIVE_MODE on GPU and check the exact output.

    Mirrors SpaceToBatch() above, but calls the primitive directly on a
    Tensor instead of going through a Cell/graph.
    """
    context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
    # Element count of the generated input tensor (product of all dims).
    input_size = 1
    for dim in input_shape:
        input_size *= dim
    expect = np.array([[[[0, 2],
                         [8, 10]]],
                       [[[1, 3],
                         [9, 11]]],
                       [[[4, 6],
                         [12, 14]]],
                       [[[5, 7],
                         [13, 15]]]]).astype(nptype)

    dts = P.SpaceToBatch(block_size=block_size, paddings=[[0, 0], [0, 0]])
    arr_input = Tensor(np.arange(input_size).reshape(input_shape).astype(nptype))
    output = dts(arr_input)

    assert (output.asnumpy() == expect).all()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_spacetobatch_graph_float32():
    """Graph-mode SpaceToBatch with float32 input."""
    SpaceToBatch(np.float32)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_spacetobatch_graph_float16():
    """Graph-mode SpaceToBatch with float16 input."""
    SpaceToBatch(np.float16)