# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""GPU forward tests for nn.MaxPool2d and ops.MaxPool3D."""

from functools import reduce
import numpy as np
import pytest

import mindspore.context as context
import mindspore.nn as nn
import mindspore.ops.operations as P
from mindspore import Tensor


class Net_Pool(nn.Cell):
    """Network wrapping a 2x2, stride-2, VALID-padded MaxPool2d."""

    def __init__(self):
        super(Net_Pool, self).__init__()
        self.maxpool_fun = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode="VALID")

    def construct(self, x):
        return self.maxpool_fun(x)


class Net_Pool2(nn.Cell):
    """Network wrapping a 3x3, stride-2, SAME-padded MaxPool2d."""

    def __init__(self):
        super(Net_Pool2, self).__init__()
        self.maxpool_fun = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="SAME")

    def construct(self, x):
        return self.maxpool_fun(x)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_maxpool2d():
    """Check MaxPool2d VALID and SAME outputs in pynative and graph mode."""
    x = Tensor(np.array([[[
        [0, 1, 2, 3, -4, -5],
        [6, 7, 8, 9, -10, -11],
        [12, 13, 14, -15, -16, -17],
        [18, 19, 20, 21, 22, 23],
        [24, 25, 26, 27, 28, 29],
        [30, 31, 32, 33, 34, 35]
    ]]]).astype(np.float32))
    # Expected 3x3 result of the 2x2/stride-2 VALID pooling.
    expect_result = np.array([[[
        [7, 9, -4],
        [19, 21, 23],
        [31, 33, 35]
    ]]])
    # Expected 3x3 result of the 3x3/stride-2 SAME pooling.
    expect_result2 = np.array([[[
        [14, 14, -4],
        [26, 28, 29],
        [32, 34, 35]
    ]]])

    # Both execution modes must agree with the same expected values.
    for mode in (context.PYNATIVE_MODE, context.GRAPH_MODE):
        context.set_context(mode=mode, device_target="GPU")
        maxpool2d = Net_Pool()
        maxpool2d2 = Net_Pool2()
        output2 = maxpool2d2(x)
        output = maxpool2d(x)
        assert (output.asnumpy() == expect_result).all()
        assert (output2.asnumpy() == expect_result2).all()


# Shared MaxPool3D input shape; the input itself is the ramp 0..N-1
# reshaped to this (N, C, D, H, W) layout.
_POOL3D_SHAPE = (2, 3, 2, 3, 4)

# Expected output for kernel (2, 2, 3), stride 1, SAME padding on the
# shared ramp input.  Tests 4 (graph mode) and 5 (pynative mode) run the
# identical computation, so they share this constant.
_POOL3D_SAME_EXPECT = np.array(
    [[[[[17, 18, 19, 19], [21, 22, 23, 23], [21, 22, 23, 23]],
       [[17, 18, 19, 19], [21, 22, 23, 23], [21, 22, 23, 23]]],
      [[[41, 42, 43, 43], [45, 46, 47, 47], [45, 46, 47, 47]],
       [[41, 42, 43, 43], [45, 46, 47, 47], [45, 46, 47, 47]]],
      [[[65, 66, 67, 67], [69, 70, 71, 71], [69, 70, 71, 71]],
       [[65, 66, 67, 67], [69, 70, 71, 71], [69, 70, 71, 71]]]],
     [[[[89, 90, 91, 91], [93, 94, 95, 95], [93, 94, 95, 95]],
       [[89, 90, 91, 91], [93, 94, 95, 95], [93, 94, 95, 95]]],
      [[[113, 114, 115, 115], [117, 118, 119, 119], [117, 118, 119, 119]],
       [[113, 114, 115, 115], [117, 118, 119, 119], [117, 118, 119, 119]]],
      [[[137, 138, 139, 139], [141, 142, 143, 143], [141, 142, 143, 143]],
       [[137, 138, 139, 139], [141, 142, 143, 143], [141, 142, 143, 143]]]]])


def _run_max_pool3d(mode, kernel_size, strides, pad_mode, expect):
    """Run MaxPool3D on the shared ramp input and check the result.

    Args:
        mode: context execution mode (GRAPH_MODE or PYNATIVE_MODE).
        kernel_size: int or 3-tuple pooling window for MaxPool3D.
        strides: int or 3-tuple stride for MaxPool3D.
        pad_mode: 'VALID' or 'SAME'.
        expect: expected output values (nested list or ndarray).
    """
    context.set_context(mode=mode, device_target="GPU")
    x_val = np.arange(reduce(lambda a, b: a * b, _POOL3D_SHAPE))
    x_ms = Tensor(x_val).reshape(_POOL3D_SHAPE).astype(np.float32)
    output_ms = P.MaxPool3D(kernel_size=kernel_size, strides=strides, pad_mode=pad_mode)(x_ms)
    assert (output_ms.asnumpy() == np.array(expect)).all()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_max_pool3d_1():
    """VALID pooling with an asymmetric (2, 2, 3) window, stride 1."""
    _run_max_pool3d(
        context.GRAPH_MODE, (2, 2, 3), 1, 'VALID',
        [[[[[18, 19], [22, 23]]],
          [[[42, 43], [46, 47]]],
          [[[66, 67], [70, 71]]]],
         [[[[90, 91], [94, 95]]],
          [[[114, 115], [118, 119]]],
          [[[138, 139], [142, 143]]]]])


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_max_pool3d_2():
    """VALID pooling with a scalar kernel size of 2, stride 1."""
    _run_max_pool3d(
        context.GRAPH_MODE, 2, 1, 'VALID',
        [[[[[17, 18, 19], [21, 22, 23]]],
          [[[41, 42, 43], [45, 46, 47]]],
          [[[65, 66, 67], [69, 70, 71]]]],
         [[[[89, 90, 91], [93, 94, 95]]],
          [[[113, 114, 115], [117, 118, 119]]],
          [[[137, 138, 139], [141, 142, 143]]]]])


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_max_pool3d_3():
    """VALID pooling where stride 3 exceeds the kernel size of 2."""
    _run_max_pool3d(
        context.GRAPH_MODE, 2, 3, 'VALID',
        [[[[[17]]], [[[41]]], [[[65]]]],
         [[[[89]]], [[[113]]], [[[137]]]]])


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_max_pool3d_4():
    """SAME pooling with a (2, 2, 3) window, stride 1, graph mode."""
    _run_max_pool3d(context.GRAPH_MODE, (2, 2, 3), 1, 'SAME', _POOL3D_SAME_EXPECT)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_max_pool3d_5():
    """SAME pooling with a (2, 2, 3) window, stride 1, pynative mode."""
    _run_max_pool3d(context.PYNATIVE_MODE, (2, 2, 3), 1, 'SAME', _POOL3D_SAME_EXPECT)