# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

import numpy as np
import pytest

import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor, ops

from tests.st.utils import test_utils


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.conv3d = nn.Conv3d(in_channels=3, out_channels=32, kernel_size=(4, 3, 3), dtype=ms.float16)

    def construct(self, x):
        out = self.conv3d(x)
        return out


@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_conv3d_para_customed_dtype(mode):
    """
    Feature: Conv3d
    Description: Verify the result of Conv3d with a customized parameter dtype.
    Expectation: success
    """
    ms.set_context(mode=mode)
    net = Net()
    x = Tensor(np.ones([16, 3, 10, 32, 32]), ms.float16)
    output = net(x)
    expect_output_shape = (16, 32, 10, 32, 32)
    assert np.allclose(expect_output_shape, output.shape)


@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend910b_training
@pytest.mark.env_onecard
@test_utils.run_test_with_On
def test_conv3d_input_5d():
    """
    Feature: Conv3d 5d input
    Description: Verify the result of Conv3d with 5D input.
    Expectation: success
    """
    ms.set_context(mode=ms.GRAPH_MODE, ascend_config={"precision_mode": "force_fp16"})

    class Network(nn.Cell):
        def __init__(self):
            super().__init__()
            self.relu = ops.ReLU()
            self.conv1 = nn.Conv3d(1, 1, kernel_size=5, pad_mode="same", padding=0, has_bias=False, weight_init="One")
            self.reducemin = ops.ReduceMin(keep_dims=True)
            self.reducesum = ops.ReduceSum(keep_dims=True)
            self.add = ops.Add()
            self.square = ops.Square()
            self.abs = ops.Abs()
            self.concat = ops.Concat()
            self.batchnorm = nn.BatchNorm3d(5)

        def construct(self, data1, data2):
            batchnorm3d_01 = self.batchnorm(data1)
            batchnorm3d_02 = self.batchnorm(data1)
            reducesum_01 = self.reducesum(batchnorm3d_02, 1)
            add_01 = self.add(reducesum_01, data2)
            reducemin_01 = self.reducemin(add_01, 1)
            relu_01 = self.relu(batchnorm3d_01)
            abs_01 = self.abs(relu_01)
            square_01 = self.square(abs_01)
            reducemin_02 = self.reducemin(square_01, 1)
            concat_01 = self.concat((reducemin_02, reducemin_01))
            conv_01 = self.conv1(concat_01)
            relu_03 = self.relu(conv_01)
            output = relu_03
            return output

    data1 = Tensor(np.ones([1, 5, 5, 5, 4]).astype(np.float32))
    data2 = Tensor(np.ones([1, 5, 5, 4]).astype(np.float32))

    # Run the same network on CPU and Ascend and compare the outputs.
    ms.set_context(device_target="CPU")
    cpu_mode = Network()
    cpu_out = cpu_mode(data1, data2).asnumpy()

    ms.set_context(device_target="Ascend")
    npu_mode = Network()
    npu_out = npu_mode(data1, data2).asnumpy()

    assert np.allclose(cpu_out, npu_out, 0.001, 0.001)