# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

import numpy as np
import pytest

import mindspore.context as context
from mindspore.common.tensor import Tensor
from mindspore.nn import BatchNorm2d
from mindspore.nn import Cell
from mindspore.ops import composite as C


class Batchnorm_Net(Cell):
    """Wraps BatchNorm2d so the tests can supply explicit initial parameters."""

    def __init__(self, c, weight, bias, moving_mean, moving_var_init):
        super(Batchnorm_Net, self).__init__()
        self.bn = BatchNorm2d(c, eps=0.00001, momentum=0.1, beta_init=bias, gamma_init=weight,
                              moving_mean_init=moving_mean, moving_var_init=moving_var_init)

    def construct(self, input_data):
        x = self.bn(input_data)
        return x


class Grad(Cell):
    """Computes the gradient of `network` w.r.t. its inputs for a given sensitivity."""

    def __init__(self, network):
        super(Grad, self).__init__()
        self.grad = C.GradOperation(get_all=True, sens_param=True)
        self.network = network

    def construct(self, input_data, sens):
        gout = self.grad(self.network)(input_data, sens)
        return gout


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_train_forward():
    x = np.array([[
        [[1, 3, 3, 5], [2, 4, 6, 8], [3, 6, 7, 7], [4, 3, 8, 2]],
        [[5, 7, 6, 3], [3, 5, 6, 7], [9, 4, 2, 5], [7, 5, 8, 1]]]]).astype(np.float32)
    expect_output = np.array([[[[-0.6059, 0.3118, 0.3118, 1.2294],
                                [-0.1471, 0.7706, 1.6882, 2.6059],
                                [0.3118, 1.6882, 2.1471, 2.1471],
                                [0.7706, 0.3118, 2.6059, -0.1471]],

                               [[0.9119, 1.8518, 1.3819, -0.0281],
                                [-0.0281, 0.9119, 1.3819, 1.8518],
                                [2.7918, 0.4419, -0.4981, 0.9119],
                                [1.8518, 0.9119, 2.3218, -0.9680]]]]).astype(np.float32)

    weight = np.ones(2).astype(np.float32)
    bias = np.ones(2).astype(np.float32)
    moving_mean = np.ones(2).astype(np.float32)
    moving_var_init = np.ones(2).astype(np.float32)
    error = np.ones(shape=[1, 2, 4, 4]) * 1.0e-4

    # Training mode: normalization uses the batch statistics of `x`.
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    bn_net = Batchnorm_Net(2, Tensor(weight), Tensor(bias), Tensor(moving_mean), Tensor(moving_var_init))
    bn_net.set_train()
    output = bn_net(Tensor(x))
    diff = output.asnumpy() - expect_output
    assert np.all(diff < error)
    assert np.all(-diff < error)

    # Inference mode: smoke test that the eval path runs (output is not checked).
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    bn_net = Batchnorm_Net(2, Tensor(weight), Tensor(bias), Tensor(moving_mean), Tensor(moving_var_init))
    bn_net.set_train(False)
    output = bn_net(Tensor(x))


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_train_backward():
    x = np.array([[
        [[1, 3, 3, 5], [2, 4, 6, 8], [3, 6, 7, 7], [4, 3, 8, 2]],
        [[5, 7, 6, 3], [3, 5, 6, 7], [9, 4, 2, 5], [7, 5, 8, 1]]]]).astype(np.float32)
    grad = np.array([[
        [[1, 2, 7, 1], [4, 2, 1, 3], [1, 6, 5, 2], [2, 4, 3, 2]],
        [[9, 4, 3, 5], [1, 3, 7, 6], [5, 7, 9, 9], [1, 4, 6, 8]]]]).astype(np.float32)
    expect_output = np.array([[[[-0.69126546, -0.32903028, 1.9651246, -0.88445705],
                                [0.6369296, -0.37732816, -0.93275493, -0.11168876],
                                [-0.7878612, 1.3614, 0.8542711, -0.52222186],
                                [-0.37732816, 0.5886317, -0.11168876, -0.28073236]],

                               [[1.6447213, -0.38968924, -1.0174079, -0.55067265],
                                [-2.4305856, -1.1751484, 0.86250514, 0.5502673],
                                [0.39576983, 0.5470243, 1.1715001, 1.6447213],
                                [-1.7996241, -0.7051701, 0.7080077, 0.5437813]]]]).astype(np.float32)

    weight = Tensor(np.ones(2).astype(np.float32))
    bias = Tensor(np.ones(2).astype(np.float32))
    moving_mean = Tensor(np.ones(2).astype(np.float32))
    moving_var_init = Tensor(np.ones(2).astype(np.float32))
    error = np.ones(shape=[1, 2, 4, 4]) * 1.0e-6

    # Training mode: the gradient w.r.t. the input is compared against
    # precomputed reference values.
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    bn_net = Batchnorm_Net(2, weight, bias, moving_mean, moving_var_init)
    bn_net.set_train()
    bn_grad = Grad(bn_net)
    output = bn_grad(Tensor(x), Tensor(grad))
    diff = output[0].asnumpy() - expect_output
    assert np.all(diff < error)
    assert np.all(-diff < error)
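

# -----------------------------------------------------------------------------
# Reference sketch (not part of the original tests): in training mode the
# expected forward values above follow the standard batch normalization formula
#     y = gamma * (x - mean) / sqrt(var + eps) + beta
# with the mean and variance taken per channel over the batch. A minimal NumPy
# version is given below, assuming NCHW input; the helper name is illustrative,
# not a MindSpore API. With weight = bias = np.ones(2), it should reproduce
# expect_output in test_train_forward to within the 1e-4 tolerance used there.
def np_batchnorm2d_forward(x, gamma, beta, eps=1e-5):
    # Batch statistics over the N, H, W axes: one mean/variance per channel.
    mean = x.mean(axis=(0, 2, 3), keepdims=True)
    var = x.var(axis=(0, 2, 3), keepdims=True)
    x_hat = (x - mean) / np.sqrt(var + eps)
    return gamma.reshape(1, -1, 1, 1) * x_hat + beta.reshape(1, -1, 1, 1)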