# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test_grad """
import numpy as np

import mindspore as ms
import mindspore.ops.operations as P
from mindspore import Tensor, context
from mindspore.common.api import ms_function
from mindspore.ops import composite as C
from ...ut_filter import non_graph_engine


# pylint: disable=unused-argument
def setup_module(module):
    context.set_context(mode=context.PYNATIVE_MODE)


# grad differentiates w.r.t. the first input only; grad_all_with_sens returns the
# gradients of all inputs and takes the sensitivity (dout) as an extra last argument.
grad = C.GradOperation()
grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)


def mul(x, y):
    return x * y


@ms_function
def mainf(x, y):
    return grad(mul)(x, y)


@non_graph_engine
def test_grad():
    mainf(1, 2)


# The Xtest_ prefix keeps this case out of default pytest collection.
@non_graph_engine
def Xtest_expand_dims_grad():
    """ test_expand_dims_grad """
    input_tensor = Tensor(np.array([[2, 2], [2, 2]]))
    expand_dims = P.ExpandDims()

    def fn(x):
        output = expand_dims(x, 0)
        return output

    out = fn(input_tensor)
    gfn = grad_all_with_sens(fn)
    sens = Tensor(np.ones_like(out.asnumpy()))
    args = [input_tensor, sens]
    gout = gfn(*args)
    expect = np.ones([2, 2])
    assert np.all(gout[0].asnumpy() == expect)


def test_cast_grad():
    """ test_cast_grad """
    input_np = np.random.randn(2, 3).astype(np.float32)
    input_x = Tensor(input_np)

    td = ms.int32
    cast = P.Cast()

    def fn(x):
        output = cast(x, td)
        return output

    out = fn(input_x)
    gfn = grad_all_with_sens(fn)
    sens = Tensor(np.ones_like(out.asnumpy()))
    args = [input_x, sens]
    gout = gfn(*args)
    expect = np.ones((2, 3), dtype=np.float32)
    assert np.all(gout[0].asnumpy() == expect)


@non_graph_engine
def test_reshape_grad():
    """ test_reshape_grad """
    input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]))
    shp = (3, 2)
    reshape = P.Reshape()

    def fn(x):
        output = reshape(x, shp)
        return output

    out = fn(input_tensor)
    gfn = grad_all_with_sens(fn)
    sens = Tensor(np.ones_like(out.asnumpy()))
    args = [input_tensor, sens]
    gout = gfn(*args)
    expect = np.ones([2, 3])
    assert np.all(gout[0].asnumpy() == expect)


def test_transpose_grad():
    """ test_transpose_grad """
    input_tensor = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]))
    perm = (0, 2, 1)
    transpose = P.Transpose()

    def fn(x):
        output = transpose(x, perm)
        return output

    out = fn(input_tensor)
    gfn = grad_all_with_sens(fn)
    sens = Tensor(np.ones_like(out.asnumpy()))
    args = [input_tensor, sens]
    gout = gfn(*args)
    expect = np.ones([2, 2, 3])
    assert np.all(gout[0].asnumpy() == expect)


def test_select_grad():
    """ test_select_grad """
    select = P.Select()
    cond = Tensor(np.array([[True, False, False], [False, True, True]]))
    x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32))
    y = Tensor(np.array([[7, 8, 9], [10, 11, 12]]).astype(np.float32))

    def fn(cond, x, y):
        output = select(cond, x, y)
        return output

    out = fn(cond, x, y)
    gfn = grad_all_with_sens(fn)
    sens = Tensor(np.ones_like(out.asnumpy()).astype(np.float32))
    args = [cond, x, y, sens]
    gout = gfn(*args)
    expect_cond = np.zeros_like(cond.asnumpy())
    expect_x = np.array([[1, 0, 0], [0, 1, 1]])
    expect_y = np.array([[0, 1, 1], [1, 0, 0]])
    assert np.all(gout[0].asnumpy() == expect_cond)
    assert np.all(gout[1].asnumpy() == expect_x)
    assert np.all(gout[2].asnumpy() == expect_y)


@non_graph_engine
def test_squeeze_grad():
    """ test_squeeze_grad """
    input_tensor = Tensor(np.ones(shape=[3, 2, 1]))
    squeeze = P.Squeeze(2)

    def fn(x):
        output = squeeze(x)
        return output

    out = fn(input_tensor)
    gfn = grad_all_with_sens(fn)
    sens = Tensor(np.ones_like(out.asnumpy()))
    args = [input_tensor, sens]
    gout = gfn(*args)
    expect = np.ones([3, 2, 1])
    assert np.all(gout[0].asnumpy() == expect)


def test_SubGrad():
    """ test_SubGrad """
    input_x = Tensor(np.array([[2, 2]]))
    input_y = Tensor(np.array([[2, 2], [2, 2]]))
    sub = P.Sub()

    def fn(x, y):
        output = sub(x, y)
        return output

    out = fn(input_x, input_y)
    gfn = grad_all_with_sens(fn)
    sens = Tensor(np.ones_like(out.asnumpy()))
    args = [input_x, input_y, sens]
    gout = gfn(*args)
    expect_dx = np.ones([1, 2]).astype(np.int32) * 2  # reduce sum dout to the shape of x
    expect_dy = np.ones([2, 2]).astype(np.int32) * (-1)
    assert np.array_equal(gout[0].asnumpy(), expect_dx)
    assert np.array_equal(gout[1].asnumpy(), expect_dy)


def test_MulGrad():
    """ test_MulGrad """
    input_x = Tensor(np.array([[2, 2], [2, 2]], np.float32))
    input_y = Tensor(np.array([[3, 3], [3, 3]], np.float32))
    mymul = P.Mul()

    def fn(x, y):
        output = mymul(x, y)
        return output

    out = fn(input_x, input_y)
    gfn = grad_all_with_sens(fn)
    sens = Tensor(np.ones_like(out.asnumpy()) * 3)
    args = [input_x, input_y, sens]
    gout = gfn(*args)
    expect_dx = np.ones([2, 2], np.float32) * 9
    expect_dy = np.ones([2, 2], np.float32) * 6
    assert np.all(gout[0].asnumpy().shape == expect_dx.shape)
    assert np.all(gout[0].asnumpy() == expect_dx)
    assert np.all(gout[1].asnumpy().shape == expect_dy.shape)
    assert np.all(gout[1].asnumpy() == expect_dy)
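

# A minimal NumPy-only sketch (hypothetical helper, excluded from pytest collection
# via the Xtest_ prefix already used above) of why test_SubGrad expects dx == [[2, 2]]:
# Sub broadcasts the (1, 2) input against the (2, 2) input, so the backward pass
# reduce-sums dout over the broadcast axis to recover x's shape, while d(x - y)/dy
# is -1 elementwise.
def Xtest_sub_broadcast_reduction_sketch():
    """ illustrative sketch of the broadcast reduction behind test_SubGrad """
    dout = np.ones([2, 2]).astype(np.int32)
    expect_dx = dout.sum(axis=0, keepdims=True)  # (2, 2) -> (1, 2), i.e. [[2, 2]]
    expect_dy = -dout                            # -1 everywhere
    assert np.array_equal(expect_dx, np.ones([1, 2]).astype(np.int32) * 2)
    assert np.array_equal(expect_dy, np.ones([2, 2]).astype(np.int32) * (-1))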