# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test implicit conversion """
import numpy as np
import pytest
import mindspore as ms

from mindspore import Tensor, nn, Parameter
from mindspore.ops import composite as C
from mindspore.ops import functional as F


# Gradient operator that returns gradients for all inputs and takes an
# explicit sensitivity (initial gradient) as its last argument.
grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)


def test_float_tensor_and_int_add():
    x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
    y = 2
    ret_actual = x + y
    ret_expect = Tensor(np.array([[2.1, 2.2, 2.3], [2.4, 2.5, 2.6]], dtype=np.float32))
    assert ret_actual.dtype == ret_expect.dtype
    assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()


def test_bool_tensor_and_float_add():
    x = Tensor(np.array([[True, False], [False, True]], dtype=np.bool_))
    y = 3.3
    ret_actual = x + y
    ret_expect = Tensor(np.array([[4.3, 3.3], [3.3, 4.3]], dtype=np.float32))
    assert ret_actual.dtype == ret_expect.dtype
    assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()


def test_bool_tensor_and_int_add():
    x = Tensor(np.array([[True, False], [False, True]], dtype=np.bool_))
    y = 3
    ret_actual = x + y
    ret_expect = Tensor(np.array([[4, 3], [3, 4]], dtype=np.int64))
    assert ret_actual.dtype == ret_expect.dtype
    assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()


def test_bool_and_int_tensor_add():
    x = True
    y = Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32))
    ret_actual = x + y
    ret_expect = Tensor(np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32))
    assert ret_actual.dtype == ret_expect.dtype
    assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()


def test_float_tensor_and_int_tensor_add():
    x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
    y = Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32))
    ret_actual = x + y
    ret_expect = Tensor(np.array([[1.1, 2.2, 3.3], [4.4, 5.5, 6.6]], dtype=np.float32))
    assert ret_actual.dtype == ret_expect.dtype
    assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()


def test_float_tensor_and_float_tensor_add():
    x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
    y = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float16))
    ret_actual = x + y
    ret_expect = Tensor(np.array([[1.1, 2.2, 3.3], [4.4, 5.5, 6.6]], dtype=np.float32))
    assert ret_actual.dtype == ret_expect.dtype
    assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()


def test_int_tensor_and_int_tensor_add():
    x = Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int8))
    y = Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32))
    ret_actual = x + y
    ret_expect = Tensor(np.array([[2, 4, 6], [8, 10, 12]], dtype=np.int32))
    assert ret_actual.dtype == ret_expect.dtype
    assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()


def test_float_tensor_and_bool_tensors_add():
    x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
    y = Tensor(np.array([[True, True, True], [False, False, False]], dtype=np.bool_))
    ret_actual = x + y
    ret_expect = Tensor(np.array([[1.1, 1.2, 1.3], [0.4, 0.5, 0.6]], dtype=np.float32))
    assert ret_actual.dtype == ret_expect.dtype
    assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()


def test_int8_tensor_and_uint8_tensors_add():
    x = Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int8))
    y = Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8))
    ret_actual = x + y
    ret_expect = Tensor(np.array([[2, 4, 6], [8, 10, 12]], dtype=np.int16))
    assert ret_actual.dtype == ret_expect.dtype
    assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()


def test_float_tensor_and_str_add():
    x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
    y = "ok"
    with pytest.raises(TypeError) as er:
        ret = x + y
    assert "For 'Add', the 1th input is a not support implicit conversion type: str" in str(er.value)


def test_float_tensor_and_tuple_add():
    x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
    y = (1, 2, 3)
    ret_actual = x + y
    ret_expect = Tensor(np.array([[1.1, 2.2, 3.3], [1.4, 2.5, 3.6]], dtype=np.float32))
    assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()


def test_float_tensor_and_list_add():
    x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
    y = [1, 2, 3]
    ret_actual = x + y
    ret_expect = Tensor(np.array([[1.1, 2.2, 3.3], [1.4, 2.5, 3.6]], dtype=np.float32))
    assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()


def test_float_tensor_and_bool_tensors_add_grad():
    class Net(nn.Cell):
        def __init__(self):
            super(Net, self).__init__()

        def construct(self, x, y):
            return x + y

    class GradNet(nn.Cell):
        def __init__(self, net):
            super(GradNet, self).__init__()
            self.net = net

        def construct(self, x, y, sens):
            return grad_all_with_sens(self.net)(x, y, sens)

    x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
    y = Tensor(np.array([[True, True, True], [False, False, False]], dtype=np.bool_))
    sens = Tensor(np.array([[1.0, 2.0, 0.0], [0.0, 3.0, 4.0]], dtype=np.float32))
    net = Net()
    grad_net = GradNet(net)
    ret = grad_net(x, y, sens)
    assert ret[0].dtype == x.dtype
    assert ret[1].dtype == y.dtype
    assert (ret[0].asnumpy() == sens.asnumpy()).all()
    assert (ret[1].asnumpy() == sens.asnumpy().astype(np.bool_)).all()


def test_float_tensor_and_int_tensors_sub_grad():
    class Net(nn.Cell):
        def __init__(self):
            super(Net, self).__init__()

        def construct(self, x, y):
            return x - y

    class GradNet(nn.Cell):
        def __init__(self, net):
            super(GradNet, self).__init__()
            self.net = net

        def construct(self, x, y, sens):
            return grad_all_with_sens(self.net)(x, y, sens)

    x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
    y = Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32))
    sens = Tensor(np.array([[1.0, 2.0, 0.0], [0.0, 3.0, 4.0]], dtype=np.float32))
    net = Net()
    grad_net = GradNet(net)
    ret = grad_net(x, y, sens)
    assert ret[0].dtype == x.dtype
    assert ret[1].dtype == y.dtype
    assert (ret[0].asnumpy() == sens.asnumpy()).all()
    assert (ret[1].asnumpy() == sens.asnumpy() * -1).all()


def test_float16_tensor_and_float32_tensors_sub_grad():
    class Net(nn.Cell):
        def __init__(self):
            super(Net, self).__init__()

        def construct(self, x, y):
            return x - y

    class GradNet(nn.Cell):
        def __init__(self, net):
            super(GradNet, self).__init__()
            self.net = net

        def construct(self, x, y, sens):
            return grad_all_with_sens(self.net)(x, y, sens)

    x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float16))
    y = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float32))
    sens = Tensor(np.array([[1.0, 2.0, 0.0], [0.0, 3.0, 4.0]], dtype=np.float32))
    net = Net()
    grad_net = GradNet(net)
    ret = grad_net(x, y, sens)
    assert ret[0].dtype == x.dtype
    assert ret[1].dtype == y.dtype
    assert (ret[0].asnumpy() == sens.asnumpy()).all()
    assert (ret[1].asnumpy() == sens.asnumpy() * -1).all()


def test_float_tensor_and_int_add_grad():
    class Net(nn.Cell):
        def __init__(self):
            super(Net, self).__init__()

        def construct(self, x):
            return x + 2

    class GradNet(nn.Cell):
        def __init__(self, net):
            super(GradNet, self).__init__()
            self.net = net

        def construct(self, x, sens):
            return grad_all_with_sens(self.net)(x, sens)

    x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
    sens = Tensor(np.array([[1.0, 2.0, 0.0], [0.0, 3.0, 4.0]], dtype=np.float32))
    net = Net()
    grad_net = GradNet(net)
    ret = grad_net(x, sens)
    assert ret[0].dtype == x.dtype
    assert (ret[0].asnumpy() == sens.asnumpy()).all()


def test_int8_tensor_and_uint8_tensors_add_grad():
    class Net(nn.Cell):
        def __init__(self):
            super(Net, self).__init__()

        def construct(self, x, y):
            return x + y

    class GradNet(nn.Cell):
        def __init__(self, net):
            super(GradNet, self).__init__()
            self.net = net

        def construct(self, x, y, sens):
            return grad_all_with_sens(self.net)(x, y, sens)

    x = Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int8))
    y = Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8))
    sens = Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int16))
    net = Net()
    grad_net = GradNet(net)
    ret = grad_net(x, y, sens)
    assert ret[0].dtype == x.dtype
    assert ret[1].dtype == y.dtype
    assert (ret[0].asnumpy() == sens.asnumpy()).all()
    assert (ret[1].asnumpy() == sens.asnumpy()).all()


class AssignCheck(nn.Cell):
    """ Net that assigns a uint8 input to an int8 Parameter/Tensor to check signature casting """

    def __init__(self):
        super(AssignCheck, self).__init__()
        self.cov_step = Parameter(0.0, name="cov_step", requires_grad=False)

    def construct(self, x, y):
        F.assign(self.cov_step, y)
        F.assign(x, y)
        return x


def test_assign_check_in_sig():
    net = AssignCheck()
    x = Tensor(2, ms.int8)
    y = Tensor(3, ms.uint8)
    with pytest.raises(RuntimeError) as e:
        net(x, y)
    assert "can not cast automatically" in e.value.args[0]