# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
15""" test math ops """
16import functools
17
18import numpy as np
19
20import mindspore as ms
21import mindspore.context as context
22import mindspore.nn as nn
23from mindspore import Tensor
24from mindspore.common import dtype as mstype
25from mindspore.ops import composite as C
26from mindspore.ops import operations as P
27from mindspore.ops import functional as F
28from mindspore.ops import prim_attr_register, PrimitiveWithInfer
29from ..ut_filter import non_graph_engine
30from ....mindspore_test_framework.mindspore_test import mindspore_test
31from ....mindspore_test_framework.pipeline.forward.compile_forward \
32    import pipeline_for_compile_forward_ge_graph_for_case_by_case_config
33from ....mindspore_test_framework.pipeline.forward.verify_exception \
34    import pipeline_for_verify_exception_for_case_by_case_config
35
context.set_context(mode=context.GRAPH_MODE)

# pylint: disable=W0613
# pylint: disable=W0231
# W0613: unused-argument
# W0231: super-init-not-called

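# C.GradOperation() with default arguments builds a higher-order function:
# grad(net)(*inputs) computes the gradient of net's output with respect to
# the first input only (get_all=False), so the GradWrap cells below
# differentiate their wrapped network w.r.t. its first argument.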
grad = C.GradOperation()


def test_multiply():
    """ test_multiply """
    input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]))
    input_y = Tensor(np.array([[0.1, 0.3, -3.6], [0.4, 0.5, -3.2]]))

    mul = P.Mul()
    result = mul(input_x, input_y)
    expect = np.array([[-0.01, 0.09, -12.96], [0.16, 0.25, 10.24]])
    diff = result.asnumpy() - expect
    error = np.ones(shape=[2, 3]) * 1.0e-6
    assert np.all(diff < error)
    assert np.all(-diff < error)


def test_sub():
    """ test_sub """
    input_x = Tensor(np.ones(shape=[3]))
    input_y = Tensor(np.zeros(shape=[3]))

    sub = P.Sub()
    result = sub(input_x, input_y)
    expect = np.ones(shape=[3])
    assert np.all(result.asnumpy() == expect)


def test_square():
    """ test_square """
    input_tensor = Tensor(np.array([[1, 2, 3], [4, 5, 6]]))
    square = P.Square()
    result = square(input_tensor)
    expect = np.array([[1, 4, 9], [16, 25, 36]])
    assert np.all(result.asnumpy() == expect)


def test_sqrt():
    """ test_sqrt """
    input_tensor = Tensor(np.array([[4, 4], [9, 9]]))

    sqrt = P.Sqrt()
    expect = np.array([[2, 2], [3, 3]])
    result = sqrt(input_tensor)
    assert np.all(result.asnumpy() == expect)


class PowNet(nn.Cell):
    """ PowNet definition """

    def __init__(self):
        super(PowNet, self).__init__()
        self.pow = P.Pow()

    def construct(self, x, y):
        return self.pow(x, y)


def test_pow():
    """ test_pow """
    input_tensor = Tensor(np.array([[2, 2], [3, 3]]))
    power = Tensor(np.array(3.0, np.int64))
    power2 = Tensor(np.array(True, np.bool_))  # the bare np.bool alias was removed in NumPy 1.24
    testpow = P.Pow()
    expect = np.array([[8, 8], [27, 27]])
    result = testpow(input_tensor, power)
    assert np.all(result.asnumpy() == expect)
    net = PowNet()
    net(input_tensor, power2)


def test_exp():
    """ test_exp """
    input_tensor = Tensor(np.array([[2, 2], [3, 3]]))
    testexp = P.Exp()
    result = testexp(input_tensor)
    expect = np.exp(np.array([[2, 2], [3, 3]]))
    assert np.all(result.asnumpy() == expect)


def test_realdiv():
    """ test_realdiv """
    x = Tensor(2048.0)
    y = Tensor(128.0)
    div = P.RealDiv()
    result = div(x, y)
    x = x.asnumpy()
    y = y.asnumpy()
    expect = x / y
    assert np.all(result.asnumpy() == expect)


def test_eye():
    """ test_eye """
    x = np.arange(3)
    expect = np.ones_like(x)
    expect = np.diag(expect)
    eye = P.Eye()
    eye_output = eye(3, 3, ms.float32)
    assert np.all(eye_output.asnumpy() == expect)


class VirtualLossGrad(PrimitiveWithInfer):
    """ VirtualLossGrad definition """

    @prim_attr_register
    def __init__(self):
        """init VirtualLossGrad"""

    def __call__(self, x, out, dout):
        raise NotImplementedError

    def infer_shape(self, x_shape, out_shape, dout_shape):
        return x_shape

    def infer_dtype(self, x_dtype, out_dtype, dout_dtype):
        return x_dtype


class VirtualLoss(PrimitiveWithInfer):
    """ VirtualLoss definition """

    @prim_attr_register
    def __init__(self):
        """init VirtualLoss"""

    def __call__(self, x):
        raise NotImplementedError

    def get_bprop(self):
        loss_grad = VirtualLossGrad()

        def bprop(x, out, dout):
            dx = loss_grad(x, out, dout)
            return (dx,)

        return bprop

    def infer_shape(self, x_shape):
        return [1]

    def infer_dtype(self, x_dtype):
        return x_dtype
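
# VirtualLoss and VirtualLossGrad above are compile-time stand-ins: VirtualLoss
# declares a [1]-shaped output so any network gains a scalar-like head that
# C.GradOperation can differentiate, and its custom bprop routes the incoming
# gradient through VirtualLossGrad. Their __call__ methods raise
# NotImplementedError because these cases are only compiled, never run eagerly.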


class NetWithLoss(nn.Cell):
    """ NetWithLoss definition """

    def __init__(self, network):
        super(NetWithLoss, self).__init__()
        self.loss = VirtualLoss()
        self.network = network

    def construct(self, x, y, b):
        predict = self.network(x, y, b)
        return self.loss(predict)


class GradWrap(nn.Cell):
    """ GradWrap definition """

    def __init__(self, network):
        super(GradWrap, self).__init__()
        self.network = network

    def construct(self, x, y, b):
        return grad(self.network)(x, y, b)


class MatMulNet(nn.Cell):
    """ MatMulNet definition """

    def __init__(self):
        super(MatMulNet, self).__init__()
        self.matmul = P.MatMul()
        self.biasAdd = P.BiasAdd()

    def construct(self, x, y, b):
        return self.biasAdd(self.matmul(x, y), b)


class NetWithLossSub(nn.Cell):
    """ NetWithLossSub definition """

    def __init__(self, network):
        super(NetWithLossSub, self).__init__()
        self.loss = VirtualLoss()
        self.network = network

    def construct(self, x, y):
        predict = self.network(x, y)
        return self.loss(predict)


class GradWrapSub(nn.Cell):
    """ GradWrapSub definition """

    def __init__(self, network):
        super(GradWrapSub, self).__init__()
        self.network = network

    def construct(self, x, y):
        return grad(self.network)(x, y)


class SubNet(nn.Cell):
    """ SubNet definition """

    def __init__(self):
        super(SubNet, self).__init__()
        self.sub = P.Sub()

    def construct(self, x, y):
        return self.sub(x, y)


class NpuFloatNet(nn.Cell):
    """ NpuFloatNet definition: Ascend float-status overflow check """

    def __init__(self):
        super(NpuFloatNet, self).__init__()
        self.mul = P.Mul()
        self.alloc_status = P.NPUAllocFloatStatus()
        self.get_status = P.NPUGetFloatStatus()
        self.clear_status = P.NPUClearFloatStatus()
        self.fill = P.Fill()
        self.shape_op = P.Shape()
        self.select = P.Select()
        self.less = P.Less()
        self.cast = P.Cast()
        self.dtype = P.DType()
        self.reduce_sum = P.ReduceSum(keep_dims=True)
        self.sub = P.Sub()
        self.neg = P.Neg()

    def construct(self, x):
        init = self.alloc_status()
        clear_status = self.clear_status(init)
        x = F.depend(x, clear_status)  # let x depend on clear_status
        res = self.sub(x, self.neg(x))
        init = F.depend(init, res)  # let get_status depend on res
        get_status = self.get_status(init)
        init = F.depend(init, get_status)  # let reduce_sum depend on get_status
        flag_sum = self.reduce_sum(init, (0,))
        base = self.cast(self.fill(self.dtype(res), self.shape_op(res), 0.0), self.dtype(flag_sum))
        cond = self.less(base, flag_sum)
        out = self.select(cond, self.cast(base, self.dtype(res)), res)
        return out

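# A minimal usage sketch (assumed here; the actual cases live in
# test_case_math_ops below). When fp16 arithmetic overflows, the float-status
# register becomes non-zero, so flag_sum > 0 and Select returns zeros of the
# same shape instead of the overflowed result:
#   net = NpuFloatNet()
#   out = net(Tensor(np.full((8, 5, 3, 1), 65504, dtype=np.float16)))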

class DiagNet(nn.Cell):
    """ DiagNet definition """

    def __init__(self):
        super(DiagNet, self).__init__()
        self.fill = P.Fill()
        self.diag = P.Diag()

    def construct(self, x):
        return x - self.diag(self.fill(mstype.float32, (3,), 1.0))


class NetWithLossCumSum(nn.Cell):
    """ NetWithLossCumSum definition """

    def __init__(self, network):
        super(NetWithLossCumSum, self).__init__()
        self.loss = VirtualLoss()
        self.network = network

    def construct(self, input_):
        predict = self.network(input_)
        return self.loss(predict)


class GradWrapCumSum(nn.Cell):
    """ GradWrapCumSum definition """

    def __init__(self, network):
        super(GradWrapCumSum, self).__init__()
        self.network = network

    def construct(self, input_):
        return grad(self.network)(input_)


class NetCumSum(nn.Cell):
    """ NetCumSum definition """

    def __init__(self):
        super(NetCumSum, self).__init__()
        self.cumsum = P.CumSum()
        self.axis = 1

    def construct(self, input_):
        return self.cumsum(input_, self.axis)


class SignNet(nn.Cell):
    """ SignNet definition """

    def __init__(self):
        super(SignNet, self).__init__()
        self.sign = P.Sign()

    def construct(self, x):
        return self.sign(x)


class AssignAdd(nn.Cell):
    """ AssignAdd definition """

    def __init__(self):
        super().__init__()
        self.op = P.AssignAdd()
        self.inputdata = Parameter(initializer(1, [1], ms.float32), name="global_step")

    def construct(self, input_):
        self.inputdata = input_
        return self.op(self.inputdata, input_)


class FloorNet(nn.Cell):
    """ FloorNet definition """

    def __init__(self):
        super(FloorNet, self).__init__()
        self.floor = P.Floor()

    def construct(self, x):
        return self.floor(x)


class Log1pNet(nn.Cell):
    """ Log1pNet definition """

    def __init__(self):
        super(Log1pNet, self).__init__()
        self.log1p = P.Log1p()

    def construct(self, x):
        return self.log1p(x)


class ErfcNet(nn.Cell):
    """ ErfcNet definition """

    def __init__(self):
        super(ErfcNet, self).__init__()
        self.erfc = P.Erfc()

    def construct(self, x):
        return self.erfc(x)


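# Each case below maps a name to a config dict: 'block' is the Cell or
# primitive under test, 'desc_inputs' are the forward inputs, 'desc_bprop'
# (by framework convention) holds the gradient sensitivities, and
# 'skip': ['backward'] restricts the case to forward compilation only.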
test_case_math_ops = [
    ('MatMulGrad', {
        'block': GradWrap(NetWithLoss(MatMulNet())),
        'desc_inputs': [Tensor(np.ones([3, 3]).astype(np.int32)),
                        Tensor(np.ones([3, 3]).astype(np.int32)),
                        Tensor(np.ones([3]).astype(np.int32))],
        'desc_bprop': [Tensor(np.ones([3, 3]).astype(np.int32)),
                       Tensor(np.ones([3, 3]).astype(np.int32)),
                       Tensor(np.ones([3]).astype(np.int32))],
        'skip': ['backward']}),
    ('CumSumGrad', {
        'block': GradWrapCumSum(NetWithLossCumSum(NetCumSum())),
        'desc_inputs': [Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float16))],
        'desc_bprop': [Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float16))],
        'skip': ['backward']}),
    ('Diag', {
        'block': DiagNet(),
        'desc_inputs': [Tensor(np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]], np.float32))],
        'desc_bprop': [Tensor(np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]], np.float32))],
        'skip': ['backward']}),
    ('SubBroadcast', {
        'block': GradWrapSub(NetWithLossSub(SubNet())),
        'desc_inputs': [Tensor(np.ones([5, 3])), Tensor(np.ones([8, 5, 3]))],
        'desc_bprop': [Tensor(np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]], np.float32))],
        'skip': ['backward']}),
    ('NpuFloat_NotOverflow', {
        'block': NpuFloatNet(),
        'desc_inputs': [Tensor(np.full((8, 5, 3, 1), 655, dtype=np.float16), dtype=ms.float16)],
        'desc_bprop': [Tensor(np.full((8, 5, 3, 1), 655, dtype=np.float16), dtype=ms.float16)],
        'skip': ['backward']}),
    ('NpuFloat_Overflow', {
        'block': NpuFloatNet(),
        'desc_inputs': [Tensor(np.full((8, 5, 3, 1), 65504, dtype=np.float16), dtype=ms.float16)],
        'desc_bprop': [Tensor(np.full((8, 5, 3, 1), 65504, dtype=np.float16), dtype=ms.float16)],
        'skip': ['backward']}),
    ('Sign', {
        'block': SignNet(),
        'desc_inputs': [Tensor(np.array([[1., 0., -2.]], np.float32))],
        'desc_bprop': [Tensor(np.array([[1., 0., -2.]], np.float32))],
        'skip': ['backward']}),
    ('Floor', {
        'block': FloorNet(),
        'desc_inputs': [Tensor(np.array([[1., 0., -2.]], np.float32))],
        'desc_bprop': [Tensor(np.array([[1., 0., -2.]], np.float32))],
        'skip': ['backward']}),
    ('Log1p', {
        'block': Log1pNet(),
        'desc_inputs': [Tensor(np.array([[1.0, 2.0, 4.0]], np.float32))],
        'desc_bprop': [Tensor(np.array([[1.0, 2.0, 4.0]], np.float32))],
        'skip': ['backward']}),
    ('Erfc', {
        'block': ErfcNet(),
        'desc_inputs': [Tensor(np.array([[1.0, 2.0, 4.0]], np.float32))],
        'desc_bprop': [Tensor(np.array([[1.0, 2.0, 4.0]], np.float32))],
    }),
]

test_case_lists = [test_case_math_ops]
test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists)
# use -k to select certain test cases, e.g.
# pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm
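# in this file, e.g. `pytest ... ::test_exec -k Sign` would run only the
# 'Sign' case from test_case_math_ops.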


@non_graph_engine
@mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config)
def test_exec():
    context.set_context(mode=context.GRAPH_MODE)
    return test_exec_case


raise_set = [
    ('StridedSlice_1_Error', {
        'block': (lambda x: P.StridedSlice(begin_mask="1"), {'exception': TypeError}),
        'desc_inputs': [0]}),
    ('StridedSlice_2_Error', {
        'block': (lambda x: P.StridedSlice(end_mask="1"), {'exception': TypeError}),
        'desc_inputs': [0]}),
    ('StridedSlice_3_Error', {
        'block': (lambda x: P.StridedSlice(ellipsis_mask=1.1), {'exception': TypeError}),
        'desc_inputs': [0]}),
    ('StridedSlice_4_Error', {
        'block': (lambda x: P.StridedSlice(new_axis_mask="1.1"), {'exception': TypeError}),
        'desc_inputs': [0]}),
    ('AssignAdd_Error', {
        'block': (P.AssignAdd(), {'exception': ValueError}),
        'desc_inputs': [[1]]}),
]
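
# Each entry in raise_set pairs a block (or a lambda constructing one with
# invalid arguments) with the exception type it should raise; the
# verify-exception pipeline checks that building/running the block with
# desc_inputs raises that exception.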


@mindspore_test(pipeline_for_verify_exception_for_case_by_case_config)
def test_check_exception():
    return raise_set
478